path: root/arch/arm/include/asm/barrier.h
Diffstat (limited to 'arch/arm/include/asm/barrier.h')
-rw-r--r--arch/arm/include/asm/barrier.h86
1 files changed, 86 insertions, 0 deletions
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
new file mode 100644
index 00000000000..c6a3e73a6e2
--- /dev/null
+++ b/arch/arm/include/asm/barrier.h
@@ -0,0 +1,86 @@
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+#include <asm/outercache.h>
+
+#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
+#define sev() __asm__ __volatile__ ("sev" : : : "memory")
+#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+ : : "r" (0) : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+ : : "r" (0) : "memory")
+#elif defined(CONFIG_CPU_FA526)
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+ : : "r" (0) : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+#else
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+#endif
+
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#define mb() do { dsb(); outer_sync(); } while (0)
+#define rmb() dsb()
+#define wmb() do { dsb(st); outer_sync(); } while (0)
+#else
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#endif
+
+#ifndef CONFIG_SMP
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#else
+#define smp_mb() dmb(ish)
+#define smp_rmb() smp_mb()
+#define smp_wmb() dmb(ishst)
+#endif
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
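
A minimal usage sketch (not part of the patch above): the smp_store_release()/smp_load_acquire() pair defined in this header is typically used to publish data from one CPU and consume it on another. The payload, ready, publish() and consume() names below are hypothetical, added only for illustration, and assume this header is already included.

/* Illustrative sketch only; "payload", "ready", "publish" and "consume" are hypothetical. */
static int payload;
static int ready;

static void publish(int value)
{
	payload = value;		/* plain store of the data */
	smp_store_release(&ready, 1);	/* smp_mb(), then set the flag */
}

static int consume(void)
{
	if (smp_load_acquire(&ready))	/* read the flag, then smp_mb() */
		return payload;		/* the earlier store to payload is visible here */
	return 0;			/* not published yet */
}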
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c1044
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc_regs.h153
-rw-r--r--drivers/media/platform/davinci/isif.c1145
-rw-r--r--drivers/media/platform/davinci/isif_regs.h269
-rw-r--r--drivers/media/platform/davinci/vpbe.c872
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c1858
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c1597
-rw-r--r--drivers/media/platform/davinci/vpbe_osd_regs.h364
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c699
-rw-r--r--drivers/media/platform/davinci/vpbe_venc_regs.h177
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2044
-rw-r--r--drivers/media/platform/davinci/vpif.c487
-rw-r--r--drivers/media/platform/davinci/vpif.h688
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c1809
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h133
-rw-r--r--drivers/media/platform/davinci/vpif_display.c1481
-rw-r--r--drivers/media/platform/davinci/vpif_display.h127
-rw-r--r--drivers/media/platform/davinci/vpss.c533
-rw-r--r--drivers/media/platform/exynos-gsc/Makefile3
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c1266
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h534
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c795
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.c430
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.h172
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig77
-rw-r--r--drivers/media/platform/exynos4-is/Makefile17
-rw-r--r--drivers/media/platform/exynos4-is/common.c53
-rw-r--r--drivers/media/platform/exynos4-is/common.h16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c1917
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c1308
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h728
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-command.h137
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.c272
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.h248
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c149
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.h15
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.c898
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.h1025
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.c233
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.h164
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-sensor.c34
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-sensor.h56
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c998
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.h344
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c659
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.h44
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c786
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.h195
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.c349
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.h156
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c1729
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.h227
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c772
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.c842
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.h338
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c1498
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h210
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c1052
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.h26
-rw-r--r--drivers/media/platform/fsl-viu.c1693
-rw-r--r--drivers/media/platform/indycam.c378
-rw-r--r--drivers/media/platform/indycam.h93
-rw-r--r--drivers/media/platform/m2m-deinterlace.c1111
-rw-r--r--drivers/media/platform/marvell-ccic/Kconfig23
-rw-r--r--drivers/media/platform/marvell-ccic/Makefile6
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c656
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c2034
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h382
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c537
-rw-r--r--drivers/media/platform/mem2mem_testdev.c1090
-rw-r--r--drivers/media/platform/mx2_emmaprp.c1016
-rw-r--r--drivers/media/platform/omap/Kconfig14
-rw-r--r--drivers/media/platform/omap/Makefile8
-rw-r--r--drivers/media/platform/omap/omap_vout.c2294
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c393
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.h40
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h225
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c357
-rw-r--r--drivers/media/platform/omap/omap_voutlib.h39
-rw-r--r--drivers/media/platform/omap3isp/Makefile11
-rw-r--r--drivers/media/platform/omap3isp/cfa_coef_table.h61
-rw-r--r--drivers/media/platform/omap3isp/gamma_table.h90
-rw-r--r--drivers/media/platform/omap3isp/isp.c2406
-rw-r--r--drivers/media/platform/omap3isp/isp.h356
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c2586
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.h176
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c1179
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.h98
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c1329
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.h165
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c353
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h53
-rw-r--r--drivers/media/platform/omap3isp/isph3a.h117
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c352
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c407
-rw-r--r--drivers/media/platform/omap3isp/isphist.c521
-rw-r--r--drivers/media/platform/omap3isp/isphist.h40
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c2376
-rw-r--r--drivers/media/platform/omap3isp/isppreview.h174
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h1531
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c1794
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.h146
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c1073
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h168
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c1407
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h219
-rw-r--r--drivers/media/platform/omap3isp/luma_enhance_table.h42
-rw-r--r--drivers/media/platform/omap3isp/noise_filter_table.h30
-rw-r--r--drivers/media/platform/s3c-camif/Makefile5
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c1681
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c665
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h393
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c606
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.h269
-rw-r--r--drivers/media/platform/s5p-g2d/Makefile3
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-hw.c117
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-regs.h122
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c825
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h96
-rw-r--r--drivers/media/platform/s5p-jpeg/Makefile2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c2208
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h209
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c279
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h42
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c343
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h63
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h377
-rw-r--r--drivers/media/platform/s5p-mfc/Makefile6
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h409
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v7.h60
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v8.h124
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h459
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c1511
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c29
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h35
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c166
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c159
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h714
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c464
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h34
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_debug.h48
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c1223
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.h24
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c2130
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.h24
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_intr.c91
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_intr.h26
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c67
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h343
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c1743
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h85
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c2342
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h45
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c117
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.h24
-rw-r--r--drivers/media/platform/s5p-tv/Kconfig85
-rw-r--r--drivers/media/platform/s5p-tv/Makefile19
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c1054
-rw-r--r--drivers/media/platform/s5p-tv/hdmiphy_drv.c325
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h366
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c521
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c270
-rw-r--r--drivers/media/platform/s5p-tv/mixer_reg.c553
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c1153
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c241
-rw-r--r--drivers/media/platform/s5p-tv/regs-hdmi.h146
-rw-r--r--drivers/media/platform/s5p-tv/regs-mixer.h122
-rw-r--r--drivers/media/platform/s5p-tv/regs-sdo.h63
-rw-r--r--drivers/media/platform/s5p-tv/regs-vp.h88
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c490
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c408
-rw-r--r--drivers/media/platform/sh_veu.c1250
-rw-r--r--drivers/media/platform/sh_vou.c1462
-rw-r--r--drivers/media/platform/soc_camera/Kconfig91
-rw-r--r--drivers/media/platform/soc_camera/Makefile17
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c1016
-rw-r--r--drivers/media/platform/soc_camera/mx1_camera.c866
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c1627
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c1270
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c1723
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c1817
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c1498
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2039
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c403
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c2102
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c194
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c529
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c402
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.h47
-rw-r--r--drivers/media/platform/ti-vpe/Makefile5
-rw-r--r--drivers/media/platform/ti-vpe/csc.c196
-rw-r--r--drivers/media/platform/ti-vpe/csc.h68
-rw-r--r--drivers/media/platform/ti-vpe/sc.c311
-rw-r--r--drivers/media/platform/ti-vpe/sc.h208
-rw-r--r--drivers/media/platform/ti-vpe/sc_coeff.h1342
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c912
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h216
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h641
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2388
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h309
-rw-r--r--drivers/media/platform/timblogiw.c871
-rw-r--r--drivers/media/platform/via-camera.c1472
-rw-r--r--drivers/media/platform/via-camera.h93
-rw-r--r--drivers/media/platform/vino.c4352
-rw-r--r--drivers/media/platform/vino.h138
-rw-r--r--drivers/media/platform/vivi.c1541
-rw-r--r--drivers/media/platform/vsp1/Makefile6
-rw-r--r--drivers/media/platform/vsp1/vsp1.h82
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c395
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.h39
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c581
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c191
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h92
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c219
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c237
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.h37
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c251
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h695
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c226
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c220
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h67
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c355
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.h41
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c344
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.h40
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c1102
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h145
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c243
246 files changed, 141699 insertions, 0 deletions
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
new file mode 100644
index 00000000000..8108c698b54
--- /dev/null
+++ b/drivers/media/platform/Kconfig
@@ -0,0 +1,266 @@
+#
+# Platform drivers
+# All drivers here are currently for webcam support
+
+menuconfig V4L_PLATFORM_DRIVERS
+ bool "V4L platform devices"
+ depends on MEDIA_CAMERA_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable support for platform-specific V4L drivers.
+
+if V4L_PLATFORM_DRIVERS
+
+source "drivers/media/platform/marvell-ccic/Kconfig"
+
+config VIDEO_VIA_CAMERA
+ tristate "VIAFB camera controller support"
+ depends on FB_VIA
+ select VIDEOBUF_DMA_SG
+ select VIDEO_OV7670
+ help
+ Driver support for the integrated camera controller in VIA
+ Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
+ with ov7670 sensors.
+
+#
+# Platform multimedia device configuration
+#
+
+source "drivers/media/platform/davinci/Kconfig"
+
+source "drivers/media/platform/omap/Kconfig"
+
+source "drivers/media/platform/blackfin/Kconfig"
+
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on MEDIA_CAMERA_SUPPORT
+ depends on VIDEO_DEV && I2C && HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
+config VIDEO_VIU
+ tristate "Freescale VIU Video Driver"
+ depends on VIDEO_V4L2 && PPC_MPC512x
+ select VIDEOBUF_DMA_CONTIG
+ default y
+ ---help---
+ Support for Freescale VIU video driver. This device captures
+ video data, or overlays video on DIU frame buffer.
+
+ Say Y here if you want to enable the VIU device on MPC5121e Rev2+.
+ If unsure, say N.
+
+config VIDEO_TIMBERDALE
+ tristate "Support for timberdale Video In/LogiWIN"
+ depends on MFD_TIMBERDALE && VIDEO_V4L2 && I2C && DMADEVICES
+ select DMA_ENGINE
+ select TIMB_DMA
+ select VIDEO_ADV7180
+ select VIDEOBUF_DMA_CONTIG
+ ---help---
+ Add support for the Video In peripheral of the timberdale FPGA.
+
+config VIDEO_VINO
+ tristate "SGI Vino Video For Linux"
+ depends on I2C && SGI_IP22 && VIDEO_V4L2
+ select VIDEO_SAA7191 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to build in support for the Vino video input system found
+ on SGI Indy machines.
+
+config VIDEO_M32R_AR
+ tristate "AR devices"
+ depends on M32R && VIDEO_V4L2
+ ---help---
+ This is a video4linux driver for the Renesas AR (Artificial Retina)
+ camera module.
+
+config VIDEO_M32R_AR_M64278
+ tristate "AR device with color module M64278(VGA)"
+ depends on PLAT_M32700UT
+ select VIDEO_M32R_AR
+ ---help---
+ This is a video4linux driver for the Renesas AR (Artificial
+ Retina) with M64278E-800 camera module.
+ This module supports VGA(640x480 pixels) resolutions.
+
+ To compile this driver as a module, choose M here: the
+ module will be called arv.
+
+config VIDEO_OMAP3
+ tristate "OMAP 3 Camera support"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
+ select ARM_DMA_USE_IOMMU
+ select OMAP_IOMMU
+ ---help---
+ Driver for an OMAP 3 camera controller.
+
+config VIDEO_OMAP3_DEBUG
+ bool "OMAP 3 Camera debug messages"
+ depends on VIDEO_OMAP3
+ ---help---
+ Enable debug messages on OMAP 3 camera controller driver.
+
+config VIDEO_S3C_CAMIF
+ tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on (ARCH_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
+ host interface (CAMIF).
+
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
+
+source "drivers/media/platform/soc_camera/Kconfig"
+source "drivers/media/platform/exynos4-is/Kconfig"
+source "drivers/media/platform/s5p-tv/Kconfig"
+
+endif # V4L_PLATFORM_DRIVERS
+
+menuconfig V4L_MEM2MEM_DRIVERS
+ bool "Memory-to-memory multimedia devices"
+ depends on VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable selecting drivers for V4L devices that
+ use system memory for both source and destination buffers, as opposed
+ to capture and output drivers, which use memory buffers for just
+ one of those.
+
+if V4L_MEM2MEM_DRIVERS
+
+config VIDEO_CODA
+ tristate "Chips&Media Coda multi-standard codec IP"
+ depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC
+ select SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ Coda is a range of video codec IPs that supports
+ H.264, MPEG-4, and other video formats.
+
+config VIDEO_MEM2MEM_DEINTERLACE
+ tristate "Deinterlace support"
+ depends on VIDEO_DEV && VIDEO_V4L2 && DMA_ENGINE
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Generic deinterlacing V4L2 driver.
+
+config VIDEO_SAMSUNG_S5P_G2D
+ tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
+ 2d graphics accelerator.
+
+config VIDEO_SAMSUNG_S5P_JPEG
+ tristate "Samsung S5P/Exynos4 JPEG codec driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a v4l2 driver for Samsung S5P and EXYNOS4 JPEG codec
+
+config VIDEO_SAMSUNG_S5P_MFC
+ tristate "Samsung S5P MFC Video Codec"
+ depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ help
+ MFC 5.1 and 6.x driver for V4L2
+
+config VIDEO_MX2_EMMAPRP
+ tristate "MX2 eMMa-PrP support"
+ depends on VIDEO_DEV && VIDEO_V4L2 && SOC_IMX27
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ MX2X chips have a PrP that can be used to process buffers from
+ memory to memory. Operations include resizing and format
+ conversion.
+
+config VIDEO_SAMSUNG_EXYNOS_GSC
+ tristate "Samsung Exynos G-Scaler driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_EXYNOS5
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for Samsung EXYNOS5 SoC G-Scaler.
+
+config VIDEO_SH_VEU
+ tristate "SuperH VEU mem2mem video processing driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Video Engine Unit (VEU) on SuperH and
+ SH-Mobile SoCs.
+
+config VIDEO_RENESAS_VSP1
+ tristate "Renesas VSP1 Video Processing Engine"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a V4L2 driver for the Renesas VSP1 video processing engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vsp1.
+
+config VIDEO_TI_VPE
+ tristate "TI VPE (Video Processing Engine) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ Support for the TI VPE (Video Processing Engine) block
+ found on DRA7XX SoC.
+
+config VIDEO_TI_VPE_DEBUG
+ bool "VPE debug messages"
+ depends on VIDEO_TI_VPE
+ ---help---
+ Enable debug messages on VPE driver.
+
+endif # V4L_MEM2MEM_DRIVERS
+
+menuconfig V4L_TEST_DRIVERS
+ bool "Media test drivers"
+ depends on MEDIA_CAMERA_SUPPORT
+
+if V4L_TEST_DRIVERS
+config VIDEO_VIVI
+ tristate "Virtual Video Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
+ select FONT_SUPPORT
+ select FONT_8x16
+ select VIDEOBUF2_VMALLOC
+ default n
+ ---help---
+ Enables a virtual video driver. This device shows a color bar
+ and a timestamp, as a real device would generate through the
+ V4L2 API.
+ Say Y here if you want to test video applications or debug V4L
+ devices. If unsure, say N.
+
+config VIDEO_MEM2MEM_TESTDEV
+ tristate "Virtual test device for mem2mem framework"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a virtual test device for the memory-to-memory driver
+ framework.
+endif # V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
new file mode 100644
index 00000000000..e5269da9190
--- /dev/null
+++ b/drivers/media/platform/Makefile
@@ -0,0 +1,54 @@
+#
+# Makefile for the video capture/playback device drivers.
+#
+
+obj-$(CONFIG_VIDEO_VINO) += indycam.o
+obj-$(CONFIG_VIDEO_VINO) += vino.o
+
+obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
+obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
+
+obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
+
+obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
+
+obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
+obj-$(CONFIG_VIDEO_VIVI) += vivi.o
+
+obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
+
+obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
+
+obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
+obj-$(CONFIG_VIDEO_CODA) += coda.o
+
+obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
+
+obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
+
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
+obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS) += exynos4-is/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV) += s5p-tv/
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
+obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc/
+
+obj-$(CONFIG_BLACKFIN) += blackfin/
+
+obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+
+obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
+
+obj-$(CONFIG_SOC_CAMERA) += soc_camera/
+
+obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
+
+obj-y += davinci/
+
+obj-$(CONFIG_ARCH_OMAP) += omap/
+
+ccflags-y += -I$(srctree)/drivers/media/i2c
diff --git a/drivers/media/platform/arv.c b/drivers/media/platform/arv.c
new file mode 100644
index 00000000000..e9410e41ae0
--- /dev/null
+++ b/drivers/media/platform/arv.c
@@ -0,0 +1,885 @@
+/*
+ * Colour AR M64278(VGA) driver for Video4Linux
+ *
+ * Copyright (C) 2003 Takeo Takahashi <takahashi.takeo@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Some code is taken from AR driver sample program for M3T-M32700UT.
+ *
+ * AR driver sample (M32R SDK):
+ * Copyright (c) 2003 RENESAS TECHNOROGY CORPORATION
+ * AND RENESAS SOLUTIONS CORPORATION
+ * All Rights Reserved.
+ *
+ * 2003-09-01: Support w3cam by Takeo Takahashi
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <linux/mutex.h>
+
+#include <asm/uaccess.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#if 0
+#define DEBUG(n, args...) printk(KERN_INFO args)
+#define CHECK_LOST 1
+#else
+#define DEBUG(n, args...)
+#define CHECK_LOST 0
+#endif
+
+/*
+ * USE_INT is always 0, interrupt mode is not available
+ * on linux due to lack of speed
+ */
+#define USE_INT 0 /* Don't modify */
+
+#define VERSION "0.0.5"
+
+#define ar_inl(addr) inl((unsigned long)(addr))
+#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr))
+
+extern struct cpuinfo_m32r boot_cpu_data;
+
+/*
+ * CCD pixel size
+ * Note that M32700UT does not support CIF mode, but QVGA is
+ * supported by M32700UT hardware using VGA mode of AR LSI.
+ *
+ * Supported: VGA (Normal mode, Interlace mode)
+ * QVGA (Always Interlace mode of VGA)
+ *
+ */
+#define AR_WIDTH_VGA 640
+#define AR_HEIGHT_VGA 480
+#define AR_WIDTH_QVGA 320
+#define AR_HEIGHT_QVGA 240
+#define MIN_AR_WIDTH AR_WIDTH_QVGA
+#define MIN_AR_HEIGHT AR_HEIGHT_QVGA
+#define MAX_AR_WIDTH AR_WIDTH_VGA
+#define MAX_AR_HEIGHT AR_HEIGHT_VGA
+
+/* bits & bytes per pixel */
+#define AR_BITS_PER_PIXEL 16
+#define AR_BYTES_PER_PIXEL (AR_BITS_PER_PIXEL / 8)
+
+/* line buffer size */
+#define AR_LINE_BYTES_VGA (AR_WIDTH_VGA * AR_BYTES_PER_PIXEL)
+#define AR_LINE_BYTES_QVGA (AR_WIDTH_QVGA * AR_BYTES_PER_PIXEL)
+#define MAX_AR_LINE_BYTES AR_LINE_BYTES_VGA
+
+/* frame size & type */
+#define AR_FRAME_BYTES_VGA \
+ (AR_WIDTH_VGA * AR_HEIGHT_VGA * AR_BYTES_PER_PIXEL)
+#define AR_FRAME_BYTES_QVGA \
+ (AR_WIDTH_QVGA * AR_HEIGHT_QVGA * AR_BYTES_PER_PIXEL)
+#define MAX_AR_FRAME_BYTES \
+ (MAX_AR_WIDTH * MAX_AR_HEIGHT * AR_BYTES_PER_PIXEL)
+
+#define AR_MAX_FRAME 15
+
+/* capture size */
+#define AR_SIZE_VGA 0
+#define AR_SIZE_QVGA 1
+
+/* capture mode */
+#define AR_MODE_INTERLACE 0
+#define AR_MODE_NORMAL 1
+
+struct ar {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ int start_capture; /* during capture in INT. mode */
+#if USE_INT
+ unsigned char *line_buff; /* DMA line buffer */
+#endif
+ unsigned char *frame[MAX_AR_HEIGHT]; /* frame data */
+ short size; /* capture size */
+ short mode; /* capture mode */
+ int width, height;
+ int frame_bytes, line_bytes;
+ wait_queue_head_t wait;
+ struct mutex lock;
+};
+
+static struct ar ardev;
+
+static int video_nr = -1; /* video device number (first free) */
+static unsigned char yuv[MAX_AR_FRAME_BYTES];
+
+/* module parameters */
+/* default frequency */
+#define DEFAULT_FREQ 50 /* 50 or 75 (MHz) is available as BCLK */
+static int freq = DEFAULT_FREQ; /* BCLK: available 50 or 75 (MHz) */
+static int vga; /* default mode (0:QVGA mode, other:VGA mode) */
+static int vga_interlace; /* 0 is normal mode, else interlace mode */
+module_param(freq, int, 0);
+module_param(vga, int, 0);
+module_param(vga_interlace, int, 0);
+
+static void wait_for_vsync(void)
+{
+ while (ar_inl(ARVCR0) & ARVCR0_VDS) /* wait for VSYNC */
+ cpu_relax();
+ while (!(ar_inl(ARVCR0) & ARVCR0_VDS)) /* wait for VSYNC */
+ cpu_relax();
+}
+
+static void wait_acknowledge(void)
+{
+ int i;
+
+ for (i = 0; i < 1000; i++)
+ cpu_relax();
+ while (ar_inl(PLDI2CSTS) & PLDI2CSTS_NOACK)
+ cpu_relax();
+}
+
+/*******************************************************************
+ * I2C functions
+ *******************************************************************/
+static void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
+ unsigned long data3)
+{
+ int i;
+
+ /* Slave Address */
+ ar_outl(addr, PLDI2CDATA);
+ wait_for_vsync();
+
+ /* Start */
+ ar_outl(1, PLDI2CCND);
+ wait_acknowledge();
+
+ /* Transfer data 1 */
+ ar_outl(data1, PLDI2CDATA);
+ wait_for_vsync();
+ ar_outl(PLDI2CSTEN_STEN, PLDI2CSTEN);
+ wait_acknowledge();
+
+ /* Transfer data 2 */
+ ar_outl(data2, PLDI2CDATA);
+ wait_for_vsync();
+ ar_outl(PLDI2CSTEN_STEN, PLDI2CSTEN);
+ wait_acknowledge();
+
+ if (n == 3) {
+ /* Transfer data 3 */
+ ar_outl(data3, PLDI2CDATA);
+ wait_for_vsync();
+ ar_outl(PLDI2CSTEN_STEN, PLDI2CSTEN);
+ wait_acknowledge();
+ }
+
+ /* Stop */
+ for (i = 0; i < 100; i++)
+ cpu_relax();
+ ar_outl(2, PLDI2CCND);
+ ar_outl(2, PLDI2CCND);
+
+ while (ar_inl(PLDI2CSTS) & PLDI2CSTS_BB)
+ cpu_relax();
+}
+
+
+static void init_iic(void)
+{
+ DEBUG(1, "init_iic:\n");
+
+ /*
+ * ICU Setting (iic)
+ */
+ /* I2C Setting */
+ ar_outl(0x0, PLDI2CCR); /* I2CCR Disable */
+ ar_outl(0x0300, PLDI2CMOD); /* I2CMOD ACK/8b-data/7b-addr/auto */
+ ar_outl(0x1, PLDI2CACK); /* I2CACK ACK */
+
+ /* I2C CLK */
+ /* 50MH-100k */
+ if (freq == 75)
+ ar_outl(369, PLDI2CFREQ); /* BCLK = 75MHz */
+ else if (freq == 50)
+ ar_outl(244, PLDI2CFREQ); /* BCLK = 50MHz */
+ else
+ ar_outl(244, PLDI2CFREQ); /* default: BCLK = 50MHz */
+ ar_outl(0x1, PLDI2CCR); /* I2CCR Enable */
+}
+
+/**************************************************************************
+ *
+ * Video4Linux Interface functions
+ *
+ **************************************************************************/
+
+static inline void disable_dma(void)
+{
+ ar_outl(0x8000, M32R_DMAEN_PORTL); /* disable DMA0 */
+}
+
+static inline void enable_dma(void)
+{
+ ar_outl(0x8080, M32R_DMAEN_PORTL); /* enable DMA0 */
+}
+
+static inline void clear_dma_status(void)
+{
+ ar_outl(0x8000, M32R_DMAEDET_PORTL); /* clear status */
+}
+
+static void wait_for_vertical_sync(struct ar *ar, int exp_line)
+{
+#if CHECK_LOST
+ int tmout = 10000; /* FIXME */
+ int l;
+
+ /*
+ * check HCOUNT because we cannot check vertical sync.
+ */
+ for (; tmout >= 0; tmout--) {
+ l = ar_inl(ARVHCOUNT);
+ if (l == exp_line)
+ break;
+ }
+ if (tmout < 0)
+ v4l2_err(&ar->v4l2_dev, "lost %d -> %d\n", exp_line, l);
+#else
+ while (ar_inl(ARVHCOUNT) != exp_line)
+ cpu_relax();
+#endif
+}
+
+static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+ struct ar *ar = video_drvdata(file);
+ long ret = ar->frame_bytes; /* return read bytes */
+ unsigned long arvcr1 = 0;
+ unsigned long flags;
+ unsigned char *p;
+ int h, w;
+ unsigned char *py, *pu, *pv;
+#if !USE_INT
+ int l;
+#endif
+
+ DEBUG(1, "ar_read()\n");
+
+ if (ar->size == AR_SIZE_QVGA)
+ arvcr1 |= ARVCR1_QVGA;
+ if (ar->mode == AR_MODE_NORMAL)
+ arvcr1 |= ARVCR1_NORMAL;
+
+ mutex_lock(&ar->lock);
+
+#if USE_INT
+ local_irq_save(flags);
+ disable_dma();
+ ar_outl(0xa1871300, M32R_DMA0CR0_PORTL);
+ ar_outl(0x01000000, M32R_DMA0CR1_PORTL);
+
+ /* set AR FIFO address as source(BSEL5) */
+ ar_outl(ARDATA32, M32R_DMA0CSA_PORTL);
+ ar_outl(ARDATA32, M32R_DMA0RSA_PORTL);
+ ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL); /* destination addr. */
+ ar_outl(ar->line_buff, M32R_DMA0RDA_PORTL); /* reload address */
+ ar_outl(ar->line_bytes, M32R_DMA0CBCUT_PORTL); /* byte count (bytes) */
+ ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL); /* reload count (bytes) */
+
+ /*
+ * Okay, kick AR LSI to invoke an interrupt
+ */
+ ar->start_capture = -1;
+ ar_outl(arvcr1 | ARVCR1_HIEN, ARVCR1);
+ local_irq_restore(flags);
+ /* .... AR interrupts .... */
+ wait_event_interruptible(ar->wait, ar->start_capture == 0);
+ if (signal_pending(current)) {
+ printk(KERN_ERR "arv: interrupted while getting frame data.\n");
+ ret = -EINTR;
+ goto out_up;
+ }
+#else /* ! USE_INT */
+ /* polling */
+ ar_outl(arvcr1, ARVCR1);
+ disable_dma();
+ ar_outl(0x8000, M32R_DMAEDET_PORTL);
+ ar_outl(0xa0861300, M32R_DMA0CR0_PORTL);
+ ar_outl(0x01000000, M32R_DMA0CR1_PORTL);
+ ar_outl(ARDATA32, M32R_DMA0CSA_PORTL);
+ ar_outl(ARDATA32, M32R_DMA0RSA_PORTL);
+ ar_outl(ar->line_bytes, M32R_DMA0CBCUT_PORTL);
+ ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL);
+
+ local_irq_save(flags);
+ while (ar_inl(ARVHCOUNT) != 0) /* wait for 0 */
+ cpu_relax();
+ if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
+ for (h = 0; h < ar->height; h++) {
+ wait_for_vertical_sync(ar, h);
+ if (h < (AR_HEIGHT_VGA/2))
+ l = h << 1;
+ else
+ l = (((h - (AR_HEIGHT_VGA/2)) << 1) + 1);
+ ar_outl(virt_to_phys(ar->frame[l]), M32R_DMA0CDA_PORTL);
+ enable_dma();
+ while (!(ar_inl(M32R_DMAEDET_PORTL) & 0x8000))
+ cpu_relax();
+ disable_dma();
+ clear_dma_status();
+ ar_outl(0xa0861300, M32R_DMA0CR0_PORTL);
+ }
+ } else {
+ for (h = 0; h < ar->height; h++) {
+ wait_for_vertical_sync(ar, h);
+ ar_outl(virt_to_phys(ar->frame[h]), M32R_DMA0CDA_PORTL);
+ enable_dma();
+ while (!(ar_inl(M32R_DMAEDET_PORTL) & 0x8000))
+ cpu_relax();
+ disable_dma();
+ clear_dma_status();
+ ar_outl(0xa0861300, M32R_DMA0CR0_PORTL);
+ }
+ }
+ local_irq_restore(flags);
+#endif /* ! USE_INT */
+
+ /*
+ * convert YUV422 to YUV422P
+ * +--------------------+
+ * | Y0,Y1,... |
+ * | ..............Yn |
+ * +--------------------+
+ * | U0,U1,........Un |
+ * +--------------------+
+ * | V0,V1,........Vn |
+ * +--------------------+
+ */
+ py = yuv;
+ pu = py + (ar->frame_bytes / 2);
+ pv = pu + (ar->frame_bytes / 4);
+ for (h = 0; h < ar->height; h++) {
+ p = ar->frame[h];
+ for (w = 0; w < ar->line_bytes; w += 4) {
+ *py++ = *p++;
+ *pu++ = *p++;
+ *py++ = *p++;
+ *pv++ = *p++;
+ }
+ }
+ if (copy_to_user(buf, yuv, ar->frame_bytes)) {
+ v4l2_err(&ar->v4l2_dev, "failed while copy_to_user yuv.\n");
+ ret = -EFAULT;
+ goto out_up;
+ }
+ DEBUG(1, "ret = %d\n", ret);
+out_up:
+ mutex_unlock(&ar->lock);
+ return ret;
+}
+
+static int ar_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
+{
+ struct ar *ar = video_drvdata(file);
+
+ strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int ar_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
+{
+ if (vin->index > 0)
+ return -EINVAL;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = V4L2_STD_ALL;
+ vin->status = 0;
+ return 0;
+}
+
+static int ar_g_input(struct file *file, void *fh, unsigned int *inp)
+{
+ *inp = 0;
+ return 0;
+}
+
+static int ar_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return inp ? -EINVAL : 0;
+}
+
+static int ar_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = ar->width;
+ pix->height = ar->height;
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->field = (ar->mode == AR_MODE_NORMAL) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->height <= AR_HEIGHT_QVGA || pix->width <= AR_WIDTH_QVGA) {
+ pix->height = AR_HEIGHT_QVGA;
+ pix->width = AR_WIDTH_QVGA;
+ pix->field = V4L2_FIELD_INTERLACED;
+ } else {
+ pix->height = AR_HEIGHT_VGA;
+ pix->width = AR_WIDTH_VGA;
+ pix->field = vga_interlace ? V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE;
+ }
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = ar_try_fmt_vid_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ mutex_lock(&ar->lock);
+ ar->width = pix->width;
+ ar->height = pix->height;
+ if (ar->width == AR_WIDTH_VGA) {
+ ar->size = AR_SIZE_VGA;
+ ar->frame_bytes = AR_FRAME_BYTES_VGA;
+ ar->line_bytes = AR_LINE_BYTES_VGA;
+ if (vga_interlace)
+ ar->mode = AR_MODE_INTERLACE;
+ else
+ ar->mode = AR_MODE_NORMAL;
+ } else {
+ ar->size = AR_SIZE_QVGA;
+ ar->frame_bytes = AR_FRAME_BYTES_QVGA;
+ ar->line_bytes = AR_LINE_BYTES_QVGA;
+ ar->mode = AR_MODE_INTERLACE;
+ }
+ /* Ok we figured out what to use from our wide choice */
+ mutex_unlock(&ar->lock);
+ return 0;
+}
+
+static int ar_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
+{
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "YUV 4:2:2 Planar", V4L2_PIX_FMT_YUV422P,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
+
+ if (fmt->index > 0)
+ return -EINVAL;
+
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
+}
+
+#if USE_INT
+/*
+ * Interrupt handler
+ */
+static void ar_interrupt(int irq, void *dev)
+{
+ struct ar *ar = dev;
+ unsigned int line_count;
+ unsigned int line_number;
+ unsigned int arvcr1;
+
+ line_count = ar_inl(ARVHCOUNT); /* line number */
+ if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
+ /* operations for interlace mode */
+ if (line_count < (AR_HEIGHT_VGA / 2)) /* even line */
+ line_number = (line_count << 1);
+ else /* odd line */
+ line_number =
+ (((line_count - (AR_HEIGHT_VGA / 2)) << 1) + 1);
+ } else {
+ line_number = line_count;
+ }
+
+ if (line_number == 0) {
+ /*
+ * It is an interrupt for line 0.
+ * we have to start capture.
+ */
+ disable_dma();
+#if 0
+ ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL); /* needless? */
+#endif
+ memcpy(ar->frame[0], ar->line_buff, ar->line_bytes);
+#if 0
+ ar_outl(0xa1861300, M32R_DMA0CR0_PORTL);
+#endif
+ enable_dma();
+ ar->start_capture = 1; /* during capture */
+ return;
+ }
+
+ if (ar->start_capture == 1 && line_number <= (ar->height - 1)) {
+ disable_dma();
+ memcpy(ar->frame[line_number], ar->line_buff, ar->line_bytes);
+
+ /*
+ * if all lines of a frame have been captured, disable the AR
+ * interrupt and wake the waiting process up.
+ */
+ if (line_number == (ar->height - 1)) { /* end of line */
+
+ ar->start_capture = 0;
+
+ /* disable AR interrupt request */
+ arvcr1 = ar_inl(ARVCR1);
+ arvcr1 &= ~ARVCR1_HIEN; /* clear int. flag */
+ ar_outl(arvcr1, ARVCR1); /* disable */
+ wake_up_interruptible(&ar->wait);
+ } else {
+#if 0
+ ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL);
+ ar_outl(0xa1861300, M32R_DMA0CR0_PORTL);
+#endif
+ enable_dma();
+ }
+ }
+}
+#endif
+
+/*
+ * ar_initialize()
+ * Initializes the AR LSI and peripherals.
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+static int ar_initialize(struct ar *ar)
+{
+ unsigned long cr = 0;
+ int i, found = 0;
+
+ DEBUG(1, "ar_initialize:\n");
+
+ /*
+ * initialize AR LSI
+ */
+ ar_outl(0, ARVCR0); /* assert reset of AR LSI */
+ for (i = 0; i < 0x18; i++) /* wait for over 10 cycles @ 27MHz */
+ cpu_relax();
+ ar_outl(ARVCR0_RST, ARVCR0); /* negate reset of AR LSI (enable) */
+ for (i = 0; i < 0x40d; i++) /* wait for over 420 cycles @ 27MHz */
+ cpu_relax();
+
+ /* AR uses INT3 of CPU as interrupt pin. */
+ ar_outl(ARINTSEL_INT3, ARINTSEL);
+
+ if (ar->size == AR_SIZE_QVGA)
+ cr |= ARVCR1_QVGA;
+ if (ar->mode == AR_MODE_NORMAL)
+ cr |= ARVCR1_NORMAL;
+ ar_outl(cr, ARVCR1);
+
+ /*
+ * Initialize IIC so that CPU can communicate with AR LSI,
+ * and send boot commands to AR LSI.
+ */
+ init_iic();
+
+ for (i = 0; i < 0x100000; i++) { /* > 0xa1d10, 56ms */
+ if ((ar_inl(ARVCR0) & ARVCR0_VDS)) { /* VSYNC */
+ found = 1;
+ break;
+ }
+ }
+
+ if (found == 0)
+ return -ENODEV;
+
+ v4l2_info(&ar->v4l2_dev, "Initializing ");
+
+ iic(2, 0x78, 0x11, 0x01, 0x00); /* start */
+ iic(3, 0x78, 0x12, 0x00, 0x06);
+ iic(3, 0x78, 0x12, 0x12, 0x30);
+ iic(3, 0x78, 0x12, 0x15, 0x58);
+ iic(3, 0x78, 0x12, 0x17, 0x30);
+ printk(KERN_CONT ".");
+ iic(3, 0x78, 0x12, 0x1a, 0x97);
+ iic(3, 0x78, 0x12, 0x1b, 0xff);
+ iic(3, 0x78, 0x12, 0x1c, 0xff);
+ iic(3, 0x78, 0x12, 0x26, 0x10);
+ iic(3, 0x78, 0x12, 0x27, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x34, 0x02, 0x00);
+ iic(2, 0x78, 0x7a, 0x10, 0x00);
+ iic(2, 0x78, 0x80, 0x39, 0x00);
+ iic(2, 0x78, 0x81, 0xe6, 0x00);
+ iic(2, 0x78, 0x8d, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x8e, 0x0c, 0x00);
+ iic(2, 0x78, 0x8f, 0x00, 0x00);
+#if 0
+ iic(2, 0x78, 0x90, 0x00, 0x00); /* AWB on=1 off=0 */
+#endif
+ iic(2, 0x78, 0x93, 0x01, 0x00);
+ iic(2, 0x78, 0x94, 0xcd, 0x00);
+ iic(2, 0x78, 0x95, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x96, 0xa0, 0x00);
+ iic(2, 0x78, 0x97, 0x00, 0x00);
+ iic(2, 0x78, 0x98, 0x60, 0x00);
+ iic(2, 0x78, 0x99, 0x01, 0x00);
+ iic(2, 0x78, 0x9a, 0x19, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x9b, 0x02, 0x00);
+ iic(2, 0x78, 0x9c, 0xe8, 0x00);
+ iic(2, 0x78, 0x9d, 0x02, 0x00);
+ iic(2, 0x78, 0x9e, 0x2e, 0x00);
+ iic(2, 0x78, 0xb8, 0x78, 0x00);
+ iic(2, 0x78, 0xba, 0x05, 0x00);
+#if 0
+ iic(2, 0x78, 0x83, 0x8c, 0x00); /* brightness */
+#endif
+ printk(KERN_CONT ".");
+
+ /* color correction */
+ iic(3, 0x78, 0x49, 0x00, 0x95); /* a */
+ iic(3, 0x78, 0x49, 0x01, 0x96); /* b */
+ iic(3, 0x78, 0x49, 0x03, 0x85); /* c */
+ iic(3, 0x78, 0x49, 0x04, 0x97); /* d */
+ iic(3, 0x78, 0x49, 0x02, 0x7e); /* e(Lo) */
+ iic(3, 0x78, 0x49, 0x05, 0xa4); /* f(Lo) */
+ iic(3, 0x78, 0x49, 0x06, 0x04); /* e(Hi) */
+ iic(3, 0x78, 0x49, 0x07, 0x04); /* e(Hi) */
+ iic(2, 0x78, 0x48, 0x01, 0x00); /* on=1 off=0 */
+
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x11, 0x00, 0x00); /* end */
+ printk(KERN_CONT " done\n");
+ return 0;
+}
+
+
+/****************************************************************************
+ *
+ * Video4Linux Module functions
+ *
+ ****************************************************************************/
+
+static const struct v4l2_file_operations ar_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .read = ar_read,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops ar_ioctl_ops = {
+ .vidioc_querycap = ar_querycap,
+ .vidioc_g_input = ar_g_input,
+ .vidioc_s_input = ar_s_input,
+ .vidioc_enum_input = ar_enum_input,
+ .vidioc_enum_fmt_vid_cap = ar_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = ar_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ar_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ar_try_fmt_vid_cap,
+};
+
+#define ALIGN4(x) ((((int)(x)) & 0x3) == 0)
+
+static int __init ar_init(void)
+{
+ struct ar *ar;
+ struct v4l2_device *v4l2_dev;
+ int ret;
+ int i;
+
+ ar = &ardev;
+ v4l2_dev = &ar->v4l2_dev;
+ strlcpy(v4l2_dev->name, "arv", sizeof(v4l2_dev->name));
+ v4l2_info(v4l2_dev, "Colour AR VGA driver %s\n", VERSION);
+
+ ret = v4l2_device_register(NULL, v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return ret;
+ }
+ ret = -EIO;
+
+#if USE_INT
+ /* allocate a DMA buffer for 1 line. */
+ ar->line_buff = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL | GFP_DMA);
+ if (ar->line_buff == NULL || !ALIGN4(ar->line_buff)) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for DMA.\n");
+ ret = -ENOMEM;
+ goto out_end;
+ }
+#endif
+ /* allocate buffers for a frame */
+ for (i = 0; i < MAX_AR_HEIGHT; i++) {
+ ar->frame[i] = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL);
+ if (ar->frame[i] == NULL || !ALIGN4(ar->frame[i])) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for frame.\n");
+ ret = -ENOMEM;
+ goto out_line_buff;
+ }
+ }
+
+ strlcpy(ar->vdev.name, "Colour AR VGA", sizeof(ar->vdev.name));
+ ar->vdev.v4l2_dev = v4l2_dev;
+ ar->vdev.fops = &ar_fops;
+ ar->vdev.ioctl_ops = &ar_ioctl_ops;
+ ar->vdev.release = video_device_release_empty;
+ set_bit(V4L2_FL_USE_FH_PRIO, &ar->vdev.flags);
+ video_set_drvdata(&ar->vdev, ar);
+
+ if (vga) {
+ ar->width = AR_WIDTH_VGA;
+ ar->height = AR_HEIGHT_VGA;
+ ar->size = AR_SIZE_VGA;
+ ar->frame_bytes = AR_FRAME_BYTES_VGA;
+ ar->line_bytes = AR_LINE_BYTES_VGA;
+ if (vga_interlace)
+ ar->mode = AR_MODE_INTERLACE;
+ else
+ ar->mode = AR_MODE_NORMAL;
+ } else {
+ ar->width = AR_WIDTH_QVGA;
+ ar->height = AR_HEIGHT_QVGA;
+ ar->size = AR_SIZE_QVGA;
+ ar->frame_bytes = AR_FRAME_BYTES_QVGA;
+ ar->line_bytes = AR_LINE_BYTES_QVGA;
+ ar->mode = AR_MODE_INTERLACE;
+ }
+ mutex_init(&ar->lock);
+ init_waitqueue_head(&ar->wait);
+
+#if USE_INT
+ if (request_irq(M32R_IRQ_INT3, ar_interrupt, 0, "arv", ar)) {
+ v4l2_err(v4l2_dev, "request_irq(%d) failed.\n", M32R_IRQ_INT3);
+ ret = -EIO;
+ goto out_irq;
+ }
+#endif
+
+ if (ar_initialize(ar) != 0) {
+ v4l2_err(v4l2_dev, "M64278 not found.\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+
+ /*
+ * The hardware has been initialized according to the parameters,
+ * so register the video device as a frame grabber type.
+ * The device is named "video[0-64]".
+ */
+ if (video_register_device(&ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
+ /* return -1, -ENFILE(full) or others */
+ v4l2_err(v4l2_dev, "register video (Colour AR) failed.\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+
+ v4l2_info(v4l2_dev, "%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
+ video_device_node_name(&ar->vdev), M32R_IRQ_INT3, freq);
+
+ return 0;
+
+out_dev:
+#if USE_INT
+ free_irq(M32R_IRQ_INT3, ar);
+
+out_irq:
+#endif
+ for (i = 0; i < MAX_AR_HEIGHT; i++)
+ kfree(ar->frame[i]);
+
+out_line_buff:
+#if USE_INT
+ kfree(ar->line_buff);
+
+out_end:
+#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
+ return ret;
+}
+
+
+static int __init ar_init_module(void)
+{
+ freq = (boot_cpu_data.bus_clock / 1000000);
+ printk(KERN_INFO "arv: Bus clock %d MHz\n", freq);
+ if (freq != 50 && freq != 75)
+ freq = DEFAULT_FREQ;
+ return ar_init();
+}
+
+static void __exit ar_cleanup_module(void)
+{
+ struct ar *ar;
+ int i;
+
+ ar = &ardev;
+ video_unregister_device(&ar->vdev);
+#if USE_INT
+ free_irq(M32R_IRQ_INT3, ar);
+#endif
+ for (i = 0; i < MAX_AR_HEIGHT; i++)
+ kfree(ar->frame[i]);
+#if USE_INT
+ kfree(ar->line_buff);
+#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
+}
+
+module_init(ar_init_module);
+module_exit(ar_cleanup_module);
+
+MODULE_AUTHOR("Takeo Takahashi <takahashi.takeo@renesas.com>");
+MODULE_DESCRIPTION("Colour AR M64278(VGA) for Video4Linux");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
diff --git a/drivers/media/platform/blackfin/Kconfig b/drivers/media/platform/blackfin/Kconfig
new file mode 100644
index 00000000000..cc239972fa2
--- /dev/null
+++ b/drivers/media/platform/blackfin/Kconfig
@@ -0,0 +1,15 @@
+config VIDEO_BLACKFIN_CAPTURE
+ tristate "Blackfin Video Capture Driver"
+ depends on VIDEO_V4L2 && BLACKFIN && I2C
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ V4L2 bridge driver for Blackfin video capture device.
+ Choose PPI or EPPI as its interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_capture.
+
+config VIDEO_BLACKFIN_PPI
+ tristate
+ depends on VIDEO_BLACKFIN_CAPTURE
+ default VIDEO_BLACKFIN_CAPTURE
diff --git a/drivers/media/platform/blackfin/Makefile b/drivers/media/platform/blackfin/Makefile
new file mode 100644
index 00000000000..30421bc2308
--- /dev/null
+++ b/drivers/media/platform/blackfin/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_BLACKFIN_CAPTURE) += bfin_capture.o
+obj-$(CONFIG_VIDEO_BLACKFIN_PPI) += ppi.o
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
new file mode 100644
index 00000000000..16e4b1c525c
--- /dev/null
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -0,0 +1,1143 @@
+/*
+ * Analog Devices video capture driver
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <asm/dma.h>
+
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+#define CAPTURE_DRV_NAME "bfin_capture"
+#define BCAP_MIN_NUM_BUF 2
+
+struct bcap_format {
+ char *desc;
+ u32 pixelformat;
+ enum v4l2_mbus_pixelcode mbus_code;
+ int bpp; /* bits per pixel */
+ int dlen; /* data length for ppi in bits */
+};
+
+struct bcap_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+struct bcap_device {
+ /* capture device instance */
+ struct v4l2_device v4l2_dev;
+ /* v4l2 control handler */
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* device node data */
+ struct video_device *video_dev;
+ /* sub device instance */
+ struct v4l2_subdev *sd;
+ /* capture config */
+ struct bfin_capture_config *cfg;
+ /* ppi interface */
+ struct ppi_if *ppi;
+ /* current input */
+ unsigned int cur_input;
+ /* current selected standard */
+ v4l2_std_id std;
+ /* current selected dv_timings */
+ struct v4l2_dv_timings dv_timings;
+ /* used to store pixel format */
+ struct v4l2_pix_format fmt;
+ /* bits per pixel*/
+ int bpp;
+ /* data length for ppi in bits */
+ int dlen;
+ /* used to store sensor supported format */
+ struct bcap_format *sensor_formats;
+ /* number of entries in the sensor_formats array */
+ int num_sensor_formats;
+ /* pointing to current video buffer */
+ struct bcap_buffer *cur_frm;
+ /* buffer queue used in videobuf2 */
+ struct vb2_queue buffer_queue;
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+ /* queue of filled frames */
+ struct list_head dma_queue;
+ /* used in videobuf2 callback */
+ spinlock_t lock;
+ /* used to access capture device */
+ struct mutex mutex;
+ /* used to wait ppi to complete one transfer */
+ struct completion comp;
+ /* prepare to stop */
+ bool stop;
+};
+
+struct bcap_fh {
+ struct v4l2_fh fh;
+ /* indicates whether this file handle is doing IO */
+ bool io_allowed;
+};
+
+static const struct bcap_format bcap_formats[] = {
+ {
+ .desc = "YCbCr 4:2:2 Interleaved UYVY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .bpp = 16,
+ .dlen = 8,
+ },
+ {
+ .desc = "YCbCr 4:2:2 Interleaved YUYV",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 16,
+ .dlen = 8,
+ },
+ {
+ .desc = "YCbCr 4:2:2 Interleaved UYVY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .bpp = 16,
+ .dlen = 16,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 16,
+ .dlen = 8,
+ },
+ {
+ .desc = "RGB 444",
+ .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .bpp = 16,
+ .dlen = 8,
+ },
+
+};
+#define BCAP_MAX_FMTS ARRAY_SIZE(bcap_formats)
+
+static irqreturn_t bcap_isr(int irq, void *dev_id);
+
+static struct bcap_buffer *to_bcap_vb(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct bcap_buffer, vb);
+}
+
+static int bcap_init_sensor_formats(struct bcap_device *bcap_dev)
+{
+ enum v4l2_mbus_pixelcode code;
+ struct bcap_format *sf;
+ unsigned int num_formats = 0;
+ int i, j;
+
+ while (!v4l2_subdev_call(bcap_dev->sd, video,
+ enum_mbus_fmt, num_formats, &code))
+ num_formats++;
+ if (!num_formats)
+ return -ENXIO;
+
+ sf = kzalloc(num_formats * sizeof(*sf), GFP_KERNEL);
+ if (!sf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_formats; i++) {
+ v4l2_subdev_call(bcap_dev->sd, video,
+ enum_mbus_fmt, i, &code);
+ for (j = 0; j < BCAP_MAX_FMTS; j++)
+ if (code == bcap_formats[j].mbus_code)
+ break;
+ if (j == BCAP_MAX_FMTS) {
+ /* we don't allow this sensor to work with our bridge */
+ kfree(sf);
+ return -EINVAL;
+ }
+ sf[i] = bcap_formats[j];
+ }
+ bcap_dev->sensor_formats = sf;
+ bcap_dev->num_sensor_formats = num_formats;
+ return 0;
+}
+
+static void bcap_free_sensor_formats(struct bcap_device *bcap_dev)
+{
+ bcap_dev->num_sensor_formats = 0;
+ kfree(bcap_dev->sensor_formats);
+ bcap_dev->sensor_formats = NULL;
+}
+
+static int bcap_open(struct file *file)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct video_device *vfd = bcap_dev->video_dev;
+ struct bcap_fh *bcap_fh;
+
+ if (!bcap_dev->sd) {
+ v4l2_err(&bcap_dev->v4l2_dev, "No sub device registered\n");
+ return -ENODEV;
+ }
+
+ bcap_fh = kzalloc(sizeof(*bcap_fh), GFP_KERNEL);
+ if (!bcap_fh) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "unable to allocate memory for file handle object\n");
+ return -ENOMEM;
+ }
+
+ v4l2_fh_init(&bcap_fh->fh, vfd);
+
+ /* store pointer to v4l2_fh in private_data member of file */
+ file->private_data = &bcap_fh->fh;
+ v4l2_fh_add(&bcap_fh->fh);
+ bcap_fh->io_allowed = false;
+ return 0;
+}
+
+static int bcap_release(struct file *file)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_fh *fh = file->private_data;
+ struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+ /* if this instance is doing IO */
+ if (bcap_fh->io_allowed)
+ vb2_queue_release(&bcap_dev->buffer_queue);
+
+ file->private_data = NULL;
+ v4l2_fh_del(&bcap_fh->fh);
+ v4l2_fh_exit(&bcap_fh->fh);
+ kfree(bcap_fh);
+ return 0;
+}
+
+static int bcap_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&bcap_dev->mutex))
+ return -ERESTARTSYS;
+ ret = vb2_mmap(&bcap_dev->buffer_queue, vma);
+ mutex_unlock(&bcap_dev->mutex);
+ return ret;
+}
+
+#ifndef CONFIG_MMU
+static unsigned long bcap_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ return vb2_get_unmapped_area(&bcap_dev->buffer_queue,
+ addr,
+ len,
+ pgoff,
+ flags);
+}
+#endif
+
+static unsigned int bcap_poll(struct file *file, poll_table *wait)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ unsigned int res;
+
+ mutex_lock(&bcap_dev->mutex);
+ res = vb2_poll(&bcap_dev->buffer_queue, file, wait);
+ mutex_unlock(&bcap_dev->mutex);
+ return res;
+}
+
+static int bcap_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+
+ if (*nbuffers < BCAP_MIN_NUM_BUF)
+ *nbuffers = BCAP_MIN_NUM_BUF;
+
+ *nplanes = 1;
+ sizes[0] = bcap_dev->fmt.sizeimage;
+ alloc_ctxs[0] = bcap_dev->alloc_ctx;
+
+ return 0;
+}
+
+static int bcap_buffer_init(struct vb2_buffer *vb)
+{
+ struct bcap_buffer *buf = to_bcap_vb(vb);
+
+ INIT_LIST_HEAD(&buf->list);
+ return 0;
+}
+
+static int bcap_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcap_buffer *buf = to_bcap_vb(vb);
+ unsigned long size;
+
+ size = bcap_dev->fmt.sizeimage;
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(&bcap_dev->v4l2_dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(&buf->vb, 0, size);
+
+ return 0;
+}
+
+static void bcap_buffer_queue(struct vb2_buffer *vb)
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcap_buffer *buf = to_bcap_vb(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcap_dev->lock, flags);
+ list_add_tail(&buf->list, &bcap_dev->dma_queue);
+ spin_unlock_irqrestore(&bcap_dev->lock, flags);
+}
+
+static void bcap_buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcap_buffer *buf = to_bcap_vb(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcap_dev->lock, flags);
+ list_del_init(&buf->list);
+ spin_unlock_irqrestore(&bcap_dev->lock, flags);
+}
+
+static void bcap_lock(struct vb2_queue *vq)
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+ mutex_lock(&bcap_dev->mutex);
+}
+
+static void bcap_unlock(struct vb2_queue *vq)
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+ mutex_unlock(&bcap_dev->mutex);
+}
+
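+/*
+ * Prepare one capture run: start the subdevice, program the PPI with the
+ * frame geometry and attach the DMA completion handler. Active width and
+ * height come from the current format; total samples per line and lines
+ * per frame are taken from the DV timings or TV standard when the input
+ * provides them, otherwise from the sensor format plus the board's
+ * blanking pixels.
+ */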
+static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+ struct ppi_if *ppi = bcap_dev->ppi;
+ struct ppi_params params;
+ int ret;
+
+ /* enable streamon on the sub device */
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_stream, 1);
+ if (ret && (ret != -ENOIOCTLCMD)) {
+ v4l2_err(&bcap_dev->v4l2_dev, "stream on failed in subdev\n");
+ return ret;
+ }
+
+ /* set ppi params */
+ params.width = bcap_dev->fmt.width;
+ params.height = bcap_dev->fmt.height;
+ params.bpp = bcap_dev->bpp;
+ params.dlen = bcap_dev->dlen;
+ params.ppi_control = bcap_dev->cfg->ppi_control;
+ params.int_mask = bcap_dev->cfg->int_mask;
+ if (bcap_dev->cfg->inputs[bcap_dev->cur_input].capabilities
+ & V4L2_IN_CAP_DV_TIMINGS) {
+ struct v4l2_bt_timings *bt = &bcap_dev->dv_timings.bt;
+
+ params.hdelay = bt->hsync + bt->hbackporch;
+ params.vdelay = bt->vsync + bt->vbackporch;
+ params.line = V4L2_DV_BT_FRAME_WIDTH(bt);
+ params.frame = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ } else if (bcap_dev->cfg->inputs[bcap_dev->cur_input].capabilities
+ & V4L2_IN_CAP_STD) {
+ params.hdelay = 0;
+ params.vdelay = 0;
+ if (bcap_dev->std & V4L2_STD_525_60) {
+ params.line = 858;
+ params.frame = 525;
+ } else {
+ params.line = 864;
+ params.frame = 625;
+ }
+ } else {
+ params.hdelay = 0;
+ params.vdelay = 0;
+ params.line = params.width + bcap_dev->cfg->blank_pixels;
+ params.frame = params.height;
+ }
+ ret = ppi->ops->set_params(ppi, &params);
+ if (ret < 0) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Error in setting ppi params\n");
+ return ret;
+ }
+
+ /* attach ppi DMA irq handler */
+ ret = ppi->ops->attach_irq(ppi, bcap_isr);
+ if (ret < 0) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Error in attaching interrupt handler\n");
+ return ret;
+ }
+
+ reinit_completion(&bcap_dev->comp);
+ bcap_dev->stop = false;
+ return 0;
+}
+
+static void bcap_stop_streaming(struct vb2_queue *vq)
+{
+ struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+ struct ppi_if *ppi = bcap_dev->ppi;
+ int ret;
+
+ bcap_dev->stop = true;
+ wait_for_completion(&bcap_dev->comp);
+ ppi->ops->stop(ppi);
+ ppi->ops->detach_irq(ppi);
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_stream, 0);
+ if (ret && (ret != -ENOIOCTLCMD))
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "stream off failed in subdev\n");
+
+ /* release all active buffers */
+ while (!list_empty(&bcap_dev->dma_queue)) {
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
+ struct bcap_buffer, list);
+ list_del(&bcap_dev->cur_frm->list);
+ vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static struct vb2_ops bcap_video_qops = {
+ .queue_setup = bcap_queue_setup,
+ .buf_init = bcap_buffer_init,
+ .buf_prepare = bcap_buffer_prepare,
+ .buf_cleanup = bcap_buffer_cleanup,
+ .buf_queue = bcap_buffer_queue,
+ .wait_prepare = bcap_unlock,
+ .wait_finish = bcap_lock,
+ .start_streaming = bcap_start_streaming,
+ .stop_streaming = bcap_stop_streaming,
+};
+
+static int bcap_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct vb2_queue *vq = &bcap_dev->buffer_queue;
+ struct v4l2_fh *fh = file->private_data;
+ struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ bcap_fh->io_allowed = true;
+
+ return vb2_reqbufs(vq, req_buf);
+}
+
+static int bcap_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ return vb2_querybuf(&bcap_dev->buffer_queue, buf);
+}
+
+static int bcap_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_fh *fh = file->private_data;
+ struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+ if (!bcap_fh->io_allowed)
+ return -EBUSY;
+
+ return vb2_qbuf(&bcap_dev->buffer_queue, buf);
+}
+
+static int bcap_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_fh *fh = file->private_data;
+ struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+ if (!bcap_fh->io_allowed)
+ return -EBUSY;
+
+ return vb2_dqbuf(&bcap_dev->buffer_queue,
+ buf, file->f_flags & O_NONBLOCK);
+}
+
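+/*
+ * DMA completion handler, called once per captured frame. The current
+ * buffer is completed (flagged as erroneous if the PPI reported an error)
+ * and the next one is pulled off dma_queue; if no buffer is available the
+ * current one is simply reused. The PPI is restarted for the next frame
+ * unless a stop was requested, in which case the completion is signalled
+ * so bcap_stop_streaming() can proceed.
+ */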
+static irqreturn_t bcap_isr(int irq, void *dev_id)
+{
+ struct ppi_if *ppi = dev_id;
+ struct bcap_device *bcap_dev = ppi->priv;
+ struct vb2_buffer *vb = &bcap_dev->cur_frm->vb;
+ dma_addr_t addr;
+
+ spin_lock(&bcap_dev->lock);
+
+ if (!list_empty(&bcap_dev->dma_queue)) {
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ if (ppi->err) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ ppi->err = false;
+ } else {
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
+ struct bcap_buffer, list);
+ list_del(&bcap_dev->cur_frm->list);
+ } else {
+ /* clear error flag, we will get a new frame */
+ if (ppi->err)
+ ppi->err = false;
+ }
+
+ ppi->ops->stop(ppi);
+
+ if (bcap_dev->stop) {
+ complete(&bcap_dev->comp);
+ } else {
+ addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+ ppi->ops->update_addr(ppi, (unsigned long)addr);
+ ppi->ops->start(ppi);
+ }
+
+ spin_unlock(&bcap_dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int bcap_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct bcap_fh *fh = file->private_data;
+ struct ppi_if *ppi = bcap_dev->ppi;
+ dma_addr_t addr;
+ int ret;
+
+ if (!fh->io_allowed)
+ return -EBUSY;
+
+ /* call streamon to start streaming in videobuf */
+ ret = vb2_streamon(&bcap_dev->buffer_queue, buf_type);
+ if (ret)
+ return ret;
+
+ /* if dma queue is empty, return error */
+ if (list_empty(&bcap_dev->dma_queue)) {
+ v4l2_err(&bcap_dev->v4l2_dev, "dma queue is empty\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* get the next frame from the dma queue */
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
+ struct bcap_buffer, list);
+ /* remove buffer from the dma queue */
+ list_del(&bcap_dev->cur_frm->list);
+ addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+ /* update DMA address */
+ ppi->ops->update_addr(ppi, (unsigned long)addr);
+ /* enable ppi */
+ ppi->ops->start(ppi);
+
+ return 0;
+err:
+ vb2_streamoff(&bcap_dev->buffer_queue, buf_type);
+ return ret;
+}
+
+static int bcap_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct bcap_fh *fh = file->private_data;
+
+ if (!fh->io_allowed)
+ return -EBUSY;
+
+ return vb2_streamoff(&bcap_dev->buffer_queue, buf_type);
+}
+
+static int bcap_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ return v4l2_subdev_call(bcap_dev->sd, video, querystd, std);
+}
+
+static int bcap_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ *std = bcap_dev->std;
+ return 0;
+}
+
+static int bcap_s_std(struct file *file, void *priv, v4l2_std_id std)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ int ret;
+
+ if (vb2_is_busy(&bcap_dev->buffer_queue))
+ return -EBUSY;
+
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_std, std);
+ if (ret < 0)
+ return ret;
+
+ bcap_dev->std = std;
+ return 0;
+}
+
+static int bcap_enum_dv_timings(struct file *file, void *priv,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ timings->pad = 0;
+
+ return v4l2_subdev_call(bcap_dev->sd, pad,
+ enum_dv_timings, timings);
+}
+
+static int bcap_query_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ return v4l2_subdev_call(bcap_dev->sd, video,
+ query_dv_timings, timings);
+}
+
+static int bcap_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ *timings = bcap_dev->dv_timings;
+ return 0;
+}
+
+static int bcap_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ int ret;
+ if (vb2_is_busy(&bcap_dev->buffer_queue))
+ return -EBUSY;
+
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_dv_timings, timings);
+ if (ret < 0)
+ return ret;
+
+ bcap_dev->dv_timings = *timings;
+ return 0;
+}
+
+static int bcap_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct bfin_capture_config *config = bcap_dev->cfg;
+ int ret;
+ u32 status;
+
+ if (input->index >= config->num_inputs)
+ return -EINVAL;
+
+ *input = config->inputs[input->index];
+ /* get input status */
+ ret = v4l2_subdev_call(bcap_dev->sd, video, g_input_status, &status);
+ if (!ret)
+ input->status = status;
+ return 0;
+}
+
+static int bcap_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ *index = bcap_dev->cur_input;
+ return 0;
+}
+
+static int bcap_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct bfin_capture_config *config = bcap_dev->cfg;
+ struct bcap_route *route;
+ int ret;
+
+ if (vb2_is_busy(&bcap_dev->buffer_queue))
+ return -EBUSY;
+
+ if (index >= config->num_inputs)
+ return -EINVAL;
+
+ route = &config->routes[index];
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_routing,
+ route->input, route->output, 0);
+ if ((ret < 0) && (ret != -ENOIOCTLCMD)) {
+ v4l2_err(&bcap_dev->v4l2_dev, "Failed to set input\n");
+ return ret;
+ }
+ bcap_dev->cur_input = index;
+ /* if this route has specific config, update ppi control */
+ if (route->ppi_control)
+ config->ppi_control = route->ppi_control;
+ return 0;
+}
+
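+/*
+ * Negotiate a format with the sensor: pick the bridge format matching the
+ * requested pixelformat (falling back to the first sensor format), let the
+ * subdevice adjust the media bus format, then derive bytesperline and
+ * sizeimage from the resulting width, height and bits per pixel.
+ */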
+static int bcap_try_format(struct bcap_device *bcap,
+ struct v4l2_pix_format *pixfmt,
+ struct bcap_format *bcap_fmt)
+{
+ struct bcap_format *sf = bcap->sensor_formats;
+ struct bcap_format *fmt = NULL;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret, i;
+
+ for (i = 0; i < bcap->num_sensor_formats; i++) {
+ fmt = &sf[i];
+ if (pixfmt->pixelformat == fmt->pixelformat)
+ break;
+ }
+ if (i == bcap->num_sensor_formats)
+ fmt = &sf[0];
+
+ v4l2_fill_mbus_format(&mbus_fmt, pixfmt, fmt->mbus_code);
+ ret = v4l2_subdev_call(bcap->sd, video,
+ try_mbus_fmt, &mbus_fmt);
+ if (ret < 0)
+ return ret;
+ v4l2_fill_pix_format(pixfmt, &mbus_fmt);
+ if (bcap_fmt) {
+ for (i = 0; i < bcap->num_sensor_formats; i++) {
+ fmt = &sf[i];
+ if (mbus_fmt.code == fmt->mbus_code)
+ break;
+ }
+ *bcap_fmt = *fmt;
+ }
+ pixfmt->bytesperline = pixfmt->width * fmt->bpp / 8;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+ return 0;
+}
+
+static int bcap_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct bcap_format *sf = bcap_dev->sensor_formats;
+
+ if (fmt->index >= bcap_dev->num_sensor_formats)
+ return -EINVAL;
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ strlcpy(fmt->description,
+ sf[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = sf[fmt->index].pixelformat;
+ return 0;
+}
+
+static int bcap_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ return bcap_try_format(bcap_dev, pixfmt, NULL);
+}
+
+static int bcap_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ fmt->fmt.pix = bcap_dev->fmt;
+ return 0;
+}
+
+static int bcap_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct bcap_format bcap_fmt;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ int ret;
+
+ if (vb2_is_busy(&bcap_dev->buffer_queue))
+ return -EBUSY;
+
+ /* see if format works */
+ ret = bcap_try_format(bcap_dev, pixfmt, &bcap_fmt);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_mbus_format(&mbus_fmt, pixfmt, bcap_fmt.mbus_code);
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_mbus_fmt, &mbus_fmt);
+ if (ret < 0)
+ return ret;
+ bcap_dev->fmt = *pixfmt;
+ bcap_dev->bpp = bcap_fmt.bpp;
+ bcap_dev->dlen = bcap_fmt.dlen;
+ return 0;
+}
+
+static int bcap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "Blackfin Platform", sizeof(cap->bus_info));
+ strlcpy(cap->card, bcap_dev->cfg->card_name, sizeof(cap->card));
+ return 0;
+}
+
+static int bcap_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ return v4l2_subdev_call(bcap_dev->sd, video, g_parm, a);
+}
+
+static int bcap_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ return v4l2_subdev_call(bcap_dev->sd, video, s_parm, a);
+}
+
+static int bcap_log_status(struct file *file, void *priv)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ /* status for sub devices */
+ v4l2_device_call_all(&bcap_dev->v4l2_dev, 0, core, log_status);
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops bcap_ioctl_ops = {
+ .vidioc_querycap = bcap_querycap,
+ .vidioc_g_fmt_vid_cap = bcap_g_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = bcap_enum_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = bcap_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = bcap_try_fmt_vid_cap,
+ .vidioc_enum_input = bcap_enum_input,
+ .vidioc_g_input = bcap_g_input,
+ .vidioc_s_input = bcap_s_input,
+ .vidioc_querystd = bcap_querystd,
+ .vidioc_s_std = bcap_s_std,
+ .vidioc_g_std = bcap_g_std,
+ .vidioc_s_dv_timings = bcap_s_dv_timings,
+ .vidioc_g_dv_timings = bcap_g_dv_timings,
+ .vidioc_query_dv_timings = bcap_query_dv_timings,
+ .vidioc_enum_dv_timings = bcap_enum_dv_timings,
+ .vidioc_reqbufs = bcap_reqbufs,
+ .vidioc_querybuf = bcap_querybuf,
+ .vidioc_qbuf = bcap_qbuf,
+ .vidioc_dqbuf = bcap_dqbuf,
+ .vidioc_streamon = bcap_streamon,
+ .vidioc_streamoff = bcap_streamoff,
+ .vidioc_g_parm = bcap_g_parm,
+ .vidioc_s_parm = bcap_s_parm,
+ .vidioc_log_status = bcap_log_status,
+};
+
+static struct v4l2_file_operations bcap_fops = {
+ .owner = THIS_MODULE,
+ .open = bcap_open,
+ .release = bcap_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = bcap_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = bcap_get_unmapped_area,
+#endif
+ .poll = bcap_poll
+};
+
+static int bcap_probe(struct platform_device *pdev)
+{
+ struct bcap_device *bcap_dev;
+ struct video_device *vfd;
+ struct i2c_adapter *i2c_adap;
+ struct bfin_capture_config *config;
+ struct vb2_queue *q;
+ struct bcap_route *route;
+ int ret;
+
+ config = pdev->dev.platform_data;
+ if (!config || !config->num_inputs) {
+ v4l2_err(pdev->dev.driver, "Unable to get board config\n");
+ return -ENODEV;
+ }
+
+ bcap_dev = kzalloc(sizeof(*bcap_dev), GFP_KERNEL);
+ if (!bcap_dev) {
+ v4l2_err(pdev->dev.driver, "Unable to alloc bcap_dev\n");
+ return -ENOMEM;
+ }
+
+ bcap_dev->cfg = config;
+
+ bcap_dev->ppi = ppi_create_instance(config->ppi_info);
+ if (!bcap_dev->ppi) {
+ v4l2_err(pdev->dev.driver, "Unable to create ppi\n");
+ ret = -ENODEV;
+ goto err_free_dev;
+ }
+ bcap_dev->ppi->priv = bcap_dev;
+
+ bcap_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(bcap_dev->alloc_ctx)) {
+ ret = PTR_ERR(bcap_dev->alloc_ctx);
+ goto err_free_ppi;
+ }
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ ret = -ENOMEM;
+ v4l2_err(pdev->dev.driver, "Unable to alloc video device\n");
+ goto err_cleanup_ctx;
+ }
+
+ /* initialize the fields of the video device */
+ vfd->release = video_device_release;
+ vfd->fops = &bcap_fops;
+ vfd->ioctl_ops = &bcap_ioctl_ops;
+ vfd->tvnorms = 0;
+ vfd->v4l2_dev = &bcap_dev->v4l2_dev;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+ strlcpy(vfd->name, CAPTURE_DRV_NAME, sizeof(vfd->name));
+ bcap_dev->video_dev = vfd;
+
+ ret = v4l2_device_register(&pdev->dev, &bcap_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to register v4l2 device\n");
+ goto err_release_vdev;
+ }
+ v4l2_info(&bcap_dev->v4l2_dev, "v4l2 device registered\n");
+
+ bcap_dev->v4l2_dev.ctrl_handler = &bcap_dev->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(&bcap_dev->ctrl_handler, 0);
+ if (ret) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to init control handler\n");
+ goto err_unreg_v4l2;
+ }
+
+ spin_lock_init(&bcap_dev->lock);
+ /* initialize queue */
+ q = &bcap_dev->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = bcap_dev;
+ q->buf_struct_size = sizeof(struct bcap_buffer);
+ q->ops = &bcap_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_free_handler;
+
+ mutex_init(&bcap_dev->mutex);
+ init_completion(&bcap_dev->comp);
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&bcap_dev->dma_queue);
+
+ vfd->lock = &bcap_dev->mutex;
+
+ /* register video device */
+ ret = video_register_device(bcap_dev->video_dev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to register video device\n");
+ goto err_free_handler;
+ }
+ video_set_drvdata(bcap_dev->video_dev, bcap_dev);
+ v4l2_info(&bcap_dev->v4l2_dev, "video device registered as: %s\n",
+ video_device_node_name(vfd));
+
+ /* load up the subdevice */
+ i2c_adap = i2c_get_adapter(config->i2c_adapter_id);
+ if (!i2c_adap) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to find i2c adapter\n");
+ ret = -ENODEV;
+ goto err_unreg_vdev;
+ }
+ bcap_dev->sd = v4l2_i2c_new_subdev_board(&bcap_dev->v4l2_dev,
+ i2c_adap,
+ &config->board_info,
+ NULL);
+ if (bcap_dev->sd) {
+ int i;
+
+ /* update tvnorms from the sub devices */
+ for (i = 0; i < config->num_inputs; i++)
+ vfd->tvnorms |= config->inputs[i].std;
+ } else {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to register sub device\n");
+ ret = -ENODEV;
+ goto err_unreg_vdev;
+ }
+
+ v4l2_info(&bcap_dev->v4l2_dev, "v4l2 sub device registered\n");
+
+ /*
+ * explicitly set the input, otherwise some boards
+ * may not come up in the state we expect
+ */
+ route = &config->routes[0];
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_routing,
+ route->input, route->output, 0);
+ if ((ret < 0) && (ret != -ENOIOCTLCMD)) {
+ v4l2_err(&bcap_dev->v4l2_dev, "Failed to set input\n");
+ goto err_unreg_vdev;
+ }
+ bcap_dev->cur_input = 0;
+ /* if this route has specific config, update ppi control */
+ if (route->ppi_control)
+ config->ppi_control = route->ppi_control;
+
+ /* now we can probe the default state */
+ if (config->inputs[0].capabilities & V4L2_IN_CAP_STD) {
+ v4l2_std_id std;
+ ret = v4l2_subdev_call(bcap_dev->sd, video, g_std, &std);
+ if (ret) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to get std\n");
+ goto err_unreg_vdev;
+ }
+ bcap_dev->std = std;
+ }
+ if (config->inputs[0].capabilities & V4L2_IN_CAP_DV_TIMINGS) {
+ struct v4l2_dv_timings dv_timings;
+ ret = v4l2_subdev_call(bcap_dev->sd, video,
+ g_dv_timings, &dv_timings);
+ if (ret) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to get dv timings\n");
+ goto err_unreg_vdev;
+ }
+ bcap_dev->dv_timings = dv_timings;
+ }
+ ret = bcap_init_sensor_formats(bcap_dev);
+ if (ret) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to create sensor formats table\n");
+ goto err_unreg_vdev;
+ }
+ return 0;
+err_unreg_vdev:
+ video_unregister_device(bcap_dev->video_dev);
+ bcap_dev->video_dev = NULL;
+err_free_handler:
+ v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
+err_unreg_v4l2:
+ v4l2_device_unregister(&bcap_dev->v4l2_dev);
+err_release_vdev:
+ if (bcap_dev->video_dev)
+ video_device_release(bcap_dev->video_dev);
+err_cleanup_ctx:
+ vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
+err_free_ppi:
+ ppi_delete_instance(bcap_dev->ppi);
+err_free_dev:
+ kfree(bcap_dev);
+ return ret;
+}
+
+static int bcap_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct bcap_device *bcap_dev = container_of(v4l2_dev,
+ struct bcap_device, v4l2_dev);
+
+ bcap_free_sensor_formats(bcap_dev);
+ video_unregister_device(bcap_dev->video_dev);
+ v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
+ v4l2_device_unregister(v4l2_dev);
+ vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
+ ppi_delete_instance(bcap_dev->ppi);
+ kfree(bcap_dev);
+ return 0;
+}
+
+static struct platform_driver bcap_driver = {
+ .driver = {
+ .name = CAPTURE_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = bcap_probe,
+ .remove = bcap_remove,
+};
+module_platform_driver(bcap_driver);
+
+MODULE_DESCRIPTION("Analog Devices blackfin video capture driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
new file mode 100644
index 00000000000..15e9c2bac2b
--- /dev/null
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -0,0 +1,345 @@
+/*
+ * ppi.c Analog Devices Parallel Peripheral Interface driver
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/bfin_ppi.h>
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#include <media/blackfin/ppi.h>
+
+static int ppi_attach_irq(struct ppi_if *ppi, irq_handler_t handler);
+static void ppi_detach_irq(struct ppi_if *ppi);
+static int ppi_start(struct ppi_if *ppi);
+static int ppi_stop(struct ppi_if *ppi);
+static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params);
+static void ppi_update_addr(struct ppi_if *ppi, unsigned long addr);
+
+static const struct ppi_ops ppi_ops = {
+ .attach_irq = ppi_attach_irq,
+ .detach_irq = ppi_detach_irq,
+ .start = ppi_start,
+ .stop = ppi_stop,
+ .set_params = ppi_set_params,
+ .update_addr = ppi_update_addr,
+};
+
+static irqreturn_t ppi_irq_err(int irq, void *dev_id)
+{
+ struct ppi_if *ppi = dev_id;
+ const struct ppi_info *info = ppi->info;
+
+ switch (info->type) {
+ case PPI_TYPE_PPI:
+ {
+ struct bfin_ppi_regs *reg = info->base;
+ unsigned short status;
+
+ /*
+ * the status register on BF561 is cleared when read,
+ * on other parts it is write-1-to-clear (W1C)
+ */
+ status = bfin_read16(&reg->status);
+ if (status & 0x3000)
+ ppi->err = true;
+ bfin_write16(&reg->status, 0xff00);
+ break;
+ }
+ case PPI_TYPE_EPPI:
+ {
+ struct bfin_eppi_regs *reg = info->base;
+ unsigned short status;
+
+ status = bfin_read16(&reg->status);
+ if (status & 0x2)
+ ppi->err = true;
+ bfin_write16(&reg->status, 0xffff);
+ break;
+ }
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ unsigned long stat;
+
+ stat = bfin_read32(&reg->stat);
+ if (stat & 0x2)
+ ppi->err = true;
+ bfin_write32(&reg->stat, 0xc0ff);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ppi_attach_irq(struct ppi_if *ppi, irq_handler_t handler)
+{
+ const struct ppi_info *info = ppi->info;
+ int ret;
+
+ ret = request_dma(info->dma_ch, "PPI_DMA");
+
+ if (ret) {
+ pr_err("Unable to allocate DMA channel for PPI\n");
+ return ret;
+ }
+ set_dma_callback(info->dma_ch, handler, ppi);
+
+ if (ppi->err_int) {
+ ret = request_irq(info->irq_err, ppi_irq_err, 0, "PPI ERROR", ppi);
+ if (ret) {
+ pr_err("Unable to allocate IRQ for PPI\n");
+ free_dma(info->dma_ch);
+ }
+ }
+ return ret;
+}
+
+static void ppi_detach_irq(struct ppi_if *ppi)
+{
+ const struct ppi_info *info = ppi->info;
+
+ if (ppi->err_int)
+ free_irq(info->irq_err, ppi);
+ free_dma(info->dma_ch);
+}
+
+static int ppi_start(struct ppi_if *ppi)
+{
+ const struct ppi_info *info = ppi->info;
+
+ /* enable DMA */
+ enable_dma(info->dma_ch);
+
+ /* enable PPI */
+ ppi->ppi_control |= PORT_EN;
+ switch (info->type) {
+ case PPI_TYPE_PPI:
+ {
+ struct bfin_ppi_regs *reg = info->base;
+ bfin_write16(&reg->control, ppi->ppi_control);
+ break;
+ }
+ case PPI_TYPE_EPPI:
+ {
+ struct bfin_eppi_regs *reg = info->base;
+ bfin_write32(&reg->control, ppi->ppi_control);
+ break;
+ }
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ SSYNC();
+ return 0;
+}
+
+static int ppi_stop(struct ppi_if *ppi)
+{
+ const struct ppi_info *info = ppi->info;
+
+ /* disable PPI */
+ ppi->ppi_control &= ~PORT_EN;
+ switch (info->type) {
+ case PPI_TYPE_PPI:
+ {
+ struct bfin_ppi_regs *reg = info->base;
+ bfin_write16(&reg->control, ppi->ppi_control);
+ break;
+ }
+ case PPI_TYPE_EPPI:
+ {
+ struct bfin_eppi_regs *reg = info->base;
+ bfin_write32(&reg->control, ppi->ppi_control);
+ break;
+ }
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ /* disable DMA */
+ clear_dma_irqstat(info->dma_ch);
+ disable_dma(info->dma_ch);
+
+ SSYNC();
+ return 0;
+}
+
+static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
+{
+ const struct ppi_info *info = ppi->info;
+ int dma32 = 0;
+ int dma_config, bytes_per_line;
+ int hcount, hdelay, samples_per_line;
+
+ bytes_per_line = params->width * params->bpp / 8;
+ /* convert parameters from pixel units to bus samples */
+ hcount = params->width * params->bpp / params->dlen;
+ hdelay = params->hdelay * params->bpp / params->dlen;
+ samples_per_line = params->line * params->bpp / params->dlen;
+ if (params->int_mask == 0xFFFFFFFF)
+ ppi->err_int = false;
+ else
+ ppi->err_int = true;
+
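+ /*
+ * The transfer is a 2D stop-mode DMA: X counts samples within a line,
+ * Y counts lines, and DI_EN_Y requests an interrupt when the Y count
+ * expires, i.e. once per frame. WNR marks the channel as a memory
+ * write (receive) whenever the PPI direction bit is not set to output.
+ */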
+ dma_config = (DMA_FLOW_STOP | RESTART | DMA2D | DI_EN_Y);
+ ppi->ppi_control = params->ppi_control & ~PORT_EN;
+ if (!(ppi->ppi_control & PORT_DIR))
+ dma_config |= WNR;
+ switch (info->type) {
+ case PPI_TYPE_PPI:
+ {
+ struct bfin_ppi_regs *reg = info->base;
+
+ if (params->ppi_control & DMA32)
+ dma32 = 1;
+
+ bfin_write16(&reg->control, ppi->ppi_control);
+ bfin_write16(&reg->count, samples_per_line - 1);
+ bfin_write16(&reg->frame, params->frame);
+ break;
+ }
+ case PPI_TYPE_EPPI:
+ {
+ struct bfin_eppi_regs *reg = info->base;
+
+ if ((params->ppi_control & PACK_EN)
+ || (params->ppi_control & 0x38000) > DLEN_16)
+ dma32 = 1;
+
+ bfin_write32(&reg->control, ppi->ppi_control);
+ bfin_write16(&reg->line, samples_per_line);
+ bfin_write16(&reg->frame, params->frame);
+ bfin_write16(&reg->hdelay, hdelay);
+ bfin_write16(&reg->vdelay, params->vdelay);
+ bfin_write16(&reg->hcount, hcount);
+ bfin_write16(&reg->vcount, params->height);
+ break;
+ }
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+
+ if ((params->ppi_control & PACK_EN)
+ || (params->ppi_control & 0x70000) > DLEN_16)
+ dma32 = 1;
+
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ bfin_write32(&reg->line, samples_per_line);
+ bfin_write32(&reg->frame, params->frame);
+ bfin_write32(&reg->hdly, hdelay);
+ bfin_write32(&reg->vdly, params->vdelay);
+ bfin_write32(&reg->hcnt, hcount);
+ bfin_write32(&reg->vcnt, params->height);
+ if (params->int_mask)
+ bfin_write32(&reg->imsk, params->int_mask & 0xFF);
+ if (ppi->ppi_control & PORT_DIR) {
+ u32 hsync_width, vsync_width, vsync_period;
+
+ hsync_width = params->hsync
+ * params->bpp / params->dlen;
+ vsync_width = params->vsync * samples_per_line;
+ vsync_period = samples_per_line * params->frame;
+ bfin_write32(&reg->fs1_wlhb, hsync_width);
+ bfin_write32(&reg->fs1_paspl, samples_per_line);
+ bfin_write32(&reg->fs2_wlvb, vsync_width);
+ bfin_write32(&reg->fs2_palpf, vsync_period);
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ if (dma32) {
+ dma_config |= WDSIZE_32 | PSIZE_32;
+ set_dma_x_count(info->dma_ch, bytes_per_line >> 2);
+ set_dma_x_modify(info->dma_ch, 4);
+ set_dma_y_modify(info->dma_ch, 4);
+ } else {
+ dma_config |= WDSIZE_16 | PSIZE_16;
+ set_dma_x_count(info->dma_ch, bytes_per_line >> 1);
+ set_dma_x_modify(info->dma_ch, 2);
+ set_dma_y_modify(info->dma_ch, 2);
+ }
+ set_dma_y_count(info->dma_ch, params->height);
+ set_dma_config(info->dma_ch, dma_config);
+
+ SSYNC();
+ return 0;
+}
+
+static void ppi_update_addr(struct ppi_if *ppi, unsigned long addr)
+{
+ set_dma_start_addr(ppi->info->dma_ch, addr);
+}
+
+struct ppi_if *ppi_create_instance(const struct ppi_info *info)
+{
+ struct ppi_if *ppi;
+
+ if (!info || !info->pin_req)
+ return NULL;
+
+ if (peripheral_request_list(info->pin_req, KBUILD_MODNAME)) {
+ pr_err("request peripheral failed\n");
+ return NULL;
+ }
+
+ ppi = kzalloc(sizeof(*ppi), GFP_KERNEL);
+ if (!ppi) {
+ peripheral_free_list(info->pin_req);
+ pr_err("unable to allocate memory for ppi handle\n");
+ return NULL;
+ }
+ ppi->ops = &ppi_ops;
+ ppi->info = info;
+
+ pr_info("ppi probe success\n");
+ return ppi;
+}
+EXPORT_SYMBOL(ppi_create_instance);
+
+void ppi_delete_instance(struct ppi_if *ppi)
+{
+ peripheral_free_list(ppi->info->pin_req);
+ kfree(ppi);
+}
+EXPORT_SYMBOL(ppi_delete_instance);
+
+MODULE_DESCRIPTION("Analog Devices PPI driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
new file mode 100644
index 00000000000..b1783791d42
--- /dev/null
+++ b/drivers/media/platform/coda.c
@@ -0,0 +1,3347 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of.h>
+#include <linux/platform_data/coda.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "coda.h"
+
+#define CODA_NAME "coda"
+
+#define CODADX6_MAX_INSTANCES 4
+
+#define CODA_FMO_BUF_SIZE 32
+#define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
+#define CODA7_WORK_BUF_SIZE (128 * 1024)
+#define CODA7_TEMP_BUF_SIZE (304 * 1024)
+#define CODA_PARA_BUF_SIZE (10 * 1024)
+#define CODA_ISRAM_SIZE (2048 * 2)
+#define CODADX6_IRAM_SIZE 0xb000
+#define CODA7_IRAM_SIZE 0x14000
+
+#define CODA7_PS_BUF_SIZE 0x28000
+
+#define CODA_MAX_FRAMEBUFFERS 8
+
+#define CODA_MAX_FRAME_SIZE 0x100000
+#define FMO_SLICE_SAVE_BUF_SIZE (32)
+#define CODA_DEFAULT_GAMMA 4096
+
+#define MIN_W 176
+#define MIN_H 144
+
+#define S_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN 1 /* multiple of 2 */
+#define H_ALIGN 1 /* multiple of 2 */
+
+#define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh)
+
+static int coda_debug;
+module_param(coda_debug, int, 0644);
+MODULE_PARM_DESC(coda_debug, "Debug level (0-1)");
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+enum coda_inst_type {
+ CODA_INST_ENCODER,
+ CODA_INST_DECODER,
+};
+
+enum coda_product {
+ CODA_DX6 = 0xf001,
+ CODA_7541 = 0xf012,
+};
+
+struct coda_fmt {
+ char *name;
+ u32 fourcc;
+};
+
+struct coda_codec {
+ u32 mode;
+ u32 src_fourcc;
+ u32 dst_fourcc;
+ u32 max_w;
+ u32 max_h;
+};
+
+struct coda_devtype {
+ char *firmware;
+ enum coda_product product;
+ struct coda_codec *codecs;
+ unsigned int num_codecs;
+ size_t workbuf_size;
+};
+
+/* Per-queue, driver-specific private data */
+struct coda_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ unsigned int fourcc;
+};
+
+struct coda_aux_buf {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+struct coda_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct platform_device *plat_dev;
+ const struct coda_devtype *devtype;
+
+ void __iomem *regs_base;
+ struct clk *clk_per;
+ struct clk *clk_ahb;
+
+ struct coda_aux_buf codebuf;
+ struct coda_aux_buf tempbuf;
+ struct coda_aux_buf workbuf;
+ struct gen_pool *iram_pool;
+ unsigned long iram_vaddr;
+ unsigned long iram_paddr;
+ unsigned long iram_size;
+
+ spinlock_t irqlock;
+ struct mutex dev_mutex;
+ struct mutex coda_mutex;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct list_head instances;
+ unsigned long instance_mask;
+ struct delayed_work timeout;
+};
+
+struct coda_params {
+ u8 rot_mode;
+ u8 h264_intra_qp;
+ u8 h264_inter_qp;
+ u8 mpeg4_intra_qp;
+ u8 mpeg4_inter_qp;
+ u8 gop_size;
+ int codec_mode;
+ int codec_mode_aux;
+ enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+ u32 framerate;
+ u16 bitrate;
+ u32 slice_max_bits;
+ u32 slice_max_mb;
+};
+
+struct coda_iram_info {
+ u32 axi_sram_use;
+ phys_addr_t buf_bit_use;
+ phys_addr_t buf_ip_ac_dc_use;
+ phys_addr_t buf_dbk_y_use;
+ phys_addr_t buf_dbk_c_use;
+ phys_addr_t buf_ovl_use;
+ phys_addr_t buf_btp_use;
+ phys_addr_t search_ram_paddr;
+ int search_ram_size;
+};
+
+struct coda_ctx {
+ struct coda_dev *dev;
+ struct mutex buffer_mutex;
+ struct list_head list;
+ struct work_struct skip_run;
+ int aborting;
+ int initialized;
+ int streamon_out;
+ int streamon_cap;
+ u32 isequence;
+ u32 qsequence;
+ u32 osequence;
+ struct coda_q_data q_data[2];
+ enum coda_inst_type inst_type;
+ struct coda_codec *codec;
+ enum v4l2_colorspace colorspace;
+ struct coda_params params;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_fh fh;
+ int gopcounter;
+ int runcounter;
+ char vpu_header[3][64];
+ int vpu_header_size[3];
+ struct kfifo bitstream_fifo;
+ struct mutex bitstream_mutex;
+ struct coda_aux_buf bitstream;
+ bool prescan_failed;
+ struct coda_aux_buf parabuf;
+ struct coda_aux_buf psbuf;
+ struct coda_aux_buf slicebuf;
+ struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
+ struct coda_aux_buf workbuf;
+ int num_internal_frames;
+ int idx;
+ int reg_idx;
+ struct coda_iram_info iram_info;
+ u32 bit_stream_param;
+ u32 frm_dis_flg;
+ int display_idx;
+};
+
+static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
+static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
+
+static inline void coda_write(struct coda_dev *dev, u32 data, u32 reg)
+{
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+ writel(data, dev->regs_base + reg);
+}
+
+static inline unsigned int coda_read(struct coda_dev *dev, u32 reg)
+{
+ u32 data;
+ data = readl(dev->regs_base + reg);
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+ return data;
+}
+
+static inline unsigned long coda_isbusy(struct coda_dev *dev)
+{
+ return coda_read(dev, CODA_REG_BIT_BUSY);
+}
+
+static inline int coda_is_initialized(struct coda_dev *dev)
+{
+ return (coda_read(dev, CODA_REG_BIT_CUR_PC) != 0);
+}
+
+static int coda_wait_timeout(struct coda_dev *dev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (coda_isbusy(dev)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
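+/*
+ * Kick a command on the BIT processor. On CODA7541 the per-context
+ * bitstream and work buffer registers are restored first, since all
+ * contexts share the one hardware instance. The busy flag set here is
+ * expected to be cleared by the firmware when the command completes;
+ * coda_command_sync() polls it via coda_wait_timeout().
+ */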
+static void coda_command_async(struct coda_ctx *ctx, int cmd)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ if (dev->devtype->product == CODA_7541) {
+ /* Restore context related registers to CODA */
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
+ }
+
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+
+ coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
+ coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
+ coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
+
+ coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
+}
+
+static int coda_command_sync(struct coda_ctx *ctx, int cmd)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ coda_command_async(ctx, cmd);
+ return coda_wait_timeout(dev);
+}
+
+static struct coda_q_data *get_q_data(struct coda_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &(ctx->q_data[V4L2_M2M_SRC]);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &(ctx->q_data[V4L2_M2M_DST]);
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/*
+ * Array of all formats supported by any version of Coda:
+ */
+static struct coda_fmt coda_formats[] = {
+ {
+ .name = "YUV 4:2:0 Planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .name = "YUV 4:2:0 Planar, YCrCb",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ },
+};
+
+#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
+ { mode, src_fourcc, dst_fourcc, max_w, max_h }
+
+/*
+ * Arrays of codecs supported by each given version of Coda:
+ * i.MX27 -> codadx6
+ * i.MX5x -> coda7
+ * i.MX6 -> coda960
+ * Use V4L2_PIX_FMT_YUV420 as placeholder for all supported YUV 4:2:0 variants
+ */
+static struct coda_codec codadx6_codecs[] = {
+ CODA_CODEC(CODADX6_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576),
+ CODA_CODEC(CODADX6_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 720, 576),
+};
+
+static struct coda_codec coda7_codecs[] = {
+ CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
+ CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
+ CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1080),
+ CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1080),
+};
+
+static bool coda_format_is_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Normalize all supported YUV 4:2:0 formats to the value used in the codec
+ * tables.
+ */
+static u32 coda_format_normalize_yuv(u32 fourcc)
+{
+ return coda_format_is_yuv(fourcc) ? V4L2_PIX_FMT_YUV420 : fourcc;
+}
+
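+/*
+ * Look up the codec that converts src_fourcc into dst_fourcc. YUV 4:2:0
+ * variants are normalized first so either one matches the codec tables;
+ * identical source and destination formats mean no codec is needed and
+ * NULL is returned.
+ */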
+static struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc,
+ int dst_fourcc)
+{
+ struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ int k;
+
+ src_fourcc = coda_format_normalize_yuv(src_fourcc);
+ dst_fourcc = coda_format_normalize_yuv(dst_fourcc);
+ if (src_fourcc == dst_fourcc)
+ return NULL;
+
+ for (k = 0; k < num_codecs; k++) {
+ if (codecs[k].src_fourcc == src_fourcc &&
+ codecs[k].dst_fourcc == dst_fourcc)
+ break;
+ }
+
+ if (k == num_codecs)
+ return NULL;
+
+ return &codecs[k];
+}
+
+static void coda_get_max_dimensions(struct coda_dev *dev,
+ struct coda_codec *codec,
+ int *max_w, int *max_h)
+{
+ struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ unsigned int w, h;
+ int k;
+
+ if (codec) {
+ w = codec->max_w;
+ h = codec->max_h;
+ } else {
+ for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
+ w = max(w, codecs[k].max_w);
+ h = max(h, codecs[k].max_h);
+ }
+ }
+
+ if (max_w)
+ *max_w = w;
+ if (max_h)
+ *max_h = h;
+}
+
+static char *coda_product_name(int product)
+{
+ static char buf[9];
+
+ switch (product) {
+ case CODA_DX6:
+ return "CodaDx6";
+ case CODA_7541:
+ return "CODA7541";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", product);
+ return buf;
+ }
+}
+
+/*
+ * V4L2 ioctl() operations.
+ */
+static int coda_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product),
+ sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
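+/*
+ * Enumerate pixel formats for one queue. The raw YUV 4:2:0 formats are
+ * always reported (unless the fixed source is itself raw); a compressed
+ * format is reported only if some codec produces it (capture side) or
+ * consumes it (output side), optionally restricted to codecs whose source
+ * matches src_fourcc.
+ */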
+static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
+ enum v4l2_buf_type type, int src_fourcc)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_codec *codecs = ctx->dev->devtype->codecs;
+ struct coda_fmt *formats = coda_formats;
+ struct coda_fmt *fmt;
+ int num_codecs = ctx->dev->devtype->num_codecs;
+ int num_formats = ARRAY_SIZE(coda_formats);
+ int i, k, num = 0;
+
+ for (i = 0; i < num_formats; i++) {
+ /* Both uncompressed formats are always supported */
+ if (coda_format_is_yuv(formats[i].fourcc) &&
+ !coda_format_is_yuv(src_fourcc)) {
+ if (num == f->index)
+ break;
+ ++num;
+ continue;
+ }
+ /* Compressed formats may be supported, check the codec list */
+ for (k = 0; k < num_codecs; k++) {
+ /* if src_fourcc is set, only consider matching codecs */
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ formats[i].fourcc == codecs[k].dst_fourcc &&
+ (!src_fourcc || src_fourcc == codecs[k].src_fourcc))
+ break;
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ formats[i].fourcc == codecs[k].src_fourcc)
+ break;
+ }
+ if (k < num_codecs) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ if (i < num_formats) {
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ if (!coda_format_is_yuv(fmt->fourcc))
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int coda_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *src_vq;
+ struct coda_q_data *q_data_src;
+
+ /* If the source format is already fixed, only list matching formats */
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_streaming(src_vq)) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ q_data_src->fourcc);
+ }
+
+ return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
+}
+
+static int coda_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0);
+}
+
+static int coda_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct coda_q_data *q_data;
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = q_data->fourcc;
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ if (coda_format_is_yuv(f->fmt.pix.pixelformat))
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 2);
+ else /* encoded formats h.264/mpeg4 */
+ f->fmt.pix.bytesperline = 0;
+
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ return 0;
+}
+
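+/*
+ * Clamp and align the requested dimensions to what the selected codec (or
+ * any codec, if none was given) can handle, then fix up stride and
+ * sizeimage: raw frames need a stride that is a multiple of 8, while
+ * compressed formats use a fixed worst-case buffer size.
+ */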
+static int coda_try_fmt(struct coda_ctx *ctx, struct coda_codec *codec,
+ struct v4l2_format *f)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data;
+ unsigned int max_w, max_h;
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /*
+ * The V4L2 specification suggests that the driver corrects the
+ * format struct if any of the dimensions is unsupported
+ */
+ f->fmt.pix.field = field;
+
+ coda_get_max_dimensions(dev, codec, &max_w, &max_h);
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
+ &f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
+ S_ALIGN);
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
+ break;
+ default:
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix.pixelformat = q_data->fourcc;
+ }
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ /* Frame stride must be multiple of 8 */
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 8);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
+ break;
+ default:
+ BUG();
+ }
+
+ f->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int coda_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_codec *codec;
+ struct vb2_queue *src_vq;
+ int ret;
+
+ /*
+ * If the source format is already fixed, try to find a codec that
+ * converts to the given destination format
+ */
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_streaming(src_vq)) {
+ struct coda_q_data *q_data_src;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ f->fmt.pix.pixelformat);
+ if (!codec)
+ return -EINVAL;
+ } else {
+ /* Otherwise determine codec by encoded format, if possible */
+ codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
+ f->fmt.pix.pixelformat);
+ }
+
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ ret = coda_try_fmt(ctx, codec, f);
+ if (ret < 0)
+ return ret;
+
+ /* The h.264 decoder only returns complete 16x16 macroblocks */
+ if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
+ f->fmt.pix.width = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ }
+
+ return 0;
+}
+
+static int coda_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_codec *codec;
+
+ /* Determine codec by encoded format, returns NULL if raw or invalid */
+ codec = coda_find_codec(ctx->dev, f->fmt.pix.pixelformat,
+ V4L2_PIX_FMT_YUV420);
+
+ if (!f->fmt.pix.colorspace)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ return coda_try_fmt(ctx, codec, f);
+}
+
+static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
+{
+ struct coda_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fourcc = f->fmt.pix.pixelformat;
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->sizeimage = f->fmt.pix.sizeimage;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fourcc);
+
+ return 0;
+}
+
+static int coda_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = coda_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return coda_s_fmt(ctx, f);
+}
+
+static int coda_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = coda_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = coda_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ ctx->colorspace = f->fmt.pix.colorspace;
+
+ return 0;
+}
+
+static int coda_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int coda_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int coda_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int coda_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
+static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *src_vq;
+
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
+ (buf->sequence == (ctx->qsequence - 1)));
+}
+
+static int coda_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+
+ /* If this is the last capture buffer, emit an end-of-stream event */
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ coda_buf_is_end_of_stream(ctx, buf)) {
+ const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+
+ return ret;
+}
+
+static int coda_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
+}
+
+static int coda_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int coda_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ /*
+ * This indirectly calls __vb2_queue_cancel, which dequeues all buffers.
+ * We therefore have to serialize it against the hardware run for this
+ * context, which may still be using those buffers.
+ */
+ mutex_lock(&ctx->buffer_mutex);
+ ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+ mutex_unlock(&ctx->buffer_mutex);
+
+ return ret;
+}
+
+static int coda_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int coda_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+ ret = coda_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore decoder stop command silently in encoder context */
+ if (ctx->inst_type != CODA_INST_DECODER)
+ return 0;
+
+ /* Set the stream-end flag on this context */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ return 0;
+}
+
+static int coda_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops coda_ioctl_ops = {
+ .vidioc_querycap = coda_querycap,
+
+ .vidioc_enum_fmt_vid_cap = coda_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = coda_g_fmt,
+ .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = coda_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = coda_g_fmt,
+ .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
+
+ .vidioc_reqbufs = coda_reqbufs,
+ .vidioc_querybuf = coda_querybuf,
+
+ .vidioc_qbuf = coda_qbuf,
+ .vidioc_expbuf = coda_expbuf,
+ .vidioc_dqbuf = coda_dqbuf,
+ .vidioc_create_bufs = coda_create_bufs,
+
+ .vidioc_streamon = coda_streamon,
+ .vidioc_streamoff = coda_streamoff,
+
+ .vidioc_try_decoder_cmd = coda_try_decoder_cmd,
+ .vidioc_decoder_cmd = coda_decoder_cmd,
+
+ .vidioc_subscribe_event = coda_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int coda_start_decoding(struct coda_ctx *ctx);
+
+static void coda_skip_run(struct work_struct *work)
+{
+ struct coda_ctx *ctx = container_of(work, struct coda_ctx, skip_run);
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static inline int coda_get_bitstream_payload(struct coda_ctx *ctx)
+{
+ return kfifo_len(&ctx->bitstream_fifo);
+}
+
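+/*
+ * The bitstream ring buffer is shared with the CODA: the kfifo indices
+ * track the CPU view while CODA_REG_BIT_RD_PTR/WR_PTR hold the device
+ * view. These helpers translate between the two, either pulling the
+ * device's read pointer back into the kfifo 'out' index or pushing the
+ * kfifo positions out to the device registers.
+ */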
+static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr;
+
+ rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ kfifo->out = (kfifo->in & ~kfifo->mask) |
+ (rd_ptr - ctx->bitstream.paddr);
+ if (kfifo->out > kfifo->in)
+ kfifo->out -= kfifo->mask + 1;
+}
+
+static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr, wr_ptr;
+
+ rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
+ coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
+static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr;
+
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
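+/*
+ * Copy the payload of a source buffer into the bitstream ring buffer and
+ * flush it out to the device. Returns -ENOSPC if the kfifo cannot take
+ * the whole payload.
+ */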
+static int coda_bitstream_queue(struct coda_ctx *ctx, struct vb2_buffer *src_buf)
+{
+ u32 src_size = vb2_get_plane_payload(src_buf, 0);
+ u32 n;
+
+ n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0), src_size);
+ if (n < src_size)
+ return -ENOSPC;
+
+ dma_sync_single_for_device(&ctx->dev->plat_dev->dev, ctx->bitstream.paddr,
+ ctx->bitstream.size, DMA_TO_DEVICE);
+
+ ctx->qsequence++;
+
+ return 0;
+}
+
+static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ int ret;
+
+ if (coda_get_bitstream_payload(ctx) +
+ vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size)
+ return false;
+
+ if (vb2_plane_vaddr(src_buf, 0) == NULL) {
+ v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
+ return true;
+ }
+
+ ret = coda_bitstream_queue(ctx, src_buf);
+ if (ret < 0) {
+ v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
+ return false;
+ }
+	/* Sync the write pointer to the device */
+ if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
+ coda_kfifo_sync_to_device_write(ctx);
+
+ ctx->prescan_failed = false;
+
+ return true;
+}
+
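+/*
+ * Move as many ready source buffers as currently fit into the bitstream
+ * ring buffer and mark them done, stopping at the first one that does
+ * not fit.
+ */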
+static void coda_fill_bitstream(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *src_buf;
+
+ while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+
+ if (coda_bitstream_try_queue(ctx, src_buf)) {
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ } else {
+ break;
+ }
+ }
+}
+
+/*
+ * Mem-to-mem operations.
+ */
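+/*
+ * Prepare a decoder PIC_RUN: top up the bitstream ring buffer, run the
+ * sequence initialization if it has not happened yet, and point the
+ * rotator output at the next capture buffer. Returns -EAGAIN and
+ * schedules a skipped run if there is not enough bitstream data.
+ */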
+static int coda_prepare_decode(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_dst;
+ u32 stridey, height;
+ u32 picture_y, picture_cb, picture_cr;
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ if (ctx->params.rot_mode & CODA_ROT_90) {
+ stridey = q_data_dst->height;
+ height = q_data_dst->width;
+ } else {
+ stridey = q_data_dst->width;
+ height = q_data_dst->height;
+ }
+
+ /* Try to copy source buffer contents into the bitstream ringbuffer */
+ mutex_lock(&ctx->bitstream_mutex);
+ coda_fill_bitstream(ctx);
+ mutex_unlock(&ctx->bitstream_mutex);
+
+ if (coda_get_bitstream_payload(ctx) < 512 &&
+ (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "bitstream payload: %d, skipping\n",
+ coda_get_bitstream_payload(ctx));
+ schedule_work(&ctx->skip_run);
+ return -EAGAIN;
+ }
+
+ /* Run coda_start_decoding (again) if not yet initialized */
+ if (!ctx->initialized) {
+ int ret = coda_start_decoding(ctx);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
+ schedule_work(&ctx->skip_run);
+ return -EAGAIN;
+ } else {
+ ctx->initialized = 1;
+ }
+ }
+
+ /* Set rotator output */
+ picture_y = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ if (q_data_dst->fourcc == V4L2_PIX_FMT_YVU420) {
+ /* Switch Cr and Cb for YVU420 format */
+ picture_cr = picture_y + stridey * height;
+ picture_cb = picture_cr + stridey / 2 * height / 2;
+ } else {
+ picture_cb = picture_y + stridey * height;
+ picture_cr = picture_cb + stridey / 2 * height / 2;
+ }
+ coda_write(dev, picture_y, CODA_CMD_DEC_PIC_ROT_ADDR_Y);
+ coda_write(dev, picture_cb, CODA_CMD_DEC_PIC_ROT_ADDR_CB);
+ coda_write(dev, picture_cr, CODA_CMD_DEC_PIC_ROT_ADDR_CR);
+ coda_write(dev, stridey, CODA_CMD_DEC_PIC_ROT_STRIDE);
+ coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
+ CODA_CMD_DEC_PIC_ROT_MODE);
+
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ /* TBD */
+ case CODA_7541:
+ coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
+ break;
+ }
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
+
+ return 0;
+}
+
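+/*
+ * Prepare an encoder PIC_RUN: tag the source frame as key frame or P
+ * frame according to its position in the GOP, reserve room for the
+ * stream headers in the first capture buffer, and program the source,
+ * destination and quantization parameters.
+ */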
+static void coda_prepare_encode(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ int force_ipicture;
+ int quant_param = 0;
+ u32 picture_y, picture_cb, picture_cr;
+ u32 pic_stream_buffer_addr, pic_stream_buffer_size;
+ u32 dst_fourcc;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_fourcc = q_data_dst->fourcc;
+
+ src_buf->v4l2_buf.sequence = ctx->osequence;
+ dst_buf->v4l2_buf.sequence = ctx->osequence;
+ ctx->osequence++;
+
+	/*
+	 * Work around a CODA firmware bug that only marks the first
+	 * frame as IDR. This is a problem for some decoders that can't
+	 * recover when a frame is lost.
+	 */
+ if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) {
+ src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ } else {
+ src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ }
+
+ /*
+ * Copy headers at the beginning of the first frame for H.264 only.
+ * In MPEG4 they are already copied by the coda.
+ */
+ if (src_buf->v4l2_buf.sequence == 0) {
+ pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1] +
+ ctx->vpu_header_size[2];
+ pic_stream_buffer_size = CODA_MAX_FRAME_SIZE -
+ ctx->vpu_header_size[0] -
+ ctx->vpu_header_size[1] -
+ ctx->vpu_header_size[2];
+ memcpy(vb2_plane_vaddr(dst_buf, 0),
+ &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
+ memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0],
+ &ctx->vpu_header[1][0], ctx->vpu_header_size[1]);
+ memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1], &ctx->vpu_header[2][0],
+ ctx->vpu_header_size[2]);
+ } else {
+ pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pic_stream_buffer_size = CODA_MAX_FRAME_SIZE;
+ }
+
+ if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+ force_ipicture = 1;
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ quant_param = ctx->params.h264_intra_qp;
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ quant_param = ctx->params.mpeg4_intra_qp;
+ break;
+ default:
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "cannot set intra qp, fmt not supported\n");
+ break;
+ }
+ } else {
+ force_ipicture = 0;
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ quant_param = ctx->params.h264_inter_qp;
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ quant_param = ctx->params.mpeg4_inter_qp;
+ break;
+ default:
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "cannot set inter qp, fmt not supported\n");
+ break;
+ }
+ }
+
+ /* submit */
+	coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
+		   CODA_CMD_ENC_PIC_ROT_MODE);
+	coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
+
+ picture_y = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ switch (q_data_src->fourcc) {
+ case V4L2_PIX_FMT_YVU420:
+ /* Switch Cb and Cr for YVU420 format */
+ picture_cr = picture_y + q_data_src->width * q_data_src->height;
+ picture_cb = picture_cr + q_data_src->width / 2 *
+ q_data_src->height / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ default:
+ picture_cb = picture_y + q_data_src->width * q_data_src->height;
+ picture_cr = picture_cb + q_data_src->width / 2 *
+ q_data_src->height / 2;
+ break;
+ }
+
+ coda_write(dev, picture_y, CODA_CMD_ENC_PIC_SRC_ADDR_Y);
+ coda_write(dev, picture_cb, CODA_CMD_ENC_PIC_SRC_ADDR_CB);
+ coda_write(dev, picture_cr, CODA_CMD_ENC_PIC_SRC_ADDR_CR);
+ coda_write(dev, force_ipicture << 1 & 0x2,
+ CODA_CMD_ENC_PIC_OPTION);
+
+ coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
+ coda_write(dev, pic_stream_buffer_size / 1024,
+ CODA_CMD_ENC_PIC_BB_SIZE);
+}
+
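+/*
+ * m2m device_run callback: under buffer_mutex and coda_mutex, prepare
+ * the next decode or encode run, arm the timeout work and kick off an
+ * asynchronous PIC_RUN command.
+ */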
+static void coda_device_run(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&ctx->buffer_mutex);
+
+ /*
+ * If streamoff dequeued all buffers before we could get the lock,
+ * just bail out immediately.
+ */
+ if ((!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) &&
+ ctx->inst_type != CODA_INST_DECODER) ||
+ !v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%d: device_run without buffers\n", ctx->idx);
+ mutex_unlock(&ctx->buffer_mutex);
+ schedule_work(&ctx->skip_run);
+ return;
+ }
+
+ mutex_lock(&dev->coda_mutex);
+
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ ret = coda_prepare_decode(ctx);
+ if (ret < 0) {
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+ /* job_finish scheduled by prepare_decode */
+ return;
+ }
+ } else {
+ coda_prepare_encode(ctx);
+ }
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, ctx->iram_info.axi_sram_use,
+ CODA7_REG_BIT_AXI_SRAM_USE);
+
+ /* 1 second timeout in case CODA locks up */
+ schedule_delayed_work(&dev->timeout, HZ);
+
+ if (ctx->inst_type == CODA_INST_DECODER)
+ coda_kfifo_sync_to_device_full(ctx);
+ coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
+}
+
+static int coda_job_ready(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+
+	/*
+	 * For both key frames and P frames, one source picture and one
+	 * destination frame are needed. In the decoder case, the
+	 * compressed source data may already be in the bitstream
+	 * ring buffer instead of a queued source buffer.
+	 */
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) &&
+ ctx->inst_type != CODA_INST_DECODER) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: not enough video buffers.\n");
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: not enough video capture buffers.\n");
+ return 0;
+ }
+
+ if (ctx->prescan_failed ||
+ ((ctx->inst_type == CODA_INST_DECODER) &&
+ (coda_get_bitstream_payload(ctx) < 512) &&
+ !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: not enough bitstream data.\n",
+ ctx->idx);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: aborting\n");
+ return 0;
+ }
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "job ready\n");
+ return 1;
+}
+
+static void coda_job_abort(void *priv)
+{
+ struct coda_ctx *ctx = priv;
+
+ ctx->aborting = 1;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Aborting task\n");
+}
+
+static void coda_lock(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *pcdev = ctx->dev;
+ mutex_lock(&pcdev->dev_mutex);
+}
+
+static void coda_unlock(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *pcdev = ctx->dev;
+ mutex_unlock(&pcdev->dev_mutex);
+}
+
+static struct v4l2_m2m_ops coda_m2m_ops = {
+ .device_run = coda_device_run,
+ .job_ready = coda_job_ready,
+ .job_abort = coda_job_abort,
+ .lock = coda_lock,
+ .unlock = coda_unlock,
+};
+
+static void set_default_params(struct coda_ctx *ctx)
+{
+ int max_w;
+ int max_h;
+
+ ctx->codec = &ctx->dev->devtype->codecs[0];
+ max_w = ctx->codec->max_w;
+ max_h = ctx->codec->max_h;
+
+ ctx->params.codec_mode = CODA_MODE_INVALID;
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->params.framerate = 30;
+ ctx->aborting = 0;
+
+	/* Default formats for the output and capture queues */
+ ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc;
+ ctx->q_data[V4L2_M2M_DST].fourcc = ctx->codec->dst_fourcc;
+ ctx->q_data[V4L2_M2M_SRC].width = max_w;
+ ctx->q_data[V4L2_M2M_SRC].height = max_h;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = (max_w * max_h * 3) / 2;
+ ctx->q_data[V4L2_M2M_DST].width = max_w;
+ ctx->q_data[V4L2_M2M_DST].height = max_h;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = CODA_MAX_FRAME_SIZE;
+}
+
+/*
+ * Queue operations
+ */
+static int coda_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vq);
+ struct coda_q_data *q_data;
+ unsigned int size;
+
+ q_data = get_q_data(ctx, vq->type);
+ size = q_data->sizeimage;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "get %d buffer(s) of size %d each.\n", *nbuffers, size);
+
+ return 0;
+}
+
+static int coda_buf_prepare(struct vb2_buffer *vb)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct coda_q_data *q_data;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void coda_buf_queue(struct vb2_buffer *vb)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct coda_q_data *q_data;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ /*
+ * In the decoder case, immediately try to copy the buffer into the
+ * bitstream ringbuffer and mark it as ready to be dequeued.
+ */
+ if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
+ vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /*
+ * For backwards compatibility, queuing an empty buffer marks
+ * the stream end
+ */
+ if (vb2_get_plane_payload(vb, 0) == 0)
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+ mutex_lock(&ctx->bitstream_mutex);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ coda_fill_bitstream(ctx);
+ mutex_unlock(&ctx->bitstream_mutex);
+ } else {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ }
+}
+
+static void coda_wait_prepare(struct vb2_queue *q)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ coda_unlock(ctx);
+}
+
+static void coda_wait_finish(struct vb2_queue *q)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ coda_lock(ctx);
+}
+
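+/*
+ * Write a 32-bit value into the parameter buffer. On anything newer
+ * than CodaDx6, adjacent words are swapped within each 64-bit pair.
+ */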
+static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
+{
+ struct coda_dev *dev = ctx->dev;
+ u32 *p = ctx->parabuf.vaddr;
+
+ if (dev->devtype->product == CODA_DX6)
+ p[index] = value;
+ else
+ p[index ^ 1] = value;
+}
+
+static int coda_alloc_aux_buf(struct coda_dev *dev,
+ struct coda_aux_buf *buf, size_t size)
+{
+ buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr,
+ GFP_KERNEL);
+ if (!buf->vaddr)
+ return -ENOMEM;
+
+ buf->size = size;
+
+ return 0;
+}
+
+static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
+ struct coda_aux_buf *buf, size_t size)
+{
+ return coda_alloc_aux_buf(ctx->dev, buf, size);
+}
+
+static void coda_free_aux_buf(struct coda_dev *dev,
+ struct coda_aux_buf *buf)
+{
+ if (buf->vaddr) {
+ dma_free_coherent(&dev->plat_dev->dev, buf->size,
+ buf->vaddr, buf->paddr);
+ buf->vaddr = NULL;
+ buf->size = 0;
+ }
+}
+
+static void coda_free_framebuffers(struct coda_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
+ coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]);
+}
+
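+/*
+ * Allocate the internal frame buffers and register their luma and chroma
+ * addresses, plus the co-located motion vector buffers on non-CodaDx6
+ * devices, in the parameter buffer.
+ */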
+static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_data, u32 fourcc)
+{
+ struct coda_dev *dev = ctx->dev;
+ int height = q_data->height;
+ dma_addr_t paddr;
+ int ysize;
+ int ret;
+ int i;
+
+ if (ctx->codec && ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
+ height = round_up(height, 16);
+ ysize = round_up(q_data->width, 8) * height;
+
+ /* Allocate frame buffers */
+ for (i = 0; i < ctx->num_internal_frames; i++) {
+ size_t size;
+
+ size = q_data->sizeimage;
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+ dev->devtype->product != CODA_DX6)
+			size += ysize / 4;
+ ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i], size);
+ if (ret < 0) {
+ coda_free_framebuffers(ctx);
+ return ret;
+ }
+ }
+
+ /* Register frame buffers in the parameter buffer */
+ for (i = 0; i < ctx->num_internal_frames; i++) {
+ paddr = ctx->internal_frames[i].paddr;
+ coda_parabuf_write(ctx, i * 3 + 0, paddr); /* Y */
+ coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize); /* Cb */
+ coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize/4); /* Cr */
+
+ /* mvcol buffer for h.264 */
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+ dev->devtype->product != CODA_DX6)
+ coda_parabuf_write(ctx, 96 + i,
+ ctx->internal_frames[i].paddr +
+ ysize + ysize/4 + ysize/4);
+ }
+
+ /* mvcol buffer for mpeg4 */
+ if ((dev->devtype->product != CODA_DX6) &&
+ (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4))
+ coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr +
+ ysize + ysize/4 + ysize/4);
+
+ return 0;
+}
+
+static int coda_h264_padding(int size, char *p)
+{
+ int nal_size;
+ int diff;
+
+ diff = size - (size & ~0x7);
+ if (diff == 0)
+ return 0;
+
+ nal_size = coda_filler_size[diff];
+ memcpy(p, coda_filler_nal, nal_size);
+
+	/* Add the RBSP stop bit and trailing bits at the end */
+ *(p + nal_size - 1) = 0x80;
+
+ return nal_size;
+}
+
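+/*
+ * Distribute the available on-chip IRAM between the ME search RAM,
+ * deblocking, bitstream and IP/AC-DC working buffers, in order of
+ * decreasing priority, and set the corresponding secondary AXI enable
+ * bits. Units that do not fit are simply left disabled.
+ */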
+static void coda_setup_iram(struct coda_ctx *ctx)
+{
+ struct coda_iram_info *iram_info = &ctx->iram_info;
+ struct coda_dev *dev = ctx->dev;
+ int ipacdc_size;
+ int bitram_size;
+ int dbk_size;
+ int ovl_size;
+ int mb_width;
+ int me_size;
+ int size;
+
+ memset(iram_info, 0, sizeof(*iram_info));
+ size = dev->iram_size;
+
+ if (dev->devtype->product == CODA_DX6)
+ return;
+
+ if (ctx->inst_type == CODA_INST_ENCODER) {
+ struct coda_q_data *q_data_src;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ mb_width = DIV_ROUND_UP(q_data_src->width, 16);
+
+ /* Prioritize in case IRAM is too small for everything */
+ me_size = round_up(round_up(q_data_src->width, 16) * 36 + 2048,
+ 1024);
+ iram_info->search_ram_size = me_size;
+ if (size >= iram_info->search_ram_size) {
+ if (dev->devtype->product == CODA_7541)
+ iram_info->axi_sram_use |= CODA7_USE_HOST_ME_ENABLE;
+ iram_info->search_ram_paddr = dev->iram_paddr;
+ size -= iram_info->search_ram_size;
+ } else {
+ pr_err("IRAM is smaller than the search ram size\n");
+ goto out;
+ }
+
+ /* Only H.264BP and H.263P3 are considered */
+ dbk_size = round_up(128 * mb_width, 1024);
+ if (size >= dbk_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_DBK_ENABLE;
+ iram_info->buf_dbk_y_use = dev->iram_paddr +
+ iram_info->search_ram_size;
+ iram_info->buf_dbk_c_use = iram_info->buf_dbk_y_use +
+ dbk_size / 2;
+ size -= dbk_size;
+ } else {
+ goto out;
+ }
+
+ bitram_size = round_up(128 * mb_width, 1024);
+ if (size >= bitram_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_BIT_ENABLE;
+ iram_info->buf_bit_use = iram_info->buf_dbk_c_use +
+ dbk_size / 2;
+ size -= bitram_size;
+ } else {
+ goto out;
+ }
+
+ ipacdc_size = round_up(128 * mb_width, 1024);
+ if (size >= ipacdc_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_IP_ENABLE;
+ iram_info->buf_ip_ac_dc_use = iram_info->buf_bit_use +
+ bitram_size;
+ size -= ipacdc_size;
+ }
+
+ /* OVL and BTP disabled for encoder */
+ } else if (ctx->inst_type == CODA_INST_DECODER) {
+ struct coda_q_data *q_data_dst;
+ int mb_height;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
+ mb_height = DIV_ROUND_UP(q_data_dst->height, 16);
+
+ dbk_size = round_up(256 * mb_width, 1024);
+ if (size >= dbk_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_DBK_ENABLE;
+ iram_info->buf_dbk_y_use = dev->iram_paddr;
+ iram_info->buf_dbk_c_use = dev->iram_paddr +
+ dbk_size / 2;
+ size -= dbk_size;
+ } else {
+ goto out;
+ }
+
+ bitram_size = round_up(128 * mb_width, 1024);
+ if (size >= bitram_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_BIT_ENABLE;
+ iram_info->buf_bit_use = iram_info->buf_dbk_c_use +
+ dbk_size / 2;
+ size -= bitram_size;
+ } else {
+ goto out;
+ }
+
+ ipacdc_size = round_up(128 * mb_width, 1024);
+ if (size >= ipacdc_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_IP_ENABLE;
+ iram_info->buf_ip_ac_dc_use = iram_info->buf_bit_use +
+ bitram_size;
+ size -= ipacdc_size;
+ } else {
+ goto out;
+ }
+
+ ovl_size = round_up(80 * mb_width, 1024);
+ }
+
+out:
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ break;
+ case CODA_7541:
+ /* i.MX53 uses secondary AXI for IRAM access */
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_BIT_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_BIT_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_IP_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_DBK_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_DBK_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_OVL_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_OVL_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_ME_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_ME_ENABLE;
+ }
+
+ if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "IRAM smaller than needed\n");
+
+ if (dev->devtype->product == CODA_7541) {
+ /* TODO - Enabling these causes picture errors on CODA7541 */
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ /* fw 1.4.50 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_IP_ENABLE);
+ } else {
+ /* fw 13.4.29 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_HOST_DBK_ENABLE |
+ CODA7_USE_IP_ENABLE |
+ CODA7_USE_DBK_ENABLE);
+ }
+ }
+}
+
+static void coda_free_context_buffers(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ coda_free_aux_buf(dev, &ctx->slicebuf);
+ coda_free_aux_buf(dev, &ctx->psbuf);
+ if (dev->devtype->product != CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+}
+
+static int coda_alloc_context_buffers(struct coda_ctx *ctx,
+ struct coda_q_data *q_data)
+{
+ struct coda_dev *dev = ctx->dev;
+ size_t size;
+ int ret;
+
+ switch (dev->devtype->product) {
+ case CODA_7541:
+ size = CODA7_WORK_BUF_SIZE;
+ break;
+ default:
+ return 0;
+ }
+
+ if (ctx->psbuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "psmembuf still allocated\n");
+ return -EBUSY;
+ }
+ if (ctx->slicebuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "slicebuf still allocated\n");
+ return -EBUSY;
+ }
+ if (ctx->workbuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "context buffer still allocated\n");
+		return -EBUSY;
+ }
+
+ if (q_data->fourcc == V4L2_PIX_FMT_H264) {
+ /* worst case slice size */
+ size = (DIV_ROUND_UP(q_data->width, 16) *
+ DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
+ ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte slice buffer",
+ ctx->slicebuf.size);
+ return ret;
+ }
+ }
+
+ if (dev->devtype->product == CODA_7541) {
+ ret = coda_alloc_context_buf(ctx, &ctx->psbuf, CODA7_PS_BUF_SIZE);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate psmem buffer");
+ goto err;
+ }
+ }
+
+ ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte context buffer",
+ ctx->workbuf.size);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ coda_free_context_buffers(ctx);
+ return ret;
+}
+
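+/*
+ * Run SEQ_INIT on the bitstream data already in the ring buffer, check
+ * the reported picture size against the configured capture format,
+ * allocate as many internal frame buffers as the firmware requests and
+ * register them with SET_FRAME_BUF.
+ */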
+static int coda_start_decoding(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ u32 bitstream_buf, bitstream_size;
+ struct coda_dev *dev = ctx->dev;
+ int width, height;
+ u32 src_fourcc;
+ u32 val;
+ int ret;
+
+ /* Start decoding */
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ bitstream_buf = ctx->bitstream.paddr;
+ bitstream_size = ctx->bitstream.size;
+ src_fourcc = q_data_src->fourcc;
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+
+ /* Update coda bitstream read and write pointers from kfifo */
+ coda_kfifo_sync_to_device_full(ctx);
+
+ ctx->display_idx = -1;
+ ctx->frm_dis_flg = 0;
+ coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+ coda_write(dev, CODA_BIT_DEC_SEQ_INIT_ESCAPE,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+
+ coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
+ coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
+ val = 0;
+ if (dev->devtype->product == CODA_7541)
+ val |= CODA_REORDER_ENABLE;
+ coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ ctx->params.codec_mode_aux = 0;
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (dev->devtype->product == CODA_7541) {
+ coda_write(dev, ctx->psbuf.paddr,
+ CODA_CMD_DEC_SEQ_PS_BB_START);
+ coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
+ CODA_CMD_DEC_SEQ_PS_BB_SIZE);
+ }
+ }
+
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) {
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+ return -ETIMEDOUT;
+ }
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
+
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+
+ if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
+ coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
+ return -EAGAIN;
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
+ if (dev->devtype->product == CODA_DX6) {
+ width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
+ height = val & CODADX6_PICHEIGHT_MASK;
+ } else {
+ width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
+ height = val & CODA7_PICHEIGHT_MASK;
+ }
+
+ if (width > q_data_dst->width || height > q_data_dst->height) {
+ v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
+ width, height, q_data_dst->width, q_data_dst->height);
+ return -EINVAL;
+ }
+
+ width = round_up(width, 16);
+ height = round_up(height, 16);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s instance %d now: %dx%d\n",
+ __func__, ctx->idx, width, height);
+
+ ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED) + 1;
+ if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough framebuffers to decode (%d < %d)\n",
+ CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
+ return -EINVAL;
+ }
+
+ ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
+ if (ret < 0)
+ return ret;
+
+ /* Tell the decoder how many frame buffers we allocated. */
+ coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, width, CODA_CMD_SET_FRAME_BUF_STRIDE);
+
+ if (dev->devtype->product != CODA_DX6) {
+ /* Set secondary AXI IRAM */
+ coda_setup_iram(ctx);
+
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ }
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ coda_write(dev, ctx->slicebuf.paddr,
+ CODA_CMD_SET_FRAME_SLICE_BB_START);
+ coda_write(dev, ctx->slicebuf.size / 1024,
+ CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
+ }
+
+ if (dev->devtype->product == CODA_7541) {
+ int max_mb_x = 1920 / 16;
+ int max_mb_y = 1088 / 16;
+ int max_mb_num = max_mb_x * max_mb_y;
+ coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
+ CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
+ }
+
+ if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
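+/*
+ * Have the firmware encode a single header of the given type into the
+ * capture buffer and copy the result into an intermediate header buffer
+ * in the context.
+ */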
+static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ int header_code, u8 *header, int *size)
+{
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0),
+ CODA_CMD_ENC_HEADER_BB_START);
+ coda_write(dev, vb2_plane_size(buf, 0), CODA_CMD_ENC_HEADER_BB_SIZE);
+ coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
+ ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
+ return ret;
+ }
+ *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
+ coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+ memcpy(header, vb2_plane_vaddr(buf, 0), *size);
+
+ return 0;
+}
+
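+/*
+ * Once both queues are streaming, pick decoder or encoder operation based
+ * on the output format, allocate the per-context buffers, and either
+ * start the decoder sequence or program the complete encoder sequence
+ * setup including the stream headers saved for the first frame.
+ */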
+static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
+ u32 bitstream_buf, bitstream_size;
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct vb2_buffer *buf;
+ u32 dst_fourcc;
+ u32 value;
+ int ret = 0;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (q_data_src->fourcc == V4L2_PIX_FMT_H264) {
+ if (coda_get_bitstream_payload(ctx) < 512)
+ return -EINVAL;
+ } else {
+ if (count < 1)
+ return -EINVAL;
+ }
+
+ ctx->streamon_out = 1;
+
+ if (coda_format_is_yuv(q_data_src->fourcc))
+ ctx->inst_type = CODA_INST_ENCODER;
+ else
+ ctx->inst_type = CODA_INST_DECODER;
+ } else {
+ if (count < 1)
+ return -EINVAL;
+
+ ctx->streamon_cap = 1;
+ }
+
+ /* Don't start the coda unless both queues are on */
+ if (!(ctx->streamon_out & ctx->streamon_cap))
+ return 0;
+
+ /* Allow decoder device_run with no new buffers queued */
+ if (ctx->inst_type == CODA_INST_DECODER)
+ v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
+
+ ctx->gopcounter = ctx->params.gop_size - 1;
+ buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ bitstream_size = q_data_dst->sizeimage;
+ dst_fourcc = q_data_dst->fourcc;
+
+ ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ q_data_dst->fourcc);
+ if (!ctx->codec) {
+ v4l2_err(v4l2_dev, "couldn't tell instance type.\n");
+ return -EINVAL;
+ }
+
+ /* Allocate per-instance buffers */
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ mutex_lock(&dev->coda_mutex);
+ ret = coda_start_decoding(ctx);
+ mutex_unlock(&dev->coda_mutex);
+ if (ret == -EAGAIN) {
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ ctx->initialized = 1;
+ return 0;
+ }
+ }
+
+ if (!coda_is_initialized(dev)) {
+ v4l2_err(v4l2_dev, "coda is not initialized.\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&dev->coda_mutex);
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
+ CODADX6_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+ break;
+ default:
+ coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
+ CODA7_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+ }
+
+ if (dev->devtype->product == CODA_DX6) {
+ /* Configure the coda */
+ coda_write(dev, dev->iram_paddr, CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR);
+ }
+
+ /* Could set rotation here if needed */
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ value = (q_data_src->width & CODADX6_PICWIDTH_MASK) << CODADX6_PICWIDTH_OFFSET;
+ value |= (q_data_src->height & CODADX6_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
+ break;
+ default:
+ value = (q_data_src->width & CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
+ value |= (q_data_src->height & CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
+ coda_write(dev, ctx->params.framerate,
+ CODA_CMD_ENC_SEQ_SRC_F_RATE);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_MPEG4:
+ coda_write(dev, CODA_STD_MPEG4, CODA_CMD_ENC_SEQ_COD_STD);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_MP4_PARA);
+ break;
+ case V4L2_PIX_FMT_H264:
+ coda_write(dev, CODA_STD_H264, CODA_CMD_ENC_SEQ_COD_STD);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_264_PARA);
+ break;
+ default:
+ v4l2_err(v4l2_dev,
+ "dst format (0x%08x) invalid.\n", dst_fourcc);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (ctx->params.slice_mode) {
+ case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
+ value = 0;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
+ value = (ctx->params.slice_max_mb & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET;
+ value |= (1 & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
+ value = (ctx->params.slice_max_bits & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET;
+ value |= (0 & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
+ value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
+
+ if (ctx->params.bitrate) {
+ /* Rate control enabled */
+ value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK) << CODA_RATECONTROL_BITRATE_OFFSET;
+ value |= 1 & CODA_RATECONTROL_ENABLE_MASK;
+ } else {
+ value = 0;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
+
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_INTRA_REFRESH);
+
+ coda_write(dev, bitstream_buf, CODA_CMD_ENC_SEQ_BB_START);
+ coda_write(dev, bitstream_size / 1024, CODA_CMD_ENC_SEQ_BB_SIZE);
+
+ /* set default gamma */
+ value = (CODA_DEFAULT_GAMMA & CODA_GAMMA_MASK) << CODA_GAMMA_OFFSET;
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_GAMMA);
+
+ if (CODA_DEFAULT_GAMMA > 0) {
+ if (dev->devtype->product == CODA_DX6)
+ value = 1 << CODADX6_OPTION_GAMMA_OFFSET;
+ else
+ value = 1 << CODA7_OPTION_GAMMA_OFFSET;
+ } else {
+ value = 0;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
+
+ coda_setup_iram(ctx);
+
+ if (dst_fourcc == V4L2_PIX_FMT_H264) {
+ if (dev->devtype->product == CODA_DX6) {
+ value = FMO_SLICE_SAVE_BUF_SIZE << 7;
+ coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
+ } else {
+ coda_write(dev, ctx->iram_info.search_ram_paddr,
+ CODA7_CMD_ENC_SEQ_SEARCH_BASE);
+ coda_write(dev, ctx->iram_info.search_ram_size,
+ CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
+ }
+ }
+
+ ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ goto out;
+ }
+
+ if (coda_read(dev, CODA_RET_ENC_SEQ_SUCCESS) == 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT failed\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ctx->num_internal_frames = 2;
+ ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
+ goto out;
+ }
+
+ coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, round_up(q_data_src->width, 8), CODA_CMD_SET_FRAME_BUF_STRIDE);
+ if (dev->devtype->product == CODA_7541)
+ coda_write(dev, round_up(q_data_src->width, 8),
+ CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
+ if (dev->devtype->product != CODA_DX6) {
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ }
+ ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+ goto out;
+ }
+
+ /* Save stream headers */
+ buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ /*
+ * Get SPS in the first frame and copy it to an
+ * intermediate buffer.
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_SPS,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0]);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Get PPS in the first frame and copy it to an
+ * intermediate buffer.
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_PPS,
+ &ctx->vpu_header[1][0],
+ &ctx->vpu_header_size[1]);
+ if (ret < 0)
+ goto out;
+
+		/*
+		 * The length of the H.264 headers is variable, so the end
+		 * might not be aligned for the CODA to append the encoded
+		 * frame. If that is the case, a filler NAL must be added to
+		 * header 2.
+		 */
+ ctx->vpu_header_size[2] = coda_h264_padding(
+ (ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1]),
+ ctx->vpu_header[2]);
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ /*
+ * Get VOS in the first frame and copy it to an
+ * intermediate buffer
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOS,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0]);
+ if (ret < 0)
+ goto out;
+
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VIS,
+ &ctx->vpu_header[1][0],
+ &ctx->vpu_header_size[1]);
+ if (ret < 0)
+ goto out;
+
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOL,
+ &ctx->vpu_header[2][0],
+ &ctx->vpu_header_size[2]);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ /* No more formats need to save headers at the moment */
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->coda_mutex);
+ return ret;
+}
+
+static void coda_stop_streaming(struct vb2_queue *q)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ struct coda_dev *dev = ctx->dev;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: output\n", __func__);
+ ctx->streamon_out = 0;
+
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ ctx->isequence = 0;
+ } else {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: capture\n", __func__);
+ ctx->streamon_cap = 0;
+
+ ctx->osequence = 0;
+ }
+
+ if (!ctx->streamon_out && !ctx->streamon_cap) {
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ ctx->runcounter = 0;
+ }
+}
+
+static struct vb2_ops coda_qops = {
+ .queue_setup = coda_queue_setup,
+ .buf_prepare = coda_buf_prepare,
+ .buf_queue = coda_buf_queue,
+ .wait_prepare = coda_wait_prepare,
+ .wait_finish = coda_wait_finish,
+ .start_streaming = coda_start_streaming,
+ .stop_streaming = coda_stop_streaming,
+};
+
+static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct coda_ctx *ctx =
+ container_of(ctrl->handler, struct coda_ctx, ctrls);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ ctx->params.rot_mode |= CODA_MIR_HOR;
+ else
+ ctx->params.rot_mode &= ~CODA_MIR_HOR;
+ break;
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ ctx->params.rot_mode |= CODA_MIR_VER;
+ else
+ ctx->params.rot_mode &= ~CODA_MIR_VER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctx->params.bitrate = ctrl->val / 1000;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctx->params.gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ ctx->params.h264_intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ ctx->params.h264_inter_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+ ctx->params.mpeg4_intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+ ctx->params.mpeg4_inter_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ ctx->params.slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ ctx->params.slice_max_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ ctx->params.slice_max_bits = ctrl->val * 8;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ break;
+ default:
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Invalid control, id=%d, val=%d\n",
+ ctrl->id, ctrl->val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct v4l2_ctrl_ops coda_ctrl_ops = {
+ .s_ctrl = coda_s_ctrl,
+};
+
+static int coda_ctrls_setup(struct coda_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrls, 9);
+
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 25);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 25);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, 1, 0x3fffffff, 1, 500);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ (1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE),
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+
+ if (ctx->ctrls.error) {
+ v4l2_err(&ctx->dev->v4l2_dev, "control initialization error (%d)",
+ ctx->ctrls.error);
+ return -EINVAL;
+ }
+
+ return v4l2_ctrl_handler_setup(&ctx->ctrls);
+}
+
+static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct coda_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &coda_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &coda_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int coda_next_free_instance(struct coda_dev *dev)
+{
+ int idx = ffz(dev->instance_mask);
+
+ if ((idx < 0) ||
+ (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES))
+ return -EBUSY;
+
+ return idx;
+}
+
+static int coda_open(struct file *file)
+{
+ struct coda_dev *dev = video_drvdata(file);
+ struct coda_ctx *ctx = NULL;
+ int ret;
+ int idx;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ idx = coda_next_free_instance(dev);
+ if (idx < 0) {
+ ret = idx;
+ goto err_coda_max;
+ }
+ set_bit(idx, &dev->instance_mask);
+
+ INIT_WORK(&ctx->skip_run, coda_skip_run);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->dev = dev;
+ ctx->idx = idx;
+ switch (dev->devtype->product) {
+ case CODA_7541:
+ ctx->reg_idx = 0;
+ break;
+ default:
+ ctx->reg_idx = idx;
+ }
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ set_default_params(ctx);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &coda_queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+
+		v4l2_err(&dev->v4l2_dev, "%s returned error (%d)\n",
+ __func__, ret);
+ goto err_ctx_init;
+ }
+ ret = coda_ctrls_setup(ctx);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n");
+ goto err_ctrls_setup;
+ }
+
+ ctx->fh.ctrl_handler = &ctx->ctrls;
+
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf, CODA_PARA_BUF_SIZE);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
+ goto err_dma_alloc;
+ }
+
+ ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
+ ctx->bitstream.vaddr = dma_alloc_writecombine(&dev->plat_dev->dev,
+ ctx->bitstream.size, &ctx->bitstream.paddr, GFP_KERNEL);
+ if (!ctx->bitstream.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate bitstream ringbuffer");
+ ret = -ENOMEM;
+ goto err_dma_writecombine;
+ }
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ mutex_init(&ctx->bitstream_mutex);
+ mutex_init(&ctx->buffer_mutex);
+
+ coda_lock(ctx);
+ list_add(&ctx->list, &dev->instances);
+ coda_unlock(ctx);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
+ ctx->idx, ctx);
+
+ return 0;
+
+err_dma_writecombine:
+ coda_free_context_buffers(ctx);
+ if (ctx->dev->devtype->product == CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+ coda_free_aux_buf(dev, &ctx->parabuf);
+err_dma_alloc:
+ v4l2_ctrl_handler_free(&ctx->ctrls);
+err_ctrls_setup:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_ctx_init:
+ clk_disable_unprepare(dev->clk_ahb);
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ clear_bit(ctx->idx, &dev->instance_mask);
+err_coda_max:
+ kfree(ctx);
+ return ret;
+}
+
+static int coda_release(struct file *file)
+{
+ struct coda_dev *dev = video_drvdata(file);
+ struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
+ ctx);
+
+ /* If this instance is running, call .job_abort and wait for it to end */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+
+ /* In case the instance was not running, we still need to call SEQ_END */
+ mutex_lock(&dev->coda_mutex);
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: sent command 'SEQ_END' to coda\n", __func__);
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_END failed\n");
+ mutex_unlock(&dev->coda_mutex);
+ return -ETIMEDOUT;
+ }
+ mutex_unlock(&dev->coda_mutex);
+
+ coda_free_framebuffers(ctx);
+
+ coda_lock(ctx);
+ list_del(&ctx->list);
+ coda_unlock(ctx);
+
+ dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ coda_free_context_buffers(ctx);
+ if (ctx->dev->devtype->product == CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+
+ coda_free_aux_buf(dev, &ctx->parabuf);
+ v4l2_ctrl_handler_free(&ctx->ctrls);
+ clk_disable_unprepare(dev->clk_ahb);
+ clk_disable_unprepare(dev->clk_per);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ clear_bit(ctx->idx, &dev->instance_mask);
+ kfree(ctx);
+
+ return 0;
+}
+
+static unsigned int coda_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+ int ret;
+
+ coda_lock(ctx);
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ coda_unlock(ctx);
+ return ret;
+}
+
+static int coda_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations coda_fops = {
+ .owner = THIS_MODULE,
+ .open = coda_open,
+ .release = coda_release,
+ .poll = coda_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = coda_mmap,
+};
+
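+/*
+ * Called from the interrupt handler after a decoder PIC_RUN: sync the
+ * ring buffer read pointer, evaluate the decode result registers, clear
+ * the display flag of the previously returned frame and hand the capture
+ * buffer filled by the rotator back to userspace, if there is one.
+ */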
+static void coda_finish_decode(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src;
+ struct coda_q_data *q_data_dst;
+ struct vb2_buffer *dst_buf;
+ int width, height;
+ int decoded_idx;
+ int display_idx;
+ u32 src_fourcc;
+ int success;
+ u32 val;
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
+
+ /*
+ * in stream-end mode, the read pointer can overshoot the write pointer
+ * by up to 512 bytes
+ */
+ if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
+ if (coda_get_bitstream_payload(ctx) >= 0x100000 - 512)
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ }
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_fourcc = q_data_src->fourcc;
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
+ if (val != 1)
+ pr_err("DEC_PIC_SUCCESS = %d\n", val);
+
+ success = val & 0x1;
+ if (!success)
+ v4l2_err(&dev->v4l2_dev, "decode failed\n");
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (val & (1 << 3))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient PS buffer space (%d bytes)\n",
+ ctx->psbuf.size);
+ if (val & (1 << 2))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient slice buffer space (%d bytes)\n",
+ ctx->slicebuf.size);
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
+ width = (val >> 16) & 0xffff;
+ height = val & 0xffff;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_TYPE);
+ if ((val & 0x7) == 0) {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ } else {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
+ if (val > 0)
+ v4l2_err(&dev->v4l2_dev,
+ "errors in %d macroblocks\n", val);
+
+ if (dev->devtype->product == CODA_7541) {
+ val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
+ if (val == 0) {
+ /* not enough bitstream data */
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "prescan failed: %d\n", val);
+ ctx->prescan_failed = true;
+ return;
+ }
+ }
+
+ ctx->frm_dis_flg = coda_read(dev, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+ /*
+ * The previous display frame was copied out by the rotator,
+ * now it can be overwritten again
+ */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ }
+
+ /*
+ * The index of the last decoded frame, not necessarily in
+ * display order, and the index of the next display frame.
+ * The latter could have been decoded in a previous run.
+ */
+ decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
+ display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
+
+ if (decoded_idx == -1) {
+ /* no frame was decoded, but we might have a display frame */
+ if (display_idx < 0 && ctx->display_idx < 0)
+ ctx->prescan_failed = true;
+ } else if (decoded_idx == -2) {
+ /* no frame was decoded, we still return the remaining buffers */
+ } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "decoded frame index out of range: %d\n", decoded_idx);
+ }
+
+ if (display_idx == -1) {
+ /*
+ * no more frames to be decoded, but there could still
+ * be rotator output to dequeue
+ */
+ ctx->prescan_failed = true;
+ } else if (display_idx == -3) {
+ /* possibly prescan failure */
+ } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "presentation frame index out of range: %d\n",
+ display_idx);
+ }
+
+ /* If a frame was copied out, return it */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ dst_buf->v4l2_buf.sequence = ctx->osequence++;
+
+ vb2_set_plane_payload(dst_buf, 0, width * height * 3 / 2);
+
+ v4l2_m2m_buf_done(dst_buf, success ? VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: decoding frame (%d) (%s)\n",
+ dst_buf->v4l2_buf.sequence,
+ (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ "KEYFRAME" : "PFRAME");
+ } else {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: no frame decoded\n");
+ }
+
+ /* The rotator will copy the current display frame next time */
+ ctx->display_idx = display_idx;
+}
+
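+/*
+ * Called from the interrupt handler after an encoder PIC_RUN: calculate
+ * the payload from the bitstream write pointer, set the key/P frame flags
+ * from the reported picture type, copy timestamp and timecode from the
+ * source buffer and mark both buffers done.
+ */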
+static void coda_finish_encode(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr, start_ptr;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ /* Get results from the coda */
+ start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
+ wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+
+ /* Calculate bytesused field */
+ if (dst_buf->v4l2_buf.sequence == 0) {
+ vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr +
+ ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1] +
+ ctx->vpu_header_size[2]);
+ } else {
+ vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr);
+ }
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
+ wr_ptr - start_ptr);
+
+ coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
+ coda_read(dev, CODA_RET_ENC_PIC_FLAG);
+
+ if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ } else {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->v4l2_buf.flags |=
+ src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ ctx->gopcounter--;
+ if (ctx->gopcounter < 0)
+ ctx->gopcounter = ctx->params.gop_size - 1;
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: encoding frame (%d) (%s)\n",
+ dst_buf->v4l2_buf.sequence,
+ (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ "KEYFRAME" : "PFRAME");
+}
+
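+/*
+ * PIC_RUN completion interrupt: acknowledge the interrupt, finish the
+ * current decode or encode run and, if the context is aborting or no
+ * longer streaming, send SEQ_END and free its buffers before finishing
+ * the m2m job.
+ */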
+static irqreturn_t coda_irq_handler(int irq, void *data)
+{
+ struct coda_dev *dev = data;
+ struct coda_ctx *ctx;
+
+ cancel_delayed_work(&dev->timeout);
+
+	/* Read the status register to acknowledge the IRQ */
+ coda_read(dev, CODA_REG_BIT_INT_STATUS);
+ coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
+ CODA_REG_BIT_INT_CLEAR);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (ctx == NULL) {
+ v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n");
+ mutex_unlock(&dev->coda_mutex);
+ return IRQ_HANDLED;
+ }
+
+ if (ctx->aborting) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "task has been aborted\n");
+ goto out;
+ }
+
+ if (coda_isbusy(ctx->dev)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "coda is still busy!!!!\n");
+ return IRQ_NONE;
+ }
+
+ if (ctx->inst_type == CODA_INST_DECODER)
+ coda_finish_decode(ctx);
+ else
+ coda_finish_encode(ctx);
+
+out:
+ if (ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: sent command 'SEQ_END' to coda\n", __func__);
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_END failed\n");
+ }
+
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+
+ coda_free_framebuffers(ctx);
+ coda_free_context_buffers(ctx);
+ }
+
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
+
+ return IRQ_HANDLED;
+}
+
+static void coda_timeout(struct work_struct *work)
+{
+ struct coda_ctx *ctx;
+ struct coda_dev *dev = container_of(to_delayed_work(work),
+ struct coda_dev, timeout);
+
+ dev_err(&dev->plat_dev->dev, "CODA PIC_RUN timeout, stopping all streams\n");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->instances, list) {
+ if (mutex_is_locked(&ctx->buffer_mutex))
+ mutex_unlock(&ctx->buffer_mutex);
+ v4l2_m2m_streamoff(NULL, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ v4l2_m2m_streamoff(NULL, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ }
+ mutex_unlock(&dev->dev_mutex);
+
+ mutex_unlock(&dev->coda_mutex);
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static u32 coda_supported_firmwares[] = {
+ CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
+ CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
+};
+
+static bool coda_firmware_supported(u32 vernum)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coda_supported_firmwares); i++)
+ if (vernum == coda_supported_firmwares[i])
+ return true;
+ return false;
+}
+
+static int coda_hw_init(struct coda_dev *dev)
+{
+ u16 product, major, minor, release;
+ u32 data;
+ u16 *p;
+ int i, ret;
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ /*
+	 * Copy the first CODA_ISRAM_SIZE bytes of the firmware into the
+	 * internal SRAM. The 16-bit halfwords in the code buffer are in
+	 * memory access order; re-sort them into CODA order for the
+	 * register download. Data in this SRAM survives a reboot.
+ */
+ p = (u16 *)dev->codebuf.vaddr;
+ if (dev->devtype->product == CODA_DX6) {
+ for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
+ data = CODA_DOWN_ADDRESS_SET(i) |
+ CODA_DOWN_DATA_SET(p[i ^ 1]);
+ coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
+ }
+ } else {
+ for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
+ data = CODA_DOWN_ADDRESS_SET(i) |
+ CODA_DOWN_DATA_SET(p[round_down(i, 4) +
+ 3 - (i % 4)]);
+ coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
+ }
+ }
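+	/*
+	 * For example, the CodaDx6 "i ^ 1" index reads p[1], p[0], p[3],
+	 * p[2], ..., i.e. it swaps adjacent halfwords, while the CODA7
+	 * index round_down(i, 4) + 3 - (i % 4) reads p[3], p[2], p[1],
+	 * p[0], p[7], ..., i.e. it reverses each group of four halfwords.
+	 */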
+
+ /* Clear registers */
+ for (i = 0; i < 64; i++)
+ coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4);
+
+ /* Tell the BIT where to find everything it needs */
+ if (dev->devtype->product == CODA_7541) {
+ coda_write(dev, dev->tempbuf.paddr,
+ CODA_REG_BIT_TEMP_BUF_ADDR);
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+ } else {
+ coda_write(dev, dev->workbuf.paddr,
+ CODA_REG_BIT_WORK_BUF_ADDR);
+ }
+ coda_write(dev, dev->codebuf.paddr,
+ CODA_REG_BIT_CODE_BUF_ADDR);
+ coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
+
+ /* Set default values */
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ coda_write(dev, CODADX6_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL);
+ break;
+ default:
+ coda_write(dev, CODA7_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL);
+ }
+ coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL);
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, 0, CODA7_REG_BIT_AXI_SRAM_USE);
+
+ coda_write(dev, CODA_INT_INTERRUPT_ENABLE,
+ CODA_REG_BIT_INT_ENABLE);
+
+ /* Reset VPU and start processor */
+ data = coda_read(dev, CODA_REG_BIT_CODE_RESET);
+ data |= CODA_REG_RESET_ENABLE;
+ coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+ udelay(10);
+ data &= ~CODA_REG_RESET_ENABLE;
+ coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+ coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
+
+ /* Load firmware */
+ coda_write(dev, 0, CODA_CMD_FIRMWARE_VERNUM);
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+ coda_write(dev, 0, CODA_REG_BIT_RUN_INDEX);
+ coda_write(dev, 0, CODA_REG_BIT_RUN_COD_STD);
+ coda_write(dev, CODA_COMMAND_FIRMWARE_GET, CODA_REG_BIT_RUN_COMMAND);
+ if (coda_wait_timeout(dev)) {
+ clk_disable_unprepare(dev->clk_per);
+ clk_disable_unprepare(dev->clk_ahb);
+ v4l2_err(&dev->v4l2_dev, "firmware get command error\n");
+ return -EIO;
+ }
+
+ /* Check we are compatible with the loaded firmware */
+ data = coda_read(dev, CODA_CMD_FIRMWARE_VERNUM);
+ product = CODA_FIRMWARE_PRODUCT(data);
+ major = CODA_FIRMWARE_MAJOR(data);
+ minor = CODA_FIRMWARE_MINOR(data);
+ release = CODA_FIRMWARE_RELEASE(data);
+
+ clk_disable_unprepare(dev->clk_per);
+ clk_disable_unprepare(dev->clk_ahb);
+
+ if (product != dev->devtype->product) {
+ v4l2_err(&dev->v4l2_dev, "Wrong firmware. Hw: %s, Fw: %s,"
+ " Version: %u.%u.%u\n",
+ coda_product_name(dev->devtype->product),
+ coda_product_name(product), major, minor, release);
+ return -EINVAL;
+ }
+
+ v4l2_info(&dev->v4l2_dev, "Initialized %s.\n",
+ coda_product_name(product));
+
+ if (coda_firmware_supported(data)) {
+ v4l2_info(&dev->v4l2_dev, "Firmware version: %u.%u.%u\n",
+ major, minor, release);
+ } else {
+ v4l2_warn(&dev->v4l2_dev, "Unsupported firmware version: "
+ "%u.%u.%u\n", major, minor, release);
+ }
+
+ return 0;
+
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+ return ret;
+}
+
+static void coda_fw_callback(const struct firmware *fw, void *context)
+{
+ struct coda_dev *dev = context;
+ struct platform_device *pdev = dev->plat_dev;
+ int ret;
+
+ if (!fw) {
+ v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
+ return;
+ }
+
+ /* allocate auxiliary per-device code buffer for the BIT processor */
+ ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate code buffer\n");
+ return;
+ }
+
+ /* Copy the whole firmware image to the code buffer */
+ memcpy(dev->codebuf.vaddr, fw->data, fw->size);
+ release_firmware(fw);
+
+ ret = coda_hw_init(dev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "HW initialization failed\n");
+ return;
+ }
+
+	dev->vfd.fops = &coda_fops;
+	dev->vfd.ioctl_ops = &coda_ioctl_ops;
+	dev->vfd.release = video_device_release_empty;
+ dev->vfd.lock = &dev->dev_mutex;
+ dev->vfd.v4l2_dev = &dev->v4l2_dev;
+ dev->vfd.vfl_dir = VFL_DIR_M2M;
+ snprintf(dev->vfd.name, sizeof(dev->vfd.name), "%s", CODA_NAME);
+ video_set_drvdata(&dev->vfd, dev);
+
+ dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to alloc vb2 context\n");
+ return;
+ }
+
+ dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ goto rel_ctx;
+ }
+
+ ret = video_register_device(&dev->vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto rel_m2m;
+ }
+ v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video%d\n",
+ dev->vfd.num);
+
+ return;
+
+rel_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+rel_ctx:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+}
+
+static int coda_firmware_request(struct coda_dev *dev)
+{
+ char *fw = dev->devtype->firmware;
+
+ dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
+ coda_product_name(dev->devtype->product));
+
+ return request_firmware_nowait(THIS_MODULE, true,
+ fw, &dev->plat_dev->dev, GFP_KERNEL, dev, coda_fw_callback);
+}
+
+enum coda_platform {
+ CODA_IMX27,
+ CODA_IMX53,
+};
+
+static const struct coda_devtype coda_devdata[] = {
+ [CODA_IMX27] = {
+ .firmware = "v4l-codadx6-imx27.bin",
+ .product = CODA_DX6,
+ .codecs = codadx6_codecs,
+ .num_codecs = ARRAY_SIZE(codadx6_codecs),
+ },
+ [CODA_IMX53] = {
+ .firmware = "v4l-coda7541-imx53.bin",
+ .product = CODA_7541,
+ .codecs = coda7_codecs,
+ .num_codecs = ARRAY_SIZE(coda7_codecs),
+ },
+};
+
+static struct platform_device_id coda_platform_ids[] = {
+ { .name = "coda-imx27", .driver_data = CODA_IMX27 },
+ { .name = "coda-imx53", .driver_data = CODA_IMX53 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, coda_platform_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id coda_dt_ids[] = {
+ { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
+ { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, coda_dt_ids);
+#endif
+
+static int coda_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev);
+ const struct platform_device_id *pdev_id;
+ struct coda_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
+ struct gen_pool *pool;
+ struct coda_dev *dev;
+ struct resource *res;
+ int ret, irq;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof *dev, GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "Not enough memory for %s\n",
+ CODA_NAME);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&dev->irqlock);
+ INIT_LIST_HEAD(&dev->instances);
+ INIT_DELAYED_WORK(&dev->timeout, coda_timeout);
+
+ dev->plat_dev = pdev;
+ dev->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(dev->clk_per)) {
+ dev_err(&pdev->dev, "Could not get per clock\n");
+ return PTR_ERR(dev->clk_per);
+ }
+
+ dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(dev->clk_ahb)) {
+ dev_err(&pdev->dev, "Could not get ahb clock\n");
+ return PTR_ERR(dev->clk_ahb);
+ }
+
+ /* Get memory for physical registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs_base))
+ return PTR_ERR(dev->regs_base);
+
+ /* IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return -ENOENT;
+ }
+
+ if (devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler,
+ IRQF_ONESHOT, dev_name(&pdev->dev), dev) < 0) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return -ENOENT;
+ }
+
+ /* Get IRAM pool from device tree or platform data */
+ pool = of_get_named_gen_pool(np, "iram", 0);
+ if (!pool && pdata)
+ pool = dev_get_gen_pool(pdata->iram_dev);
+ if (!pool) {
+ dev_err(&pdev->dev, "iram pool not available\n");
+ return -ENOMEM;
+ }
+ dev->iram_pool = pool;
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->coda_mutex);
+
+ pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+
+ if (of_id) {
+ dev->devtype = of_id->data;
+ } else if (pdev_id) {
+ dev->devtype = &coda_devdata[pdev_id->driver_data];
+ } else {
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return -EINVAL;
+ }
+
+ /* allocate auxiliary per-device buffers for the BIT processor */
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ ret = coda_alloc_aux_buf(dev, &dev->workbuf,
+ CODADX6_WORK_BUF_SIZE);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate work buffer\n");
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+ }
+ break;
+ case CODA_7541:
+ dev->tempbuf.size = CODA7_TEMP_BUF_SIZE;
+ break;
+ }
+ if (dev->tempbuf.size) {
+ ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
+ dev->tempbuf.size);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate temp buffer\n");
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+ }
+ }
+
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ dev->iram_size = CODADX6_IRAM_SIZE;
+ break;
+ case CODA_7541:
+ dev->iram_size = CODA7_IRAM_SIZE;
+ break;
+ }
+ dev->iram_vaddr = (unsigned long)gen_pool_dma_alloc(dev->iram_pool,
+ dev->iram_size, (dma_addr_t *)&dev->iram_paddr);
+ if (!dev->iram_vaddr) {
+ dev_err(&pdev->dev, "unable to alloc iram\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ return coda_firmware_request(dev);
+}
+
+static int coda_remove(struct platform_device *pdev)
+{
+ struct coda_dev *dev = platform_get_drvdata(pdev);
+
+ video_unregister_device(&dev->vfd);
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ if (dev->alloc_ctx)
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ if (dev->iram_vaddr)
+ gen_pool_free(dev->iram_pool, dev->iram_vaddr, dev->iram_size);
+ coda_free_aux_buf(dev, &dev->codebuf);
+ coda_free_aux_buf(dev, &dev->tempbuf);
+ coda_free_aux_buf(dev, &dev->workbuf);
+ return 0;
+}
+
+static struct platform_driver coda_driver = {
+ .probe = coda_probe,
+ .remove = coda_remove,
+ .driver = {
+ .name = CODA_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(coda_dt_ids),
+ },
+ .id_table = coda_platform_ids,
+};
+
+module_platform_driver(coda_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_DESCRIPTION("Coda multi-standard codec V4L2 driver");
diff --git a/drivers/media/platform/coda.h b/drivers/media/platform/coda.h
new file mode 100644
index 00000000000..4e32e2edea6
--- /dev/null
+++ b/drivers/media/platform/coda.h
@@ -0,0 +1,338 @@
+/*
+ * linux/drivers/media/platform/coda.h
+ *
+ * Copyright (C) 2012 Vista Silicon SL
+ * Javier Martin <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _REGS_CODA_H_
+#define _REGS_CODA_H_
+
+/* HW registers */
+#define CODA_REG_BIT_CODE_RUN 0x000
+#define CODA_REG_RUN_ENABLE (1 << 0)
+#define CODA_REG_BIT_CODE_DOWN 0x004
+#define CODA_DOWN_ADDRESS_SET(x) (((x) & 0xffff) << 16)
+#define CODA_DOWN_DATA_SET(x) ((x) & 0xffff)
+#define CODA_REG_BIT_HOST_IN_REQ 0x008
+#define CODA_REG_BIT_INT_CLEAR 0x00c
+#define CODA_REG_BIT_INT_CLEAR_SET 0x1
+#define CODA_REG_BIT_INT_STATUS 0x010
+#define CODA_REG_BIT_CODE_RESET 0x014
+#define CODA_REG_RESET_ENABLE (1 << 0)
+#define CODA_REG_BIT_CUR_PC 0x018
+
+/* Static SW registers */
+#define CODA_REG_BIT_CODE_BUF_ADDR 0x100
+#define CODA_REG_BIT_WORK_BUF_ADDR 0x104
+#define CODA_REG_BIT_PARA_BUF_ADDR 0x108
+#define CODA_REG_BIT_STREAM_CTRL 0x10c
+#define CODA7_STREAM_BUF_PIC_RESET (1 << 4)
+#define CODADX6_STREAM_BUF_PIC_RESET (1 << 3)
+#define CODA7_STREAM_BUF_PIC_FLUSH (1 << 3)
+#define CODADX6_STREAM_BUF_PIC_FLUSH (1 << 2)
+#define CODA7_STREAM_BUF_DYNALLOC_EN (1 << 5)
+#define CODADX6_STREAM_BUF_DYNALLOC_EN (1 << 4)
+#define CODA_STREAM_CHKDIS_OFFSET (1 << 1)
+#define CODA_STREAM_ENDIAN_SELECT (1 << 0)
+#define CODA_REG_BIT_FRAME_MEM_CTRL 0x110
+#define CODA_IMAGE_ENDIAN_SELECT (1 << 0)
+#define CODA_REG_BIT_BIT_STREAM_PARAM 0x114
+#define CODA_BIT_STREAM_END_FLAG (1 << 2)
+#define CODA_BIT_DEC_SEQ_INIT_ESCAPE (1 << 0)
+#define CODA_REG_BIT_TEMP_BUF_ADDR 0x118
+#define CODA_REG_BIT_RD_PTR(x) (0x120 + 8 * (x))
+#define CODA_REG_BIT_WR_PTR(x) (0x124 + 8 * (x))
+#define CODA_REG_BIT_FRM_DIS_FLG(x) (0x150 + 4 * (x))
+#define CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR 0x140
+#define CODA7_REG_BIT_AXI_SRAM_USE 0x140
+#define CODA7_USE_HOST_ME_ENABLE (1 << 11)
+#define CODA7_USE_HOST_OVL_ENABLE (1 << 10)
+#define CODA7_USE_HOST_DBK_ENABLE (1 << 9)
+#define CODA7_USE_HOST_IP_ENABLE (1 << 8)
+#define CODA7_USE_HOST_BIT_ENABLE (1 << 7)
+#define CODA7_USE_ME_ENABLE (1 << 4)
+#define CODA7_USE_OVL_ENABLE (1 << 3)
+#define CODA7_USE_DBK_ENABLE (1 << 2)
+#define CODA7_USE_IP_ENABLE (1 << 1)
+#define CODA7_USE_BIT_ENABLE (1 << 0)
+
+#define CODA_REG_BIT_BUSY 0x160
+#define CODA_REG_BIT_BUSY_FLAG 1
+#define CODA_REG_BIT_RUN_COMMAND 0x164
+#define CODA_COMMAND_SEQ_INIT 1
+#define CODA_COMMAND_SEQ_END 2
+#define CODA_COMMAND_PIC_RUN 3
+#define CODA_COMMAND_SET_FRAME_BUF 4
+#define CODA_COMMAND_ENCODE_HEADER 5
+#define CODA_COMMAND_ENC_PARA_SET 6
+#define CODA_COMMAND_DEC_PARA_SET 7
+#define CODA_COMMAND_DEC_BUF_FLUSH 8
+#define CODA_COMMAND_RC_CHANGE_PARAMETER 9
+#define CODA_COMMAND_FIRMWARE_GET 0xf
+#define CODA_REG_BIT_RUN_INDEX 0x168
+#define CODA_INDEX_SET(x) ((x) & 0x3)
+#define CODA_REG_BIT_RUN_COD_STD 0x16c
+#define CODADX6_MODE_DECODE_MP4 0
+#define CODADX6_MODE_ENCODE_MP4 1
+#define CODADX6_MODE_DECODE_H264 2
+#define CODADX6_MODE_ENCODE_H264 3
+#define CODA7_MODE_DECODE_H264 0
+#define CODA7_MODE_DECODE_VC1 1
+#define CODA7_MODE_DECODE_MP2 2
+#define CODA7_MODE_DECODE_MP4 3
+#define CODA7_MODE_DECODE_DV3 3
+#define CODA7_MODE_DECODE_RV 4
+#define CODA7_MODE_DECODE_MJPG 5
+#define CODA7_MODE_ENCODE_H264 8
+#define CODA7_MODE_ENCODE_MP4 11
+#define CODA7_MODE_ENCODE_MJPG 13
+#define CODA_MODE_INVALID 0xffff
+#define CODA_REG_BIT_INT_ENABLE 0x170
+#define CODA_INT_INTERRUPT_ENABLE (1 << 3)
+#define CODA_REG_BIT_INT_REASON 0x174
+#define CODA7_REG_BIT_RUN_AUX_STD 0x178
+#define CODA_MP4_AUX_MPEG4 0
+#define CODA_MP4_AUX_DIVX3 1
+#define CODA_VPX_AUX_THO 0
+#define CODA_VPX_AUX_VP6 1
+#define CODA_VPX_AUX_VP8 2
+#define CODA_H264_AUX_AVC 0
+#define CODA_H264_AUX_MVC 1
+
+/*
+ * Commands' mailbox:
+ * registers with offsets in the range 0x180-0x1d0
+ * have different meaning depending on the command being
+ * issued.
+ */
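+
+/*
+ * For instance, offset 0x180 is CODA_CMD_DEC_SEQ_BB_START during a
+ * decoder SEQ_INIT, CODA_CMD_ENC_SEQ_BB_START during an encoder
+ * SEQ_INIT and CODA_CMD_ENC_PIC_SRC_ADDR_Y during an encoder PIC_RUN,
+ * as defined in the sections below.
+ */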
+
+/* Decoder Sequence Initialization */
+#define CODA_CMD_DEC_SEQ_BB_START 0x180
+#define CODA_CMD_DEC_SEQ_BB_SIZE 0x184
+#define CODA_CMD_DEC_SEQ_OPTION 0x188
+#define CODA_REORDER_ENABLE (1 << 1)
+#define CODADX6_QP_REPORT (1 << 0)
+#define CODA7_MP4_DEBLK_ENABLE (1 << 0)
+#define CODA_CMD_DEC_SEQ_SRC_SIZE 0x18c
+#define CODA_CMD_DEC_SEQ_START_BYTE 0x190
+#define CODA_CMD_DEC_SEQ_PS_BB_START 0x194
+#define CODA_CMD_DEC_SEQ_PS_BB_SIZE 0x198
+#define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS 0x19c
+#define CODA_CMD_DEC_SEQ_X264_MV_EN 0x19c
+#define CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE 0x1a0
+
+#define CODA7_RET_DEC_SEQ_ASPECT 0x1b0
+#define CODA_RET_DEC_SEQ_SUCCESS 0x1c0
+#define CODA_RET_DEC_SEQ_SRC_FMT 0x1c4 /* SRC_SIZE on CODA7 */
+#define CODA_RET_DEC_SEQ_SRC_SIZE 0x1c4
+#define CODA_RET_DEC_SEQ_SRC_F_RATE 0x1c8
+#define CODA9_RET_DEC_SEQ_ASPECT 0x1c8
+#define CODA_RET_DEC_SEQ_FRAME_NEED 0x1cc
+#define CODA_RET_DEC_SEQ_FRAME_DELAY 0x1d0
+#define CODA_RET_DEC_SEQ_INFO 0x1d4
+#define CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT 0x1d8
+#define CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM 0x1dc
+#define CODA_RET_DEC_SEQ_NEXT_FRAME_NUM 0x1e0
+#define CODA_RET_DEC_SEQ_ERR_REASON 0x1e0
+#define CODA_RET_DEC_SEQ_FRATE_NR 0x1e4
+#define CODA_RET_DEC_SEQ_FRATE_DR 0x1e8
+#define CODA_RET_DEC_SEQ_JPG_PARA 0x1e4
+#define CODA_RET_DEC_SEQ_JPG_THUMB_IND 0x1e8
+
+/* Decoder Picture Run */
+#define CODA_CMD_DEC_PIC_ROT_MODE 0x180
+#define CODA_CMD_DEC_PIC_ROT_ADDR_Y 0x184
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CB 0x188
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CR 0x18c
+#define CODA_CMD_DEC_PIC_ROT_STRIDE 0x190
+
+#define CODA_CMD_DEC_PIC_OPTION 0x194
+#define CODA_PRE_SCAN_EN (1 << 0)
+#define CODA_PRE_SCAN_MODE_DECODE (0 << 1)
+#define CODA_PRE_SCAN_MODE_RETURN (1 << 1)
+#define CODA_IFRAME_SEARCH_EN (1 << 2)
+#define CODA_SKIP_FRAME_MODE (0x3 << 3)
+#define CODA_CMD_DEC_PIC_SKIP_NUM 0x198
+#define CODA_CMD_DEC_PIC_CHUNK_SIZE 0x19c
+#define CODA_CMD_DEC_PIC_BB_START 0x1a0
+#define CODA_CMD_DEC_PIC_START_BYTE 0x1a4
+#define CODA_RET_DEC_PIC_SIZE 0x1bc
+#define CODA_RET_DEC_PIC_FRAME_NUM 0x1c0
+#define CODA_RET_DEC_PIC_FRAME_IDX 0x1c4
+#define CODA_RET_DEC_PIC_ERR_MB 0x1c8
+#define CODA_RET_DEC_PIC_TYPE 0x1cc
+#define CODA_PIC_TYPE_MASK 0x7
+#define CODA_PIC_TYPE_MASK_VC1 0x3f
+#define CODA9_PIC_TYPE_FIRST_MASK (0x7 << 3)
+#define CODA9_PIC_TYPE_IDR_MASK (0x3 << 6)
+#define CODA7_PIC_TYPE_H264_NPF_MASK (0x3 << 16)
+#define CODA7_PIC_TYPE_INTERLACED (1 << 18)
+#define CODA_RET_DEC_PIC_POST 0x1d0
+#define CODA_RET_DEC_PIC_MVC_REPORT 0x1d0
+#define CODA_RET_DEC_PIC_OPTION 0x1d4
+#define CODA_RET_DEC_PIC_SUCCESS 0x1d8
+#define CODA_RET_DEC_PIC_CUR_IDX 0x1dc
+#define CODA_RET_DEC_PIC_CROP_LEFT_RIGHT 0x1e0
+#define CODA_RET_DEC_PIC_CROP_TOP_BOTTOM 0x1e4
+#define CODA_RET_DEC_PIC_FRAME_NEED 0x1ec
+
+/* Encoder Sequence Initialization */
+#define CODA_CMD_ENC_SEQ_BB_START 0x180
+#define CODA_CMD_ENC_SEQ_BB_SIZE 0x184
+#define CODA_CMD_ENC_SEQ_OPTION 0x188
+#define CODA7_OPTION_AVCINTRA16X16ONLY_OFFSET 9
+#define CODA7_OPTION_GAMMA_OFFSET 8
+#define CODA7_OPTION_RCQPMAX_OFFSET 7
+#define CODADX6_OPTION_GAMMA_OFFSET 7
+#define CODA7_OPTION_RCQPMIN_OFFSET 6
+#define CODA_OPTION_LIMITQP_OFFSET 6
+#define CODA_OPTION_RCINTRAQP_OFFSET 5
+#define CODA_OPTION_FMO_OFFSET 4
+#define CODA_OPTION_AVC_AUD_OFFSET 2
+#define CODA_OPTION_SLICEREPORT_OFFSET 1
+#define CODA_CMD_ENC_SEQ_COD_STD 0x18c
+#define CODA_STD_MPEG4 0
+#define CODA_STD_H263 1
+#define CODA_STD_H264 2
+#define CODA_STD_MJPG 3
+#define CODA_CMD_ENC_SEQ_SRC_SIZE 0x190
+#define CODA7_PICWIDTH_OFFSET 16
+#define CODA7_PICWIDTH_MASK 0xffff
+#define CODADX6_PICWIDTH_OFFSET 10
+#define CODADX6_PICWIDTH_MASK 0x3ff
+#define CODA_PICHEIGHT_OFFSET 0
+#define CODADX6_PICHEIGHT_MASK 0x3ff
+#define CODA7_PICHEIGHT_MASK 0xffff
+#define CODA_CMD_ENC_SEQ_SRC_F_RATE 0x194
+#define CODA_CMD_ENC_SEQ_MP4_PARA 0x198
+#define CODA_MP4PARAM_VERID_OFFSET 6
+#define CODA_MP4PARAM_VERID_MASK 0x01
+#define CODA_MP4PARAM_INTRADCVLCTHR_OFFSET 2
+#define CODA_MP4PARAM_INTRADCVLCTHR_MASK 0x07
+#define CODA_MP4PARAM_REVERSIBLEVLCENABLE_OFFSET 1
+#define CODA_MP4PARAM_REVERSIBLEVLCENABLE_MASK 0x01
+#define CODA_MP4PARAM_DATAPARTITIONENABLE_OFFSET 0
+#define CODA_MP4PARAM_DATAPARTITIONENABLE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_263_PARA 0x19c
+#define CODA_263PARAM_ANNEXJENABLE_OFFSET 2
+#define CODA_263PARAM_ANNEXJENABLE_MASK 0x01
+#define CODA_263PARAM_ANNEXKENABLE_OFFSET 1
+#define CODA_263PARAM_ANNEXKENABLE_MASK 0x01
+#define CODA_263PARAM_ANNEXTENABLE_OFFSET 0
+#define CODA_263PARAM_ANNEXTENABLE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_264_PARA 0x1a0
+#define CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET 12
+#define CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK 0x0f
+#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET 8
+#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK 0x0f
+#define CODA_264PARAM_DISABLEDEBLK_OFFSET 6
+#define CODA_264PARAM_DISABLEDEBLK_MASK 0x01
+#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET 5
+#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK 0x01
+#define CODA_264PARAM_CHROMAQPOFFSET_OFFSET 0
+#define CODA_264PARAM_CHROMAQPOFFSET_MASK 0x1f
+#define CODA_CMD_ENC_SEQ_SLICE_MODE 0x1a4
+#define CODA_SLICING_SIZE_OFFSET 2
+#define CODA_SLICING_SIZE_MASK 0x3fffffff
+#define CODA_SLICING_UNIT_OFFSET 1
+#define CODA_SLICING_UNIT_MASK 0x01
+#define CODA_SLICING_MODE_OFFSET 0
+#define CODA_SLICING_MODE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_GOP_SIZE 0x1a8
+#define CODA_GOP_SIZE_OFFSET 0
+#define CODA_GOP_SIZE_MASK 0x3f
+#define CODA_CMD_ENC_SEQ_RC_PARA 0x1ac
+#define CODA_RATECONTROL_AUTOSKIP_OFFSET 31
+#define CODA_RATECONTROL_AUTOSKIP_MASK 0x01
+#define CODA_RATECONTROL_INITIALDELAY_OFFSET 16
+#define CODA_RATECONTROL_INITIALDELAY_MASK 0x7f
+#define CODA_RATECONTROL_BITRATE_OFFSET 1
+#define CODA_RATECONTROL_BITRATE_MASK 0x7f
+#define CODA_RATECONTROL_ENABLE_OFFSET 0
+#define CODA_RATECONTROL_ENABLE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_RC_BUF_SIZE 0x1b0
+#define CODA_CMD_ENC_SEQ_INTRA_REFRESH 0x1b4
+#define CODADX6_CMD_ENC_SEQ_FMO 0x1b8
+#define CODA_FMOPARAM_TYPE_OFFSET 4
+#define CODA_FMOPARAM_TYPE_MASK 1
+#define CODA_FMOPARAM_SLICENUM_OFFSET 0
+#define CODA_FMOPARAM_SLICENUM_MASK 0x0f
+#define CODADX6_CMD_ENC_SEQ_INTRA_QP 0x1bc
+#define CODA7_CMD_ENC_SEQ_SEARCH_BASE 0x1b8
+#define CODA7_CMD_ENC_SEQ_SEARCH_SIZE 0x1bc
+#define CODA7_CMD_ENC_SEQ_INTRA_QP 0x1c4
+#define CODA_CMD_ENC_SEQ_RC_QP_MAX 0x1c8
+#define CODA_QPMAX_OFFSET 0
+#define CODA_QPMAX_MASK 0x3f
+#define CODA_CMD_ENC_SEQ_RC_GAMMA 0x1cc
+#define CODA_GAMMA_OFFSET 0
+#define CODA_GAMMA_MASK 0xffff
+#define CODA_RET_ENC_SEQ_SUCCESS 0x1c0
+
+/* Encoder Picture Run */
+#define CODA_CMD_ENC_PIC_SRC_ADDR_Y 0x180
+#define CODA_CMD_ENC_PIC_SRC_ADDR_CB 0x184
+#define CODA_CMD_ENC_PIC_SRC_ADDR_CR 0x188
+#define CODA_CMD_ENC_PIC_QS 0x18c
+#define CODA_CMD_ENC_PIC_ROT_MODE 0x190
+#define CODA_ROT_MIR_ENABLE (1 << 4)
+#define CODA_ROT_0 (0x0 << 0)
+#define CODA_ROT_90 (0x1 << 0)
+#define CODA_ROT_180 (0x2 << 0)
+#define CODA_ROT_270 (0x3 << 0)
+#define CODA_MIR_NONE (0x0 << 2)
+#define CODA_MIR_VER (0x1 << 2)
+#define CODA_MIR_HOR (0x2 << 2)
+#define CODA_MIR_VER_HOR (0x3 << 2)
+#define CODA_CMD_ENC_PIC_OPTION 0x194
+#define CODA_CMD_ENC_PIC_BB_START 0x198
+#define CODA_CMD_ENC_PIC_BB_SIZE 0x19c
+#define CODA_RET_ENC_FRAME_NUM 0x1c0
+#define CODA_RET_ENC_PIC_TYPE 0x1c4
+#define CODA_RET_ENC_PIC_FRAME_IDX 0x1c8
+#define CODA_RET_ENC_PIC_SLICE_NUM 0x1cc
+#define CODA_RET_ENC_PIC_FLAG 0x1d0
+#define CODA_RET_ENC_PIC_SUCCESS 0x1d8
+
+/* Set Frame Buffer */
+#define CODA_CMD_SET_FRAME_BUF_NUM 0x180
+#define CODA_CMD_SET_FRAME_BUF_STRIDE 0x184
+#define CODA_CMD_SET_FRAME_SLICE_BB_START 0x188
+#define CODA_CMD_SET_FRAME_SLICE_BB_SIZE 0x18c
+#define CODA7_CMD_SET_FRAME_AXI_BIT_ADDR 0x190
+#define CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR 0x194
+#define CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR 0x198
+#define CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR 0x19c
+#define CODA7_CMD_SET_FRAME_AXI_OVL_ADDR 0x1a0
+#define CODA7_CMD_SET_FRAME_MAX_DEC_SIZE 0x1a4
+#define CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE 0x1a8
+
+/* Encoder Header */
+#define CODA_CMD_ENC_HEADER_CODE 0x180
+#define CODA_GAMMA_OFFSET 0
+#define CODA_HEADER_H264_SPS 0
+#define CODA_HEADER_H264_PPS 1
+#define CODA_HEADER_MP4V_VOL 0
+#define CODA_HEADER_MP4V_VOS 1
+#define CODA_HEADER_MP4V_VIS 2
+#define CODA_CMD_ENC_HEADER_BB_START 0x184
+#define CODA_CMD_ENC_HEADER_BB_SIZE 0x188
+
+/* Get Version */
+#define CODA_CMD_FIRMWARE_VERNUM 0x1c0
+#define CODA_FIRMWARE_PRODUCT(x) (((x) >> 16) & 0xffff)
+#define CODA_FIRMWARE_MAJOR(x) (((x) >> 12) & 0x0f)
+#define CODA_FIRMWARE_MINOR(x) (((x) >> 8) & 0x0f)
+#define CODA_FIRMWARE_RELEASE(x) ((x) & 0xff)
+#define CODA_FIRMWARE_VERNUM(product, major, minor, release) \
+ ((product) << 16 | ((major) << 12) | \
+ ((minor) << 8) | (release))
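+
+/*
+ * Example of the packing above: the product ID sits in bits [31:16],
+ * the major number in [15:12], the minor number in [11:8] and the
+ * release number in [7:0], so CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50)
+ * is (CODA_7541 << 16) | (1 << 12) | (4 << 8) | 50, and the
+ * CODA_FIRMWARE_PRODUCT/MAJOR/MINOR/RELEASE accessors undo exactly
+ * this packing.
+ */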
+
+#endif
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
new file mode 100644
index 00000000000..afb3aec1320
--- /dev/null
+++ b/drivers/media/platform/davinci/Kconfig
@@ -0,0 +1,78 @@
+config VIDEO_DAVINCI_VPIF_DISPLAY
+ tristate "TI DaVinci VPIF V4L2-Display driver"
+ depends on VIDEO_DEV && ARCH_DAVINCI
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_ADV7343 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_THS7303 if MEDIA_SUBDRV_AUTOSELECT
+ help
+	  Enables the DaVinci VPIF module used for display devices.
+ This module is used for display on TI DM6467/DA850/OMAPL138
+ SoCs.
+
+ To compile this driver as a module, choose M here. There will
+ be two modules called vpif.ko and vpif_display.ko
+
+config VIDEO_DAVINCI_VPIF_CAPTURE
+ tristate "TI DaVinci VPIF video capture driver"
+ depends on VIDEO_DEV && ARCH_DAVINCI
+ select VIDEOBUF2_DMA_CONTIG
+ help
+	  Enables the DaVinci VPIF module used for capture devices.
+ This module is used for capture on TI DM6467/DA850/OMAPL138
+ SoCs.
+
+ To compile this driver as a module, choose M here. There will
+ be two modules called vpif.ko and vpif_capture.ko
+
+config VIDEO_DM6446_CCDC
+ tristate "TI DM6446 CCDC video capture driver"
+ depends on VIDEO_V4L2 && (ARCH_DAVINCI || ARCH_OMAP3)
+ select VIDEOBUF_DMA_CONTIG
+ help
+	  Enables the DaVinci CCD hw module. The DaVinci CCDC hw interfaces
+	  with decoder modules such as TVP5146 over BT656 or with a
+	  sensor module such as MT9T001 over a raw interface. This
+	  module configures the interface and CCDC/ISIF to do
+	  video frame capture from slave decoders.
+
+ To compile this driver as a module, choose M here. There will
+ be three modules called vpfe_capture.ko, vpss.ko and dm644x_ccdc.ko
+
+config VIDEO_DM355_CCDC
+ tristate "TI DM355 CCDC video capture driver"
+ depends on VIDEO_V4L2 && ARCH_DAVINCI
+ select VIDEOBUF_DMA_CONTIG
+ help
+	  Enables the DM355 CCD hw module. The DM355 CCDC hw interfaces
+	  with decoder modules such as TVP5146 over BT656 or with a
+	  sensor module such as MT9T001 over a raw interface. This
+	  module configures the interface and CCDC/ISIF to do
+	  video frame capture from slave decoders.
+
+ To compile this driver as a module, choose M here. There will
+ be three modules called vpfe_capture.ko, vpss.ko and dm355_ccdc.ko
+
+config VIDEO_DM365_ISIF
+ tristate "TI DM365 ISIF video capture driver"
+ depends on VIDEO_V4L2 && ARCH_DAVINCI
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables ISIF hw module. This is the hardware module for
+ configuring ISIF in VPFE to capture Raw Bayer RGB data from
+	  an image sensor or YUV data from a YUV source.
+
+ To compile this driver as a module, choose M here. There will
+ be three modules called vpfe_capture.ko, vpss.ko and isif.ko
+
+config VIDEO_DAVINCI_VPBE_DISPLAY
+ tristate "TI DaVinci VPBE V4L2-Display driver"
+ depends on ARCH_DAVINCI
+ select VIDEOBUF2_DMA_CONTIG
+ help
+	  Enables the DaVinci VPBE module used for display devices.
+ This module is used for display on TI DM644x/DM365/DM355
+ based display devices.
+
+ To compile this driver as a module, choose M here. There will
+	  be five modules called vpss.ko, vpbe.ko, vpbe_osd.ko,
+ vpbe_venc.ko and vpbe_display.ko
diff --git a/drivers/media/platform/davinci/Makefile b/drivers/media/platform/davinci/Makefile
new file mode 100644
index 00000000000..d74d9eeb0e9
--- /dev/null
+++ b/drivers/media/platform/davinci/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the davinci video device drivers.
+#
+
+#VPIF Display driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif.o vpif_display.o
+#VPIF Capture driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif.o vpif_capture.o
+
+# Capture: DM6446 and DM355
+obj-$(CONFIG_VIDEO_DM6446_CCDC) += vpfe_capture.o vpss.o dm644x_ccdc.o
+obj-$(CONFIG_VIDEO_DM355_CCDC) += vpfe_capture.o vpss.o dm355_ccdc.o
+obj-$(CONFIG_VIDEO_DM365_ISIF) += vpfe_capture.o vpss.o isif.o
+obj-$(CONFIG_VIDEO_DAVINCI_VPBE_DISPLAY) += vpss.o vpbe.o vpbe_osd.o \
+ vpbe_venc.o vpbe_display.o
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
new file mode 100644
index 00000000000..86b9b351896
--- /dev/null
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ccdc device API
+ */
+#ifndef _CCDC_HW_DEVICE_H
+#define _CCDC_HW_DEVICE_H
+
+#ifdef __KERNEL__
+#include <linux/videodev2.h>
+#include <linux/device.h>
+#include <media/davinci/vpfe_types.h>
+#include <media/davinci/ccdc_types.h>
+
+/*
+ * ccdc hw operations
+ */
+struct ccdc_hw_ops {
+ /* Pointer to initialize function to initialize ccdc device */
+ int (*open) (struct device *dev);
+ /* Pointer to deinitialize function */
+ int (*close) (struct device *dev);
+ /* set ccdc base address */
+ void (*set_ccdc_base)(void *base, int size);
+ /* Pointer to function to enable or disable ccdc */
+ void (*enable) (int en);
+ /* reset sbl. only for 6446 */
+ void (*reset) (void);
+ /* enable output to sdram */
+ void (*enable_out_to_sdram) (int en);
+ /* Pointer to function to set hw parameters */
+ int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
+ /* get interface parameters */
+ int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
+ /*
+ * Pointer to function to set parameters. Used
+ * for implementing VPFE_S_CCDC_PARAMS
+ */
+ int (*set_params) (void *params);
+ /*
+ * Pointer to function to get parameter. Used
+ * for implementing VPFE_G_CCDC_PARAMS
+ */
+ int (*get_params) (void *params);
+ /* Pointer to function to configure ccdc */
+ int (*configure) (void);
+
+ /* Pointer to function to set buffer type */
+ int (*set_buftype) (enum ccdc_buftype buf_type);
+ /* Pointer to function to get buffer type */
+ enum ccdc_buftype (*get_buftype) (void);
+ /* Pointer to function to set frame format */
+ int (*set_frame_format) (enum ccdc_frmfmt frm_fmt);
+ /* Pointer to function to get frame format */
+ enum ccdc_frmfmt (*get_frame_format) (void);
+ /* enumerate hw pix formats */
+ int (*enum_pix)(u32 *hw_pix, int i);
+ /* Pointer to function to set buffer type */
+ u32 (*get_pixel_format) (void);
+ /* Pointer to function to get pixel format. */
+ int (*set_pixel_format) (u32 pixfmt);
+ /* Pointer to function to set image window */
+ int (*set_image_window) (struct v4l2_rect *win);
+ /* Pointer to function to set image window */
+ void (*get_image_window) (struct v4l2_rect *win);
+ /* Pointer to function to get line length */
+ unsigned int (*get_line_length) (void);
+
+ /* Query CCDC control IDs */
+ int (*queryctrl)(struct v4l2_queryctrl *qctrl);
+ /* Set CCDC control */
+ int (*set_control)(struct v4l2_control *ctrl);
+ /* Get CCDC control */
+ int (*get_control)(struct v4l2_control *ctrl);
+
+ /* Pointer to function to set frame buffer address */
+ void (*setfbaddr) (unsigned long addr);
+ /* Pointer to function to get field id */
+ int (*getfid) (void);
+};
+
+struct ccdc_hw_device {
+ /* ccdc device name */
+ char name[32];
+ /* module owner */
+ struct module *owner;
+ /* hw ops */
+ struct ccdc_hw_ops hw_ops;
+};
+
+/* Used by CCDC module to register & unregister with vpfe capture driver */
+int vpfe_register_ccdc_device(struct ccdc_hw_device *dev);
+void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev);
+
+#endif
+#endif
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
new file mode 100644
index 00000000000..05f8fb7f7b7
--- /dev/null
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (C) 2005-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * CCDC hardware module for DM355
+ * ------------------------------
+ *
+ * This module configures the DM355 CCD controller of the VPFE to capture
+ * raw YUV or Bayer RGB data from a decoder. The CCDC has several modules,
+ * such as Defect Pixel Correction and Color Space Conversion, to
+ * pre-process the Bayer RGB data before writing it to SDRAM. This
+ * module also allows applications to configure individual
+ * module parameters through the VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL.
+ * To do so, applications include the dm355_ccdc.h and vpfe_capture.h
+ * header files. The setparams() API is called by the vpfe_capture driver
+ * to configure module parameters.
+ *
+ * TODO: 1) Raw Bayer parameter settings and Bayer capture
+ * 2) Split the module parameter structure into module-specific ioctl structs
+ * 3) Add support for lens shading correction
+ * 4) Investigate whether the enums used for user space type definitions
+ * should be replaced by #defines or integers
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <media/davinci/dm355_ccdc.h>
+#include <media/davinci/vpss.h>
+
+#include "dm355_ccdc_regs.h"
+#include "ccdc_hw_device.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCDC Driver for DM355");
+MODULE_AUTHOR("Texas Instruments");
+
+static struct ccdc_oper_config {
+ struct device *dev;
+ /* CCDC interface type */
+ enum vpfe_hw_if_type if_type;
+ /* Raw Bayer configuration */
+ struct ccdc_params_raw bayer;
+ /* YCbCr configuration */
+ struct ccdc_params_ycbcr ycbcr;
+ /* ccdc base address */
+ void __iomem *base_addr;
+} ccdc_cfg = {
+ /* Raw configurations */
+ .bayer = {
+ .pix_fmt = CCDC_PIXFMT_RAW,
+ .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+ .win = CCDC_WIN_VGA,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .gain = {
+ .r_ye = 256,
+ .gb_g = 256,
+ .gr_cy = 256,
+ .b_mg = 256
+ },
+ .config_params = {
+ .datasft = 2,
+ .mfilt1 = CCDC_NO_MEDIAN_FILTER1,
+ .mfilt2 = CCDC_NO_MEDIAN_FILTER2,
+ .alaw = {
+ .gamma_wd = 2,
+ },
+ .blk_clamp = {
+ .sample_pixel = 1,
+ .dc_sub = 25
+ },
+ .col_pat_field0 = {
+ .olop = CCDC_GREEN_BLUE,
+ .olep = CCDC_BLUE,
+ .elop = CCDC_RED,
+ .elep = CCDC_GREEN_RED
+ },
+ .col_pat_field1 = {
+ .olop = CCDC_GREEN_BLUE,
+ .olep = CCDC_BLUE,
+ .elop = CCDC_RED,
+ .elep = CCDC_GREEN_RED
+ },
+ },
+ },
+ /* YCbCr configuration */
+ .ycbcr = {
+ .win = CCDC_WIN_PAL,
+ .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+ .frm_fmt = CCDC_FRMFMT_INTERLACED,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .bt656_enable = 1,
+ .pix_order = CCDC_PIXORDER_CBYCRY,
+ .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
+ },
+};
+
+
+/* Raw Bayer formats */
+static u32 ccdc_raw_bayer_pix_formats[] =
+ {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static u32 ccdc_raw_yuv_pix_formats[] =
+ {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+ return __raw_readl(ccdc_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+ __raw_writel(val, ccdc_cfg.base_addr + offset);
+}
+
+static void ccdc_enable(int en)
+{
+ unsigned int temp;
+ temp = regr(SYNCEN);
+ temp &= (~CCDC_SYNCEN_VDHDEN_MASK);
+ temp |= (en & CCDC_SYNCEN_VDHDEN_MASK);
+ regw(temp, SYNCEN);
+}
+
+static void ccdc_enable_output_to_sdram(int en)
+{
+ unsigned int temp;
+ temp = regr(SYNCEN);
+ temp &= (~(CCDC_SYNCEN_WEN_MASK));
+ temp |= ((en << CCDC_SYNCEN_WEN_SHIFT) & CCDC_SYNCEN_WEN_MASK);
+ regw(temp, SYNCEN);
+}
+
+static void ccdc_config_gain_offset(void)
+{
+ /* configure gain */
+ regw(ccdc_cfg.bayer.gain.r_ye, RYEGAIN);
+ regw(ccdc_cfg.bayer.gain.gr_cy, GRCYGAIN);
+ regw(ccdc_cfg.bayer.gain.gb_g, GBGGAIN);
+ regw(ccdc_cfg.bayer.gain.b_mg, BMGGAIN);
+ /* configure offset */
+ regw(ccdc_cfg.bayer.ccdc_offset, OFFSET);
+}
+
+/*
+ * ccdc_restore_defaults()
+ * This function restores the power-on defaults in the CCDC registers
+ */
+static int ccdc_restore_defaults(void)
+{
+ int i;
+
+ dev_dbg(ccdc_cfg.dev, "\nstarting ccdc_restore_defaults...");
+ /* set all registers to zero */
+ for (i = 0; i <= CCDC_REG_LAST; i += 4)
+ regw(0, i);
+
+ /* now override the values with power on defaults in registers */
+ regw(MODESET_DEFAULT, MODESET);
+ /* no culling support */
+ regw(CULH_DEFAULT, CULH);
+ regw(CULV_DEFAULT, CULV);
+ /* Set default Gain and Offset */
+ ccdc_cfg.bayer.gain.r_ye = GAIN_DEFAULT;
+ ccdc_cfg.bayer.gain.gb_g = GAIN_DEFAULT;
+ ccdc_cfg.bayer.gain.gr_cy = GAIN_DEFAULT;
+ ccdc_cfg.bayer.gain.b_mg = GAIN_DEFAULT;
+ ccdc_config_gain_offset();
+ regw(OUTCLIP_DEFAULT, OUTCLIP);
+ regw(LSCCFG2_DEFAULT, LSCCFG2);
+ /* select ccdc input */
+ if (vpss_select_ccdc_source(VPSS_CCDCIN)) {
+ dev_dbg(ccdc_cfg.dev, "\ncouldn't select ccdc input source");
+ return -EFAULT;
+ }
+ /* select ccdc clock */
+ if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0) {
+ dev_dbg(ccdc_cfg.dev, "\ncouldn't enable ccdc clock");
+ return -EFAULT;
+ }
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_restore_defaults...");
+ return 0;
+}
+
+static int ccdc_open(struct device *device)
+{
+ return ccdc_restore_defaults();
+}
+
+static int ccdc_close(struct device *device)
+{
+ /* disable clock */
+ vpss_enable_clock(VPSS_CCDC_CLOCK, 0);
+ /* do nothing for now */
+ return 0;
+}
+/*
+ * ccdc_setwin()
+ * This function configures the capture window size
+ * in the CCDC registers.
+ */
+static void ccdc_setwin(struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt, int ppc)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int mid_img = 0;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
+
+ /*
+	 * ppc - per pixel count. It indicates how many units per pixel are
+	 * output to SDRAM. For example, for YCbCr it is one Y and one C,
+	 * so 2; for raw capture it is 1.
+ */
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
+
+ /* Writing the horizontal info into the registers */
+ regw(horz_start, SPH);
+ regw(horz_nr_pixels, NPH);
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ /* configure VDINT0 and VDINT1 */
+ regw(vert_start, VDINT0);
+ } else {
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /* configure VDINT0 and VDINT1 */
+ mid_img = vert_start + (image_win->height / 2);
+ regw(vert_start, VDINT0);
+ regw(mid_img, VDINT1);
+ }
+ regw(vert_start & CCDC_START_VER_ONE_MASK, SLV0);
+ regw(vert_start & CCDC_START_VER_TWO_MASK, SLV1);
+ regw(vert_nr_lines & CCDC_NUM_LINES_VER, NLV);
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
+}
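+
+/*
+ * Worked example for the window math above (values are illustrative):
+ * for a 720x576 YCbCr window with left = 0, top = 0 and ppc = 2,
+ * horz_start = 0 << 1 = 0 and horz_nr_pixels = (720 << 1) - 1 = 1439,
+ * i.e. 1440 Y/C samples per line; in the interlaced case
+ * vert_nr_lines = (576 >> 1) - 1 = 287 lines per field.
+ */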
+
+static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
+{
+ if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT ||
+ ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) {
+ dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n");
+ return -EINVAL;
+ }
+
+ if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 ||
+ ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) {
+ dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n");
+ return -EINVAL;
+ }
+
+ if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 ||
+ ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) {
+ dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n");
+ return -EINVAL;
+ }
+
+ if ((ccdcparam->med_filt_thres < 0) ||
+ (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) {
+ dev_dbg(ccdc_cfg.dev,
+ "Invalid value of median filter threshold\n");
+ return -EINVAL;
+ }
+
+ if (ccdcparam->data_sz < CCDC_DATA_16BITS ||
+ ccdcparam->data_sz > CCDC_DATA_8BITS) {
+ dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n");
+ return -EINVAL;
+ }
+
+ if (ccdcparam->alaw.enable) {
+ if (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_13_4 ||
+ ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) {
+ dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ccdcparam->blk_clamp.b_clamp_enable) {
+ if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS ||
+ ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) {
+ dev_dbg(ccdc_cfg.dev,
+ "Invalid value of sample pixel\n");
+ return -EINVAL;
+ }
+ if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES ||
+ ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) {
+ dev_dbg(ccdc_cfg.dev,
+ "Invalid value of sample lines\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/* Parameter operations */
+static int ccdc_set_params(void __user *params)
+{
+ struct ccdc_config_params_raw ccdc_raw_params;
+ int x;
+
+ /* only raw module parameters can be set through the IOCTL */
+ if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
+ return -EINVAL;
+
+ x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
+ if (x) {
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc "
+ "params, %d\n", x);
+ return -EFAULT;
+ }
+
+ if (!validate_ccdc_param(&ccdc_raw_params)) {
+ memcpy(&ccdc_cfg.bayer.config_params,
+ &ccdc_raw_params,
+ sizeof(ccdc_raw_params));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/* This function will configure CCDC for YCbCr video capture */
+static void ccdc_config_ycbcr(void)
+{
+ struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
+ u32 temp;
+
+	/* first set the CCDC power-on default values in all registers */
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
+ ccdc_restore_defaults();
+
+ /* configure pixel format & video frame format */
+ temp = (((params->pix_fmt & CCDC_INPUT_MODE_MASK) <<
+ CCDC_INPUT_MODE_SHIFT) |
+ ((params->frm_fmt & CCDC_FRM_FMT_MASK) <<
+ CCDC_FRM_FMT_SHIFT));
+
+ /* setup BT.656 sync mode */
+ if (params->bt656_enable) {
+ regw(CCDC_REC656IF_BT656_EN, REC656IF);
+ /*
+		 * configure the FID, VD, HD pin polarities: FLD and HD
+		 * positive, VD negative, 8-bit pack mode
+ */
+ temp |= CCDC_VD_POL_NEGATIVE;
+ } else { /* y/c external sync mode */
+ temp |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
+ CCDC_FID_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) <<
+ CCDC_HD_POL_SHIFT) |
+ ((params->vd_pol & CCDC_VD_POL_MASK) <<
+ CCDC_VD_POL_SHIFT));
+ }
+
+ /* pack the data to 8-bit */
+ temp |= CCDC_DATA_PACK_ENABLE;
+
+ regw(temp, MODESET);
+
+ /* configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, 2);
+
+ /* configure the order of y cb cr in SD-RAM */
+ temp = (params->pix_order << CCDC_Y8POS_SHIFT);
+ temp |= CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC;
+ regw(temp, CCDCFG);
+
+ /*
+ * configure the horizontal line offset. This is done by rounding up
+	 * width to a multiple of 16 pixels and multiplying by two to account for
+ * y:cb:cr 4:2:2 data
+ */
+ regw(((params->win.width * 2 + 31) >> 5), HSIZE);
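+	/*
+	 * For example, a 720-pixel-wide line is 1440 bytes of 4:2:2 data,
+	 * and (1440 + 31) >> 5 = 45 units of 32 bytes is what gets
+	 * programmed into HSIZE here (the width value is illustrative).
+	 */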
+
+ /* configure the memory line offset */
+ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
+ /* two fields are interleaved in memory */
+ regw(CCDC_SDOFST_FIELD_INTERLEAVED, SDOFST);
+ }
+
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
+}
+
+/*
+ * ccdc_config_black_clamp()
+ * configure parameters for Optical Black Clamp
+ */
+static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
+{
+ u32 val;
+
+ if (!bclamp->b_clamp_enable) {
+ /* configure DCSub */
+ regw(bclamp->dc_sub & CCDC_BLK_DC_SUB_MASK, DCSUB);
+ regw(0x0000, CLAMP);
+ return;
+ }
+ /* Enable the Black clamping, set sample lines and pixels */
+ val = (bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) |
+ ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
+ CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE;
+ regw(val, CLAMP);
+
+	/* If black clamping is enabled then make dcsub 0 */
+ val = (bclamp->sample_ln & CCDC_NUM_LINE_CALC_MASK)
+ << CCDC_NUM_LINE_CALC_SHIFT;
+ regw(val, DCSUB);
+}
+
+/*
+ * ccdc_config_black_compense()
+ * configure parameters for Black Compensation
+ */
+static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
+{
+ u32 val;
+
+ val = (bcomp->b & CCDC_BLK_COMP_MASK) |
+ ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GB_COMP_SHIFT);
+ regw(val, BLKCMP1);
+
+ val = ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GR_COMP_SHIFT) |
+ ((bcomp->r & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_R_COMP_SHIFT);
+ regw(val, BLKCMP0);
+}
+
+/*
+ * ccdc_write_dfc_entry()
+ * write an entry in the dfc table.
+ */
+int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc)
+{
+/* TODO This is to be re-visited and adjusted */
+#define DFC_WRITE_WAIT_COUNT 1000
+ u32 val, count = DFC_WRITE_WAIT_COUNT;
+
+ regw(dfc->dft_corr_vert[index], DFCMEM0);
+ regw(dfc->dft_corr_horz[index], DFCMEM1);
+ regw(dfc->dft_corr_sub1[index], DFCMEM2);
+ regw(dfc->dft_corr_sub2[index], DFCMEM3);
+ regw(dfc->dft_corr_sub3[index], DFCMEM4);
+ /* set WR bit to write */
+ val = regr(DFCMEMCTL) | CCDC_DFCMEMCTL_DFCMWR_MASK;
+ regw(val, DFCMEMCTL);
+
+	/*
+	 * The write is assumed to complete very quickly. If it turns out
+	 * to need more time, adjust DFC_WRITE_WAIT_COUNT.
+	 */
+	while ((regr(DFCMEMCTL) & CCDC_DFCMEMCTL_DFCMWR_MASK) && count)
+		count--;
+
+	/* a remaining count of zero means the write never completed */
+	if (!count) {
+		dev_err(ccdc_cfg.dev, "defect table write timeout !!!\n");
+		return -1;
+	}
+ return 0;
+}
+
+/*
+ * ccdc_config_vdfc()
+ * configure parameters for Vertical Defect Correction
+ */
+static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc)
+{
+ u32 val;
+ int i;
+
+ /* Configure General Defect Correction. The table used is from IPIPE */
+ val = dfc->gen_dft_en & CCDC_DFCCTL_GDFCEN_MASK;
+
+ /* Configure Vertical Defect Correction if needed */
+ if (!dfc->ver_dft_en) {
+ /* Enable only General Defect Correction */
+ regw(val, DFCCTL);
+ return 0;
+ }
+
+ if (dfc->table_size > CCDC_DFT_TABLE_SIZE)
+ return -EINVAL;
+
+ val |= CCDC_DFCCTL_VDFC_DISABLE;
+ val |= (dfc->dft_corr_ctl.vdfcsl & CCDC_DFCCTL_VDFCSL_MASK) <<
+ CCDC_DFCCTL_VDFCSL_SHIFT;
+ val |= (dfc->dft_corr_ctl.vdfcuda & CCDC_DFCCTL_VDFCUDA_MASK) <<
+ CCDC_DFCCTL_VDFCUDA_SHIFT;
+ val |= (dfc->dft_corr_ctl.vdflsft & CCDC_DFCCTL_VDFLSFT_MASK) <<
+ CCDC_DFCCTL_VDFLSFT_SHIFT;
+ regw(val , DFCCTL);
+
+ /* clear address ptr to offset 0 */
+ val = CCDC_DFCMEMCTL_DFCMARST_MASK << CCDC_DFCMEMCTL_DFCMARST_SHIFT;
+
+ /* write defect table entries */
+ for (i = 0; i < dfc->table_size; i++) {
+ /* increment address for non zero index */
+ if (i != 0)
+ val = CCDC_DFCMEMCTL_INC_ADDR;
+ regw(val, DFCMEMCTL);
+ if (ccdc_write_dfc_entry(i, dfc) < 0)
+ return -EFAULT;
+ }
+
+ /* update saturation level and enable dfc */
+ regw(dfc->saturation_ctl & CCDC_VDC_DFCVSAT_MASK, DFCVSAT);
+ val = regr(DFCCTL) | (CCDC_DFCCTL_VDFCEN_MASK <<
+ CCDC_DFCCTL_VDFCEN_SHIFT);
+ regw(val, DFCCTL);
+ return 0;
+}
+
+/*
+ * ccdc_config_csc()
+ * configure parameters for color space conversion
+ * Each register CSCM0-7 has two values in S8Q5 format.
+ */
+static void ccdc_config_csc(struct ccdc_csc *csc)
+{
+ u32 val1 = 0, val2;
+ int i;
+
+ if (!csc->enable)
+ return;
+
+ /* Enable the CSC sub-module */
+ regw(CCDC_CSC_ENABLE, CSCCTL);
+
+ /* Converting the co-eff as per the format of the register */
+ for (i = 0; i < CCDC_CSC_COEFF_TABLE_SIZE; i++) {
+ if ((i % 2) == 0) {
+ /* CSCM - LSB */
+ val1 = (csc->coeff[i].integer &
+ CCDC_CSC_COEF_INTEG_MASK)
+ << CCDC_CSC_COEF_INTEG_SHIFT;
+ /*
+ * convert decimal part to binary. Use 2 decimal
+ * precision, user values range from .00 - 0.99
+ */
+ val1 |= (((csc->coeff[i].decimal &
+ CCDC_CSC_COEF_DECIMAL_MASK) *
+ CCDC_CSC_DEC_MAX) / 100);
+ } else {
+
+ /* CSCM - MSB */
+ val2 = (csc->coeff[i].integer &
+ CCDC_CSC_COEF_INTEG_MASK)
+ << CCDC_CSC_COEF_INTEG_SHIFT;
+ val2 |= (((csc->coeff[i].decimal &
+ CCDC_CSC_COEF_DECIMAL_MASK) *
+ CCDC_CSC_DEC_MAX) / 100);
+ val2 <<= CCDC_CSCM_MSB_SHIFT;
+ val2 |= val1;
+ regw(val2, (CSCM0 + ((i - 1) << 1)));
+ }
+ }
+}
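+
+/*
+ * Illustrative example of the S8Q5 packing above, assuming
+ * CCDC_CSC_DEC_MAX corresponds to the five fractional bits (i.e. 32):
+ * a coefficient entered as integer = 1, decimal = 50 is packed as
+ * (1 << CCDC_CSC_COEF_INTEG_SHIFT) | (50 * 32 / 100), i.e. the
+ * fractional part 0.50 is quantized to 16/32.
+ */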
+
+/*
+ * ccdc_config_color_patterns()
+ * configure parameters for color patterns
+ */
+static void ccdc_config_color_patterns(struct ccdc_col_pat *pat0,
+ struct ccdc_col_pat *pat1)
+{
+ u32 val;
+
+ val = (pat0->olop | (pat0->olep << 2) | (pat0->elop << 4) |
+ (pat0->elep << 6) | (pat1->olop << 8) | (pat1->olep << 10) |
+ (pat1->elop << 12) | (pat1->elep << 14));
+ regw(val, COLPTN);
+}
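+
+/*
+ * The COLPTN packing above gives each of the eight even/odd line and
+ * pixel positions its own 2-bit colour code: field 0 occupies bits 7:0
+ * (olop, olep, elop, elep in that order) and field 1 occupies bits 15:8.
+ */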
+
+/* This function will configure CCDC for Raw mode image capture */
+static int ccdc_config_raw(void)
+{
+ struct ccdc_params_raw *params = &ccdc_cfg.bayer;
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int val;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
+
+ /* restore power on defaults to register */
+ ccdc_restore_defaults();
+
+ /* CCDCFG register:
+ * set CCD Not to swap input since input is RAW data
+ * set FID detection function to Latch at V-Sync
+ * set WENLOG - ccdc valid area to AND
+ * set TRGSEL to WENBIT
+ * set EXTRG to DISABLE
+ * disable latching function on VSYNC - shadowed registers
+ */
+ regw(CCDC_YCINSWP_RAW | CCDC_CCDCFG_FIDMD_LATCH_VSYNC |
+ CCDC_CCDCFG_WENLOG_AND | CCDC_CCDCFG_TRGSEL_WEN |
+ CCDC_CCDCFG_EXTRG_DISABLE | CCDC_LATCH_ON_VSYNC_DISABLE, CCDCFG);
+
+ /*
+ * Set VDHD direction to input, input type to raw input
+ * normal data polarity, do not use external WEN
+ */
+ val = (CCDC_VDHDOUT_INPUT | CCDC_RAW_IP_MODE | CCDC_DATAPOL_NORMAL |
+ CCDC_EXWEN_DISABLE);
+
+ /*
+ * Configure the vertical sync polarity (MODESET.VDPOL), horizontal
+ * sync polarity (MODESET.HDPOL), field id polarity (MODESET.FLDPOL),
+ * frame format(progressive or interlace), & pixel format (Input mode)
+ */
+ val |= (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
+ ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
+ ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
+ ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT));
+
+ /* set pack for alaw compression */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ val |= CCDC_DATA_PACK_ENABLE;
+
+ /* Configure for LPF */
+ if (config_params->lpf_enable)
+ val |= (config_params->lpf_enable & CCDC_LPF_MASK) <<
+ CCDC_LPF_SHIFT;
+
+ /* Configure the data shift */
+ val |= (config_params->datasft & CCDC_DATASFT_MASK) <<
+ CCDC_DATASFT_SHIFT;
+ regw(val , MODESET);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to MODESET...\n", val);
+
+ /* Configure the Median Filter threshold */
+ regw((config_params->med_filt_thres) & CCDC_MED_FILT_THRESH, MEDFILT);
+
+	/* Configure GAMMAWD register: default gamma bits 11-2, and Mosaic CFA pattern */
+ val = CCDC_GAMMA_BITS_11_2 << CCDC_GAMMAWD_INPUT_SHIFT |
+ CCDC_CFA_MOSAIC;
+
+ /* Enable and configure aLaw register if needed */
+ if (config_params->alaw.enable) {
+ val |= (CCDC_ALAW_ENABLE |
+ ((config_params->alaw.gamma_wd &
+ CCDC_ALAW_GAMMA_WD_MASK) <<
+ CCDC_GAMMAWD_INPUT_SHIFT));
+ }
+
+ /* Configure Median filter1 & filter2 */
+ val |= ((config_params->mfilt1 << CCDC_MFILT1_SHIFT) |
+ (config_params->mfilt2 << CCDC_MFILT2_SHIFT));
+
+ regw(val, GAMMAWD);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to GAMMAWD...\n", val);
+
+ /* configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, 1);
+
+ /* Optical Clamp Averaging */
+ ccdc_config_black_clamp(&config_params->blk_clamp);
+
+ /* Black level compensation */
+ ccdc_config_black_compense(&config_params->blk_comp);
+
+ /* Vertical Defect Correction if needed */
+ if (ccdc_config_vdfc(&config_params->vertical_dft) < 0)
+ return -EFAULT;
+
+ /* color space conversion */
+ ccdc_config_csc(&config_params->csc);
+
+ /* color pattern */
+ ccdc_config_color_patterns(&config_params->col_pat_field0,
+ &config_params->col_pat_field1);
+
+ /* Configure the Gain & offset control */
+ ccdc_config_gain_offset();
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to COLPTN...\n", val);
+
+ /* Configure DATAOFST register */
+ val = (config_params->data_offset.horz_offset & CCDC_DATAOFST_MASK) <<
+ CCDC_DATAOFST_H_SHIFT;
+ val |= (config_params->data_offset.vert_offset & CCDC_DATAOFST_MASK) <<
+ CCDC_DATAOFST_V_SHIFT;
+ regw(val, DATAOFST);
+
+ /* configuring HSIZE register */
+ val = (params->horz_flip_enable & CCDC_HSIZE_FLIP_MASK) <<
+ CCDC_HSIZE_FLIP_SHIFT;
+
+	/* If pack 8 is enabled then 1 pixel will take 1 byte */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable) {
+ val |= (((params->win.width) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK;
+
+ /* adjust to multiple of 32 */
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
+ (((params->win.width) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK);
+ } else {
+		/* else one pixel will take 2 bytes */
+ val |= (((params->win.width * 2) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK;
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
+ (((params->win.width * 2) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK);
+ }
+ regw(val, HSIZE);
+
+ /* Configure SDOFST register */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_enable) {
+ /* For interlace inverse mode */
+ regw(CCDC_SDOFST_INTERLACE_INVERSE, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_INTERLACE_INVERSE);
+ } else {
+ /* For interlace non inverse mode */
+ regw(CCDC_SDOFST_INTERLACE_NORMAL, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_INTERLACE_NORMAL);
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ if (params->image_invert_enable) {
+			/* For progressive inverse mode */
+ regw(CCDC_SDOFST_PROGRESSIVE_INVERSE, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_PROGRESSIVE_INVERSE);
+ } else {
+			/* For progressive non inverse mode */
+ regw(CCDC_SDOFST_PROGRESSIVE_NORMAL, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_PROGRESSIVE_NORMAL);
+ }
+ }
+ dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
+ return 0;
+}
+
+static int ccdc_configure(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_config_raw();
+ else
+ ccdc_config_ycbcr();
+ return 0;
+}
+
+static int ccdc_set_buftype(enum ccdc_buftype buf_type)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.buf_type = buf_type;
+ else
+ ccdc_cfg.ycbcr.buf_type = buf_type;
+ return 0;
+}
+static enum ccdc_buftype ccdc_get_buftype(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.buf_type;
+ return ccdc_cfg.ycbcr.buf_type;
+}
+
+static int ccdc_enum_pix(u32 *pix, int i)
+{
+ int ret = -EINVAL;
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
+ *pix = ccdc_raw_bayer_pix_formats[i];
+ ret = 0;
+ }
+ } else {
+ if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
+ *pix = ccdc_raw_yuv_pix_formats[i];
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+static int ccdc_set_pixel_format(u32 pixfmt)
+{
+ struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ if (pixfmt == V4L2_PIX_FMT_SBGGR8)
+ alaw->enable = 1;
+ else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
+ return -EINVAL;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+static u32 ccdc_get_pixel_format(void)
+{
+ struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+ u32 pixfmt;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ if (alaw->enable)
+ pixfmt = V4L2_PIX_FMT_SBGGR8;
+ else
+ pixfmt = V4L2_PIX_FMT_SBGGR16;
+ else {
+ if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+ return pixfmt;
+}
+
+static int ccdc_set_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.win = *win;
+ else
+ ccdc_cfg.ycbcr.win = *win;
+ return 0;
+}
+
+static void ccdc_get_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ *win = ccdc_cfg.bayer.win;
+ else
+ *win = ccdc_cfg.ycbcr.win;
+}
+
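+/* line length in bytes, rounded up to a 32-byte boundary */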
+static unsigned int ccdc_get_line_length(void)
+{
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int len;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if ((config_params->alaw.enable) ||
+ (config_params->data_sz == CCDC_DATA_8BITS))
+ len = ccdc_cfg.bayer.win.width;
+ else
+ len = ccdc_cfg.bayer.win.width * 2;
+ } else
+ len = ccdc_cfg.ycbcr.win.width * 2;
+ return ALIGN(len, 32);
+}
+
+static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+ return 0;
+}
+
+static enum ccdc_frmfmt ccdc_get_frame_format(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.frm_fmt;
+ else
+ return ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static int ccdc_getfid(void)
+{
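+	/* the current field ID is reported in bit 15 of MODESET */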
+ return (regr(MODESET) >> 15) & 1;
+}
+
+/* misc operations */
+static inline void ccdc_setfbaddr(unsigned long addr)
+{
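+	/* the address is programmed in 32-byte units, split across STADRH and STADRL */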
+ regw((addr >> 21) & 0x007f, STADRH);
+ regw((addr >> 5) & 0x0ffff, STADRL);
+}
+
+static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+ ccdc_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_YCBCR_SYNC_16:
+ case VPFE_YCBCR_SYNC_8:
+ ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+ ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+ break;
+ default:
+ /* TODO add support for raw bayer here */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct ccdc_hw_device ccdc_hw_dev = {
+ .name = "DM355 CCDC",
+ .owner = THIS_MODULE,
+ .hw_ops = {
+ .open = ccdc_open,
+ .close = ccdc_close,
+ .enable = ccdc_enable,
+ .enable_out_to_sdram = ccdc_enable_output_to_sdram,
+ .set_hw_if_params = ccdc_set_hw_if_params,
+ .set_params = ccdc_set_params,
+ .configure = ccdc_configure,
+ .set_buftype = ccdc_set_buftype,
+ .get_buftype = ccdc_get_buftype,
+ .enum_pix = ccdc_enum_pix,
+ .set_pixel_format = ccdc_set_pixel_format,
+ .get_pixel_format = ccdc_get_pixel_format,
+ .set_frame_format = ccdc_set_frame_format,
+ .get_frame_format = ccdc_get_frame_format,
+ .set_image_window = ccdc_set_image_window,
+ .get_image_window = ccdc_get_image_window,
+ .get_line_length = ccdc_get_line_length,
+ .setfbaddr = ccdc_setfbaddr,
+ .getfid = ccdc_getfid,
+ },
+};
+
+static int dm355_ccdc_probe(struct platform_device *pdev)
+{
+ void (*setup_pinmux)(void);
+ struct resource *res;
+ int status = 0;
+
+ /*
+	 * first try to register with vpfe. If this is not the correct
+	 * platform, we don't have to ioremap
+ */
+ status = vpfe_register_ccdc_device(&ccdc_hw_dev);
+ if (status < 0)
+ return status;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ status = -ENODEV;
+ goto fail_nores;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nores;
+ }
+
+ ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ if (!ccdc_cfg.base_addr) {
+ status = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ /* Platform data holds setup_pinmux function ptr */
+ if (NULL == pdev->dev.platform_data) {
+ status = -ENODEV;
+ goto fail_nomap;
+ }
+ setup_pinmux = pdev->dev.platform_data;
+ /*
+	 * set up the mux configuration for the CCDC, which may differ
+	 * between SoCs using this CCDC
+ */
+ setup_pinmux();
+ ccdc_cfg.dev = &pdev->dev;
+ printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
+ return 0;
+fail_nomap:
+ iounmap(ccdc_cfg.base_addr);
+fail_nomem:
+ release_mem_region(res->start, resource_size(res));
+fail_nores:
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return status;
+}
+
+static int dm355_ccdc_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ iounmap(ccdc_cfg.base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return 0;
+}
+
+static struct platform_driver dm355_ccdc_driver = {
+ .driver = {
+ .name = "dm355_ccdc",
+ .owner = THIS_MODULE,
+ },
+ .remove = dm355_ccdc_remove,
+ .probe = dm355_ccdc_probe,
+};
+
+module_platform_driver(dm355_ccdc_driver);
diff --git a/drivers/media/platform/davinci/dm355_ccdc_regs.h b/drivers/media/platform/davinci/dm355_ccdc_regs.h
new file mode 100644
index 00000000000..2e1946e0b99
--- /dev/null
+++ b/drivers/media/platform/davinci/dm355_ccdc_regs.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2005-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _DM355_CCDC_REGS_H
+#define _DM355_CCDC_REGS_H
+
+/**************************************************************************\
+* Register OFFSET Definitions
+\**************************************************************************/
+#define SYNCEN 0x00
+#define MODESET 0x04
+#define HDWIDTH 0x08
+#define VDWIDTH 0x0c
+#define PPLN 0x10
+#define LPFR 0x14
+#define SPH 0x18
+#define NPH 0x1c
+#define SLV0 0x20
+#define SLV1 0x24
+#define NLV 0x28
+#define CULH 0x2c
+#define CULV 0x30
+#define HSIZE 0x34
+#define SDOFST 0x38
+#define STADRH 0x3c
+#define STADRL 0x40
+#define CLAMP 0x44
+#define DCSUB 0x48
+#define COLPTN 0x4c
+#define BLKCMP0 0x50
+#define BLKCMP1 0x54
+#define MEDFILT 0x58
+#define RYEGAIN 0x5c
+#define GRCYGAIN 0x60
+#define GBGGAIN 0x64
+#define BMGGAIN 0x68
+#define OFFSET 0x6c
+#define OUTCLIP 0x70
+#define VDINT0 0x74
+#define VDINT1 0x78
+#define RSV0 0x7c
+#define GAMMAWD 0x80
+#define REC656IF 0x84
+#define CCDCFG 0x88
+#define FMTCFG 0x8c
+#define FMTPLEN 0x90
+#define FMTSPH 0x94
+#define FMTLNH 0x98
+#define FMTSLV 0x9c
+#define FMTLNV 0xa0
+#define FMTRLEN 0xa4
+#define FMTHCNT 0xa8
+#define FMT_ADDR_PTR_B 0xac
+#define FMT_ADDR_PTR(i) (FMT_ADDR_PTR_B + (i * 4))
+#define FMTPGM_VF0 0xcc
+#define FMTPGM_VF1 0xd0
+#define FMTPGM_AP0 0xd4
+#define FMTPGM_AP1 0xd8
+#define FMTPGM_AP2 0xdc
+#define FMTPGM_AP3 0xe0
+#define FMTPGM_AP4 0xe4
+#define FMTPGM_AP5 0xe8
+#define FMTPGM_AP6 0xec
+#define FMTPGM_AP7 0xf0
+#define LSCCFG1 0xf4
+#define LSCCFG2 0xf8
+#define LSCH0 0xfc
+#define LSCV0 0x100
+#define LSCKH 0x104
+#define LSCKV 0x108
+#define LSCMEMCTL 0x10c
+#define LSCMEMD 0x110
+#define LSCMEMQ 0x114
+#define DFCCTL 0x118
+#define DFCVSAT 0x11c
+#define DFCMEMCTL 0x120
+#define DFCMEM0 0x124
+#define DFCMEM1 0x128
+#define DFCMEM2 0x12c
+#define DFCMEM3 0x130
+#define DFCMEM4 0x134
+#define CSCCTL 0x138
+#define CSCM0 0x13c
+#define CSCM1 0x140
+#define CSCM2 0x144
+#define CSCM3 0x148
+#define CSCM4 0x14c
+#define CSCM5 0x150
+#define CSCM6 0x154
+#define CSCM7 0x158
+#define DATAOFST 0x15c
+#define CCDC_REG_LAST DATAOFST
+/**************************************************************
+* Define for various register bit mask and shifts for CCDC
+*
+**************************************************************/
+#define CCDC_RAW_IP_MODE 0
+#define CCDC_VDHDOUT_INPUT 0
+#define CCDC_YCINSWP_RAW (0 << 4)
+#define CCDC_EXWEN_DISABLE 0
+#define CCDC_DATAPOL_NORMAL 0
+#define CCDC_CCDCFG_FIDMD_LATCH_VSYNC 0
+#define CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC (1 << 6)
+#define CCDC_CCDCFG_WENLOG_AND 0
+#define CCDC_CCDCFG_TRGSEL_WEN 0
+#define CCDC_CCDCFG_EXTRG_DISABLE 0
+#define CCDC_CFA_MOSAIC 0
+#define CCDC_Y8POS_SHIFT 11
+
+#define CCDC_VDC_DFCVSAT_MASK 0x3fff
+#define CCDC_DATAOFST_MASK 0x0ff
+#define CCDC_DATAOFST_H_SHIFT 0
+#define CCDC_DATAOFST_V_SHIFT 8
+#define CCDC_GAMMAWD_CFA_MASK 1
+#define CCDC_GAMMAWD_CFA_SHIFT 5
+#define CCDC_GAMMAWD_INPUT_SHIFT 2
+#define CCDC_FID_POL_MASK 1
+#define CCDC_FID_POL_SHIFT 4
+#define CCDC_HD_POL_MASK 1
+#define CCDC_HD_POL_SHIFT 3
+#define CCDC_VD_POL_MASK 1
+#define CCDC_VD_POL_SHIFT 2
+#define CCDC_VD_POL_NEGATIVE (1 << 2)
+#define CCDC_FRM_FMT_MASK 1
+#define CCDC_FRM_FMT_SHIFT 7
+#define CCDC_DATA_SZ_MASK 7
+#define CCDC_DATA_SZ_SHIFT 8
+#define CCDC_VDHDOUT_MASK 1
+#define CCDC_VDHDOUT_SHIFT 0
+#define CCDC_EXWEN_MASK 1
+#define CCDC_EXWEN_SHIFT 5
+#define CCDC_INPUT_MODE_MASK 3
+#define CCDC_INPUT_MODE_SHIFT 12
+#define CCDC_PIX_FMT_MASK 3
+#define CCDC_PIX_FMT_SHIFT 12
+#define CCDC_DATAPOL_MASK 1
+#define CCDC_DATAPOL_SHIFT 6
+#define CCDC_WEN_ENABLE (1 << 1)
+#define CCDC_VDHDEN_ENABLE (1 << 16)
+#define CCDC_LPF_ENABLE (1 << 14)
+#define CCDC_ALAW_ENABLE 1
+#define CCDC_ALAW_GAMMA_WD_MASK 7
+#define CCDC_REC656IF_BT656_EN 3
+
+#define CCDC_FMTCFG_FMTMODE_MASK 3
+#define CCDC_FMTCFG_FMTMODE_SHIFT 1
+#define CCDC_FMTCFG_LNUM_MASK 3
+#define CCDC_FMTCFG_LNUM_SHIFT 4
+#define CCDC_FMTCFG_ADDRINC_MASK 7
+#define CCDC_FMTCFG_ADDRINC_SHIFT 8
+
+#define CCDC_CCDCFG_FIDMD_SHIFT 6
+#define CCDC_CCDCFG_WENLOG_SHIFT 8
+#define CCDC_CCDCFG_TRGSEL_SHIFT 9
+#define CCDC_CCDCFG_EXTRG_SHIFT 10
+#define CCDC_CCDCFG_MSBINVI_SHIFT 13
+
+#define CCDC_HSIZE_FLIP_SHIFT 12
+#define CCDC_HSIZE_FLIP_MASK 1
+#define CCDC_HSIZE_VAL_MASK 0xFFF
+#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
+#define CCDC_SDOFST_INTERLACE_INVERSE 0x4B6D
+#define CCDC_SDOFST_INTERLACE_NORMAL 0x0B6D
+#define CCDC_SDOFST_PROGRESSIVE_INVERSE 0x4000
+#define CCDC_SDOFST_PROGRESSIVE_NORMAL 0
+#define CCDC_START_PX_HOR_MASK 0x7FFF
+#define CCDC_NUM_PX_HOR_MASK 0x7FFF
+#define CCDC_START_VER_ONE_MASK 0x7FFF
+#define CCDC_START_VER_TWO_MASK 0x7FFF
+#define CCDC_NUM_LINES_VER 0x7FFF
+
+#define CCDC_BLK_CLAMP_ENABLE (1 << 15)
+#define CCDC_BLK_SGAIN_MASK 0x1F
+#define CCDC_BLK_ST_PXL_MASK 0x1FFF
+#define CCDC_BLK_SAMPLE_LN_MASK 3
+#define CCDC_BLK_SAMPLE_LN_SHIFT 13
+
+#define CCDC_NUM_LINE_CALC_MASK 3
+#define CCDC_NUM_LINE_CALC_SHIFT 14
+
+#define CCDC_BLK_DC_SUB_MASK 0x3FFF
+#define CCDC_BLK_COMP_MASK 0xFF
+#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
+#define CCDC_BLK_COMP_GR_COMP_SHIFT 0
+#define CCDC_BLK_COMP_R_COMP_SHIFT 8
+#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15)
+#define CCDC_LATCH_ON_VSYNC_ENABLE (0 << 15)
+#define CCDC_FPC_ENABLE (1 << 15)
+#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
+#define CCDC_DATA_PACK_ENABLE (1 << 11)
+#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
+#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
+#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
+#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
+#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
+#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
+#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
+
+#define CCDC_CSC_COEF_INTEG_MASK 7
+#define CCDC_CSC_COEF_DECIMAL_MASK 0x1f
+#define CCDC_CSC_COEF_INTEG_SHIFT 5
+#define CCDC_CSCM_MSB_SHIFT 8
+#define CCDC_CSC_ENABLE 1
+#define CCDC_CSC_DEC_MAX 32
+
+#define CCDC_MFILT1_SHIFT 10
+#define CCDC_MFILT2_SHIFT 8
+#define CCDC_MED_FILT_THRESH 0x3FFF
+#define CCDC_LPF_MASK 1
+#define CCDC_LPF_SHIFT 14
+#define CCDC_OFFSET_MASK 0x3FF
+#define CCDC_DATASFT_MASK 7
+#define CCDC_DATASFT_SHIFT 8
+
+#define CCDC_DF_ENABLE 1
+
+#define CCDC_FMTPLEN_P0_MASK 0xF
+#define CCDC_FMTPLEN_P1_MASK 0xF
+#define CCDC_FMTPLEN_P2_MASK 7
+#define CCDC_FMTPLEN_P3_MASK 7
+#define CCDC_FMTPLEN_P0_SHIFT 0
+#define CCDC_FMTPLEN_P1_SHIFT 4
+#define CCDC_FMTPLEN_P2_SHIFT 8
+#define CCDC_FMTPLEN_P3_SHIFT 12
+
+#define CCDC_FMTSPH_MASK 0x1FFF
+#define CCDC_FMTLNH_MASK 0x1FFF
+#define CCDC_FMTSLV_MASK 0x1FFF
+#define CCDC_FMTLNV_MASK 0x7FFF
+#define CCDC_FMTRLEN_MASK 0x1FFF
+#define CCDC_FMTHCNT_MASK 0x1FFF
+
+#define CCDC_ADP_INIT_MASK 0x1FFF
+#define CCDC_ADP_LINE_SHIFT 13
+#define CCDC_ADP_LINE_MASK 3
+#define CCDC_FMTPGN_APTR_MASK 7
+
+#define CCDC_DFCCTL_GDFCEN_MASK 1
+#define CCDC_DFCCTL_VDFCEN_MASK 1
+#define CCDC_DFCCTL_VDFC_DISABLE (0 << 4)
+#define CCDC_DFCCTL_VDFCEN_SHIFT 4
+#define CCDC_DFCCTL_VDFCSL_MASK 3
+#define CCDC_DFCCTL_VDFCSL_SHIFT 5
+#define CCDC_DFCCTL_VDFCUDA_MASK 1
+#define CCDC_DFCCTL_VDFCUDA_SHIFT 7
+#define CCDC_DFCCTL_VDFLSFT_MASK 3
+#define CCDC_DFCCTL_VDFLSFT_SHIFT 8
+#define CCDC_DFCMEMCTL_DFCMARST_MASK 1
+#define CCDC_DFCMEMCTL_DFCMARST_SHIFT 2
+#define CCDC_DFCMEMCTL_DFCMWR_MASK 1
+#define CCDC_DFCMEMCTL_DFCMWR_SHIFT 0
+#define CCDC_DFCMEMCTL_INC_ADDR (0 << 2)
+
+#define CCDC_LSCCFG_GFTSF_MASK 7
+#define CCDC_LSCCFG_GFTSF_SHIFT 1
+#define CCDC_LSCCFG_GFTINV_MASK 0xf
+#define CCDC_LSCCFG_GFTINV_SHIFT 4
+#define CCDC_LSC_GFTABLE_SEL_MASK 3
+#define CCDC_LSC_GFTABLE_EPEL_SHIFT 8
+#define CCDC_LSC_GFTABLE_OPEL_SHIFT 10
+#define CCDC_LSC_GFTABLE_EPOL_SHIFT 12
+#define CCDC_LSC_GFTABLE_OPOL_SHIFT 14
+#define CCDC_LSC_GFMODE_MASK 3
+#define CCDC_LSC_GFMODE_SHIFT 4
+#define CCDC_LSC_DISABLE 0
+#define CCDC_LSC_ENABLE 1
+#define CCDC_LSC_TABLE1_SLC 0
+#define CCDC_LSC_TABLE2_SLC 1
+#define CCDC_LSC_TABLE3_SLC 2
+#define CCDC_LSC_MEMADDR_RESET (1 << 2)
+#define CCDC_LSC_MEMADDR_INCR (0 << 2)
+#define CCDC_LSC_FRAC_MASK_T1 0xFF
+#define CCDC_LSC_INT_MASK 3
+#define CCDC_LSC_FRAC_MASK 0x3FFF
+#define CCDC_LSC_CENTRE_MASK 0x3FFF
+#define CCDC_LSC_COEF_MASK 0xff
+#define CCDC_LSC_COEFL_SHIFT 0
+#define CCDC_LSC_COEFU_SHIFT 8
+#define CCDC_GAIN_MASK 0x7FF
+#define CCDC_SYNCEN_VDHDEN_MASK (1 << 0)
+#define CCDC_SYNCEN_WEN_MASK (1 << 1)
+#define CCDC_SYNCEN_WEN_SHIFT 1
+
+/* Power on Defaults in hardware */
+#define MODESET_DEFAULT 0x200
+#define CULH_DEFAULT 0xFFFF
+#define CULV_DEFAULT 0xFF
+#define GAIN_DEFAULT 256
+#define OUTCLIP_DEFAULT 0x3FFF
+#define LSCCFG2_DEFAULT 0xE
+
+#endif
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
new file mode 100644
index 00000000000..30fa08405d6
--- /dev/null
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -0,0 +1,1044 @@
+/*
+ * Copyright (C) 2006-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * CCDC hardware module for DM6446
+ * ------------------------------
+ *
+ * This module configures the CCD controller of the DM6446 VPFE to capture
+ * raw YUV or Bayer RGB data from a decoder. The CCDC has several blocks,
+ * such as Defect Pixel Correction and Color Space Conversion, to
+ * pre-process the raw Bayer RGB data before writing it to SDRAM. This
+ * module also allows an application to configure individual
+ * module parameters through the VPFE_CMD_S_CCDC_RAW_PARAMS ioctl.
+ * To do so, the application includes the dm644x_ccdc.h and vpfe_capture.h
+ * header files. The setparams() API is called by the vpfe_capture driver
+ * to configure module parameters. This file is named DM644x so that other
+ * variants such as DM6443 may be supported using the same module.
+ *
+ * TODO: Test raw Bayer parameter settings and Bayer capture
+ *	 Split the module parameter structure into module-specific ioctl structs
+ *	 Investigate whether enums used for user-space type definitions
+ *	 should be replaced by #defines or integers
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <media/davinci/dm644x_ccdc.h>
+#include <media/davinci/vpss.h>
+
+#include "dm644x_ccdc_regs.h"
+#include "ccdc_hw_device.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCDC Driver for DM6446");
+MODULE_AUTHOR("Texas Instruments");
+
+static struct ccdc_oper_config {
+ struct device *dev;
+ /* CCDC interface type */
+ enum vpfe_hw_if_type if_type;
+ /* Raw Bayer configuration */
+ struct ccdc_params_raw bayer;
+ /* YCbCr configuration */
+ struct ccdc_params_ycbcr ycbcr;
+ /* ccdc base address */
+ void __iomem *base_addr;
+} ccdc_cfg = {
+ /* Raw configurations */
+ .bayer = {
+ .pix_fmt = CCDC_PIXFMT_RAW,
+ .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+ .win = CCDC_WIN_VGA,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .config_params = {
+ .data_sz = CCDC_DATA_10BITS,
+ },
+ },
+ .ycbcr = {
+ .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+ .frm_fmt = CCDC_FRMFMT_INTERLACED,
+ .win = CCDC_WIN_PAL,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .bt656_enable = 1,
+ .pix_order = CCDC_PIXORDER_CBYCRY,
+ .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
+ },
+};
+
+#define CCDC_MAX_RAW_YUV_FORMATS 2
+
+/* Raw Bayer formats */
+static u32 ccdc_raw_bayer_pix_formats[] =
+ {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static u32 ccdc_raw_yuv_pix_formats[] =
+ {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* CCDC Save/Restore context */
+static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+ return __raw_readl(ccdc_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+ __raw_writel(val, ccdc_cfg.base_addr + offset);
+}
+
+static void ccdc_enable(int flag)
+{
+ regw(flag, CCDC_PCR);
+}
+
+static void ccdc_enable_vport(int flag)
+{
+ if (flag)
+ /* enable video port */
+ regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG);
+ else
+ regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG);
+}
+
+/*
+ * ccdc_setwin()
+ * This function configures the window size
+ * to be captured in the CCDC registers
+ */
+void ccdc_setwin(struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt,
+ int ppc)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int val = 0, mid_img = 0;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
+ /*
+	 * ppc - per pixel count. Indicates how many cells are output to
+	 * SDRAM per pixel. For example, for YCbCr it is one Y and one C,
+	 * so 2; for raw capture it is 1.
+ */
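+	/* HORZ_INFO: start pixel in the upper half-word, pixel count below */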
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
+ regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels,
+ CCDC_HORZ_INFO);
+
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ /* configure VDINT0 */
+ val = (vert_start << CCDC_VDINT_VDINT0_SHIFT);
+ regw(val, CCDC_VDINT);
+
+ } else {
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /*
+ * configure VDINT0 and VDINT1. VDINT1 will be at half
+ * of image height
+ */
+ mid_img = vert_start + (image_win->height / 2);
+ val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) |
+ (mid_img & CCDC_VDINT_VDINT1_MASK);
+ regw(val, CCDC_VDINT);
+
+ }
+ regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start,
+ CCDC_VERT_START);
+ regw(vert_nr_lines, CCDC_VERT_LINES);
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
+}
+
+static void ccdc_readregs(void)
+{
+ unsigned int val = 0;
+
+ val = regr(CCDC_ALAW);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from ALAW...\n", val);
+	val = regr(CCDC_CLAMP);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from CLAMP...\n", val);
+	val = regr(CCDC_DCSUB);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from DCSUB...\n", val);
+	val = regr(CCDC_BLKCMP);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from BLKCMP...\n", val);
+	val = regr(CCDC_FPC_ADDR);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from FPC_ADDR...\n", val);
+	val = regr(CCDC_FPC);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from FPC...\n", val);
+	val = regr(CCDC_FMTCFG);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from FMTCFG...\n", val);
+	val = regr(CCDC_COLPTN);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from COLPTN...\n", val);
+	val = regr(CCDC_FMT_HORZ);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from FMT_HORZ...\n", val);
+	val = regr(CCDC_FMT_VERT);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from FMT_VERT...\n", val);
+	val = regr(CCDC_HSIZE_OFF);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from HSIZE_OFF...\n", val);
+	val = regr(CCDC_SDOFST);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from SDOFST...\n", val);
+	val = regr(CCDC_VP_OUT);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from VP_OUT...\n", val);
+	val = regr(CCDC_SYN_MODE);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from SYN_MODE...\n", val);
+	val = regr(CCDC_HORZ_INFO);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from HORZ_INFO...\n", val);
+	val = regr(CCDC_VERT_START);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from VERT_START...\n", val);
+	val = regr(CCDC_VERT_LINES);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x from VERT_LINES...\n", val);
+}
+
+static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
+{
+ if (ccdcparam->alaw.enable) {
+ u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
+ u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
+
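+		/* the A-law gamma window must fit within the configured data width */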
+ if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) ||
+ (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) ||
+ (max_gamma > max_data)) {
+ dev_dbg(ccdc_cfg.dev, "\nInvalid data line select");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
+{
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int *fpc_virtaddr = NULL;
+ unsigned int *fpc_physaddr = NULL;
+
+ memcpy(config_params, raw_params, sizeof(*raw_params));
+ /*
+ * allocate memory for fault pixel table and copy the user
+ * values to the table
+ */
+ if (!config_params->fault_pxl.enable)
+ return 0;
+
+ fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
+ fpc_virtaddr = (unsigned int *)phys_to_virt(
+ (unsigned long)fpc_physaddr);
+ /*
+ * Allocate memory for FPC table if current
+ * FPC table buffer is not big enough to
+ * accommodate FPC Number requested
+ */
+ if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
+ if (fpc_physaddr != NULL) {
+ free_pages((unsigned long)fpc_physaddr,
+ get_order
+ (config_params->fault_pxl.fp_num *
+ FP_NUM_BYTES));
+ }
+
+ /* Allocate memory for FPC table */
+ fpc_virtaddr =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+ get_order(raw_params->
+ fault_pxl.fp_num *
+ FP_NUM_BYTES));
+
+ if (fpc_virtaddr == NULL) {
+ dev_dbg(ccdc_cfg.dev,
+ "\nUnable to allocate memory for FPC");
+ return -EFAULT;
+ }
+ fpc_physaddr =
+ (unsigned int *)virt_to_phys((void *)fpc_virtaddr);
+ }
+
+ /* Copy number of fault pixels and FPC table */
+ config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num;
+ if (copy_from_user(fpc_virtaddr,
+ (void __user *)raw_params->fault_pxl.fpc_table_addr,
+ config_params->fault_pxl.fp_num * FP_NUM_BYTES)) {
+ dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed");
+ return -EFAULT;
+ }
+ config_params->fault_pxl.fpc_table_addr = (unsigned int)fpc_physaddr;
+ return 0;
+}
+
+static int ccdc_close(struct device *dev)
+{
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL;
+
+ fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
+
+ if (fpc_physaddr != NULL) {
+ fpc_virtaddr = (unsigned int *)
+ phys_to_virt((unsigned long)fpc_physaddr);
+ free_pages((unsigned long)fpc_virtaddr,
+ get_order(config_params->fault_pxl.fp_num *
+ FP_NUM_BYTES));
+ }
+ return 0;
+}
+
+/*
+ * ccdc_restore_defaults()
+ * This function will write defaults to all CCDC registers
+ */
+static void ccdc_restore_defaults(void)
+{
+ int i;
+
+ /* disable CCDC */
+ ccdc_enable(0);
+ /* set all registers to default value */
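+	/* offsets 0x04 (PCR) through 0x94 (VP_OUT), see dm644x_ccdc_regs.h */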
+ for (i = 4; i <= 0x94; i += 4)
+ regw(0, i);
+ regw(CCDC_NO_CULLING, CCDC_CULLING);
+ regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW);
+}
+
+static int ccdc_open(struct device *device)
+{
+ ccdc_restore_defaults();
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_enable_vport(1);
+ return 0;
+}
+
+static void ccdc_sbl_reset(void)
+{
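+	/* clear the CCDC write buffer logic (WBL) overflow flag in the VPSS */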
+ vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
+}
+
+/* Parameter operations */
+static int ccdc_set_params(void __user *params)
+{
+ struct ccdc_config_params_raw ccdc_raw_params;
+ int x;
+
+ if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
+ return -EINVAL;
+
+ x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
+ if (x) {
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying "
+			"ccdc params, %d\n", x);
+ return -EFAULT;
+ }
+
+ if (!validate_ccdc_param(&ccdc_raw_params)) {
+ if (!ccdc_update_raw_params(&ccdc_raw_params))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * ccdc_config_ycbcr()
+ * This function will configure CCDC for YCbCr video capture
+ */
+void ccdc_config_ycbcr(void)
+{
+ struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
+ u32 syn_mode;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
+ /*
+ * first restore the CCDC registers to default values
+ * This is important since we assume default values to be set in
+ * a lot of registers that we didn't touch
+ */
+ ccdc_restore_defaults();
+
+ /*
+	 * configure the pixel format and frame format, enable output to
+	 * SDRAM, and enable the internal timing generator and 8-bit pack
+	 * mode
+ */
+ syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) <<
+ CCDC_SYN_MODE_INPMOD_SHIFT) |
+ ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) <<
+ CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE |
+ CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE);
+
+ /* setup BT.656 sync mode */
+ if (params->bt656_enable) {
+ regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF);
+
+ /*
+		 * configure the FID, VD, HD pin polarity:
+		 * FID and HD positive, VD negative, 8-bit data
+ */
+ syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= CCDC_SYN_MODE_10BITS;
+ else
+ syn_mode |= CCDC_SYN_MODE_8BITS;
+ } else {
+ /* y/c external sync mode */
+ syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
+ CCDC_FID_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) <<
+ CCDC_HD_POL_SHIFT) |
+ ((params->vd_pol & CCDC_VD_POL_MASK) <<
+ CCDC_VD_POL_SHIFT));
+ }
+ regw(syn_mode, CCDC_SYN_MODE);
+
+ /* configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, 2);
+
+ /*
+	 * configure the order of Y/Cb/Cr in SDRAM, and disable latching of
+	 * internal registers on VSYNC
+ */
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
+ CCDC_CCDCFG);
+ else
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+
+ /*
+	 * configure the horizontal line offset. This should be on a
+	 * 32-byte boundary, so the 5 LSBs are cleared
+ */
+ regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF);
+
+ /* configure the memory line offset */
+ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+ /* two fields are interleaved in memory */
+ regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST);
+
+ ccdc_sbl_reset();
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
+}
+
+static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
+{
+ u32 val;
+
+ if (!bclamp->enable) {
+ /* configure DCSub */
+ val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK;
+ regw(val, CCDC_DCSUB);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to DCSUB...\n", val);
+ regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to CLAMP...\n");
+ return;
+ }
+ /*
+	 * Configure the gain, start pixel, number of lines to average,
+	 * number of pixels per line to average, and enable black clamping
+ */
+ val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) |
+ ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) <<
+ CCDC_BLK_ST_PXL_SHIFT) |
+ ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) <<
+ CCDC_BLK_SAMPLE_LINE_SHIFT) |
+ ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
+ CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE);
+ regw(val, CCDC_CLAMP);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to CLAMP...\n", val);
+	/* If black clamping is enabled, set DCSub to 0 */
+ regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x00000000 to DCSUB...\n");
+}
+
+static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
+{
+ u32 val;
+
+ val = ((bcomp->b & CCDC_BLK_COMP_MASK) |
+ ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GB_COMP_SHIFT) |
+ ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GR_COMP_SHIFT) |
+ ((bcomp->r & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_R_COMP_SHIFT));
+ regw(val, CCDC_BLKCMP);
+}
+
+static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
+{
+ u32 val;
+
+ /* Initially disable FPC */
+ val = CCDC_FPC_DISABLE;
+ regw(val, CCDC_FPC);
+
+ if (!fpc->enable)
+ return;
+
+ /* Configure Fault pixel if needed */
+ regw(fpc->fpc_table_addr, CCDC_FPC_ADDR);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC_ADDR...\n",
+ (fpc->fpc_table_addr));
+ /* Write the FPC params with FPC disable */
+ val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK;
+ regw(val, CCDC_FPC);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
+ /* read the FPC register */
+ val = regr(CCDC_FPC) | CCDC_FPC_ENABLE;
+ regw(val, CCDC_FPC);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
+}
+
+/*
+ * ccdc_config_raw()
+ * This function will configure CCDC for Raw capture mode
+ */
+void ccdc_config_raw(void)
+{
+ struct ccdc_params_raw *params = &ccdc_cfg.bayer;
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int syn_mode = 0;
+ unsigned int val;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
+
+ /* Reset CCDC */
+ ccdc_restore_defaults();
+
+ /* Disable latching function registers on VSYNC */
+ regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+
+ /*
+	 * Configure the vertical sync polarity (SYN_MODE.VDPOL),
+	 * horizontal sync polarity (SYN_MODE.HDPOL), field id polarity
+	 * (SYN_MODE.FLDPOL), frame format (progressive or interlaced),
+	 * data size (SYN_MODE.DATSIZ) and pixel format (input mode); enable
+	 * output to SDRAM and the internal timing generator
+ */
+ syn_mode =
+ (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
+ ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
+ ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
+ ((config_params->data_sz & CCDC_DATA_SZ_MASK) <<
+ CCDC_DATA_SZ_SHIFT) |
+ ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) |
+ CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE);
+
+ /* Enable and configure aLaw register if needed */
+ if (config_params->alaw.enable) {
+ val = ((config_params->alaw.gamma_wd &
+ CCDC_ALAW_GAMMA_WD_MASK) | CCDC_ALAW_ENABLE);
+ regw(val, CCDC_ALAW);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to ALAW...\n", val);
+ }
+
+ /* Configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW);
+
+ /* Configure Black Clamp */
+ ccdc_config_black_clamp(&config_params->blk_clamp);
+
+ /* Configure Black level compensation */
+ ccdc_config_black_compense(&config_params->blk_comp);
+
+ /* Configure Fault Pixel Correction */
+ ccdc_config_fpc(&config_params->fault_pxl);
+
+ /* If data size is 8 bit then pack the data */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ syn_mode |= CCDC_DATA_PACK_ENABLE;
+
+#ifdef CONFIG_DM644X_VIDEO_PORT_ENABLE
+ /* enable video port */
+ val = CCDC_ENABLE_VIDEO_PORT;
+#else
+ /* disable video port */
+ val = CCDC_DISABLE_VIDEO_PORT;
+#endif
+
+ if (config_params->data_sz == CCDC_DATA_8BITS)
+ val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK)
+ << CCDC_FMTCFG_VPIN_SHIFT;
+ else
+ val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK)
+ << CCDC_FMTCFG_VPIN_SHIFT;
+ /* Write value in FMTCFG */
+ regw(val, CCDC_FMTCFG);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMTCFG...\n", val);
+ /* Configure the color pattern according to mt9t001 sensor */
+ regw(CCDC_COLPTN_VAL, CCDC_COLPTN);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0xBB11BB11 to COLPTN...\n");
+ /*
+ * Configure Data formatter(Video port) pixel selection
+ * (FMT_HORZ, FMT_VERT)
+ */
+ val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) <<
+ CCDC_FMT_HORZ_FMTSPH_SHIFT) |
+ (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK);
+ regw(val, CCDC_FMT_HORZ);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_HORZ...\n", val);
+ val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK)
+ << CCDC_FMT_VERT_FMTSLV_SHIFT;
+ if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+ val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK;
+ else
+ val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK;
+
+ dev_dbg(ccdc_cfg.dev, "\nparams->win.height 0x%x ...\n",
+ params->win.height);
+ regw(val, CCDC_FMT_VERT);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_VERT...\n", val);
+
+ dev_dbg(ccdc_cfg.dev, "\nbelow regw(val, FMT_VERT)...");
+
+ /*
+	 * Configure the horizontal offset register. If pack 8 is enabled,
+	 * one pixel takes one byte
+ */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) &
+ CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF);
+ else
+ /* else one pixel will take 2 byte */
+ regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) +
+ CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK,
+ CCDC_HSIZE_OFF);
+
+ /* Set value for SDOFST */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_enable) {
+			/* For interlace inverse mode */
+			regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST);
+			dev_dbg(ccdc_cfg.dev, "\nWriting 0x4B6D to SDOFST..\n");
+		} else {
+			/* For interlace non-inverse mode */
+ regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x0249 to SDOFST..\n");
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to SDOFST...\n");
+ }
+
+ /*
+ * Configure video port pixel selection (VPOUT)
+ * Here -1 is to make the height value less than FMT_VERT.FMTLNV
+ */
+	if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+		val = ((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK)
+			<< CCDC_VP_OUT_VERT_NUM_SHIFT;
+	else
+		val = (((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) - 1)
+			& CCDC_VP_OUT_VERT_NUM_MASK) << CCDC_VP_OUT_VERT_NUM_SHIFT;
+
+	val |= (params->win.width & CCDC_VP_OUT_HORZ_NUM_MASK)
+		<< CCDC_VP_OUT_HORZ_NUM_SHIFT;
+	val |= params->win.left & CCDC_VP_OUT_HORZ_ST_MASK;
+ regw(val, CCDC_VP_OUT);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to VP_OUT...\n", val);
+ regw(syn_mode, CCDC_SYN_MODE);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode);
+
+ ccdc_sbl_reset();
+ dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
+ ccdc_readregs();
+}
+
+static int ccdc_configure(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_config_raw();
+ else
+ ccdc_config_ycbcr();
+ return 0;
+}
+
+static int ccdc_set_buftype(enum ccdc_buftype buf_type)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.buf_type = buf_type;
+ else
+ ccdc_cfg.ycbcr.buf_type = buf_type;
+ return 0;
+}
+
+static enum ccdc_buftype ccdc_get_buftype(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.buf_type;
+ return ccdc_cfg.ycbcr.buf_type;
+}
+
+static int ccdc_enum_pix(u32 *pix, int i)
+{
+ int ret = -EINVAL;
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
+ *pix = ccdc_raw_bayer_pix_formats[i];
+ ret = 0;
+ }
+ } else {
+ if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
+ *pix = ccdc_raw_yuv_pix_formats[i];
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+static int ccdc_set_pixel_format(u32 pixfmt)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
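+		/* SBGGR8 is produced by enabling A-law compression of the raw data */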
+ if (pixfmt == V4L2_PIX_FMT_SBGGR8)
+ ccdc_cfg.bayer.config_params.alaw.enable = 1;
+ else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
+ return -EINVAL;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 ccdc_get_pixel_format(void)
+{
+ struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+ u32 pixfmt;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ if (alaw->enable)
+ pixfmt = V4L2_PIX_FMT_SBGGR8;
+ else
+ pixfmt = V4L2_PIX_FMT_SBGGR16;
+ else {
+ if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+ return pixfmt;
+}
+
+static int ccdc_set_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.win = *win;
+ else
+ ccdc_cfg.ycbcr.win = *win;
+ return 0;
+}
+
+static void ccdc_get_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ *win = ccdc_cfg.bayer.win;
+ else
+ *win = ccdc_cfg.ycbcr.win;
+}
+
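+/* line length in bytes, rounded up to a 32-byte boundary */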
+static unsigned int ccdc_get_line_length(void)
+{
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int len;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if ((config_params->alaw.enable) ||
+ (config_params->data_sz == CCDC_DATA_8BITS))
+ len = ccdc_cfg.bayer.win.width;
+ else
+ len = ccdc_cfg.bayer.win.width * 2;
+ } else
+ len = ccdc_cfg.ycbcr.win.width * 2;
+ return ALIGN(len, 32);
+}
+
+static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+ return 0;
+}
+
+static enum ccdc_frmfmt ccdc_get_frame_format(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.frm_fmt;
+ else
+ return ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static int ccdc_getfid(void)
+{
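+	/* the current field ID is reported in bit 15 of SYN_MODE */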
+ return (regr(CCDC_SYN_MODE) >> 15) & 1;
+}
+
+/* misc operations */
+static inline void ccdc_setfbaddr(unsigned long addr)
+{
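+	/* SDR_ADDR must be 32-byte aligned, so the low 5 bits are masked off */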
+ regw(addr & 0xffffffe0, CCDC_SDR_ADDR);
+}
+
+static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+ ccdc_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_YCBCR_SYNC_16:
+ case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
+ ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+ ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+ break;
+ default:
+ /* TODO add support for raw bayer here */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void ccdc_save_context(void)
+{
+ ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
+ ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
+ ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
+ ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
+ ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
+ ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
+ ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
+ ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
+ ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
+ ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
+ ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
+ ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
+ ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
+ ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
+ ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
+ ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
+ ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
+ ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
+ ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
+ ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
+ ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
+ ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
+ ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
+ ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
+ ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
+ ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
+ ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
+ ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
+ ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
+ ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
+ ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
+ ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
+ ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
+ ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
+ ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
+ ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
+ ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
+}
+
+static void ccdc_restore_context(void)
+{
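+	/*
+	 * PCR (module enable) is restored last so the CCDC restarts only
+	 * after all other registers are back in place
+	 */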
+ regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
+ regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
+ regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
+ regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
+ regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
+ regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
+ regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
+ regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
+ regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
+ regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
+ regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
+ regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
+ regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
+ regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
+ regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
+ regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
+ regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
+ regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
+ regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
+ regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
+ regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
+ regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
+ regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
+ regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
+ regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
+ regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
+ regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
+ regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
+ regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
+ regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
+ regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
+ regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
+ regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
+ regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
+ regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
+ regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
+ regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
+}
+
+static struct ccdc_hw_device ccdc_hw_dev = {
+ .name = "DM6446 CCDC",
+ .owner = THIS_MODULE,
+ .hw_ops = {
+ .open = ccdc_open,
+ .close = ccdc_close,
+ .reset = ccdc_sbl_reset,
+ .enable = ccdc_enable,
+ .set_hw_if_params = ccdc_set_hw_if_params,
+ .set_params = ccdc_set_params,
+ .configure = ccdc_configure,
+ .set_buftype = ccdc_set_buftype,
+ .get_buftype = ccdc_get_buftype,
+ .enum_pix = ccdc_enum_pix,
+ .set_pixel_format = ccdc_set_pixel_format,
+ .get_pixel_format = ccdc_get_pixel_format,
+ .set_frame_format = ccdc_set_frame_format,
+ .get_frame_format = ccdc_get_frame_format,
+ .set_image_window = ccdc_set_image_window,
+ .get_image_window = ccdc_get_image_window,
+ .get_line_length = ccdc_get_line_length,
+ .setfbaddr = ccdc_setfbaddr,
+ .getfid = ccdc_getfid,
+ },
+};
+
+static int dm644x_ccdc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int status = 0;
+
+ /*
+	 * first try to register with vpfe. If this is not the correct
+	 * platform, we don't have to ioremap
+ */
+ status = vpfe_register_ccdc_device(&ccdc_hw_dev);
+ if (status < 0)
+ return status;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ status = -ENODEV;
+ goto fail_nores;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nores;
+ }
+
+ ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ if (!ccdc_cfg.base_addr) {
+ status = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ ccdc_cfg.dev = &pdev->dev;
+ printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
+ return 0;
+fail_nomem:
+ release_mem_region(res->start, resource_size(res));
+fail_nores:
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return status;
+}
+
+static int dm644x_ccdc_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ iounmap(ccdc_cfg.base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return 0;
+}
+
+static int dm644x_ccdc_suspend(struct device *dev)
+{
+ /* Save CCDC context */
+ ccdc_save_context();
+ /* Disable CCDC */
+ ccdc_enable(0);
+
+ return 0;
+}
+
+static int dm644x_ccdc_resume(struct device *dev)
+{
+ /* Restore CCDC context */
+ ccdc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
+ .suspend = dm644x_ccdc_suspend,
+ .resume = dm644x_ccdc_resume,
+};
+
+static struct platform_driver dm644x_ccdc_driver = {
+ .driver = {
+ .name = "dm644x_ccdc",
+ .owner = THIS_MODULE,
+ .pm = &dm644x_ccdc_pm_ops,
+ },
+ .remove = dm644x_ccdc_remove,
+ .probe = dm644x_ccdc_probe,
+};
+
+module_platform_driver(dm644x_ccdc_driver);
diff --git a/drivers/media/platform/davinci/dm644x_ccdc_regs.h b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
new file mode 100644
index 00000000000..2b0aca5383f
--- /dev/null
+++ b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2006-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _DM644X_CCDC_REGS_H
+#define _DM644X_CCDC_REGS_H
+
+/**************************************************************************\
+* Register OFFSET Definitions
+\**************************************************************************/
+#define CCDC_PID 0x0
+#define CCDC_PCR 0x4
+#define CCDC_SYN_MODE 0x8
+#define CCDC_HD_VD_WID 0xc
+#define CCDC_PIX_LINES 0x10
+#define CCDC_HORZ_INFO 0x14
+#define CCDC_VERT_START 0x18
+#define CCDC_VERT_LINES 0x1c
+#define CCDC_CULLING 0x20
+#define CCDC_HSIZE_OFF 0x24
+#define CCDC_SDOFST 0x28
+#define CCDC_SDR_ADDR 0x2c
+#define CCDC_CLAMP 0x30
+#define CCDC_DCSUB 0x34
+#define CCDC_COLPTN 0x38
+#define CCDC_BLKCMP 0x3c
+#define CCDC_FPC 0x40
+#define CCDC_FPC_ADDR 0x44
+#define CCDC_VDINT 0x48
+#define CCDC_ALAW 0x4c
+#define CCDC_REC656IF 0x50
+#define CCDC_CCDCFG 0x54
+#define CCDC_FMTCFG 0x58
+#define CCDC_FMT_HORZ 0x5c
+#define CCDC_FMT_VERT 0x60
+#define CCDC_FMT_ADDR0 0x64
+#define CCDC_FMT_ADDR1 0x68
+#define CCDC_FMT_ADDR2 0x6c
+#define CCDC_FMT_ADDR3 0x70
+#define CCDC_FMT_ADDR4 0x74
+#define CCDC_FMT_ADDR5 0x78
+#define CCDC_FMT_ADDR6 0x7c
+#define CCDC_FMT_ADDR7 0x80
+#define CCDC_PRGEVEN_0 0x84
+#define CCDC_PRGEVEN_1 0x88
+#define CCDC_PRGODD_0 0x8c
+#define CCDC_PRGODD_1 0x90
+#define CCDC_VP_OUT 0x94
+#define CCDC_REG_END 0x98
+
+/***************************************************************
+* Define for various register bit mask and shifts for CCDC
+****************************************************************/
+#define CCDC_FID_POL_MASK 1
+#define CCDC_FID_POL_SHIFT 4
+#define CCDC_HD_POL_MASK 1
+#define CCDC_HD_POL_SHIFT 3
+#define CCDC_VD_POL_MASK 1
+#define CCDC_VD_POL_SHIFT 2
+#define CCDC_HSIZE_OFF_MASK 0xffffffe0
+#define CCDC_32BYTE_ALIGN_VAL 31
+#define CCDC_FRM_FMT_MASK 0x1
+#define CCDC_FRM_FMT_SHIFT 7
+#define CCDC_DATA_SZ_MASK 7
+#define CCDC_DATA_SZ_SHIFT 8
+#define CCDC_PIX_FMT_MASK 3
+#define CCDC_PIX_FMT_SHIFT 12
+#define CCDC_VP2SDR_DISABLE 0xFFFBFFFF
+#define CCDC_WEN_ENABLE (1 << 17)
+#define CCDC_SDR2RSZ_DISABLE 0xFFF7FFFF
+#define CCDC_VDHDEN_ENABLE (1 << 16)
+#define CCDC_LPF_ENABLE (1 << 14)
+#define CCDC_ALAW_ENABLE (1 << 3)
+#define CCDC_ALAW_GAMMA_WD_MASK 7
+#define CCDC_BLK_CLAMP_ENABLE (1 << 31)
+#define CCDC_BLK_SGAIN_MASK 0x1F
+#define CCDC_BLK_ST_PXL_MASK 0x7FFF
+#define CCDC_BLK_ST_PXL_SHIFT 10
+#define CCDC_BLK_SAMPLE_LN_MASK 7
+#define CCDC_BLK_SAMPLE_LN_SHIFT 28
+#define CCDC_BLK_SAMPLE_LINE_MASK 7
+#define CCDC_BLK_SAMPLE_LINE_SHIFT 25
+#define CCDC_BLK_DC_SUB_MASK 0x03FFF
+#define CCDC_BLK_COMP_MASK 0xFF
+#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
+#define CCDC_BLK_COMP_GR_COMP_SHIFT 16
+#define CCDC_BLK_COMP_R_COMP_SHIFT 24
+#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15)
+#define CCDC_FPC_ENABLE (1 << 15)
+#define CCDC_FPC_DISABLE 0
+#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
+#define CCDC_DATA_PACK_ENABLE (1 << 11)
+#define CCDC_FMTCFG_VPIN_MASK 7
+#define CCDC_FMTCFG_VPIN_SHIFT 12
+#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
+#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
+#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
+#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
+#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
+#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
+#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
+#define CCDC_HORZ_INFO_SPH_SHIFT 16
+#define CCDC_VERT_START_SLV0_SHIFT 16
+#define CCDC_VDINT_VDINT0_SHIFT 16
+#define CCDC_VDINT_VDINT1_MASK 0xFFFF
+#define CCDC_PPC_RAW 1
+#define CCDC_DCSUB_DEFAULT_VAL 0
+#define CCDC_CLAMP_DEFAULT_VAL 0
+#define CCDC_ENABLE_VIDEO_PORT 0x8000
+#define CCDC_DISABLE_VIDEO_PORT 0
+#define CCDC_COLPTN_VAL 0xBB11BB11
+#define CCDC_TWO_BYTES_PER_PIXEL 2
+#define CCDC_INTERLACED_IMAGE_INVERT 0x4B6D
+#define CCDC_INTERLACED_NO_IMAGE_INVERT 0x0249
+#define CCDC_PROGRESSIVE_IMAGE_INVERT 0x4000
+#define CCDC_PROGRESSIVE_NO_IMAGE_INVERT 0
+#define CCDC_INTERLACED_HEIGHT_SHIFT 1
+#define CCDC_SYN_MODE_INPMOD_SHIFT 12
+#define CCDC_SYN_MODE_INPMOD_MASK 3
+#define CCDC_SYN_MODE_8BITS (7 << 8)
+#define CCDC_SYN_MODE_10BITS (6 << 8)
+#define CCDC_SYN_MODE_11BITS (5 << 8)
+#define CCDC_SYN_MODE_12BITS (4 << 8)
+#define CCDC_SYN_MODE_13BITS (3 << 8)
+#define CCDC_SYN_MODE_14BITS (2 << 8)
+#define CCDC_SYN_MODE_15BITS (1 << 8)
+#define CCDC_SYN_MODE_16BITS (0 << 8)
+#define CCDC_SYN_FLDMODE_MASK 1
+#define CCDC_SYN_FLDMODE_SHIFT 7
+#define CCDC_REC656IF_BT656_EN 3
+#define CCDC_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
+#define CCDC_CCDCFG_Y8POS_SHIFT 11
+#define CCDC_CCDCFG_BW656_10BIT (1 << 5)
+#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
+#define CCDC_NO_CULLING 0xffff00ff
+#endif
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
new file mode 100644
index 00000000000..3332cca632e
--- /dev/null
+++ b/drivers/media/platform/davinci/isif.c
@@ -0,0 +1,1145 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Image Sensor Interface (ISIF) driver
+ *
+ * This driver configures the ISIF IP available on the DM365 and other
+ * TI SoCs. It is used for capturing YUV or Bayer video or image data
+ * from a decoder or sensor. This IP is similar to the CCDC IP on the DM355
+ * and DM6446, but with enhanced or additional IP blocks. The driver
+ * configures the ISIF upon commands from the vpfe bridge driver through
+ * the ccdc_hw_device interface.
+ *
+ * TODO: 1) Raw Bayer parameter settings and Bayer capture
+ *	 2) Add support for the control ioctl
+ */
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <mach/mux.h>
+
+#include <media/davinci/isif.h>
+#include <media/davinci/vpss.h>
+
+#include "isif_regs.h"
+#include "ccdc_hw_device.h"
+
+/* Defaults for module configuration parameters */
+static struct isif_config_params_raw isif_config_defaults = {
+ .linearize = {
+ .en = 0,
+ .corr_shft = ISIF_NO_SHIFT,
+ .scale_fact = {1, 0},
+ },
+ .df_csc = {
+ .df_or_csc = 0,
+ .csc = {
+ .en = 0,
+ },
+ },
+ .dfc = {
+ .en = 0,
+ },
+ .bclamp = {
+ .en = 0,
+ },
+ .gain_offset = {
+ .gain = {
+ .r_ye = {1, 0},
+ .gr_cy = {1, 0},
+ .gb_g = {1, 0},
+ .b_mg = {1, 0},
+ },
+ },
+ .culling = {
+ .hcpat_odd = 0xff,
+ .hcpat_even = 0xff,
+ .vcpat = 0xff,
+ },
+ .compress = {
+ .alg = ISIF_ALAW,
+ },
+};
+
+/* ISIF operation configuration */
+static struct isif_oper_config {
+ struct device *dev;
+ enum vpfe_hw_if_type if_type;
+ struct isif_ycbcr_config ycbcr;
+ struct isif_params_raw bayer;
+ enum isif_data_pack data_pack;
+ /* ISIF base address */
+ void __iomem *base_addr;
+ /* ISIF Linear Table 0 */
+ void __iomem *linear_tbl0_addr;
+ /* ISIF Linear Table 1 */
+ void __iomem *linear_tbl1_addr;
+} isif_cfg = {
+ .ycbcr = {
+ .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+ .frm_fmt = CCDC_FRMFMT_INTERLACED,
+ .win = ISIF_WIN_NTSC,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .pix_order = CCDC_PIXORDER_CBYCRY,
+ .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED,
+ },
+ .bayer = {
+ .pix_fmt = CCDC_PIXFMT_RAW,
+ .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+ .win = ISIF_WIN_VGA,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .gain = {
+ .r_ye = {1, 0},
+ .gr_cy = {1, 0},
+ .gb_g = {1, 0},
+ .b_mg = {1, 0},
+ },
+ .cfa_pat = ISIF_CFA_PAT_MOSAIC,
+ .data_msb = ISIF_BIT_MSB_11,
+ .config_params = {
+ .data_shift = ISIF_NO_SHIFT,
+ .col_pat_field0 = {
+ .olop = ISIF_GREEN_BLUE,
+ .olep = ISIF_BLUE,
+ .elop = ISIF_RED,
+ .elep = ISIF_GREEN_RED,
+ },
+ .col_pat_field1 = {
+ .olop = ISIF_GREEN_BLUE,
+ .olep = ISIF_BLUE,
+ .elop = ISIF_RED,
+ .elep = ISIF_GREEN_RED,
+ },
+ .test_pat_gen = 0,
+ },
+ },
+ .data_pack = ISIF_DATA_PACK8,
+};
+
+/* Raw Bayer formats */
+static const u32 isif_raw_bayer_pix_formats[] = {
+ V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static const u32 isif_raw_yuv_pix_formats[] = {
+ V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+ return __raw_readl(isif_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+ __raw_writel(val, isif_cfg.base_addr + offset);
+}
+
+/* reg_modify() - read, modify and write register */
+static inline u32 reg_modify(u32 mask, u32 val, u32 offset)
+{
+ u32 new_val = (regr(offset) & ~mask) | (val & mask);
+
+ regw(new_val, offset);
+ return new_val;
+}
+
+static inline void regw_lin_tbl(u32 val, u32 offset, int i)
+{
+ if (!i)
+ __raw_writel(val, isif_cfg.linear_tbl0_addr + offset);
+ else
+ __raw_writel(val, isif_cfg.linear_tbl1_addr + offset);
+}
+
+static void isif_disable_all_modules(void)
+{
+ /* disable BC */
+ regw(0, CLAMPCFG);
+ /* disable vdfc */
+ regw(0, DFCCTL);
+ /* disable CSC */
+ regw(0, CSCCTL);
+ /* disable linearization */
+ regw(0, LINCFG0);
+ /* disable other modules here as support for them is added */
+}
+
+static void isif_enable(int en)
+{
+ if (!en) {
+ /* Before disabling the ISIF, disable all its modules */
+ isif_disable_all_modules();
+ /*
+ * wait for the next VD. Assuming the lowest scan rate is 12 Hz,
+ * a 100 msec delay is good enough
+ */
+ msleep(100);
+ }
+ reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN);
+}
+
+static void isif_enable_output_to_sdram(int en)
+{
+ reg_modify(ISIF_SYNCEN_WEN_MASK, en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
+}
+
+static void isif_config_culling(struct isif_cul *cul)
+{
+ u32 val;
+
+ /* Horizontal pattern */
+ val = (cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT) | cul->hcpat_odd;
+ regw(val, CULH);
+
+ /* vertical pattern */
+ regw(cul->vcpat, CULV);
+
+ /* LPF */
+ reg_modify(ISIF_LPF_MASK << ISIF_LPF_SHIFT,
+ cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
+}
+
+static void isif_config_gain_offset(void)
+{
+ struct isif_gain_offsets_adj *gain_off_p =
+ &isif_cfg.bayer.config_params.gain_offset;
+ u32 val;
+
+ val = (!!gain_off_p->gain_sdram_en << GAIN_SDRAM_EN_SHIFT) |
+ (!!gain_off_p->gain_ipipe_en << GAIN_IPIPE_EN_SHIFT) |
+ (!!gain_off_p->gain_h3a_en << GAIN_H3A_EN_SHIFT) |
+ (!!gain_off_p->offset_sdram_en << OFST_SDRAM_EN_SHIFT) |
+ (!!gain_off_p->offset_ipipe_en << OFST_IPIPE_EN_SHIFT) |
+ (!!gain_off_p->offset_h3a_en << OFST_H3A_EN_SHIFT);
+
+ reg_modify(GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
+
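+ /*
+ * Gain values are fixed point: the integer part sits above bit 9
+ * (GAIN_INTEGER_SHIFT) and the lower 9 bits hold the fraction, so a
+ * unity gain of {1, 0} programs 0x200 into the gain register.
+ */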
+ val = (gain_off_p->gain.r_ye.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.r_ye.decimal;
+ regw(val, CRGAIN);
+
+ val = (gain_off_p->gain.gr_cy.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.gr_cy.decimal;
+ regw(val, CGRGAIN);
+
+ val = (gain_off_p->gain.gb_g.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.gb_g.decimal;
+ regw(val, CGBGAIN);
+
+ val = (gain_off_p->gain.b_mg.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.b_mg.decimal;
+ regw(val, CBGAIN);
+
+ regw(gain_off_p->offset, COFSTA);
+}
+
+static void isif_restore_defaults(void)
+{
+ enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
+
+ dev_dbg(isif_cfg.dev, "\nstarting isif_restore_defaults...");
+ isif_cfg.bayer.config_params = isif_config_defaults;
+ /* Enable clock to ISIF, IPIPEIF and BL */
+ vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
+ vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
+ vpss_enable_clock(VPSS_BL_CLOCK, 1);
+ /* Set default offset and gain */
+ isif_config_gain_offset();
+ vpss_select_ccdc_source(source);
+ dev_dbg(isif_cfg.dev, "\nEnd of isif_restore_defaults...");
+}
+
+static int isif_open(struct device *device)
+{
+ isif_restore_defaults();
+ return 0;
+}
+
+/* Configure the capture window size in the ISIF registers */
+static void isif_setwin(struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt, int ppc)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int mid_img = 0;
+
+ dev_dbg(isif_cfg.dev, "\nStarting isif_setwin...");
+ /*
+ * ppc - per pixel count. Indicates how many pixels per cell are
+ * output to SDRAM. For example, for YCbCr it is one Y and one C,
+ * so 2; for raw capture it is 1.
+ */
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
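+ /*
+ * Worked example: for YCbCr (ppc = 2) with left = 32 and width = 720,
+ * horz_start = 64 and horz_nr_pixels = 1439 as written to SPH and LNH.
+ */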
+
+ /* Writing the horizontal info into the registers */
+ regw(horz_start & START_PX_HOR_MASK, SPH);
+ regw(horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ } else {
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /* configure VDINT0 and VDINT1 */
+ mid_img = vert_start + (image_win->height / 2);
+ regw(mid_img, VDINT1);
+ }
+
+ regw(0, VDINT0);
+ regw(vert_start & START_VER_ONE_MASK, SLV0);
+ regw(vert_start & START_VER_TWO_MASK, SLV1);
+ regw(vert_nr_lines & NUM_LINES_VER, LNV);
+}
+
+static void isif_config_bclamp(struct isif_black_clamp *bc)
+{
+ u32 val;
+
+ /*
+ * DC Offset is always added to image data irrespective of bc enable
+ * status
+ */
+ regw(bc->dc_offset, CLDCOFST);
+
+ if (bc->en) {
+ val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT;
+
+ /* Enable BC and horizontal clamp calculation parameters */
+ val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT);
+
+ regw(val, CLAMPCFG);
+
+ if (bc->horz.mode != ISIF_HORZ_BC_DISABLE) {
+ /*
+ * Window count for calculation
+ * Base window selection
+ * pixel limit
+ * Horizontal size of window
+ * vertical size of the window
+ * Horizontal start position of the window
+ * Vertical start position of the window
+ */
+ val = bc->horz.win_count_calc |
+ ((!!bc->horz.base_win_sel_calc) <<
+ ISIF_HORZ_BC_WIN_SEL_SHIFT) |
+ ((!!bc->horz.clamp_pix_limit) <<
+ ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
+ (bc->horz.win_h_sz_calc <<
+ ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
+ (bc->horz.win_v_sz_calc <<
+ ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
+ regw(val, CLHWIN0);
+
+ regw(bc->horz.win_start_h_calc, CLHWIN1);
+ regw(bc->horz.win_start_v_calc, CLHWIN2);
+ }
+
+ /* vertical clamp calculation parameters */
+
+ /* Reset clamp value sel for previous line */
+ val |=
+ (bc->vert.reset_val_sel << ISIF_VERT_BC_RST_VAL_SEL_SHIFT) |
+ (bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT);
+ regw(val, CLVWIN0);
+
+ /* Optical Black horizontal start position */
+ regw(bc->vert.ob_start_h, CLVWIN1);
+ /* Optical Black vertical start position */
+ regw(bc->vert.ob_start_v, CLVWIN2);
+ /* Optical Black vertical size for calculation */
+ regw(bc->vert.ob_v_sz_calc, CLVWIN3);
+ /* Vertical start position for BC subtraction */
+ regw(bc->vert_start_sub, CLSV);
+ }
+}
+
+static void isif_config_linearization(struct isif_linearize *linearize)
+{
+ u32 val, i;
+
+ if (!linearize->en) {
+ regw(0, LINCFG0);
+ return;
+ }
+
+ /* shift value for correction & enable linearization (set lsb) */
+ val = (linearize->corr_shft << ISIF_LIN_CORRSFT_SHIFT) | 1;
+ regw(val, LINCFG0);
+
+ /* Scale factor */
+ val = ((!!linearize->scale_fact.integer) <<
+ ISIF_LIN_SCALE_FACT_INTEG_SHIFT) |
+ linearize->scale_fact.decimal;
+ regw(val, LINCFG1);
+
+ for (i = 0; i < ISIF_LINEAR_TAB_SIZE; i++) {
+ if (i % 2)
+ regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1);
+ else
+ regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0);
+ }
+}
+
+static int isif_config_dfc(struct isif_dfc *vdfc)
+{
+ /* initialize retries to loop for max ~ 250 usec */
+ u32 val, count, retries = loops_per_jiffy / (4000/HZ);
+ int i;
+
+ if (!vdfc->en)
+ return 0;
+
+ /* Correction mode */
+ val = (vdfc->corr_mode << ISIF_VDFC_CORR_MOD_SHIFT);
+
+ /* Correct whole line or partial */
+ if (vdfc->corr_whole_line)
+ val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
+
+ /* level shift value */
+ val |= vdfc->def_level_shift << ISIF_VDFC_LEVEL_SHFT_SHIFT;
+
+ regw(val, DFCCTL);
+
+ /* Defect saturation level */
+ regw(vdfc->def_sat_level, VDFSATLV);
+
+ regw(vdfc->table[0].pos_vert, DFCMEM0);
+ regw(vdfc->table[0].pos_horz, DFCMEM1);
+ if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ regw(vdfc->table[0].level_at_pos, DFCMEM2);
+ regw(vdfc->table[0].level_up_pixels, DFCMEM3);
+ regw(vdfc->table[0].level_low_pixels, DFCMEM4);
+ }
+
+ /* set DFCMARST and set DFCMWR */
+ val = regr(DFCMEMCTL) | (1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT) | 1;
+ regw(val, DFCMEMCTL);
+
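+ /*
+ * DFCMARST appears to reset the defect table address pointer and
+ * bit 0 (DFCMWR) to trigger the write of the DFCMEMx set; the busy
+ * wait below polls bit 0 until the hardware clears it.
+ */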
+ count = retries;
+ while (count && (regr(DFCMEMCTL) & 0x1))
+ count--;
+
+ if (!count) {
+ dev_dbg(isif_cfg.dev, "defect table write timeout !!!\n");
+ return -1;
+ }
+
+ for (i = 1; i < vdfc->num_vdefects; i++) {
+ regw(vdfc->table[i].pos_vert, DFCMEM0);
+ regw(vdfc->table[i].pos_horz, DFCMEM1);
+ if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ regw(vdfc->table[i].level_at_pos, DFCMEM2);
+ regw(vdfc->table[i].level_up_pixels, DFCMEM3);
+ regw(vdfc->table[i].level_low_pixels, DFCMEM4);
+ }
+ val = regr(DFCMEMCTL);
+ /* clear DFCMARST and set DFCMWR */
+ val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
+ val |= 1;
+ regw(val, DFCMEMCTL);
+
+ count = retries;
+ while (count && (regr(DFCMEMCTL) & 0x1))
+ count--;
+
+ if (!count) {
+ dev_err(isif_cfg.dev,
+ "defect table write timeout !!!\n");
+ return -1;
+ }
+ }
+ if (vdfc->num_vdefects < ISIF_VDFC_TABLE_SIZE) {
+ /* Extra cycle needed */
+ regw(0, DFCMEM0);
+ regw(0x1FFF, DFCMEM1);
+ regw(1, DFCMEMCTL);
+ }
+
+ /* enable VDFC */
+ reg_modify((1 << ISIF_VDFC_EN_SHIFT), (1 << ISIF_VDFC_EN_SHIFT),
+ DFCCTL);
+ return 0;
+}
+
+static void isif_config_csc(struct isif_df_csc *df_csc)
+{
+ u32 val1 = 0, val2 = 0, i;
+
+ if (!df_csc->csc.en) {
+ regw(0, CSCCTL);
+ return;
+ }
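+ /*
+ * Each CSC coefficient is 8 bits wide: a 3-bit integer part above a
+ * 5-bit fraction (ISIF_CSC_COEF_INTEG_SHIFT). Coefficients are packed
+ * two per CSCM register, the even-indexed one in bits 7:0 and the
+ * odd-indexed one shifted up by ISIF_CSCM_MSB_SHIFT into bits 15:8.
+ */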
+ for (i = 0; i < ISIF_CSC_NUM_COEFF; i++) {
+ if ((i % 2) == 0) {
+ /* CSCM - LSB */
+ val1 = (df_csc->csc.coeff[i].integer <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ df_csc->csc.coeff[i].decimal;
+ } else {
+
+ /* CSCM - MSB */
+ val2 = (df_csc->csc.coeff[i].integer <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ df_csc->csc.coeff[i].decimal;
+ val2 <<= ISIF_CSCM_MSB_SHIFT;
+ val2 |= val1;
+ regw(val2, (CSCM0 + ((i - 1) << 1)));
+ }
+ }
+
+ /* program the active area */
+ regw(df_csc->start_pix, FMTSPH);
+ /*
+ * one extra pixel as required for CSC. Actually the number of
+ * pixels - 1 should be configured in this register. So we would
+ * need to subtract 1 before writing to FMTLNH, but we do not,
+ * since CSC requires one extra pixel
+ */
+ regw(df_csc->num_pixels, FMTLNH);
+ regw(df_csc->start_line, FMTSLV);
+ /*
+ * one extra line as required for CSC. See reason documented for
+ * num_pixels
+ */
+ regw(df_csc->num_lines, FMTLNV);
+
+ /* Enable CSC */
+ regw(1, CSCCTL);
+}
+
+static int isif_config_raw(void)
+{
+ struct isif_params_raw *params = &isif_cfg.bayer;
+ struct isif_config_params_raw *module_params =
+ &isif_cfg.bayer.config_params;
+ struct vpss_pg_frame_size frame_size;
+ struct vpss_sync_pol sync;
+ u32 val;
+
+ dev_dbg(isif_cfg.dev, "\nStarting isif_config_raw..\n");
+
+ /*
+ * Configure CCDCFG register:-
+ * Set CCD Not to swap input since input is RAW data
+ * Set FID detection function to Latch at V-Sync
+ * Set WENLOG - isif valid area
+ * Set TRGSEL
+ * Set EXTRG
+ * Packed to 8 or 16 bits
+ */
+
+ val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
+ ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
+ ISIF_CCDCFG_EXTRG_DISABLE | isif_cfg.data_pack;
+
+ dev_dbg(isif_cfg.dev, "Writing 0x%x to ...CCDCFG \n", val);
+ regw(val, CCDCFG);
+
+ /*
+ * Configure the vertical sync polarity(MODESET.VDPOL)
+ * Configure the horizontal sync polarity (MODESET.HDPOL)
+ * Configure frame id polarity (MODESET.FLDPOL)
+ * Configure data polarity
+ * Configure External WEN Selection
+ * Configure frame format(progressive or interlace)
+ * Configure pixel format (Input mode)
+ * Configure the data shift
+ */
+
+ val = ISIF_VDHDOUT_INPUT | (params->vd_pol << ISIF_VD_POL_SHIFT) |
+ (params->hd_pol << ISIF_HD_POL_SHIFT) |
+ (params->fid_pol << ISIF_FID_POL_SHIFT) |
+ (ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT) |
+ (ISIF_EXWEN_DISABLE << ISIF_EXWEN_SHIFT) |
+ (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
+ (params->pix_fmt << ISIF_INPUT_SHIFT) |
+ (params->config_params.data_shift << ISIF_DATASFT_SHIFT);
+
+ regw(val, MODESET);
+ dev_dbg(isif_cfg.dev, "Writing 0x%x to MODESET...\n", val);
+
+ /*
+ * Configure GAMMAWD register
+ * CFA pattern setting
+ */
+ val = params->cfa_pat << ISIF_GAMMAWD_CFA_SHIFT;
+
+ /* Gamma msb */
+ if (module_params->compress.alg == ISIF_ALAW)
+ val |= ISIF_ALAW_ENABLE;
+
+ val |= (params->data_msb << ISIF_ALAW_GAMMA_WD_SHIFT);
+ regw(val, CGAMMAWD);
+
+ /* Configure DPCM compression settings */
+ if (module_params->compress.alg == ISIF_DPCM) {
+ val = BIT(ISIF_DPCM_EN_SHIFT) |
+ (module_params->compress.pred <<
+ ISIF_DPCM_PREDICTOR_SHIFT);
+ }
+
+ regw(val, MISC);
+
+ /* Configure Gain & Offset */
+ isif_config_gain_offset();
+
+ /* Configure Color pattern */
+ val = (params->config_params.col_pat_field0.olop) |
+ (params->config_params.col_pat_field0.olep << 2) |
+ (params->config_params.col_pat_field0.elop << 4) |
+ (params->config_params.col_pat_field0.elep << 6) |
+ (params->config_params.col_pat_field1.olop << 8) |
+ (params->config_params.col_pat_field1.olep << 10) |
+ (params->config_params.col_pat_field1.elop << 12) |
+ (params->config_params.col_pat_field1.elep << 14);
+ regw(val, CCOLP);
+ dev_dbg(isif_cfg.dev, "Writing %x to CCOLP ...\n", val);
+
+ /* Configure HSIZE register */
+ val = (!!params->horz_flip_en) << ISIF_HSIZE_FLIP_SHIFT;
+
+ /* calculate line offset in 32 bytes based on pack value */
+ if (isif_cfg.data_pack == ISIF_PACK_8BIT)
+ val |= ((params->win.width + 31) >> 5);
+ else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
+ val |= (((params->win.width +
+ (params->win.width >> 2)) + 31) >> 5);
+ else
+ val |= (((params->win.width * 2) + 31) >> 5);
+ regw(val, HSIZE);
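+ /*
+ * The line offset field of HSIZE is in units of 32 bytes, e.g. an
+ * 8-bit packed 640-pixel line gives (640 + 31) >> 5 = 20.
+ */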
+
+ /* Configure SDOFST register */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_en) {
+ /* For interlace inverse mode */
+ regw(0x4B6D, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x4B6D to SDOFST...\n");
+ } else {
+ /* For interlace non inverse mode */
+ regw(0x0B6D, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x0B6D to SDOFST...\n");
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ if (params->image_invert_en) {
+ /* For progressive inverse mode */
+ regw(0x4000, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x4000 to SDOFST...\n");
+ } else {
+ /* For progressive non inverse mode */
+ regw(0x0000, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x0000 to SDOFST...\n");
+ }
+ }
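+ /*
+ * The inverse-mode values above differ from the normal ones only in
+ * bit 14 of SDOFST, which presumably selects inverted (bottom-up)
+ * line addressing; the low bits (0xB6D) appear to encode the
+ * field/line offsets used for interlaced capture.
+ */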
+
+ /* Configure video window */
+ isif_setwin(&params->win, params->frm_fmt, 1);
+
+ /* Configure Black Clamp */
+ isif_config_bclamp(&module_params->bclamp);
+
+ /* Configure Vertical Defect Pixel Correction */
+ if (isif_config_dfc(&module_params->dfc) < 0)
+ return -EFAULT;
+
+ if (!module_params->df_csc.df_or_csc)
+ /* Configure Color Space Conversion */
+ isif_config_csc(&module_params->df_csc);
+
+ isif_config_linearization(&module_params->linearize);
+
+ /* Configure Culling */
+ isif_config_culling(&module_params->culling);
+
+ /* Configure horizontal and vertical offsets(DFC,LSC,Gain) */
+ regw(module_params->horz_offset, DATAHOFST);
+ regw(module_params->vert_offset, DATAVOFST);
+
+ /* Setup test pattern if enabled */
+ if (params->config_params.test_pat_gen) {
+ /* Use the HD/VD pol settings from user */
+ sync.ccdpg_hdpol = params->hd_pol;
+ sync.ccdpg_vdpol = params->vd_pol;
+ dm365_vpss_set_sync_pol(sync);
+ frame_size.hlpfr = isif_cfg.bayer.win.width;
+ frame_size.pplen = isif_cfg.bayer.win.height;
+ dm365_vpss_set_pg_frame_size(frame_size);
+ vpss_select_ccdc_source(VPSS_PGLPBK);
+ }
+
+ dev_dbg(isif_cfg.dev, "\nEnd of isif_config_ycbcr...\n");
+ return 0;
+}
+
+static int isif_set_buftype(enum ccdc_buftype buf_type)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ isif_cfg.bayer.buf_type = buf_type;
+ else
+ isif_cfg.ycbcr.buf_type = buf_type;
+
+ return 0;
+}
+
+static enum ccdc_buftype isif_get_buftype(void)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ return isif_cfg.bayer.buf_type;
+
+ return isif_cfg.ycbcr.buf_type;
+}
+
+static int isif_enum_pix(u32 *pix, int i)
+{
+ int ret = -EINVAL;
+
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ if (i < ARRAY_SIZE(isif_raw_bayer_pix_formats)) {
+ *pix = isif_raw_bayer_pix_formats[i];
+ ret = 0;
+ }
+ } else {
+ if (i < ARRAY_SIZE(isif_raw_yuv_pix_formats)) {
+ *pix = isif_raw_yuv_pix_formats[i];
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int isif_set_pixel_format(unsigned int pixfmt)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ if (pixfmt == V4L2_PIX_FMT_SBGGR8) {
+ if ((isif_cfg.bayer.config_params.compress.alg !=
+ ISIF_ALAW) &&
+ (isif_cfg.bayer.config_params.compress.alg !=
+ ISIF_DPCM)) {
+ dev_dbg(isif_cfg.dev,
+ "Either configure A-Law or DPCM\n");
+ return -EINVAL;
+ }
+ isif_cfg.data_pack = ISIF_PACK_8BIT;
+ } else if (pixfmt == V4L2_PIX_FMT_SBGGR16) {
+ isif_cfg.bayer.config_params.compress.alg =
+ ISIF_NO_COMPRESSION;
+ isif_cfg.data_pack = ISIF_PACK_16BIT;
+ } else
+ return -EINVAL;
+ isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+ isif_cfg.data_pack = ISIF_PACK_8BIT;
+ }
+ return 0;
+}
+
+static u32 isif_get_pixel_format(void)
+{
+ u32 pixfmt;
+
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ if (isif_cfg.bayer.config_params.compress.alg == ISIF_ALAW ||
+ isif_cfg.bayer.config_params.compress.alg == ISIF_DPCM)
+ pixfmt = V4L2_PIX_FMT_SBGGR8;
+ else
+ pixfmt = V4L2_PIX_FMT_SBGGR16;
+ else {
+ if (isif_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+ return pixfmt;
+}
+
+static int isif_set_image_window(struct v4l2_rect *win)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ isif_cfg.bayer.win.top = win->top;
+ isif_cfg.bayer.win.left = win->left;
+ isif_cfg.bayer.win.width = win->width;
+ isif_cfg.bayer.win.height = win->height;
+ } else {
+ isif_cfg.ycbcr.win.top = win->top;
+ isif_cfg.ycbcr.win.left = win->left;
+ isif_cfg.ycbcr.win.width = win->width;
+ isif_cfg.ycbcr.win.height = win->height;
+ }
+ return 0;
+}
+
+static void isif_get_image_window(struct v4l2_rect *win)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ *win = isif_cfg.bayer.win;
+ else
+ *win = isif_cfg.ycbcr.win;
+}
+
+static unsigned int isif_get_line_length(void)
+{
+ unsigned int len;
+
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ if (isif_cfg.data_pack == ISIF_PACK_8BIT)
+ len = isif_cfg.bayer.win.width;
+ else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
+ len = (isif_cfg.bayer.win.width * 2) +
+ (isif_cfg.bayer.win.width >> 2);
+ else
+ len = isif_cfg.bayer.win.width * 2;
+ } else
+ len = isif_cfg.ycbcr.win.width * 2;
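+ /*
+ * ALIGN rounds the stride up to a multiple of 32 bytes; e.g. a
+ * 644-pixel YCbCr line (1288 bytes) is padded up to 1312.
+ */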
+ return ALIGN(len, 32);
+}
+
+static int isif_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ isif_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ isif_cfg.ycbcr.frm_fmt = frm_fmt;
+ return 0;
+}
+
+static enum ccdc_frmfmt isif_get_frame_format(void)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ return isif_cfg.bayer.frm_fmt;
+ return isif_cfg.ycbcr.frm_fmt;
+}
+
+static int isif_getfid(void)
+{
+ return (regr(MODESET) >> 15) & 0x1;
+}
+
+/* misc operations */
+static void isif_setfbaddr(unsigned long addr)
+{
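+ /*
+ * The SDRAM address is split across two registers: bits 31..21 go to
+ * CADU and bits 20..5 to CADL, so bits 4..0 are dropped and the frame
+ * buffer must be 32-byte aligned.
+ */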
+ regw((addr >> 21) & 0x07ff, CADU);
+ regw((addr >> 5) & 0x0ffff, CADL);
+}
+
+static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+ isif_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_BT656_10BIT:
+ case VPFE_YCBCR_SYNC_8:
+ isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ break;
+ case VPFE_BT1120:
+ case VPFE_YCBCR_SYNC_16:
+ isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_16BIT;
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ break;
+ case VPFE_RAW_BAYER:
+ isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ break;
+ default:
+ dev_dbg(isif_cfg.dev, "Invalid interface type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This function will configure ISIF for YCbCr parameters. */
+static int isif_config_ycbcr(void)
+{
+ struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
+ struct vpss_pg_frame_size frame_size;
+ u32 modeset = 0, ccdcfg = 0;
+ struct vpss_sync_pol sync;
+
+ dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
+
+ /* configure pixel format or input mode */
+ modeset = modeset | (params->pix_fmt << ISIF_INPUT_SHIFT) |
+ (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
+ (params->fid_pol << ISIF_FID_POL_SHIFT) |
+ (params->hd_pol << ISIF_HD_POL_SHIFT) |
+ (params->vd_pol << ISIF_VD_POL_SHIFT);
+
+ /* pack the data to 8-bit ISIFCFG */
+ switch (isif_cfg.if_type) {
+ case VPFE_BT656:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ modeset |= (VPFE_PINPOL_NEGATIVE << ISIF_VD_POL_SHIFT);
+ regw(3, REC656IF);
+ ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR;
+ break;
+ case VPFE_BT656_10BIT:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /* setup BT.656, embedded sync */
+ regw(3, REC656IF);
+ /* enable 10 bit mode in ccdcfg */
+ ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR |
+ ISIF_BW656_ENABLE;
+ break;
+ case VPFE_BT1120:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ regw(3, REC656IF);
+ break;
+
+ case VPFE_YCBCR_SYNC_8:
+ ccdcfg |= ISIF_DATA_PACK8;
+ ccdcfg |= ISIF_YCINSWP_YCBCR;
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+ case VPFE_YCBCR_SYNC_16:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* should never come here */
+ dev_dbg(isif_cfg.dev, "Invalid interface type\n");
+ return -EINVAL;
+ }
+
+ regw(modeset, MODESET);
+
+ /* Set up pix order */
+ ccdcfg |= params->pix_order << ISIF_PIX_ORDER_SHIFT;
+
+ regw(ccdcfg, CCDCFG);
+
+ /* configure video window */
+ if ((isif_cfg.if_type == VPFE_BT1120) ||
+ (isif_cfg.if_type == VPFE_YCBCR_SYNC_16))
+ isif_setwin(&params->win, params->frm_fmt, 1);
+ else
+ isif_setwin(&params->win, params->frm_fmt, 2);
+
+ /*
+ * configure the horizontal line offset
+ * this is done by rounding up width to a multiple of 16 pixels
+ * and multiply by two to account for y:cb:cr 4:2:2 data
+ */
+ regw(((((params->win.width * 2) + 31) & 0xffffffe0) >> 5), HSIZE);
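+ /*
+ * Example: a 720-pixel line needs 1440 bytes of YCbCr data, which is
+ * already 32-byte aligned, so 1440 >> 5 = 45 is written to HSIZE.
+ */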
+
+ /* configure the memory line offset */
+ if ((params->frm_fmt == CCDC_FRMFMT_INTERLACED) &&
+ (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED))
+ /* two fields are interleaved in memory */
+ regw(0x00000249, SDOFST);
+
+ /* Setup test pattern if enabled */
+ if (isif_cfg.bayer.config_params.test_pat_gen) {
+ sync.ccdpg_hdpol = params->hd_pol;
+ sync.ccdpg_vdpol = params->vd_pol;
+ dm365_vpss_set_sync_pol(sync);
+ /* fill in the test pattern frame size from the capture window */
+ frame_size.hlpfr = params->win.width;
+ frame_size.pplen = params->win.height;
+ dm365_vpss_set_pg_frame_size(frame_size);
+ }
+ return 0;
+}
+
+static int isif_configure(void)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ return isif_config_raw();
+ return isif_config_ycbcr();
+}
+
+static int isif_close(struct device *device)
+{
+ /* copy defaults to module params */
+ isif_cfg.bayer.config_params = isif_config_defaults;
+ return 0;
+}
+
+static struct ccdc_hw_device isif_hw_dev = {
+ .name = "ISIF",
+ .owner = THIS_MODULE,
+ .hw_ops = {
+ .open = isif_open,
+ .close = isif_close,
+ .enable = isif_enable,
+ .enable_out_to_sdram = isif_enable_output_to_sdram,
+ .set_hw_if_params = isif_set_hw_if_params,
+ .configure = isif_configure,
+ .set_buftype = isif_set_buftype,
+ .get_buftype = isif_get_buftype,
+ .enum_pix = isif_enum_pix,
+ .set_pixel_format = isif_set_pixel_format,
+ .get_pixel_format = isif_get_pixel_format,
+ .set_frame_format = isif_set_frame_format,
+ .get_frame_format = isif_get_frame_format,
+ .set_image_window = isif_set_image_window,
+ .get_image_window = isif_get_image_window,
+ .get_line_length = isif_get_line_length,
+ .setfbaddr = isif_setfbaddr,
+ .getfid = isif_getfid,
+ },
+};
+
+static int isif_probe(struct platform_device *pdev)
+{
+ void (*setup_pinmux)(void);
+ struct resource *res;
+ void __iomem *addr;
+ int status = 0, i;
+
+ /* Platform data holds setup_pinmux function ptr */
+ if (!pdev->dev.platform_data)
+ return -ENODEV;
+
+ /*
+ * first try to register with vpfe. If this is not the correct
+ * platform, we don't need to ioremap
+ */
+ status = vpfe_register_ccdc_device(&isif_hw_dev);
+ if (status < 0)
+ return status;
+
+ setup_pinmux = pdev->dev.platform_data;
+ /*
+ * setup Mux configuration for ccdc which may be different for
+ * different SoCs using this CCDC
+ */
+ setup_pinmux();
+
+ i = 0;
+ /* Get the ISIF base address, linearization table0 and table1 addr. */
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ status = -ENODEV;
+ goto fail_nobase_res;
+ }
+ res = request_mem_region(res->start, resource_size(res),
+ res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nobase_res;
+ }
+ addr = ioremap_nocache(res->start, resource_size(res));
+ if (!addr) {
+ status = -ENOMEM;
+ goto fail_base_iomap;
+ }
+ switch (i) {
+ case 0:
+ /* ISIF base address */
+ isif_cfg.base_addr = addr;
+ break;
+ case 1:
+ /* ISIF linear tbl0 address */
+ isif_cfg.linear_tbl0_addr = addr;
+ break;
+ default:
+ /* ISIF linear tbl1 address */
+ isif_cfg.linear_tbl1_addr = addr;
+ break;
+ }
+ i++;
+ }
+ isif_cfg.dev = &pdev->dev;
+
+ printk(KERN_NOTICE "%s is registered with vpfe.\n",
+ isif_hw_dev.name);
+ return 0;
+fail_base_iomap:
+ release_mem_region(res->start, resource_size(res));
+ i--;
+fail_nobase_res:
+ if (isif_cfg.base_addr)
+ iounmap(isif_cfg.base_addr);
+ if (isif_cfg.linear_tbl0_addr)
+ iounmap(isif_cfg.linear_tbl0_addr);
+
+ while (i >= 0) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ release_mem_region(res->start, resource_size(res));
+ i--;
+ }
+ vpfe_unregister_ccdc_device(&isif_hw_dev);
+ return status;
+}
+
+static int isif_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ int i = 0;
+
+ iounmap(isif_cfg.base_addr);
+ iounmap(isif_cfg.linear_tbl0_addr);
+ iounmap(isif_cfg.linear_tbl1_addr);
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ i++;
+ }
+ vpfe_unregister_ccdc_device(&isif_hw_dev);
+ return 0;
+}
+
+static struct platform_driver isif_driver = {
+ .driver = {
+ .name = "isif",
+ .owner = THIS_MODULE,
+ },
+ .remove = isif_remove,
+ .probe = isif_probe,
+};
+
+module_platform_driver(isif_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/davinci/isif_regs.h b/drivers/media/platform/davinci/isif_regs.h
new file mode 100644
index 00000000000..3993aece821
--- /dev/null
+++ b/drivers/media/platform/davinci/isif_regs.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ISIF_REGS_H
+#define _ISIF_REGS_H
+
+/* ISIF registers relative offsets */
+#define SYNCEN 0x00
+#define MODESET 0x04
+#define HDW 0x08
+#define VDW 0x0c
+#define PPLN 0x10
+#define LPFR 0x14
+#define SPH 0x18
+#define LNH 0x1c
+#define SLV0 0x20
+#define SLV1 0x24
+#define LNV 0x28
+#define CULH 0x2c
+#define CULV 0x30
+#define HSIZE 0x34
+#define SDOFST 0x38
+#define CADU 0x3c
+#define CADL 0x40
+#define LINCFG0 0x44
+#define LINCFG1 0x48
+#define CCOLP 0x4c
+#define CRGAIN 0x50
+#define CGRGAIN 0x54
+#define CGBGAIN 0x58
+#define CBGAIN 0x5c
+#define COFSTA 0x60
+#define FLSHCFG0 0x64
+#define FLSHCFG1 0x68
+#define FLSHCFG2 0x6c
+#define VDINT0 0x70
+#define VDINT1 0x74
+#define VDINT2 0x78
+#define MISC 0x7c
+#define CGAMMAWD 0x80
+#define REC656IF 0x84
+#define CCDCFG 0x88
+/*****************************************************
+* Defect Correction registers
+*****************************************************/
+#define DFCCTL 0x8c
+#define VDFSATLV 0x90
+#define DFCMEMCTL 0x94
+#define DFCMEM0 0x98
+#define DFCMEM1 0x9c
+#define DFCMEM2 0xa0
+#define DFCMEM3 0xa4
+#define DFCMEM4 0xa8
+/****************************************************
+* Black Clamp registers
+****************************************************/
+#define CLAMPCFG 0xac
+#define CLDCOFST 0xb0
+#define CLSV 0xb4
+#define CLHWIN0 0xb8
+#define CLHWIN1 0xbc
+#define CLHWIN2 0xc0
+#define CLVRV 0xc4
+#define CLVWIN0 0xc8
+#define CLVWIN1 0xcc
+#define CLVWIN2 0xd0
+#define CLVWIN3 0xd4
+/****************************************************
+* Lens Shading Correction
+****************************************************/
+#define DATAHOFST 0xd8
+#define DATAVOFST 0xdc
+#define LSCHVAL 0xe0
+#define LSCVVAL 0xe4
+#define TWODLSCCFG 0xe8
+#define TWODLSCOFST 0xec
+#define TWODLSCINI 0xf0
+#define TWODLSCGRBU 0xf4
+#define TWODLSCGRBL 0xf8
+#define TWODLSCGROF 0xfc
+#define TWODLSCORBU 0x100
+#define TWODLSCORBL 0x104
+#define TWODLSCOROF 0x108
+#define TWODLSCIRQEN 0x10c
+#define TWODLSCIRQST 0x110
+/****************************************************
+* Data formatter
+****************************************************/
+#define FMTCFG 0x114
+#define FMTPLEN 0x118
+#define FMTSPH 0x11c
+#define FMTLNH 0x120
+#define FMTSLV 0x124
+#define FMTLNV 0x128
+#define FMTRLEN 0x12c
+#define FMTHCNT 0x130
+#define FMTAPTR_BASE 0x134
+/* Below macro for addresses FMTAPTR0 - FMTAPTR15 */
+#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
+#define FMTPGMVF0 0x174
+#define FMTPGMVF1 0x178
+#define FMTPGMAPU0 0x17c
+#define FMTPGMAPU1 0x180
+#define FMTPGMAPS0 0x184
+#define FMTPGMAPS1 0x188
+#define FMTPGMAPS2 0x18c
+#define FMTPGMAPS3 0x190
+#define FMTPGMAPS4 0x194
+#define FMTPGMAPS5 0x198
+#define FMTPGMAPS6 0x19c
+#define FMTPGMAPS7 0x1a0
+/************************************************
+* Color Space Converter
+************************************************/
+#define CSCCTL 0x1a4
+#define CSCM0 0x1a8
+#define CSCM1 0x1ac
+#define CSCM2 0x1b0
+#define CSCM3 0x1b4
+#define CSCM4 0x1b8
+#define CSCM5 0x1bc
+#define CSCM6 0x1c0
+#define CSCM7 0x1c4
+#define OBWIN0 0x1c8
+#define OBWIN1 0x1cc
+#define OBWIN2 0x1d0
+#define OBWIN3 0x1d4
+#define OBVAL0 0x1d8
+#define OBVAL1 0x1dc
+#define OBVAL2 0x1e0
+#define OBVAL3 0x1e4
+#define OBVAL4 0x1e8
+#define OBVAL5 0x1ec
+#define OBVAL6 0x1f0
+#define OBVAL7 0x1f4
+#define CLKCTL 0x1f8
+
+/* Masks & Shifts below */
+#define START_PX_HOR_MASK 0x7FFF
+#define NUM_PX_HOR_MASK 0x7FFF
+#define START_VER_ONE_MASK 0x7FFF
+#define START_VER_TWO_MASK 0x7FFF
+#define NUM_LINES_VER 0x7FFF
+
+/* gain - offset masks */
+#define GAIN_INTEGER_SHIFT 9
+#define OFFSET_MASK 0xFFF
+#define GAIN_SDRAM_EN_SHIFT 12
+#define GAIN_IPIPE_EN_SHIFT 13
+#define GAIN_H3A_EN_SHIFT 14
+#define OFST_SDRAM_EN_SHIFT 8
+#define OFST_IPIPE_EN_SHIFT 9
+#define OFST_H3A_EN_SHIFT 10
+#define GAIN_OFFSET_EN_MASK 0x7700
+
+/* Culling */
+#define CULL_PAT_EVEN_LINE_SHIFT 8
+
+/* CCDCFG register */
+#define ISIF_YCINSWP_RAW (0x00 << 4)
+#define ISIF_YCINSWP_YCBCR (0x01 << 4)
+#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
+#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
+#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
+#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
+#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
+#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
+#define ISIF_DATA_PACK_MASK 3
+#define ISIF_DATA_PACK16 0
+#define ISIF_DATA_PACK12 1
+#define ISIF_DATA_PACK8 2
+#define ISIF_PIX_ORDER_SHIFT 11
+#define ISIF_BW656_ENABLE (0x01 << 5)
+
+/* MODESET registers */
+#define ISIF_VDHDOUT_INPUT (0x00 << 0)
+#define ISIF_INPUT_SHIFT 12
+#define ISIF_RAW_INPUT_MODE 0
+#define ISIF_FID_POL_SHIFT 4
+#define ISIF_HD_POL_SHIFT 3
+#define ISIF_VD_POL_SHIFT 2
+#define ISIF_DATAPOL_NORMAL 0
+#define ISIF_DATAPOL_SHIFT 6
+#define ISIF_EXWEN_DISABLE 0
+#define ISIF_EXWEN_SHIFT 5
+#define ISIF_FRM_FMT_SHIFT 7
+#define ISIF_DATASFT_SHIFT 8
+#define ISIF_LPF_SHIFT 14
+#define ISIF_LPF_MASK 1
+
+/* GAMMAWD registers */
+#define ISIF_ALAW_GAMMA_WD_MASK 0xF
+#define ISIF_ALAW_GAMMA_WD_SHIFT 1
+#define ISIF_ALAW_ENABLE 1
+#define ISIF_GAMMAWD_CFA_SHIFT 5
+
+/* HSIZE registers */
+#define ISIF_HSIZE_FLIP_MASK 1
+#define ISIF_HSIZE_FLIP_SHIFT 12
+
+/* MISC registers */
+#define ISIF_DPCM_EN_SHIFT 12
+#define ISIF_DPCM_PREDICTOR_SHIFT 13
+
+/* Black clamp related */
+#define ISIF_BC_MODE_COLOR_SHIFT 4
+#define ISIF_HORZ_BC_MODE_SHIFT 1
+#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
+#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
+#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
+#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
+#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
+#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
+
+/* VDFC registers */
+#define ISIF_VDFC_EN_SHIFT 4
+#define ISIF_VDFC_CORR_MOD_SHIFT 5
+#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
+#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
+#define ISIF_VDFC_POS_MASK 0x1FFF
+#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
+
+/* CSC registers */
+#define ISIF_CSC_COEF_INTEG_MASK 7
+#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
+#define ISIF_CSC_COEF_INTEG_SHIFT 5
+#define ISIF_CSCM_MSB_SHIFT 8
+#define ISIF_DF_CSC_SPH_MASK 0x1FFF
+#define ISIF_DF_CSC_LNH_MASK 0x1FFF
+#define ISIF_DF_CSC_SLV_MASK 0x1FFF
+#define ISIF_DF_CSC_LNV_MASK 0x1FFF
+#define ISIF_DF_NUMLINES 0x7FFF
+#define ISIF_DF_NUMPIX 0x1FFF
+
+/* Offsets for LSC/DFC/Gain */
+#define ISIF_DATA_H_OFFSET_MASK 0x1FFF
+#define ISIF_DATA_V_OFFSET_MASK 0x1FFF
+
+/* Linearization */
+#define ISIF_LIN_CORRSFT_SHIFT 4
+#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
+
+
+/* Pattern registers */
+#define ISIF_PG_EN (1 << 3)
+#define ISIF_SEL_PG_SRC (3 << 4)
+#define ISIF_PG_VD_POL_SHIFT 0
+#define ISIF_PG_HD_POL_SHIFT 1
+
+/* SYNCEN register bits */
+#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
+#define ISIF_SYNCEN_WEN_MASK (1 << 1)
+#define ISIF_SYNCEN_WEN_SHIFT 1
+
+#endif
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
new file mode 100644
index 00000000000..33b9660b7f7
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -0,0 +1,872 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpss.h>
+#include <media/davinci/vpbe_venc.h>
+
+#define VPBE_DEFAULT_OUTPUT "Composite"
+#define VPBE_DEFAULT_MODE "ntsc"
+
+static char *def_output = VPBE_DEFAULT_OUTPUT;
+static char *def_mode = VPBE_DEFAULT_MODE;
+static int debug;
+
+module_param(def_output, charp, S_IRUGO);
+module_param(def_mode, charp, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(def_output, "vpbe output name (default:Composite)");
+MODULE_PARM_DESC(def_mode, "vpbe output mode name (default:ntsc)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("TI DMXXX VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/**
+ * vpbe_current_encoder_info - Get config info for current encoder
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return ptr to current encoder config info
+ */
+static struct encoder_config_info*
+vpbe_current_encoder_info(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int index = vpbe_dev->current_sd_index;
+
+ return ((index == 0) ? &cfg->venc :
+ &cfg->ext_encoders[index-1]);
+}
+
+/**
+ * vpbe_find_encoder_sd_index - Given a name find encoder sd index
+ *
+ * @cfg - ptr to vpbe config
+ * @index - output index used by the application
+ *
+ * Return sd index of the encoder
+ */
+static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
+ int index)
+{
+ char *encoder_name = cfg->outputs[index].subdev_name;
+ int i;
+
+ /* Venc is always first */
+ if (!strcmp(encoder_name, cfg->venc.module_name))
+ return 0;
+
+ for (i = 0; i < cfg->num_ext_encoders; i++) {
+ if (!strcmp(encoder_name,
+ cfg->ext_encoders[i].module_name))
+ return i+1;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_g_cropcap - Get crop capabilities of the display
+ * @vpbe_dev - vpbe device ptr
+ * @cropcap - cropcap is a ptr to struct v4l2_cropcap
+ *
+ * Update the crop capabilities in crop cap for current
+ * mode
+ */
+static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
+ struct v4l2_cropcap *cropcap)
+{
+ if (NULL == cropcap)
+ return -EINVAL;
+ cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.width = vpbe_dev->current_timings.xres;
+ cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ cropcap->defrect = cropcap->bounds;
+
+ return 0;
+}
+
+/**
+ * vpbe_enum_outputs - enumerate outputs
+ * @vpbe_dev - vpbe device ptr
+ * @output - ptr to v4l2_output structure
+ *
+ * Enumerates the outputs available at the vpbe display
+ * returns the status, -EINVAL if end of output list
+ */
+static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
+ struct v4l2_output *output)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int temp_index = output->index;
+
+ if (temp_index >= cfg->num_outputs)
+ return -EINVAL;
+
+ *output = cfg->outputs[temp_index].output;
+ output->index = temp_index;
+
+ return 0;
+}
+
+static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
+ int output_index)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = output_index;
+ int i;
+
+ if (NULL == mode)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(mode, var.name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ if (NULL == mode_info)
+ return -EINVAL;
+
+ *mode_info = vpbe_dev->current_timings;
+
+ return 0;
+}
+
+/* Get std by std id */
+static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
+ v4l2_std_id std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if ((var.timings_type & VPBE_ENC_STD) &&
+ (var.std_id & std_id)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_std_info_by_name(struct vpbe_device *vpbe_dev,
+ char *std_name)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(var.name, std_name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_set_output - Set output
+ * @vpbe_dev - vpbe device ptr
+ * @index - index of output
+ *
+ * Set vpbe output to the output specified by the index
+ */
+static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
+{
+ struct encoder_config_info *curr_enc_info =
+ vpbe_current_encoder_info(vpbe_dev);
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct venc_platform_data *venc_device = vpbe_dev->venc_device;
+ enum v4l2_mbus_pixelcode if_params;
+ int enc_out_index;
+ int sd_index;
+ int ret = 0;
+
+ if (index >= cfg->num_outputs)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ sd_index = vpbe_dev->current_sd_index;
+ enc_out_index = cfg->outputs[index].output.index;
+ /*
+ * Currently we switch the encoder based on the output selected
+ * by the application. If the media controller is implemented later,
+ * an API will be added to set up the link between the venc and the
+ * external encoder. In that case the comparison below will always
+ * match and the encoder will not be switched. But if the application
+ * chooses not to use the media controller, this provides the current
+ * way of switching the encoder at the venc output.
+ */
+ if (strcmp(curr_enc_info->module_name,
+ cfg->outputs[index].subdev_name)) {
+ /* Need to switch the encoder at the output */
+ sd_index = vpbe_find_encoder_sd_index(cfg, index);
+ if (sd_index < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if_params = cfg->outputs[index].if_params;
+ ret = venc_device->setup_if_config(if_params);
+ if (ret)
+ goto out;
+ }
+
+ /* Set output at the encoder */
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_routing, 0, enc_out_index, 0);
+ if (ret)
+ goto out;
+
+ /*
+ * It is assumed that the venc or external encoder will set a default
+ * mode in the sub device. For an external encoder or LCD panel output,
+ * we also need to set up the lcd port for the required mode. So setup
+ * the lcd port for the default mode that is configured in the board
+ * arch/arm/mach-davinci/board-dm355-evm.setup file for the external
+ * encoder.
+ */
+ ret = vpbe_get_mode_info(vpbe_dev,
+ cfg->outputs[index].default_mode, index);
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ vpbe_dev->current_sd_index = sd_index;
+ vpbe_dev->current_out_index = index;
+ }
+out:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
+
+static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < cfg->num_outputs; i++) {
+ if (!strcmp(def_output,
+ cfg->outputs[i].output.name)) {
+ ret = vpbe_set_output(vpbe_dev, i);
+ if (!ret)
+ vpbe_dev->current_out_index = i;
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * vpbe_get_output - Get output
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return the index of the current vpbe output
+ */
+static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
+{
+ return vpbe_dev->current_out_index;
+}
+
+/**
+ * vpbe_s_dv_timings - Set the given preset timings in the encoder
+ *
+ * Sets the timings if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ struct vpbe_output *output = &cfg->outputs[out_index];
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret, i;
+
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_DV_TIMINGS))
+ return -EINVAL;
+
+ for (i = 0; i < output->num_modes; i++) {
+ if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS &&
+ !memcmp(&output->modes[i].dv_timings,
+ dv_timings, sizeof(*dv_timings)))
+ break;
+ }
+ if (i >= output->num_modes)
+ return -EINVAL;
+ vpbe_dev->current_timings = output->modes[i];
+ mutex_lock(&vpbe_dev->lock);
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_dv_timings, dv_timings);
+ if (!ret && (vpbe_dev->amp != NULL)) {
+ /* Call amplifier subdevice */
+ ret = v4l2_subdev_call(vpbe_dev->amp, video,
+ s_dv_timings, dv_timings);
+ }
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/**
+ * vpbe_g_dv_timings - Get the timings in the current encoder
+ *
+ * Get the timings in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_dv_timings(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_timings *dv_timings)
+{
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_TIMINGS) {
+ *dv_timings = vpbe_dev->current_timings.dv_timings;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_enum_dv_timings - Enumerate the dv timings in the current encoder
+ *
+ * Get the timings in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_enum_dv_timings(struct vpbe_device *vpbe_dev,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ struct vpbe_output *output = &cfg->outputs[out_index];
+ int j = 0;
+ int i;
+
+ if (!(output->output.capabilities & V4L2_OUT_CAP_DV_TIMINGS))
+ return -EINVAL;
+
+ for (i = 0; i < output->num_modes; i++) {
+ if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS) {
+ if (j == timings->index)
+ break;
+ j++;
+ }
+ }
+
+ if (i == output->num_modes)
+ return -EINVAL;
+ timings->timings = output->modes[i].dv_timings;
+ return 0;
+}
+
+/**
+ * vpbe_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_STD))
+ return -EINVAL;
+
+ ret = vpbe_get_std_info(vpbe_dev, std_id);
+ if (ret)
+ return ret;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_std_output, std_id);
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/**
+ * vpbe_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+ struct vpbe_enc_mode_info *cur_timings = &vpbe_dev->current_timings;
+
+ if (cur_timings->timings_type & VPBE_ENC_STD) {
+ *std_id = cur_timings->std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_set_mode - Set mode in the current encoder using mode info
+ *
+ * Use the mode string to decide what timings to set in the encoder
+ * This is typically useful when fbset command is used to change the current
+ * timings by specifying a string to indicate the timings.
+ */
+static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ struct vpbe_enc_mode_info *preset_mode = NULL;
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct v4l2_dv_timings dv_timings;
+ struct osd_state *osd_device;
+ int out_index = vpbe_dev->current_out_index;
+ int ret = 0;
+ int i;
+
+ if ((NULL == mode_info) || (NULL == mode_info->name))
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
+ if (!strcmp(mode_info->name,
+ cfg->outputs[out_index].modes[i].name)) {
+ preset_mode = &cfg->outputs[out_index].modes[i];
+ /*
+ * it may be one of the 3 timings type. Check and
+ * invoke right API
+ */
+ if (preset_mode->timings_type & VPBE_ENC_STD)
+ return vpbe_s_std(vpbe_dev,
+ preset_mode->std_id);
+ if (preset_mode->timings_type &
+ VPBE_ENC_DV_TIMINGS) {
+ dv_timings =
+ preset_mode->dv_timings;
+ return vpbe_s_dv_timings(vpbe_dev, &dv_timings);
+ }
+ }
+ }
+
+ /* Only custom timing should reach here */
+ if (preset_mode == NULL)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ osd_device = vpbe_dev->osd_device;
+ vpbe_dev->current_timings = *preset_mode;
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
+{
+ int ret;
+
+ ret = vpbe_get_std_info_by_name(vpbe_dev, def_mode);
+ if (ret)
+ return ret;
+
+ /* set the default mode in the encoder */
+ return vpbe_set_mode(vpbe_dev, &vpbe_dev->current_timings);
+}
+
+static int platform_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_device *vpbe_dev = data;
+
+ if (strstr(pdev->name, "vpbe-osd") != NULL)
+ vpbe_dev->osd_device = platform_get_drvdata(pdev);
+ if (strstr(pdev->name, "vpbe-venc") != NULL)
+ vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
+
+ return 0;
+}
+
+/**
+ * vpbe_initialize() - Initialize the vpbe display controller
+ * @vpbe_dev - vpbe device ptr
+ *
+ * The master frame buffer device driver calls this to initialize the vpbe
+ * display controller. This registers the v4l2 device and the sub devices
+ * and sets a current encoder sub device for display. The v4l2 display
+ * device driver is the master and the frame buffer display device driver
+ * is the slave. The frame buffer display driver checks the initialized
+ * flag during probe and exits if the controller is not initialized.
+ * Returns status.
+ */
+static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ struct encoder_config_info *enc_info;
+ struct amp_config_info *amp_info;
+ struct v4l2_subdev **enc_subdev;
+ struct osd_state *osd_device;
+ struct i2c_adapter *i2c_adap;
+ int num_encoders;
+ int ret = 0;
+ int err;
+ int i;
+
+ /*
+ * v4l2 and FBDev frame buffer devices will get the vpbe_dev pointer
+ * from the platform device by iterating over platform devices and
+ * matching on the device name
+ */
+ if (NULL == vpbe_dev || NULL == dev) {
+ printk(KERN_ERR "Null device pointers.\n");
+ return -ENODEV;
+ }
+
+ if (vpbe_dev->initialized)
+ return 0;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ /* We have dac clock available for platform */
+ vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
+ if (IS_ERR(vpbe_dev->dac_clk)) {
+ ret = PTR_ERR(vpbe_dev->dac_clk);
+ goto fail_mutex_unlock;
+ }
+ if (clk_prepare_enable(vpbe_dev->dac_clk)) {
+ ret = -ENODEV;
+ goto fail_mutex_unlock;
+ }
+ }
+
+ /* first enable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
+
+ /* First register a v4l2 device */
+ ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(dev->driver,
+ "Unable to register v4l2 device.\n");
+ goto fail_clk_put;
+ }
+ v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
+
+ err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
+ platform_device_get);
+ if (err < 0) {
+ ret = err;
+ goto fail_dev_unregister;
+ }
+
+ vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
+ vpbe_dev->cfg->venc.module_name);
+ /* register venc sub device */
+ if (vpbe_dev->venc == NULL) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "vpbe unable to init venc sub device\n");
+ ret = -ENODEV;
+ goto fail_dev_unregister;
+ }
+ /* initialize osd device */
+ osd_device = vpbe_dev->osd_device;
+
+ if (NULL != osd_device->ops.initialize) {
+ err = osd_device->ops.initialize(osd_device);
+ if (err) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to initialize the OSD device");
+ err = -ENOMEM;
+ goto fail_dev_unregister;
+ }
+ }
+
+ /*
+ * Register any external encoders that are configured. At index 0 we
+ * store venc sd index.
+ */
+ num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
+ vpbe_dev->encoders = kmalloc(
+ sizeof(struct v4l2_subdev *)*num_encoders,
+ GFP_KERNEL);
+ if (NULL == vpbe_dev->encoders) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to allocate memory for encoders sub devices");
+ ret = -ENOMEM;
+ goto fail_dev_unregister;
+ }
+
+ i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
+ for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) {
+ if (i == 0) {
+ /* venc is at index 0 */
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = vpbe_dev->venc;
+ continue;
+ }
+ /* sd index i corresponds to ext_encoders[i - 1]; venc is at 0 */
+ enc_info = &vpbe_dev->cfg->ext_encoders[i - 1];
+ if (enc_info->is_i2c) {
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = v4l2_i2c_new_subdev_board(
+ &vpbe_dev->v4l2_dev, i2c_adap,
+ &enc_info->board_info, NULL);
+ if (*enc_subdev)
+ v4l2_info(&vpbe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ enc_info->module_name);
+ else {
+ v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
+ " failed to register",
+ enc_info->module_name);
+ ret = -ENODEV;
+ goto fail_kfree_encoders;
+ }
+ } else
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
+ " currently not supported");
+ }
+ /* Add amplifier subdevice for dm365 */
+ if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
+ vpbe_dev->cfg->amp != NULL) {
+ amp_info = vpbe_dev->cfg->amp;
+ if (amp_info->is_i2c) {
+ vpbe_dev->amp = v4l2_i2c_new_subdev_board(
+ &vpbe_dev->v4l2_dev, i2c_adap,
+ &amp_info->board_info, NULL);
+ if (!vpbe_dev->amp) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "amplifier %s failed to register",
+ amp_info->module_name);
+ ret = -ENODEV;
+ goto fail_kfree_encoders;
+ }
+ v4l2_info(&vpbe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ amp_info->module_name);
+ } else {
+ vpbe_dev->amp = NULL;
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers"
+ " currently not supported");
+ }
+ } else {
+ vpbe_dev->amp = NULL;
+ }
+
+ /* set the current encoder and output to that of venc by default */
+ vpbe_dev->current_sd_index = 0;
+ vpbe_dev->current_out_index = 0;
+
+ mutex_unlock(&vpbe_dev->lock);
+
+ printk(KERN_NOTICE "Setting default output to %s\n", def_output);
+ ret = vpbe_set_default_output(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
+ def_output);
+ return ret;
+ }
+
+ printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
+ ret = vpbe_set_default_mode(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
+ def_mode);
+ return ret;
+ }
+ vpbe_dev->initialized = 1;
+ /* TBD handling of bootargs for default output and mode */
+ return 0;
+
+fail_kfree_encoders:
+ kfree(vpbe_dev->encoders);
+fail_dev_unregister:
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+fail_clk_put:
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
+ clk_put(vpbe_dev->dac_clk);
+ }
+fail_mutex_unlock:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
+
+/**
+ * vpbe_deinitialize() - de-initialize the vpbe display controller
+ * @dev - Master and slave device ptr
+ *
+ * vpbe_master and slave frame buffer devices calls this to de-initialize
+ * the display controller. It is called when master and slave device
+ * driver modules are removed and no longer requires the display controller.
+ */
+static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
+ clk_put(vpbe_dev->dac_clk);
+ }
+
+ kfree(vpbe_dev->amp);
+ kfree(vpbe_dev->encoders);
+ vpbe_dev->initialized = 0;
+ /* disable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
+}
+
+static struct vpbe_device_ops vpbe_dev_ops = {
+ .g_cropcap = vpbe_g_cropcap,
+ .enum_outputs = vpbe_enum_outputs,
+ .set_output = vpbe_set_output,
+ .get_output = vpbe_get_output,
+ .s_dv_timings = vpbe_s_dv_timings,
+ .g_dv_timings = vpbe_g_dv_timings,
+ .enum_dv_timings = vpbe_enum_dv_timings,
+ .s_std = vpbe_s_std,
+ .g_std = vpbe_g_std,
+ .initialize = vpbe_initialize,
+ .deinitialize = vpbe_deinitialize,
+ .get_mode_info = vpbe_get_current_mode_info,
+ .set_mode = vpbe_set_mode,
+};
+
+static int vpbe_probe(struct platform_device *pdev)
+{
+ struct vpbe_device *vpbe_dev;
+ struct vpbe_config *cfg;
+ int ret = -EINVAL;
+
+ if (pdev->dev.platform_data == NULL) {
+ v4l2_err(pdev->dev.driver, "No platform data\n");
+ return -ENODEV;
+ }
+ cfg = pdev->dev.platform_data;
+
+ if (!cfg->module_name[0] ||
+ !cfg->osd.module_name[0] ||
+ !cfg->venc.module_name[0]) {
+		v4l2_err(pdev->dev.driver,
+			 "vpbe display module names not defined\n");
+ return ret;
+ }
+
+ vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
+ if (vpbe_dev == NULL) {
+		v4l2_err(pdev->dev.driver,
+			 "Unable to allocate memory for vpbe_device\n");
+ return -ENOMEM;
+ }
+ vpbe_dev->cfg = cfg;
+ vpbe_dev->ops = vpbe_dev_ops;
+ vpbe_dev->pdev = &pdev->dev;
+
+ if (cfg->outputs->num_modes > 0)
+ vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
+ else {
+ kfree(vpbe_dev);
+ return -ENODEV;
+ }
+
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpbe_dev);
+ mutex_init(&vpbe_dev->lock);
+
+ return 0;
+}
+
+static int vpbe_remove(struct platform_device *device)
+{
+ struct vpbe_device *vpbe_dev = platform_get_drvdata(device);
+
+ kfree(vpbe_dev);
+
+ return 0;
+}
+
+static struct platform_driver vpbe_driver = {
+ .driver = {
+ .name = "vpbe_controller",
+ .owner = THIS_MODULE,
+ },
+ .probe = vpbe_probe,
+ .remove = vpbe_remove,
+};
+
+module_platform_driver(vpbe_driver);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
new file mode 100644
index 00000000000..bf5eff99452
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -0,0 +1,1858 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <mach/cputype.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_display.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpbe_osd.h>
+#include "vpbe_venc_regs.h"
+
+#define VPBE_DISPLAY_DRIVER "vpbe-v4l2"
+
+static int debug;
+
+#define VPBE_DEFAULT_NUM_BUFS 3
+
+module_param(debug, int, 0644);
+
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer);
+
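+/*
+ * venc_is_second_field()
+ * Queries the VENC sub-device (VENC_GET_FLD core ioctl) to find out whether
+ * the field currently being displayed is the second field of an interlaced
+ * frame.
+ */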
+static int venc_is_second_field(struct vpbe_display *disp_dev)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int ret;
+ int val;
+	int val = 0;
+ ret = v4l2_subdev_call(vpbe_dev->venc,
+ core,
+ ioctl,
+ VENC_GET_FLD,
+ &val);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+			 "Error getting field ID from VENC\n");
+ }
+ return val;
+}
+
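+/*
+ * vpbe_isr_even_field()
+ * Completes the buffer that has just been displayed: timestamp it, hand it
+ * back to videobuf2 as DONE and make cur_frm point at next_frm.
+ */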
+static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ struct timespec timevalue;
+
+ if (layer->cur_frm == layer->next_frm)
+ return;
+ ktime_get_ts(&timevalue);
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_sec =
+ timevalue.tv_sec;
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_usec =
+ timevalue.tv_nsec / NSEC_PER_USEC;
+ vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE);
+ /* Make cur_frm pointing to next_frm */
+ layer->cur_frm = layer->next_frm;
+}
+
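+/*
+ * vpbe_isr_odd_field()
+ * Picks the next buffer from the DMA queue (if one is available) and programs
+ * its address into the OSD so it is displayed from the next field/frame;
+ * otherwise the current frame stays on screen.
+ */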
+static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ struct osd_state *osd_device = disp_obj->osd_device;
+ unsigned long addr;
+
+ spin_lock(&disp_obj->dma_queue_lock);
+ if (list_empty(&layer->dma_queue) ||
+ (layer->cur_frm != layer->next_frm)) {
+ spin_unlock(&disp_obj->dma_queue_lock);
+ return;
+ }
+ /*
+ * one field is displayed configure
+ * the next frame if it is available
+ * otherwise hold on current frame
+ * Get next from the buffer queue
+ */
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ /* Remove that from the buffer queue */
+ list_del(&layer->next_frm->list);
+ spin_unlock(&disp_obj->dma_queue_lock);
+ /* Mark state of the frame to active */
+ layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0);
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_obj->cbcr_ofst);
+}
+
+/*
+ * venc_isr()
+ * VENC vertical sync interrupt handler: works out the field/frame boundary,
+ * completes the buffer that has just been displayed and programs the next
+ * queued buffer for every streaming layer.
+ */
+static irqreturn_t venc_isr(int irq, void *arg)
+{
+ struct vpbe_display *disp_dev = (struct vpbe_display *)arg;
+ struct vpbe_layer *layer;
+ static unsigned last_event;
+ unsigned event = 0;
+ int fid;
+ int i;
+
+ if ((NULL == arg) || (NULL == disp_dev->dev[0]))
+ return IRQ_HANDLED;
+
+ if (venc_is_second_field(disp_dev))
+ event |= VENC_SECOND_FIELD;
+ else
+ event |= VENC_FIRST_FIELD;
+
+ if (event == (last_event & ~VENC_END_OF_FRAME)) {
+ /*
+ * If the display is non-interlaced, then we need to flag the
+ * end-of-frame event at every interrupt regardless of the
+ * value of the FIDST bit. We can conclude that the display is
+ * non-interlaced if the value of the FIDST bit is unchanged
+ * from the previous interrupt.
+ */
+ event |= VENC_END_OF_FRAME;
+ } else if (event == VENC_SECOND_FIELD) {
+ /* end-of-frame for interlaced display */
+ event |= VENC_END_OF_FRAME;
+ }
+ last_event = event;
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ layer = disp_dev->dev[i];
+ /* If streaming is started in this layer */
+ if (!layer->started)
+ continue;
+
+ if (layer->layer_first_int) {
+ layer->layer_first_int = 0;
+ continue;
+ }
+ /* Check the field format */
+ if ((V4L2_FIELD_NONE == layer->pix_fmt.field) &&
+ (event & VENC_END_OF_FRAME)) {
+ /* Progressive mode */
+
+ vpbe_isr_even_field(disp_dev, layer);
+ vpbe_isr_odd_field(disp_dev, layer);
+ } else {
+ /* Interlaced mode */
+
+ layer->field_id ^= 1;
+ if (event & VENC_FIRST_FIELD)
+ fid = 0;
+ else
+ fid = 1;
+
+ /*
+ * If field id does not match with store
+ * field id
+ */
+ if (fid != layer->field_id) {
+ /* Make them in sync */
+ layer->field_id = fid;
+ continue;
+ }
+ /*
+ * device field id and local field id are
+ * in sync. If this is even field
+ */
+ if (0 == fid)
+ vpbe_isr_even_field(disp_dev, layer);
+ else /* odd field */
+ vpbe_isr_odd_field(disp_dev, layer);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * vpbe_buffer_prepare()
+ * This is the callback function called from vb2_qbuf(); it validates the
+ * buffer payload and checks the alignment of the DMA address before the
+ * buffer is queued to the hardware
+ */
+static int vpbe_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned long addr;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_prepare\n");
+
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED) {
+ vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (q->streaming) {
+ if (!IS_ALIGNED(addr, 8)) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+					"buffer_prepare: address is not aligned to 8 bytes\n");
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * vpbe_buffer_queue_setup()
+ * This function sets up the number of buffers and the size of each plane
+ */
+static int
+vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
+
+ /* Store number of buffers allocated in numbuffer member */
+ if (*nbuffers < VPBE_DEFAULT_NUM_BUFS)
+ *nbuffers = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+
+ *nplanes = 1;
+ sizes[0] = layer->pix_fmt.sizeimage;
+ alloc_ctxs[0] = layer->alloc_ctx;
+
+ return 0;
+}
+
+/*
+ * vpbe_buffer_queue()
+ * This function adds the buffer to DMA queue
+ */
+static void vpbe_buffer_queue(struct vb2_buffer *vb)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_queue\n");
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&disp->dma_queue_lock, flags);
+ list_add_tail(&buf->list, &layer->dma_queue);
+ spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
+}
+
+/*
+ * vpbe_buf_cleanup()
+ * This function is called from the vb2 layer when a buffer is released;
+ * it removes an active buffer from the DMA queue
+ */
+static void vpbe_buf_cleanup(struct vb2_buffer *vb)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buf_cleanup\n");
+
+ spin_lock_irqsave(&layer->irqlock, flags);
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+ spin_unlock_irqrestore(&layer->irqlock, flags);
+}
+
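+/*
+ * wait_prepare/wait_finish are called by vb2 around blocking waits; they
+ * drop and re-acquire the per-layer ops lock so other file operations are
+ * not blocked while a dequeue sleeps.
+ */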
+static void vpbe_wait_prepare(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_unlock(&layer->opslock);
+}
+
+static void vpbe_wait_finish(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_lock(&layer->opslock);
+}
+
+static int vpbe_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+ return 0;
+}
+
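+/*
+ * vpbe_start_streaming()
+ * Takes the first buffer from the DMA queue, programs the OSD/VENC through
+ * vpbe_set_osd_display_params() and, on failure, gives all queued buffers
+ * back to vb2 in the QUEUED state.
+ */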
+static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+ int ret;
+
+ /* Get the next frame from the buffer queue */
+ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&layer->cur_frm->list);
+ /* Mark state of the current frame to active */
+ layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize field_id and started member */
+ layer->field_id = 0;
+
+ /* Set parameters in OSD and VENC */
+ ret = vpbe_set_osd_display_params(fh->disp_dev, layer);
+ if (ret < 0) {
+ struct vpbe_disp_buffer *buf, *tmp;
+
+ vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_QUEUED);
+ list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+ }
+
+ /*
+ * if request format is yuv420 semiplanar, need to
+ * enable both video windows
+ */
+ layer->started = 1;
+ layer->layer_first_int = 1;
+
+ return ret;
+}
+
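+/*
+ * vpbe_stop_streaming()
+ * Returns the in-flight and queued buffers to vb2 in the ERROR state and
+ * empties the layer's DMA queue.
+ */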
+static void vpbe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp = fh->disp_dev;
+ unsigned long flags;
+
+ if (!vb2_is_streaming(vq))
+ return;
+
+ /* release all active buffers */
+ spin_lock_irqsave(&disp->dma_queue_lock, flags);
+ if (layer->cur_frm == layer->next_frm) {
+ vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (layer->cur_frm != NULL)
+ vb2_buffer_done(&layer->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (layer->next_frm != NULL)
+ vb2_buffer_done(&layer->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&layer->dma_queue)) {
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ list_del(&layer->next_frm->list);
+ vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpbe_buffer_queue_setup,
+ .wait_prepare = vpbe_wait_prepare,
+ .wait_finish = vpbe_wait_finish,
+ .buf_init = vpbe_buffer_init,
+ .buf_prepare = vpbe_buffer_prepare,
+ .start_streaming = vpbe_start_streaming,
+ .stop_streaming = vpbe_stop_streaming,
+ .buf_cleanup = vpbe_buf_cleanup,
+ .buf_queue = vpbe_buffer_queue,
+};
+
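+/*
+ * The NV12 (YUV 4:2:0 semi-planar) format uses both video windows, so
+ * several paths need a handle on the "other" window's layer object.
+ */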
+static struct vpbe_layer *
+_vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+	enum vpbe_display_device_id thiswin, otherwin;
+
+	thiswin = layer->device_id;
+
+ otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ?
+ VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0;
+ return disp_dev->dev[otherwin];
+}
+
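+/*
+ * vpbe_set_osd_display_params()
+ * Programs the current frame address into the OSD, enables the layer and,
+ * for NV12, also enables the companion video window.
+ */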
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ unsigned long addr;
+ int ret;
+
+ addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0);
+ /* Set address in the display registers */
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_dev->cbcr_ofst);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ layer->layer_info.id, 0);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 0\n");
+ return -1;
+ }
+
+ /* Enable the window */
+ layer->layer_info.enable = 1;
+ if (cfg->pixfmt == PIXFMT_NV12) {
+ struct vpbe_layer *otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ otherlayer->layer_info.id, 1);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 1\n");
+ return -1;
+ }
+ otherlayer->layer_info.enable = 1;
+ }
+ return 0;
+}
+
+static void
+vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int expected_xsize, int expected_ysize)
+{
+ struct display_layer_info *layer_info = &layer->layer_info;
+ struct v4l2_pix_format *pixfmt = &layer->pix_fmt;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int calculated_xsize;
+ int h_exp = 0;
+ int v_exp = 0;
+ int h_scale;
+ int v_scale;
+
+ v4l2_std_id standard_id = vpbe_dev->current_timings.std_id;
+
+ /*
+	 * The application initially sets the image format. The current display
+	 * size is obtained from the vpbe display controller. expected_xsize
+	 * and expected_ysize are set through the S_CROP ioctl. Based on this,
+	 * the driver calculates the scale factors for the vertical and
+	 * horizontal directions so that the image is displayed scaled
+	 * and expanded. The application uses expansion to display the image
+	 * with square pixels; otherwise the image is displayed using the
+	 * display's pixel aspect ratio. The application is expected to choose
+	 * the crop coordinates for cropped or scaled display. If the crop
+	 * size is smaller than the image size, the image is displayed cropped;
+	 * otherwise it is displayed scaled and/or expanded.
+	 *
+	 * To begin with, set the crop window to the expected size. Later we
+	 * will override it with the scaled window size.
+ */
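+	/*
+	 * Illustrative example (numbers chosen for the example only): on a
+	 * 720-pixel-wide display, a 360-pixel-wide image with a 720-pixel
+	 * crop width yields h_scale = 2 (ZOOM_X2); a 640-pixel-wide image
+	 * yields h_scale = 1, and the 9/8 horizontal expansion selected by
+	 * H_EXP_9_OVER_8 then brings the 640-pixel window up to the
+	 * 720-pixel crop width.
+	 */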
+
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+	layer_info->h_zoom = ZOOM_X1;	/* no horizontal zoom */
+	layer_info->v_zoom = ZOOM_X1;	/* no vertical zoom */
+	layer_info->h_exp = H_EXP_OFF;	/* no horizontal expansion */
+	layer_info->v_exp = V_EXP_OFF;	/* no vertical expansion */
+
+ if (pixfmt->width < expected_xsize) {
+ h_scale = vpbe_dev->current_timings.xres / pixfmt->width;
+ if (h_scale < 2)
+ h_scale = 1;
+ else if (h_scale >= 4)
+ h_scale = 4;
+ else
+ h_scale = 2;
+ cfg->xsize *= h_scale;
+ if (cfg->xsize < expected_xsize) {
+ if ((standard_id & V4L2_STD_525_60) ||
+ (standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->xsize *
+ VPBE_DISPLAY_H_EXP_RATIO_N) /
+ VPBE_DISPLAY_H_EXP_RATIO_D;
+ if (calculated_xsize <= expected_xsize) {
+ h_exp = 1;
+ cfg->xsize = calculated_xsize;
+ }
+ }
+ }
+ if (h_scale == 2)
+ layer_info->h_zoom = ZOOM_X2;
+ else if (h_scale == 4)
+ layer_info->h_zoom = ZOOM_X4;
+ if (h_exp)
+ layer_info->h_exp = H_EXP_9_OVER_8;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->xsize = expected_xsize;
+ }
+
+ if (pixfmt->height < expected_ysize) {
+ v_scale = expected_ysize / pixfmt->height;
+ if (v_scale < 2)
+ v_scale = 1;
+ else if (v_scale >= 4)
+ v_scale = 4;
+ else
+ v_scale = 2;
+ cfg->ysize *= v_scale;
+ if (cfg->ysize < expected_ysize) {
+ if ((standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->ysize *
+ VPBE_DISPLAY_V_EXP_RATIO_N) /
+ VPBE_DISPLAY_V_EXP_RATIO_D;
+ if (calculated_xsize <= expected_ysize) {
+ v_exp = 1;
+ cfg->ysize = calculated_xsize;
+ }
+ }
+ }
+ if (v_scale == 2)
+ layer_info->v_zoom = ZOOM_X2;
+ else if (v_scale == 4)
+ layer_info->v_zoom = ZOOM_X4;
+ if (v_exp)
+			layer_info->v_exp = V_EXP_6_OVER_5;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->ysize = expected_ysize;
+ }
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "crop display xsize = %d, ysize = %d\n",
+ cfg->xsize, cfg->ysize);
+}
+
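+/*
+ * vpbe_disp_adj_position()
+ * Clamps the window position so that the configured window stays fully
+ * within the current display resolution.
+ */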
+static void vpbe_disp_adj_position(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int top, int left)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ cfg->xpos = min((unsigned int)left,
+ vpbe_dev->current_timings.xres - cfg->xsize);
+ cfg->ypos = min((unsigned int)top,
+ vpbe_dev->current_timings.yres - cfg->ysize);
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "new xpos = %d, ypos = %d\n",
+ cfg->xpos, cfg->ypos);
+}
+
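+/*
+ * vpbe_disp_check_window_params()
+ * Clamps the crop rectangle to the current display resolution and forces an
+ * even height for interlaced timings.
+ */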
+static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev,
+ struct v4l2_rect *c)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ if ((c->width == 0) ||
+ ((c->width + c->left) > vpbe_dev->current_timings.xres))
+ c->width = vpbe_dev->current_timings.xres - c->left;
+
+ if ((c->height == 0) || ((c->height + c->top) >
+ vpbe_dev->current_timings.yres))
+ c->height = vpbe_dev->current_timings.yres - c->top;
+
+ /* window height must be even for interlaced display */
+ if (vpbe_dev->current_timings.interlaced)
+ c->height &= (~0x01);
+
+}
+
+/**
+ * vpbe_try_format()
+ * If the user application provides width and height and has bytesperline set
+ * to zero, the driver calculates bytesperline and sizeimage based on hardware
+ * limits.
+ */
+static int vpbe_try_format(struct vpbe_display *disp_dev,
+ struct v4l2_pix_format *pixfmt, int check)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int min_height = 1;
+ int min_width = 32;
+ int max_height;
+ int max_width;
+ int bpp;
+
+ if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) &&
+ (pixfmt->pixelformat != V4L2_PIX_FMT_NV12))
+ /* choose default as V4L2_PIX_FMT_UYVY */
+ pixfmt->pixelformat = V4L2_PIX_FMT_UYVY;
+
+ /* Check the field format */
+ if ((pixfmt->field != V4L2_FIELD_INTERLACED) &&
+ (pixfmt->field != V4L2_FIELD_NONE)) {
+ if (vpbe_dev->current_timings.interlaced)
+ pixfmt->field = V4L2_FIELD_INTERLACED;
+ else
+ pixfmt->field = V4L2_FIELD_NONE;
+ }
+
+ if (pixfmt->field == V4L2_FIELD_INTERLACED)
+ min_height = 2;
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ bpp = 1;
+ else
+ bpp = 2;
+
+ max_width = vpbe_dev->current_timings.xres;
+ max_height = vpbe_dev->current_timings.yres;
+
+ min_width /= bpp;
+
+ if (!pixfmt->width || (pixfmt->width < min_width) ||
+ (pixfmt->width > max_width)) {
+ pixfmt->width = vpbe_dev->current_timings.xres;
+ }
+
+ if (!pixfmt->height || (pixfmt->height < min_height) ||
+ (pixfmt->height > max_height)) {
+ pixfmt->height = vpbe_dev->current_timings.yres;
+ }
+
+ if (pixfmt->bytesperline < (pixfmt->width * bpp))
+ pixfmt->bytesperline = pixfmt->width * bpp;
+
+ /* Make the bytesperline 32 byte aligned */
+ pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31);
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height +
+ (pixfmt->bytesperline * pixfmt->height >> 1);
+ else
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ return 0;
+}
+
+static int vpbe_display_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ cap->version = VPBE_DISPLAY_VERSION_CODE;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ snprintf(cap->driver, sizeof(cap->driver), "%s",
+ dev_name(vpbe_dev->pdev));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpbe_dev->pdev));
+ strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vpbe_display_s_crop(struct file *file, void *priv,
+ const struct v4l2_crop *crop)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct v4l2_rect rect = crop->c;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_CROP, layer id = %d\n", layer->device_id);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ if (rect.top < 0)
+ rect.top = 0;
+ if (rect.left < 0)
+ rect.left = 0;
+
+ vpbe_disp_check_window_params(disp_dev, &rect);
+
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ vpbe_disp_calculate_scale_factor(disp_dev, layer,
+ rect.width,
+ rect.height);
+ vpbe_disp_adj_position(disp_dev, layer, rect.top,
+ rect.left);
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set layer config:\n");
+ return -EINVAL;
+ }
+
+ /* apply zooming and h or v expansion */
+ osd_device->ops.set_zoom(osd_device,
+ layer->layer_info.id,
+ layer->layer_info.h_zoom,
+ layer->layer_info.v_zoom);
+ ret = osd_device->ops.set_vid_expansion(osd_device,
+ layer->layer_info.h_exp,
+ layer->layer_info.v_exp);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set vid expansion:\n");
+ return -EINVAL;
+ }
+
+ if ((layer->layer_info.h_zoom != ZOOM_X1) ||
+ (layer->layer_info.v_zoom != ZOOM_X1) ||
+ (layer->layer_info.h_exp != H_EXP_OFF) ||
+ (layer->layer_info.v_exp != V_EXP_OFF))
+ /* Enable expansion filter */
+ osd_device->ops.set_interpolation_filter(osd_device, 1);
+ else
+ osd_device->ops.set_interpolation_filter(osd_device, 0);
+
+ return 0;
+}
+
+static int vpbe_display_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = fh->disp_dev->osd_device;
+ struct v4l2_rect *rect = &crop->c;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_CROP, layer id = %d\n",
+ layer->device_id);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ rect->top = cfg->ypos;
+ rect->left = cfg->xpos;
+ rect->width = cfg->xsize;
+ rect->height = cfg->ysize;
+
+ return 0;
+}
+
+static int vpbe_display_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cropcap)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
+
+ cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.width = vpbe_dev->current_timings.xres;
+ cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
+ cropcap->defrect = cropcap->bounds;
+ return 0;
+}
+
+static int vpbe_display_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_FMT, layer id = %d\n",
+ layer->device_id);
+
+ /* If buffer type is video output */
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Fill in the information about format */
+ fmt->fmt.pix = layer->pix_fmt;
+
+ return 0;
+}
+
+static int vpbe_display_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned int index = 0;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_ENUM_FMT, layer id = %d\n",
+ layer->device_id);
+ if (fmt->index > 1) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n");
+ return -EINVAL;
+ }
+
+ /* Fill in the information about format */
+ index = fmt->index;
+ memset(fmt, 0, sizeof(*fmt));
+ fmt->index = index;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ if (index == 0) {
+ strcpy(fmt->description, "YUV 4:2:2 - UYVY");
+ fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+ } else {
+ strcpy(fmt->description, "Y/CbCr 4:2:0");
+ fmt->pixelformat = V4L2_PIX_FMT_NV12;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_FMT, layer id = %d\n",
+ layer->device_id);
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Check for valid pixel format */
+ ret = vpbe_try_format(disp_dev, pixfmt, 1);
+ if (ret)
+ return ret;
+
+	/*
+	 * If YUV420 semi-planar is requested, check availability of the
+	 * other video window
+	 */
+
+ layer->pix_fmt = *pixfmt;
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+ struct vpbe_layer *otherlayer;
+
+ otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+ /* if other layer is available, only
+ * claim it, do not configure it
+ */
+ ret = osd_device->ops.request_layer(osd_device,
+ otherlayer->layer_info.id);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Display Manager failed to allocate layer\n");
+ return -EBUSY;
+ }
+ }
+
+ /* Get osd layer config */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ /* Store the pixel format in the layer object */
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+ cfg->line_length = pixfmt->bytesperline;
+ cfg->ypos = 0;
+ cfg->xpos = 0;
+ cfg->interlaced = vpbe_dev->current_timings.interlaced;
+
+ if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat)
+ cfg->pixfmt = PIXFMT_YCBCRI;
+
+ /* Change of the default pixel format for both video windows */
+ if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) {
+ struct vpbe_layer *otherlayer;
+ cfg->pixfmt = PIXFMT_NV12;
+ otherlayer = _vpbe_display_get_other_win_layer(disp_dev,
+ layer);
+ otherlayer->layer_info.config.pixfmt = PIXFMT_NV12;
+ }
+
+ /* Set the layer config in the osd window */
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in S_FMT params:\n");
+ return -EINVAL;
+ }
+
+ /* Readback and fill the local copy of current pix format */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ return 0;
+}
+
+static int vpbe_display_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+
+ /* Check for valid field format */
+ return vpbe_try_format(disp_dev, pixfmt, 0);
+
+}
+
+/**
+ * vpbe_display_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder.
+ * Returns 0 on success, -EINVAL on error.
+ */
+static int vpbe_display_s_std(struct file *file, void *priv,
+ v4l2_std_id std_id)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (NULL != vpbe_dev->ops.s_std) {
+ ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set standard for sub devices\n");
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_std - Get the standard in the current encoder
+ *
+ * Gets the standard from the current encoder.
+ * Returns 0 on success, -EINVAL on error.
+ */
+static int vpbe_display_g_std(struct file *file, void *priv,
+ v4l2_std_id *std_id)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
+
+ /* Get the standard from the current encoder */
+ if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
+ *std_id = vpbe_dev->current_timings.std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_display_enum_output - enumerate outputs
+ *
+ * Enumerates the outputs available at the vpbe display.
+ * Returns 0 on success, -EINVAL at the end of the output list.
+ */
+static int vpbe_display_enum_output(struct file *file, void *priv,
+ struct v4l2_output *output)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
+
+ /* Enumerate outputs */
+
+ if (NULL == vpbe_dev->ops.enum_outputs)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
+ if (ret) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "Failed to enumerate outputs\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_s_output - Set output to
+ * the output specified by the index
+ */
+static int vpbe_display_s_output(struct file *file, void *priv,
+ unsigned int i)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (NULL == vpbe_dev->ops.set_output)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.set_output(vpbe_dev, i);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set output for sub devices\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_output - Get the index of the currently selected output
+ */
+static int vpbe_display_g_output(struct file *file, void *priv,
+ unsigned int *i)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
+ /* Get the standard from the current encoder */
+ *i = vpbe_dev->current_out_index;
+
+ return 0;
+}
+
+/**
+ * vpbe_display_enum_dv_timings - Enumerate the dv timings
+ *
+ * Enumerates the DV timings supported by the current encoder.
+ * Returns 0 on success, -EINVAL on error.
+ */
+static int
+vpbe_display_enum_dv_timings(struct file *file, void *priv,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
+
+ /* Enumerate outputs */
+ if (NULL == vpbe_dev->ops.enum_dv_timings)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to enumerate dv timings info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_s_dv_timings - Set the dv timings
+ *
+ * Sets the timings in the current encoder.
+ * Returns 0 on success, -EINVAL on error.
+ */
+static int
+vpbe_display_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n");
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+
+ /* Set the given standard in the encoder */
+ if (!vpbe_dev->ops.s_dv_timings)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.s_dv_timings(vpbe_dev, timings);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set the dv timings info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_dv_timings - Get the dv timings
+ *
+ * Gets the timings from the current encoder.
+ * Returns 0 on success, -EINVAL on error.
+ */
+static int
+vpbe_display_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n");
+
+ /* Get the given standard in the encoder */
+
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_TIMINGS) {
+ *dv_timings = vpbe_dev->current_timings.dv_timings;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = fh->disp_dev->osd_device;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_STREAMOFF,layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+	/* If io is not allowed for this file handle, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+
+ /* If streaming is not started, return error */
+ if (!layer->started) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			 "streaming not started in layer id = %d\n",
+			 layer->device_id);
+ return -EINVAL;
+ }
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ layer->started = 0;
+ ret = vb2_streamoff(&layer->buffer_queue, buf_type);
+
+ return ret;
+}
+
+static int vpbe_display_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int ret;
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_STREAMON, layerid=%d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If file handle is not allowed IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+ /* If Streaming is already started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "layer is already streaming\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Call vb2_streamon to start streaming
+ * in videobuf
+ */
+ ret = vb2_streamon(&layer->buffer_queue, buf_type);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "error in vb2_streamon\n");
+ return ret;
+ }
+ return ret;
+}
+
+static int vpbe_display_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_DQBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+ /* If this file handle is not allowed to do IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+ if (file->f_flags & O_NONBLOCK)
+		/* Call vb2_dqbuf in non-blocking mode */
+		ret = vb2_dqbuf(&layer->buffer_queue, buf, 1);
+	else
+		/* Call vb2_dqbuf in blocking mode */
+ ret = vb2_dqbuf(&layer->buffer_queue, buf, 0);
+
+ return ret;
+}
+
+static int vpbe_display_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_QBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If this file handle is not allowed to do IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+
+ return vb2_qbuf(&layer->buffer_queue, p);
+}
+
+static int vpbe_display_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_QUERYBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+ /* Call vb2_querybuf to get information */
+ return vb2_querybuf(&layer->buffer_queue, buf);
+}
+
+static int vpbe_display_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vb2_queue *q;
+ int ret;
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+	/* If the layer already has io users, return error */
+	if (0 != layer->io_usrs) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "layer is already in use\n");
+ return -EBUSY;
+ }
+ /* Initialize videobuf queue as per the buffer type */
+ layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
+ if (IS_ERR(layer->alloc_ctx)) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
+ return PTR_ERR(layer->alloc_ctx);
+ }
+ q = &layer->buffer_queue;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
+ return ret;
+ }
+ /* Set io allowed member of file handle to TRUE */
+ fh->io_allowed = 1;
+ /* Increment io usrs member of layer object to 1 */
+ layer->io_usrs = 1;
+ /* Store type of memory requested in layer object */
+ layer->memory = req_buf->memory;
+ /* Initialize buffer queue */
+ INIT_LIST_HEAD(&layer->dma_queue);
+ /* Allocate buffers */
+ return vb2_reqbufs(q, req_buf);
+}
+
+/*
+ * vpbe_display_mmap()
+ * It is used to map kernel space buffers into user space
+ */
+static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ /* Get the layer object and file handle object */
+ struct vpbe_fh *fh = filep->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_mmap\n");
+
+ if (mutex_lock_interruptible(&layer->opslock))
+ return -ERESTARTSYS;
+ ret = vb2_mmap(&layer->buffer_queue, vma);
+ mutex_unlock(&layer->opslock);
+ return ret;
+}
+
+/* vpbe_display_poll(): It is used for select/poll system call
+ */
+static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
+{
+ struct vpbe_fh *fh = filep->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned int err = 0;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
+ if (layer->started) {
+ mutex_lock(&layer->opslock);
+ err = vb2_poll(&layer->buffer_queue, filep, wait);
+ mutex_unlock(&layer->opslock);
+ }
+ return err;
+}
+
+/*
+ * vpbe_display_open()
+ * It creates an object of the file handle structure and stores it in the
+ * private_data member of the file pointer
+ */
+static int vpbe_display_open(struct file *file)
+{
+ struct vpbe_fh *fh = NULL;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int err;
+
+ /* Allocate memory for the file handle object */
+ fh = kmalloc(sizeof(struct vpbe_fh), GFP_KERNEL);
+ if (fh == NULL) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to allocate memory for file handle object\n");
+ return -ENOMEM;
+ }
+ v4l2_fh_init(&fh->fh, vdev);
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe display open plane = %d\n",
+ layer->device_id);
+
+ /* store pointer to fh in private_data member of filep */
+ file->private_data = fh;
+ fh->layer = layer;
+ fh->disp_dev = disp_dev;
+
+ if (!layer->usrs) {
+		if (mutex_lock_interruptible(&layer->opslock)) {
+			v4l2_fh_exit(&fh->fh);
+			kfree(fh);
+			return -ERESTARTSYS;
+		}
+ /* First claim the layer for this device */
+ err = osd_device->ops.request_layer(osd_device,
+ layer->layer_info.id);
+ mutex_unlock(&layer->opslock);
+ if (err < 0) {
+ /* Couldn't get layer */
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Display Manager failed to allocate layer\n");
+			v4l2_fh_exit(&fh->fh);
+			kfree(fh);
+ return -EINVAL;
+ }
+ }
+ /* Increment layer usrs counter */
+ layer->usrs++;
+ /* Set io_allowed member to false */
+ fh->io_allowed = 0;
+ v4l2_fh_add(&fh->fh);
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe display device opened successfully\n");
+ return 0;
+}
+
+/*
+ * vpbe_display_release()
+ * This function deletes the buffer queue, frees the buffers and the davinci
+ * display file handle
+ */
+static int vpbe_display_release(struct file *file)
+{
+ /* Get the layer object and file handle object */
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
+
+ mutex_lock(&layer->opslock);
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ /* Reset io_usrs member of layer object */
+ layer->io_usrs = 0;
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ layer->started = 0;
+ /* Free buffers allocated */
+ vb2_queue_release(&layer->buffer_queue);
+		vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
+ }
+
+ /* Decrement layer usrs counter */
+ layer->usrs--;
+	/* If this was the last user of the layer, release the OSD layer(s) */
+ if (!layer->usrs) {
+ if (cfg->pixfmt == PIXFMT_NV12) {
+ struct vpbe_layer *otherlayer;
+ otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+ osd_device->ops.disable_layer(osd_device,
+ otherlayer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ otherlayer->layer_info.id);
+ }
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ layer->layer_info.id);
+ }
+
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+ file->private_data = NULL;
+ mutex_unlock(&layer->opslock);
+
+ /* Free memory allocated to file handle object */
+ kfree(fh);
+
+ disp_dev->cbcr_ofst = 0;
+
+ return 0;
+}
+
+/* vpbe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
+ .vidioc_querycap = vpbe_display_querycap,
+ .vidioc_g_fmt_vid_out = vpbe_display_g_fmt,
+ .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
+ .vidioc_s_fmt_vid_out = vpbe_display_s_fmt,
+ .vidioc_try_fmt_vid_out = vpbe_display_try_fmt,
+ .vidioc_reqbufs = vpbe_display_reqbufs,
+ .vidioc_querybuf = vpbe_display_querybuf,
+ .vidioc_qbuf = vpbe_display_qbuf,
+ .vidioc_dqbuf = vpbe_display_dqbuf,
+ .vidioc_streamon = vpbe_display_streamon,
+ .vidioc_streamoff = vpbe_display_streamoff,
+ .vidioc_cropcap = vpbe_display_cropcap,
+ .vidioc_g_crop = vpbe_display_g_crop,
+ .vidioc_s_crop = vpbe_display_s_crop,
+ .vidioc_s_std = vpbe_display_s_std,
+ .vidioc_g_std = vpbe_display_g_std,
+ .vidioc_enum_output = vpbe_display_enum_output,
+ .vidioc_s_output = vpbe_display_s_output,
+ .vidioc_g_output = vpbe_display_g_output,
+ .vidioc_s_dv_timings = vpbe_display_s_dv_timings,
+ .vidioc_g_dv_timings = vpbe_display_g_dv_timings,
+ .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings,
+};
+
+static struct v4l2_file_operations vpbe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpbe_display_open,
+ .release = vpbe_display_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpbe_display_mmap,
+ .poll = vpbe_display_poll
+};
+
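+/*
+ * vpbe_device_get()
+ * bus_for_each_dev() callback: match the vpbe controller and the OSD
+ * sub-device by platform-device name and cache their driver data in the
+ * display object.
+ */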
+static int vpbe_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_display *vpbe_disp = data;
+
+ if (strcmp("vpbe_controller", pdev->name) == 0)
+ vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
+
+ if (strstr(pdev->name, "vpbe-osd") != NULL)
+ vpbe_disp->osd_device = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
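+/*
+ * init_vpbe_layer()
+ * Allocates one layer (video window) object and initializes its locks and
+ * its embedded video_device.
+ */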
+static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer = NULL;
+ struct video_device *vbd = NULL;
+
+	/* Allocate memory for the layer (plane) display object */
+
+ disp_dev->dev[i] =
+ kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);
+
+ /* If memory allocation fails, return error */
+ if (!disp_dev->dev[i]) {
+ printk(KERN_ERR "ran out of memory\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&disp_dev->dev[i]->irqlock);
+ mutex_init(&disp_dev->dev[i]->opslock);
+
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ vbd = &vpbe_display_layer->video_dev;
+ /* Initialize field of video device */
+ vbd->release = video_device_release_empty;
+ vbd->fops = &vpbe_fops;
+ vbd->ioctl_ops = &vpbe_ioctl_ops;
+ vbd->minor = -1;
+ vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
+ vbd->lock = &vpbe_display_layer->opslock;
+ vbd->vfl_dir = VFL_DIR_TX;
+
+ if (disp_dev->vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_STD)
+ vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
+
+ snprintf(vbd->name, sizeof(vbd->name),
+ "DaVinci_VPBE Display_DRIVER_V%d.%d.%d",
+ (VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE) & 0xff);
+
+ vpbe_display_layer->device_id = i;
+
+ vpbe_display_layer->layer_info.id =
+ ((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1);
+
+ return 0;
+}
+
+static int register_device(struct vpbe_layer *vpbe_display_layer,
+ struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ int err;
+
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+ "Trying to register VPBE display device.\n");
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+		  "layer=%p, layer->video_dev=%p\n",
+		  vpbe_display_layer, &vpbe_display_layer->video_dev);
+
+ err = video_register_device(&vpbe_display_layer->video_dev,
+ VFL_TYPE_GRABBER,
+ -1);
+ if (err)
+ return -ENODEV;
+
+ vpbe_display_layer->disp_dev = disp_dev;
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, disp_dev);
+ set_bit(V4L2_FL_USE_FH_PRIO, &vpbe_display_layer->video_dev.flags);
+ video_set_drvdata(&vpbe_display_layer->video_dev,
+ vpbe_display_layer);
+
+ return 0;
+}
+
+/*
+ * vpbe_display_probe()
+ * This function creates device entries by registering itself to the V4L2
+ * driver and initializes fields of each layer object
+ */
+static int vpbe_display_probe(struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer;
+ struct vpbe_display *disp_dev;
+ struct resource *res = NULL;
+ int k;
+ int i;
+ int err;
+ int irq;
+
+ printk(KERN_DEBUG "vpbe_display_probe\n");
+ /* Allocate memory for vpbe_display */
+ disp_dev = devm_kzalloc(&pdev->dev, sizeof(struct vpbe_display),
+ GFP_KERNEL);
+ if (!disp_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&disp_dev->dma_queue_lock);
+ /*
+ * Scan all the platform devices to find the vpbe
+ * controller device and get the vpbe_dev object
+ */
+ err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev,
+ vpbe_device_get);
+ if (err < 0)
+ return err;
+ /* Initialize the vpbe display controller */
+ if (NULL != disp_dev->vpbe_dev->ops.initialize) {
+ err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
+ disp_dev->vpbe_dev);
+ if (err) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Error initing vpbe\n");
+ err = -ENOMEM;
+ goto probe_out;
+ }
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ if (init_vpbe_layer(i, disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Unable to get VENC interrupt resource\n");
+ err = -ENODEV;
+ goto probe_out;
+ }
+
+ irq = res->start;
+ err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
+ VPBE_DISPLAY_DRIVER, disp_dev);
+ if (err) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Unable to request interrupt\n");
+ goto probe_out;
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out;
+ }
+ }
+
+ printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
+ return 0;
+
+probe_out:
+ for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[k];
+ /* Unregister video device */
+ if (vpbe_display_layer) {
+ video_unregister_device(
+ &vpbe_display_layer->video_dev);
+ kfree(disp_dev->dev[k]);
+ }
+ }
+ return err;
+}
+
+/*
+ * vpbe_display_remove()
+ * It un-registers the hardware layers from the V4L2 driver
+ */
+static int vpbe_display_remove(struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer;
+ struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int i;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
+
+ /* deinitialize the vpbe display controller */
+ if (NULL != vpbe_dev->ops.deinitialize)
+ vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
+ /* un-register device */
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ /* Unregister video device */
+ video_unregister_device(&vpbe_display_layer->video_dev);
+	}
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ kfree(disp_dev->dev[i]);
+ disp_dev->dev[i] = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver vpbe_display_driver = {
+ .driver = {
+ .name = VPBE_DISPLAY_DRIVER,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+ },
+ .probe = vpbe_display_probe,
+ .remove = vpbe_display_remove,
+};
+
+module_platform_driver(vpbe_display_driver);
+
+MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
new file mode 100644
index 00000000000..d053c2669c1
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -0,0 +1,1597 @@
+/*
+ * Copyright (C) 2007-2010 Texas Instruments Inc
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * Andy Lowe (alowe@mvista.com), MontaVista Software
+ * - Initial version
+ * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
+ * - ported to sub device interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <mach/cputype.h>
+#include <mach/hardware.h>
+
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_osd.h>
+
+#include <linux/io.h>
+#include "vpbe_osd_regs.h"
+
+#define MODULE_NAME "davinci-vpbe-osd"
+
+static struct platform_device_id vpbe_osd_devtype[] = {
+ {
+ .name = DM644X_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+ {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_osd_devtype);
+
+/* register access routines */
+static inline u32 osd_read(struct osd_state *sd, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ return readl(osd->osd_base + offset);
+}
+
+static inline u32 osd_write(struct osd_state *sd, u32 val, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ writel(val, osd->osd_base + offset);
+
+ return val;
+}
+
+static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ void __iomem *addr = osd->osd_base + offset;
+ u32 val = readl(addr) | mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
+static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ void __iomem *addr = osd->osd_base + offset;
+ u32 val = readl(addr) & ~mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
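+/* read-modify-write helper: update only the bits selected by mask */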
+static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
+ u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ void __iomem *addr = osd->osd_base + offset;
+ u32 new_val = (readl(addr) & ~mask) | (val & mask);
+
+ writel(new_val, addr);
+
+ return new_val;
+}
+
+/* define some macros for layer and pixfmt classification */
+#define is_osd_win(layer) (((layer) == WIN_OSD0) || ((layer) == WIN_OSD1))
+#define is_vid_win(layer) (((layer) == WIN_VID0) || ((layer) == WIN_VID1))
+#define is_rgb_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_RGB565) || ((pixfmt) == PIXFMT_RGB888))
+#define is_yc_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_YCBCRI) || ((pixfmt) == PIXFMT_YCRCBI) || \
+ ((pixfmt) == PIXFMT_NV12))
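+
+/*
+ * Window geometry limits, derived from the register field widths in
+ * vpbe_osd_regs.h: positions and sizes use 11-bit fields (0x7ff = 2047
+ * pixels/lines) and the line offset is a 9-bit count of 32-byte units
+ * (0x1ff << 5 = 16352 bytes).
+ */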
+#define MAX_WIN_SIZE OSD_VIDWIN0XP_V0X
+#define MAX_LINE_LENGTH (OSD_VIDWIN0OFST_V0LO << 5)
+
+/**
+ * _osd_dm6446_vid0_pingpong() - field inversion fix for DM6446
+ * @sd: pointer to the OSD state
+ * @field_inversion: field inversion flag
+ * @fb_base_phys: frame buffer base address
+ * @lconfig: pointer to the layer configuration
+ *
+ * This routine implements a workaround for the field signal inversion silicon
+ * erratum described in Advisory 1.3.8 for the DM6446. The fb_base_phys and
+ * lconfig parameters apply to the vid0 window. This routine should be called
+ * whenever the vid0 layer configuration or start address is modified, or when
+ * the OSD field inversion setting is modified.
+ * Returns: 1 if the ping-pong buffers need to be toggled in the vsync ISR
+ * (see the illustrative sketch after this function), or 0 otherwise
+ */
+static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
+ int field_inversion,
+ unsigned long fb_base_phys,
+ const struct osd_layer_config *lconfig)
+{
+ struct osd_platform_data *pdata;
+
+ pdata = (struct osd_platform_data *)sd->dev->platform_data;
+ if (pdata != NULL && pdata->field_inv_wa_enable) {
+
+ if (!field_inversion || !lconfig->interlaced) {
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_PPVWIN0ADR);
+ osd_modify(sd, OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, 0,
+ OSD_MISCCTL);
+ return 0;
+ } else {
+ unsigned miscctl = OSD_MISCCTL_PPRV;
+
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) - lconfig->line_length,
+ OSD_VIDWIN0ADR);
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) + lconfig->line_length,
+ OSD_PPVWIN0ADR);
+ osd_modify(sd,
+ OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, miscctl,
+ OSD_MISCCTL);
+
+ return 1;
+ }
+ }
+
+ return 0;
+}
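+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the
+ * original driver): one way a vsync handler could act on the value the
+ * callers below store in osd->pingpong.  The display driver owns the
+ * real ISR, so the hook and the exact register sequence shown here are
+ * assumptions.
+ */
+static inline void example_vid0_pingpong_vsync(struct osd_state *osd)
+{
+ if (!osd->pingpong)
+ return;
+
+ /* flip the ping-pong buffer select bit on every vertical sync */
+ osd_modify(osd, OSD_MISCCTL_PPSW,
+ ~osd_read(osd, OSD_MISCCTL) & OSD_MISCCTL_PPSW,
+ OSD_MISCCTL);
+}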
+
+static void _osd_set_field_inversion(struct osd_state *sd, int enable)
+{
+ unsigned fsinv = 0;
+
+ if (enable)
+ fsinv = OSD_MODE_FSINV;
+
+ osd_modify(sd, OSD_MODE_FSINV, fsinv, OSD_MODE);
+}
+
+static void _osd_set_blink_attribute(struct osd_state *sd, int enable,
+ enum osd_blink_interval blink)
+{
+ u32 osdatrmd = 0;
+
+ if (enable) {
+ osdatrmd |= OSD_OSDATRMD_BLNK;
+ osdatrmd |= blink << OSD_OSDATRMD_BLNKINT_SHIFT;
+ }
+ /* caller must ensure that OSD1 is configured in attribute mode */
+ osd_modify(sd, OSD_OSDATRMD_BLNKINT | OSD_OSDATRMD_BLNK, osdatrmd,
+ OSD_OSDATRMD);
+}
+
+static void _osd_set_rom_clut(struct osd_state *sd,
+ enum osd_rom_clut rom_clut)
+{
+ if (rom_clut == ROM_CLUT0)
+ osd_clear(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+ else
+ osd_set(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+}
+
+static void _osd_set_palette_map(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned char pixel_value,
+ unsigned char clut_index,
+ enum osd_pix_format pixfmt)
+{
+ static const int map_2bpp[] = { 0, 5, 10, 15 };
+ static const int map_1bpp[] = { 0, 15 };
+ int bmp_offset;
+ int bmp_shift;
+ int bmp_mask;
+ int bmp_reg;
+
+ switch (pixfmt) {
+ case PIXFMT_1BPP:
+ bmp_reg = map_1bpp[pixel_value & 0x1];
+ break;
+ case PIXFMT_2BPP:
+ bmp_reg = map_2bpp[pixel_value & 0x3];
+ break;
+ case PIXFMT_4BPP:
+ bmp_reg = pixel_value & 0xf;
+ break;
+ default:
+ return;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ bmp_offset = OSD_W0BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ case OSDWIN_OSD1:
+ bmp_offset = OSD_W1BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ default:
+ return;
+ }
+
+ if (bmp_reg & 1) {
+ bmp_shift = 8;
+ bmp_mask = 0xff << 8;
+ } else {
+ bmp_shift = 0;
+ bmp_mask = 0xff;
+ }
+
+ osd_modify(sd, bmp_mask, clut_index << bmp_shift, bmp_offset);
+}
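+
+/*
+ * Worked example (illustrative only): for a 2 bpp OSD0 window, pixel
+ * value 2 selects bitmap entry map_2bpp[2] = 10, which lives in
+ * OSD_W0BMP01 + (10 >> 1) * 4 = OSD_W0BMPAB.  Entry 10 is even, so the
+ * CLUT index lands in bits 7:0 of that register; a call such as
+ * _osd_set_palette_map(sd, OSDWIN_OSD0, 2, 0x40, PIXFMT_2BPP)
+ * therefore reduces to osd_modify(sd, 0xff, 0x40, OSD_W0BMPAB).
+ */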
+
+static void _osd_set_rec601_attenuation(struct osd_state *sd,
+ enum osd_win_layer osdwin, int enable)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+ enable ? OSD_OSDWIN0MD_ATN0E : 0,
+ OSD_OSDWIN0MD);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+ enable ? OSD_OSDWIN0MD_ATN0E : 0,
+ OSD_OSDWIN0MD);
+ else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2))
+ osd_modify(sd, OSD_EXTMODE_ATNOSD0EN,
+ enable ? OSD_EXTMODE_ATNOSD0EN : 0,
+ OSD_EXTMODE);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+ enable ? OSD_OSDWIN1MD_ATN1E : 0,
+ OSD_OSDWIN1MD);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+ enable ? OSD_OSDWIN1MD_ATN1E : 0,
+ OSD_OSDWIN1MD);
+ else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2))
+ osd_modify(sd, OSD_EXTMODE_ATNOSD1EN,
+ enable ? OSD_EXTMODE_ATNOSD1EN : 0,
+ OSD_EXTMODE);
+ break;
+ }
+}
+
+static void _osd_set_blending_factor(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_blending_factor blend)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_BLND0,
+ blend << OSD_OSDWIN0MD_BLND0_SHIFT, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_BLND1,
+ blend << OSD_OSDWIN1MD_BLND1_SHIFT, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_enable_rgb888_pixblend(struct osd_state *sd,
+ enum osd_win_layer osdwin)
+{
+
+ osd_modify(sd, OSD_MISCCTL_BLDSEL, 0, OSD_MISCCTL);
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_EXTMODE_OSD0BLDCHR,
+ OSD_EXTMODE_OSD0BLDCHR, OSD_EXTMODE);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_EXTMODE_OSD1BLDCHR,
+ OSD_EXTMODE_OSD1BLDCHR, OSD_EXTMODE);
+ break;
+ }
+}
+
+static void _osd_enable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned colorkey,
+ enum osd_pix_format pixfmt)
+{
+ switch (pixfmt) {
+ case PIXFMT_1BPP:
+ case PIXFMT_2BPP:
+ case PIXFMT_4BPP:
+ case PIXFMT_8BPP:
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_TRANSPBMPIDX_BMP0,
+ colorkey <<
+ OSD_TRANSPBMPIDX_BMP0_SHIFT,
+ OSD_TRANSPBMPIDX);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_TRANSPBMPIDX_BMP1,
+ colorkey <<
+ OSD_TRANSPBMPIDX_BMP1_SHIFT,
+ OSD_TRANSPBMPIDX);
+ break;
+ }
+ }
+ break;
+ case PIXFMT_RGB565:
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
+ OSD_TRANSPVAL);
+ else if (sd->vpbe_type == VPBE_VERSION_3)
+ osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+ OSD_TRANSPVALL);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ if (sd->vpbe_type == VPBE_VERSION_3)
+ osd_modify(sd, OSD_TRANSPVALU_Y, colorkey,
+ OSD_TRANSPVALU);
+ break;
+ case PIXFMT_RGB888:
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+ OSD_TRANSPVALL);
+ osd_modify(sd, OSD_TRANSPVALU_RGBU, colorkey >> 16,
+ OSD_TRANSPVALU);
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_set(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_disable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_clear(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_osd_clut(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_clut clut)
+{
+ u32 winmd = 0;
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN0MD_CLUTS0;
+ osd_modify(sd, OSD_OSDWIN0MD_CLUTS0, winmd, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN1MD_CLUTS1;
+ osd_modify(sd, OSD_OSDWIN1MD_CLUTS1, winmd, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_zoom(struct osd_state *sd, enum osd_layer layer,
+ enum osd_zoom_factor h_zoom,
+ enum osd_zoom_factor v_zoom)
+{
+ u32 winmd = 0;
+
+ switch (layer) {
+ case WIN_OSD0:
+ winmd |= (h_zoom << OSD_OSDWIN0MD_OHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN0MD_OVZ0_SHIFT);
+ osd_modify(sd, OSD_OSDWIN0MD_OHZ0 | OSD_OSDWIN0MD_OVZ0, winmd,
+ OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ0_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ0 | OSD_VIDWINMD_VVZ0, winmd,
+ OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ winmd |= (h_zoom << OSD_OSDWIN1MD_OHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN1MD_OVZ1_SHIFT);
+ osd_modify(sd, OSD_OSDWIN1MD_OHZ1 | OSD_OSDWIN1MD_OVZ1, winmd,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ1_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ1 | OSD_VIDWINMD_VVZ1, winmd,
+ OSD_VIDWINMD);
+ break;
+ }
+}
+
+static void _osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_clear(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* disable attribute mode as well as disabling the window */
+ osd_clear(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_clear(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static void osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+ win->is_enabled = 0;
+
+ _osd_disable_layer(sd, layer);
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void _osd_enable_attribute_mode(struct osd_state *sd)
+{
+ /* enable attribute mode for OSD1 */
+ osd_set(sd, OSD_OSDWIN1MD_OASW, OSD_OSDWIN1MD);
+}
+
+static void _osd_enable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_set(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* enable OSD1 and disable attribute mode */
+ osd_modify(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD_OACT1, OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_set(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
+ int otherwin)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ /*
+ * The otherwin flag indicates that this is the other video window
+ * in YUV420 (NV12) mode; if so, skip the allocation and size checks.
+ */
+ if (!otherwin && (!win->is_allocated ||
+ !win->fb_base_phys ||
+ !cfg->line_length ||
+ !cfg->xsize ||
+ !cfg->ysize)) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+
+ if (win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return 0;
+ }
+ win->is_enabled = 1;
+
+ if (cfg->pixfmt != PIXFMT_OSD_ATTR)
+ _osd_enable_layer(sd, layer);
+ else {
+ _osd_enable_attribute_mode(sd);
+ _osd_set_blink_attribute(sd, osd->is_blinking, osd->blink);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+#define OSD_SRC_ADDR_HIGH4 0x7800000
+#define OSD_SRC_ADDR_HIGH7 0x7F0000
+#define OSD_SRCADD_OFSET_SFT 23
+#define OSD_SRCADD_ADD_SFT 16
+#define OSD_WINADL_MASK 0xFFFF
+#define OSD_WINOFST_MASK 0x1000
+#define VPBE_REG_BASE 0x80000000
+
+static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ switch (layer) {
+ case WIN_OSD0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
+ break;
+ case WIN_VID0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ break;
+ case WIN_OSD1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
+ break;
+ case WIN_VID1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
+ break;
+ }
+ } else if (sd->vpbe_type == VPBE_VERSION_3) {
+ unsigned long fb_offset_32 =
+ (fb_base_phys - VPBE_REG_BASE) >> 5;
+
+ switch (layer) {
+ case WIN_OSD0:
+ osd_modify(sd, OSD_OSDWINADH_O0AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O0AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_OSDWIN0ADL_O0AL,
+ OSD_OSDWIN0ADL);
+ break;
+ case WIN_VID0:
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_VIDWIN0ADL_V0AL,
+ OSD_VIDWIN0ADL);
+ break;
+ case WIN_OSD1:
+ osd_modify(sd, OSD_OSDWINADH_O1AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O1AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_OSDWIN1ADL_O1AL,
+ OSD_OSDWIN1ADL);
+ break;
+ case WIN_VID1:
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_VIDWIN1ADL_V1AL,
+ OSD_VIDWIN1ADL);
+ break;
+ }
+ } else if (sd->vpbe_type == VPBE_VERSION_2) {
+ struct osd_window_state *win = &sd->win[layer];
+ unsigned long fb_offset_32, cbcr_offset_32;
+
+ fb_offset_32 = fb_base_phys - VPBE_REG_BASE;
+ if (cbcr_ofst)
+ cbcr_offset_32 = cbcr_ofst;
+ else
+ cbcr_offset_32 = win->lconfig.line_length *
+ win->lconfig.ysize;
+ cbcr_offset_32 += fb_offset_32;
+ fb_offset_32 = fb_offset_32 >> 5;
+ cbcr_offset_32 = cbcr_offset_32 >> 5;
+ /*
+ * DM365: the start address is a 27-bit value in 32-byte units;
+ * bits 26-23 go into bits 12-9 of the window offset register,
+ * and bit 26 has to be '1'.
+ */
+ if (win->lconfig.pixfmt == PIXFMT_NV12) {
+ switch (layer) {
+ case WIN_VID0:
+ case WIN_VID1:
+ /* Y is in VID0 */
+ osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN0ADL);
+ /* CbCr is in VID1 */
+ osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+ ((cbcr_offset_32 &
+ OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ (cbcr_offset_32 &
+ OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, cbcr_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN1ADL);
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (layer) {
+ case WIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0OFST_O0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+ OSD_OSDWIN0OFST);
+ osd_modify(sd, OSD_OSDWINADH_O0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O0AH_SHIFT), OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_OSDWIN0ADL);
+ break;
+ case WIN_VID0:
+ if (win->lconfig.pixfmt != PIXFMT_NV12) {
+ osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN0ADL);
+ }
+ break;
+ case WIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1OFST_O1AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+ OSD_OSDWIN1OFST);
+ osd_modify(sd, OSD_OSDWINADH_O1AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O1AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_OSDWIN1ADL);
+ break;
+ case WIN_VID1:
+ if (win->lconfig.pixfmt != PIXFMT_NV12) {
+ osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN1ADL);
+ }
+ break;
+ }
+ }
+}
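+
+/*
+ * Illustrative breakdown of the DM365 address split performed above,
+ * using the masks defined before _osd_start_layer().  For a
+ * hypothetical fb_base_phys of 0x8E123460:
+ *
+ *	fb_offset_32 = (0x8E123460 - VPBE_REG_BASE) >> 5 = 0x7091A3
+ *	bits 26-23 (0x7800000) -> window offset register bits 12-9,
+ *	                          here 0, OR'ed with OSD_WINOFST_MASK
+ *	bits 22-16 (0x7F0000)  -> ADH field, here 0x70
+ *	bits 15-0  (0xFFFF)    -> ADL register, here 0x91A3
+ */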
+
+static void osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->fb_base_phys = fb_base_phys & ~0x1F;
+ _osd_start_layer(sd, layer, fb_base_phys, cbcr_ofst);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_get_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ *lconfig = win->lconfig;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+/**
+ * try_layer_config() - Try a specific configuration for the layer
+ * @sd: pointer to the OSD state
+ * @layer: layer to configure
+ * @lconfig: layer configuration to try
+ *
+ * If the requested lconfig is rejected outright, lconfig is set to the current
+ * layer configuration on exit and try_layer_config() returns 1.  Otherwise it
+ * returns 0.  A return value of 0 does not guarantee that lconfig is unchanged
+ * on exit: the requested values may have been rounded or clamped to the
+ * nearest supported configuration (see the illustrative sketch after this
+ * function).
+ */
+static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ int bad_config = 0;
+
+ /* verify that the pixel format is compatible with the layer */
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ case PIXFMT_2BPP:
+ case PIXFMT_4BPP:
+ case PIXFMT_8BPP:
+ case PIXFMT_RGB565:
+ if (osd->vpbe_type == VPBE_VERSION_1)
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_RGB888:
+ if (osd->vpbe_type == VPBE_VERSION_1)
+ bad_config = !is_vid_win(layer);
+ else if ((osd->vpbe_type == VPBE_VERSION_3) ||
+ (osd->vpbe_type == VPBE_VERSION_2))
+ bad_config = !is_osd_win(layer);
+ break;
+ case PIXFMT_NV12:
+ if (osd->vpbe_type != VPBE_VERSION_2)
+ bad_config = 1;
+ else
+ bad_config = is_osd_win(layer);
+ break;
+ case PIXFMT_OSD_ATTR:
+ bad_config = (layer != WIN_OSD1);
+ break;
+ default:
+ bad_config = 1;
+ break;
+ }
+ if (bad_config) {
+ /*
+ * The requested pixel format is incompatible with the layer,
+ * so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return bad_config;
+ }
+
+ /* DM6446: only one OSD window at a time can use RGB pixel formats */
+ if ((osd->vpbe_type == VPBE_VERSION_1) &&
+ is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
+ enum osd_pix_format pixfmt;
+ if (layer == WIN_OSD0)
+ pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_OSD0].lconfig.pixfmt;
+
+ if (is_rgb_pixfmt(pixfmt)) {
+ /*
+ * The other OSD window is already configured for an RGB
+ * pixel format, so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* DM6446: only one video window at a time can use RGB888 */
+ if ((osd->vpbe_type == VPBE_VERSION_1) && is_vid_win(layer) &&
+ lconfig->pixfmt == PIXFMT_RGB888) {
+ enum osd_pix_format pixfmt;
+
+ if (layer == WIN_VID0)
+ pixfmt = osd->win[WIN_VID1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_VID0].lconfig.pixfmt;
+
+ if (pixfmt == PIXFMT_RGB888) {
+ /*
+ * The other video window is already configured for
+ * RGB888, so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* window dimensions must be non-zero */
+ if (!lconfig->line_length || !lconfig->xsize || !lconfig->ysize) {
+ *lconfig = win->lconfig;
+ return 1;
+ }
+
+ /* round line_length up to a multiple of 32 */
+ lconfig->line_length = ((lconfig->line_length + 31) / 32) * 32;
+ lconfig->line_length =
+ min(lconfig->line_length, (unsigned)MAX_LINE_LENGTH);
+ lconfig->xsize = min(lconfig->xsize, (unsigned)MAX_WIN_SIZE);
+ lconfig->ysize = min(lconfig->ysize, (unsigned)MAX_WIN_SIZE);
+ lconfig->xpos = min(lconfig->xpos, (unsigned)MAX_WIN_SIZE);
+ lconfig->ypos = min(lconfig->ypos, (unsigned)MAX_WIN_SIZE);
+ lconfig->interlaced = (lconfig->interlaced != 0);
+ if (lconfig->interlaced) {
+ /* ysize and ypos must be even for interlaced displays */
+ lconfig->ysize &= ~1;
+ lconfig->ypos &= ~1;
+ }
+
+ return 0;
+}
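+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the
+ * original driver): probing whether VID0 would accept an NV12
+ * configuration without committing it.  The real caller,
+ * osd_set_layer_config() below, invokes this under osd->lock.
+ */
+static inline int example_try_nv12_on_vid0(struct osd_state *sd)
+{
+ struct osd_layer_config cfg = {
+ .pixfmt = PIXFMT_NV12,
+ .line_length = 736,
+ .xsize = 720,
+ .ysize = 480,
+ .interlaced = 1,
+ };
+
+ /* returns 1 if NV12 is rejected outright for this layer */
+ return try_layer_config(sd, WIN_VID0, &cfg);
+}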
+
+static void _osd_disable_vid_rgb888(struct osd_state *sd)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine disables RGB888 pixel format for both video windows.
+ * The caller must ensure that neither video window is currently
+ * configured for RGB888 pixel format.
+ */
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+}
+
+static void _osd_enable_vid_rgb888(struct osd_state *sd,
+ enum osd_layer layer)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine enables RGB888 pixel format for the specified video
+ * window. The caller must ensure that the other video window is not
+ * currently configured for RGB888 pixel format, as this routine will
+ * disable RGB888 pixel format for the other window.
+ */
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ if (layer == WIN_VID0)
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+ else if (layer == WIN_VID1)
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL);
+ }
+}
+
+static void _osd_set_cbcr_order(struct osd_state *sd,
+ enum osd_pix_format pixfmt)
+{
+ /*
+ * The caller must ensure that all windows using YC pixfmt use the same
+ * Cb/Cr order.
+ */
+ if (pixfmt == PIXFMT_YCBCRI)
+ osd_clear(sd, OSD_MODE_CS, OSD_MODE);
+ else if (pixfmt == PIXFMT_YCRCBI)
+ osd_set(sd, OSD_MODE_CS, OSD_MODE);
+}
+
+static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ const struct osd_layer_config *lconfig)
+{
+ u32 winmd = 0, winmd_mask = 0, bmw = 0;
+
+ _osd_set_cbcr_order(sd, lconfig->pixfmt);
+
+ switch (layer) {
+ case WIN_OSD0:
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN0MD_RGB0E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN0MD_RGB0E;
+ } else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2)) {
+ winmd_mask |= OSD_OSDWIN0MD_BMP0MD;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_RGB565:
+ winmd |= (1 <<
+ OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ break;
+ case PIXFMT_RGB888:
+ winmd |= (2 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ _osd_enable_rgb888_pixblend(sd, OSDWIN_OSD0);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ winmd |= (3 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ break;
+ default:
+ break;
+ }
+ }
+
+ winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;
+
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN0MD_BMW0_SHIFT);
+
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN0MD_OFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN0MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN0XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN0YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN0YL);
+ }
+ break;
+ case WIN_VID0:
+ winmd_mask |= OSD_VIDWINMD_VFF0;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+ /*
+ * For the NV12 (YUV 4:2:0 semi-planar) format the register
+ * contents are duplicated in both VID window register sets.
+ */
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ (lconfig->pixfmt == PIXFMT_NV12)) {
+ /* other window also */
+ if (lconfig->interlaced) {
+ winmd_mask |= OSD_VIDWINMD_VFF1;
+ winmd |= OSD_VIDWINMD_VFF1;
+ osd_modify(sd, winmd_mask, winmd,
+ OSD_VIDWINMD);
+ }
+
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ OSD_MISCCTL_S420D, OSD_MISCCTL);
+ osd_write(sd, lconfig->line_length >> 5,
+ OSD_VIDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+ /*
+ * If the NV12 window width is not 32-byte
+ * aligned (e.g. 720-pixel NTSC), the window
+ * X size must be rounded up to a 32-byte
+ * boundary as well.
+ */
+ if (lconfig->xsize % 32) {
+ osd_write(sd,
+ ((lconfig->xsize + 31) & ~31),
+ OSD_VIDWIN1XL);
+ osd_write(sd,
+ ((lconfig->xsize + 31) & ~31),
+ OSD_VIDWIN0XL);
+ }
+ } else if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ (lconfig->pixfmt != PIXFMT_NV12)) {
+ osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
+ OSD_MISCCTL);
+ }
+
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos >> 1,
+ OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1,
+ OSD_VIDWIN1YL);
+ }
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ }
+ }
+ break;
+ case WIN_OSD1:
+ /*
+ * The caller must ensure that OSD1 is disabled prior to
+ * switching from a normal mode to attribute mode or from
+ * attribute mode to a normal mode.
+ */
+ if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN1MD_ATN1E |
+ OSD_OSDWIN1MD_RGB1E | OSD_OSDWIN1MD_CLUTS1 |
+ OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
+ } else {
+ winmd_mask |= OSD_OSDWIN1MD_BMP1MD |
+ OSD_OSDWIN1MD_CLUTS1 | OSD_OSDWIN1MD_BLND1 |
+ OSD_OSDWIN1MD_TE1;
+ }
+ } else {
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN1MD_RGB1E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN1MD_RGB1E;
+ } else if ((sd->vpbe_type == VPBE_VERSION_3)
+ || (sd->vpbe_type == VPBE_VERSION_2)) {
+ winmd_mask |= OSD_OSDWIN1MD_BMP1MD;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_RGB565:
+ winmd |=
+ (1 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ break;
+ case PIXFMT_RGB888:
+ winmd |=
+ (2 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ _osd_enable_rgb888_pixblend(sd,
+ OSDWIN_OSD1);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ winmd |=
+ (3 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ break;
+ default:
+ break;
+ }
+ }
+
+ winmd_mask |= OSD_OSDWIN1MD_BMW1;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN1MD_BMW1_SHIFT);
+ }
+
+ winmd_mask |= OSD_OSDWIN1MD_OFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN1MD_OFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN1MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN1XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN1YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN1YL);
+ }
+ break;
+ case WIN_VID1:
+ winmd_mask |= OSD_VIDWINMD_VFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+ /*
+ * For the NV12 (YUV 4:2:0 semi-planar) format the register
+ * contents are duplicated in both VID window register sets.
+ */
+ if (sd->vpbe_type == VPBE_VERSION_2) {
+ if (lconfig->pixfmt == PIXFMT_NV12) {
+ /* other window also */
+ if (lconfig->interlaced) {
+ winmd_mask |= OSD_VIDWINMD_VFF0;
+ winmd |= OSD_VIDWINMD_VFF0;
+ osd_modify(sd, winmd_mask, winmd,
+ OSD_VIDWINMD);
+ }
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ OSD_MISCCTL_S420D, OSD_MISCCTL);
+ osd_write(sd, lconfig->line_length >> 5,
+ OSD_VIDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+ } else {
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ ~OSD_MISCCTL_S420D, OSD_MISCCTL);
+ }
+ }
+
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos >> 1,
+ OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1,
+ OSD_VIDWIN0YL);
+ }
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ }
+ }
+ break;
+ }
+}
+
+static int osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+ int reject_config;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ reject_config = try_layer_config(sd, layer, lconfig);
+ if (reject_config) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return reject_config;
+ }
+
+ /* update the current Cb/Cr order */
+ if (is_yc_pixfmt(lconfig->pixfmt))
+ osd->yc_pixfmt = lconfig->pixfmt;
+
+ /*
+ * If we are switching OSD1 from normal mode to attribute mode or from
+ * attribute mode to normal mode, then we must disable the window.
+ */
+ if (layer == WIN_OSD1) {
+ if (((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) ||
+ ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR))) {
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+ }
+ }
+
+ _osd_set_layer_config(sd, layer, lconfig);
+
+ if (layer == WIN_OSD1) {
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[OSDWIN_OSD1];
+
+ if ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from attribute mode to normal
+ * mode, so we must initialize the CLUT select, the
+ * blend factor, transparency colorkey enable, and
+ * attenuation enable (DM6446 only) bits in the
+ * OSDWIN1MD register.
+ */
+ _osd_set_osd_clut(sd, OSDWIN_OSD1,
+ osdwin_state->clut);
+ _osd_set_blending_factor(sd, OSDWIN_OSD1,
+ osdwin_state->blend);
+ if (osdwin_state->colorkey_blending) {
+ _osd_enable_color_key(sd, OSDWIN_OSD1,
+ osdwin_state->
+ colorkey,
+ lconfig->pixfmt);
+ } else
+ _osd_disable_color_key(sd, OSDWIN_OSD1);
+ _osd_set_rec601_attenuation(sd, OSDWIN_OSD1,
+ osdwin_state->
+ rec601_attenuation);
+ } else if ((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from normal mode to attribute
+ * mode, so we must initialize the blink enable and
+ * blink interval bits in the OSDATRMD register.
+ */
+ _osd_set_blink_attribute(sd, osd->is_blinking,
+ osd->blink);
+ }
+ }
+
+ /*
+ * If we just switched to a 1-, 2-, or 4-bits-per-pixel bitmap format
+ * then configure a default palette map.
+ */
+ if ((lconfig->pixfmt != cfg->pixfmt) &&
+ ((lconfig->pixfmt == PIXFMT_1BPP) ||
+ (lconfig->pixfmt == PIXFMT_2BPP) ||
+ (lconfig->pixfmt == PIXFMT_4BPP))) {
+ enum osd_win_layer osdwin =
+ ((layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1);
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[osdwin];
+ unsigned char clut_index;
+ unsigned char clut_entries = 0;
+
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ clut_entries = 2;
+ break;
+ case PIXFMT_2BPP:
+ clut_entries = 4;
+ break;
+ case PIXFMT_4BPP:
+ clut_entries = 16;
+ break;
+ default:
+ break;
+ }
+ /*
+ * The default palette map maps the pixel value to the clut
+ * index, i.e. pixel value 0 maps to clut entry 0, pixel value
+ * 1 maps to clut entry 1, etc.
+ */
+ for (clut_index = 0; clut_index < 16; clut_index++) {
+ osdwin_state->palette_map[clut_index] = clut_index;
+ if (clut_index < clut_entries) {
+ _osd_set_palette_map(sd, osdwin, clut_index,
+ clut_index,
+ lconfig->pixfmt);
+ }
+ }
+ }
+
+ *cfg = *lconfig;
+ /* DM6446: configure the RGB888 enable and window selection */
+ if (osd->win[WIN_VID0].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID0);
+ else if (osd->win[WIN_VID1].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID1);
+ else
+ _osd_disable_vid_rgb888(sd);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
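+
+/*
+ * Illustrative call sequence (hypothetical caller): switching OSD1 into
+ * attribute mode on a window that has already been configured and
+ * started with valid geometry.  osd_set_layer_config() disables the
+ * window across the mode change as described above, so the caller only
+ * needs to re-enable it afterwards:
+ *
+ *	cfg.pixfmt = PIXFMT_OSD_ATTR;
+ *	osd_set_layer_config(sd, WIN_OSD1, &cfg);
+ *	osd_enable_layer(sd, WIN_OSD1, 0);
+ */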
+
+static void osd_init_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ enum osd_win_layer osdwin;
+ struct osd_osdwin_state *osdwin_state;
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+
+ win->h_zoom = ZOOM_X1;
+ win->v_zoom = ZOOM_X1;
+ _osd_set_zoom(sd, layer, win->h_zoom, win->v_zoom);
+
+ win->fb_base_phys = 0;
+ _osd_start_layer(sd, layer, win->fb_base_phys, 0);
+
+ cfg->line_length = 0;
+ cfg->xsize = 0;
+ cfg->ysize = 0;
+ cfg->xpos = 0;
+ cfg->ypos = 0;
+ cfg->interlaced = 0;
+ switch (layer) {
+ case WIN_OSD0:
+ case WIN_OSD1:
+ osdwin = (layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1;
+ osdwin_state = &osd->osdwin[osdwin];
+ /*
+ * Other code relies on the fact that OSD windows default to a
+ * bitmap pixel format when they are deallocated, so don't
+ * change this default pixel format.
+ */
+ cfg->pixfmt = PIXFMT_8BPP;
+ _osd_set_layer_config(sd, layer, cfg);
+ osdwin_state->clut = RAM_CLUT;
+ _osd_set_osd_clut(sd, osdwin, osdwin_state->clut);
+ osdwin_state->colorkey_blending = 0;
+ _osd_disable_color_key(sd, osdwin);
+ osdwin_state->blend = OSD_8_VID_0;
+ _osd_set_blending_factor(sd, osdwin, osdwin_state->blend);
+ osdwin_state->rec601_attenuation = 0;
+ _osd_set_rec601_attenuation(sd, osdwin,
+ osdwin_state->
+ rec601_attenuation);
+ if (osdwin == OSDWIN_OSD1) {
+ osd->is_blinking = 0;
+ osd->blink = BLINK_X1;
+ }
+ break;
+ case WIN_VID0:
+ case WIN_VID1:
+ cfg->pixfmt = osd->yc_pixfmt;
+ _osd_set_layer_config(sd, layer, cfg);
+ break;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_release_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+ osd_init_layer(sd, layer);
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_allocated = 0;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static int osd_request_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+ win->is_allocated = 1;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+static void _osd_init(struct osd_state *sd)
+{
+ osd_write(sd, 0, OSD_MODE);
+ osd_write(sd, 0, OSD_VIDWINMD);
+ osd_write(sd, 0, OSD_OSDWIN0MD);
+ osd_write(sd, 0, OSD_OSDWIN1MD);
+ osd_write(sd, 0, OSD_RECTCUR);
+ osd_write(sd, 0, OSD_MISCCTL);
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ osd_write(sd, 0, OSD_VBNDRY);
+ osd_write(sd, 0, OSD_EXTMODE);
+ osd_write(sd, OSD_MISCCTL_DMANG, OSD_MISCCTL);
+ }
+}
+
+static void osd_set_left_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPX);
+}
+
+static void osd_set_top_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPY);
+}
+
+static int osd_initialize(struct osd_state *osd)
+{
+ if (osd == NULL)
+ return -ENODEV;
+ _osd_init(osd);
+
+ /* set default Cb/Cr order */
+ osd->yc_pixfmt = PIXFMT_YCBCRI;
+
+ if (osd->vpbe_type == VPBE_VERSION_3) {
+ /*
+ * ROM CLUT1 on the DM355 is similar (identical?) to ROM CLUT0
+ * on the DM6446, so make ROM_CLUT1 the default on the DM355.
+ */
+ osd->rom_clut = ROM_CLUT1;
+ }
+
+ _osd_set_field_inversion(osd, osd->field_inversion);
+ _osd_set_rom_clut(osd, osd->rom_clut);
+
+ osd_init_layer(osd, WIN_OSD0);
+ osd_init_layer(osd, WIN_VID0);
+ osd_init_layer(osd, WIN_OSD1);
+ osd_init_layer(osd, WIN_VID1);
+
+ return 0;
+}
+
+static const struct vpbe_osd_ops osd_ops = {
+ .initialize = osd_initialize,
+ .request_layer = osd_request_layer,
+ .release_layer = osd_release_layer,
+ .enable_layer = osd_enable_layer,
+ .disable_layer = osd_disable_layer,
+ .set_layer_config = osd_set_layer_config,
+ .get_layer_config = osd_get_layer_config,
+ .start_layer = osd_start_layer,
+ .set_left_margin = osd_set_left_margin,
+ .set_top_margin = osd_set_top_margin,
+};
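+
+/*
+ * Illustrative sketch only (hypothetical caller, not part of the
+ * original driver): how a display driver might drive the VID0 window
+ * through the ops table above.  The layer configuration and frame
+ * buffer address are assumed to come from the caller.
+ */
+static inline int example_show_vid0(struct osd_state *osd,
+ struct osd_layer_config *cfg,
+ unsigned long fb_base_phys)
+{
+ int ret;
+
+ ret = osd->ops.request_layer(osd, WIN_VID0);
+ if (ret < 0)
+ return ret;
+
+ ret = osd->ops.set_layer_config(osd, WIN_VID0, cfg);
+ if (ret)
+ goto release;
+
+ osd->ops.start_layer(osd, WIN_VID0, fb_base_phys, 0);
+
+ ret = osd->ops.enable_layer(osd, WIN_VID0, 0);
+ if (ret < 0)
+ goto release;
+
+ return 0;
+
+release:
+ osd->ops.release_layer(osd, WIN_VID0);
+ return ret;
+}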
+
+static int osd_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *pdev_id;
+ struct osd_state *osd;
+ struct resource *res;
+
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id)
+ return -EINVAL;
+
+ osd = devm_kzalloc(&pdev->dev, sizeof(struct osd_state), GFP_KERNEL);
+ if (osd == NULL)
+ return -ENOMEM;
+
+ osd->dev = &pdev->dev;
+ osd->vpbe_type = pdev_id->driver_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ osd->osd_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(osd->osd_base))
+ return PTR_ERR(osd->osd_base);
+
+ osd->osd_base_phys = res->start;
+ osd->osd_size = resource_size(res);
+ spin_lock_init(&osd->lock);
+ osd->ops = osd_ops;
+ platform_set_drvdata(pdev, osd);
+ dev_notice(osd->dev, "OSD sub device probe success\n");
+
+ return 0;
+}
+
+static int osd_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver osd_driver = {
+ .probe = osd_probe,
+ .remove = osd_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .id_table = vpbe_osd_devtype
+};
+
+module_platform_driver(osd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_osd_regs.h b/drivers/media/platform/davinci/vpbe_osd_regs.h
new file mode 100644
index 00000000000..584520f3af6
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_osd_regs.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_OSD_REGS_H
+#define _VPBE_OSD_REGS_H
+
+/* VPBE Global Registers */
+#define VPBE_PID 0x0
+#define VPBE_PCR 0x4
+
+/* VPSS Clock Registers */
+#define VPSSCLK_PID 0x00
+#define VPSSCLK_CLKCTRL 0x04
+
+/* VPSS Buffer Logic Registers */
+#define VPSSBL_PID 0x00
+#define VPSSBL_PCR 0x04
+#define VPSSBL_BCR 0x08
+#define VPSSBL_INTSTAT 0x0C
+#define VPSSBL_INTSEL 0x10
+#define VPSSBL_EVTSEL 0x14
+#define VPSSBL_MEMCTRL 0x18
+#define VPSSBL_CCDCMUX 0x1C
+
+/* DM365 ISP5 system configuration */
+#define ISP5_PID 0x0
+#define ISP5_PCCR 0x4
+#define ISP5_BCR 0x8
+#define ISP5_INTSTAT 0xC
+#define ISP5_INTSEL1 0x10
+#define ISP5_INTSEL2 0x14
+#define ISP5_INTSEL3 0x18
+#define ISP5_EVTSEL 0x1c
+#define ISP5_CCDCMUX 0x20
+
+/* VPBE On-Screen Display Subsystem Registers (OSD) */
+#define OSD_MODE 0x00
+#define OSD_VIDWINMD 0x04
+#define OSD_OSDWIN0MD 0x08
+#define OSD_OSDWIN1MD 0x0C
+#define OSD_OSDATRMD 0x0C
+#define OSD_RECTCUR 0x10
+#define OSD_VIDWIN0OFST 0x18
+#define OSD_VIDWIN1OFST 0x1C
+#define OSD_OSDWIN0OFST 0x20
+#define OSD_OSDWIN1OFST 0x24
+#define OSD_VIDWINADH 0x28
+#define OSD_VIDWIN0ADL 0x2C
+#define OSD_VIDWIN0ADR 0x2C
+#define OSD_VIDWIN1ADL 0x30
+#define OSD_VIDWIN1ADR 0x30
+#define OSD_OSDWINADH 0x34
+#define OSD_OSDWIN0ADL 0x38
+#define OSD_OSDWIN0ADR 0x38
+#define OSD_OSDWIN1ADL 0x3C
+#define OSD_OSDWIN1ADR 0x3C
+#define OSD_BASEPX 0x40
+#define OSD_BASEPY 0x44
+#define OSD_VIDWIN0XP 0x48
+#define OSD_VIDWIN0YP 0x4C
+#define OSD_VIDWIN0XL 0x50
+#define OSD_VIDWIN0YL 0x54
+#define OSD_VIDWIN1XP 0x58
+#define OSD_VIDWIN1YP 0x5C
+#define OSD_VIDWIN1XL 0x60
+#define OSD_VIDWIN1YL 0x64
+#define OSD_OSDWIN0XP 0x68
+#define OSD_OSDWIN0YP 0x6C
+#define OSD_OSDWIN0XL 0x70
+#define OSD_OSDWIN0YL 0x74
+#define OSD_OSDWIN1XP 0x78
+#define OSD_OSDWIN1YP 0x7C
+#define OSD_OSDWIN1XL 0x80
+#define OSD_OSDWIN1YL 0x84
+#define OSD_CURXP 0x88
+#define OSD_CURYP 0x8C
+#define OSD_CURXL 0x90
+#define OSD_CURYL 0x94
+#define OSD_W0BMP01 0xA0
+#define OSD_W0BMP23 0xA4
+#define OSD_W0BMP45 0xA8
+#define OSD_W0BMP67 0xAC
+#define OSD_W0BMP89 0xB0
+#define OSD_W0BMPAB 0xB4
+#define OSD_W0BMPCD 0xB8
+#define OSD_W0BMPEF 0xBC
+#define OSD_W1BMP01 0xC0
+#define OSD_W1BMP23 0xC4
+#define OSD_W1BMP45 0xC8
+#define OSD_W1BMP67 0xCC
+#define OSD_W1BMP89 0xD0
+#define OSD_W1BMPAB 0xD4
+#define OSD_W1BMPCD 0xD8
+#define OSD_W1BMPEF 0xDC
+#define OSD_VBNDRY 0xE0
+#define OSD_EXTMODE 0xE4
+#define OSD_MISCCTL 0xE8
+#define OSD_CLUTRAMYCB 0xEC
+#define OSD_CLUTRAMCR 0xF0
+#define OSD_TRANSPVAL 0xF4
+#define OSD_TRANSPVALL 0xF4
+#define OSD_TRANSPVALU 0xF8
+#define OSD_TRANSPBMPIDX 0xFC
+#define OSD_PPVWIN0ADR 0xFC
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VPSSBL_INTSTAT_HSSIINT (1 << 14)
+#define VPSSBL_INTSTAT_CFALDINT (1 << 13)
+#define VPSSBL_INTSTAT_IPIPE_INT5 (1 << 12)
+#define VPSSBL_INTSTAT_IPIPE_INT4 (1 << 11)
+#define VPSSBL_INTSTAT_IPIPE_INT3 (1 << 10)
+#define VPSSBL_INTSTAT_IPIPE_INT2 (1 << 9)
+#define VPSSBL_INTSTAT_IPIPE_INT1 (1 << 8)
+#define VPSSBL_INTSTAT_IPIPE_INT0 (1 << 7)
+#define VPSSBL_INTSTAT_IPIPEIFINT (1 << 6)
+#define VPSSBL_INTSTAT_OSDINT (1 << 5)
+#define VPSSBL_INTSTAT_VENCINT (1 << 4)
+#define VPSSBL_INTSTAT_H3AINT (1 << 3)
+#define VPSSBL_INTSTAT_CCDC_VDINT2 (1 << 2)
+#define VPSSBL_INTSTAT_CCDC_VDINT1 (1 << 1)
+#define VPSSBL_INTSTAT_CCDC_VDINT0 (1 << 0)
+
+/* DM365 ISP5 bit definitions */
+#define ISP5_INTSTAT_VENCINT (1 << 21)
+#define ISP5_INTSTAT_OSDINT (1 << 20)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define OSD_MODE_CS (1 << 15)
+#define OSD_MODE_OVRSZ (1 << 14)
+#define OSD_MODE_OHRSZ (1 << 13)
+#define OSD_MODE_EF (1 << 12)
+#define OSD_MODE_VVRSZ (1 << 11)
+#define OSD_MODE_VHRSZ (1 << 10)
+#define OSD_MODE_FSINV (1 << 9)
+#define OSD_MODE_BCLUT (1 << 8)
+#define OSD_MODE_CABG_SHIFT 0
+#define OSD_MODE_CABG (0xff << 0)
+
+#define OSD_VIDWINMD_VFINV (1 << 15)
+#define OSD_VIDWINMD_V1EFC (1 << 14)
+#define OSD_VIDWINMD_VHZ1_SHIFT 12
+#define OSD_VIDWINMD_VHZ1 (3 << 12)
+#define OSD_VIDWINMD_VVZ1_SHIFT 10
+#define OSD_VIDWINMD_VVZ1 (3 << 10)
+#define OSD_VIDWINMD_VFF1 (1 << 9)
+#define OSD_VIDWINMD_ACT1 (1 << 8)
+#define OSD_VIDWINMD_V0EFC (1 << 6)
+#define OSD_VIDWINMD_VHZ0_SHIFT 4
+#define OSD_VIDWINMD_VHZ0 (3 << 4)
+#define OSD_VIDWINMD_VVZ0_SHIFT 2
+#define OSD_VIDWINMD_VVZ0 (3 << 2)
+#define OSD_VIDWINMD_VFF0 (1 << 1)
+#define OSD_VIDWINMD_ACT0 (1 << 0)
+
+#define OSD_OSDWIN0MD_ATN0E (1 << 14)
+#define OSD_OSDWIN0MD_RGB0E (1 << 13)
+#define OSD_OSDWIN0MD_BMP0MD_SHIFT 13
+#define OSD_OSDWIN0MD_BMP0MD (3 << 13)
+#define OSD_OSDWIN0MD_CLUTS0 (1 << 12)
+#define OSD_OSDWIN0MD_OHZ0_SHIFT 10
+#define OSD_OSDWIN0MD_OHZ0 (3 << 10)
+#define OSD_OSDWIN0MD_OVZ0_SHIFT 8
+#define OSD_OSDWIN0MD_OVZ0 (3 << 8)
+#define OSD_OSDWIN0MD_BMW0_SHIFT 6
+#define OSD_OSDWIN0MD_BMW0 (3 << 6)
+#define OSD_OSDWIN0MD_BLND0_SHIFT 3
+#define OSD_OSDWIN0MD_BLND0 (7 << 3)
+#define OSD_OSDWIN0MD_TE0 (1 << 2)
+#define OSD_OSDWIN0MD_OFF0 (1 << 1)
+#define OSD_OSDWIN0MD_OACT0 (1 << 0)
+
+#define OSD_OSDWIN1MD_OASW (1 << 15)
+#define OSD_OSDWIN1MD_ATN1E (1 << 14)
+#define OSD_OSDWIN1MD_RGB1E (1 << 13)
+#define OSD_OSDWIN1MD_BMP1MD_SHIFT 13
+#define OSD_OSDWIN1MD_BMP1MD (3 << 13)
+#define OSD_OSDWIN1MD_CLUTS1 (1 << 12)
+#define OSD_OSDWIN1MD_OHZ1_SHIFT 10
+#define OSD_OSDWIN1MD_OHZ1 (3 << 10)
+#define OSD_OSDWIN1MD_OVZ1_SHIFT 8
+#define OSD_OSDWIN1MD_OVZ1 (3 << 8)
+#define OSD_OSDWIN1MD_BMW1_SHIFT 6
+#define OSD_OSDWIN1MD_BMW1 (3 << 6)
+#define OSD_OSDWIN1MD_BLND1_SHIFT 3
+#define OSD_OSDWIN1MD_BLND1 (7 << 3)
+#define OSD_OSDWIN1MD_TE1 (1 << 2)
+#define OSD_OSDWIN1MD_OFF1 (1 << 1)
+#define OSD_OSDWIN1MD_OACT1 (1 << 0)
+
+#define OSD_OSDATRMD_OASW (1 << 15)
+#define OSD_OSDATRMD_OHZA_SHIFT 10
+#define OSD_OSDATRMD_OHZA (3 << 10)
+#define OSD_OSDATRMD_OVZA_SHIFT 8
+#define OSD_OSDATRMD_OVZA (3 << 8)
+#define OSD_OSDATRMD_BLNKINT_SHIFT 6
+#define OSD_OSDATRMD_BLNKINT (3 << 6)
+#define OSD_OSDATRMD_OFFA (1 << 1)
+#define OSD_OSDATRMD_BLNK (1 << 0)
+
+#define OSD_RECTCUR_RCAD_SHIFT 8
+#define OSD_RECTCUR_RCAD (0xff << 8)
+#define OSD_RECTCUR_CLUTSR (1 << 7)
+#define OSD_RECTCUR_RCHW_SHIFT 4
+#define OSD_RECTCUR_RCHW (7 << 4)
+#define OSD_RECTCUR_RCVW_SHIFT 1
+#define OSD_RECTCUR_RCVW (7 << 1)
+#define OSD_RECTCUR_RCACT (1 << 0)
+
+#define OSD_VIDWIN0OFST_V0LO (0x1ff << 0)
+
+#define OSD_VIDWIN1OFST_V1LO (0x1ff << 0)
+
+#define OSD_OSDWIN0OFST_O0LO (0x1ff << 0)
+
+#define OSD_OSDWIN1OFST_O1LO (0x1ff << 0)
+
+#define OSD_WINOFST_AH_SHIFT 9
+
+#define OSD_VIDWIN0OFST_V0AH (0xf << 9)
+#define OSD_VIDWIN1OFST_V1AH (0xf << 9)
+#define OSD_OSDWIN0OFST_O0AH (0xf << 9)
+#define OSD_OSDWIN1OFST_O1AH (0xf << 9)
+
+#define OSD_VIDWINADH_V1AH_SHIFT 8
+#define OSD_VIDWINADH_V1AH (0x7f << 8)
+#define OSD_VIDWINADH_V0AH_SHIFT 0
+#define OSD_VIDWINADH_V0AH (0x7f << 0)
+
+#define OSD_VIDWIN0ADL_V0AL (0xffff << 0)
+
+#define OSD_VIDWIN1ADL_V1AL (0xffff << 0)
+
+#define OSD_OSDWINADH_O1AH_SHIFT 8
+#define OSD_OSDWINADH_O1AH (0x7f << 8)
+#define OSD_OSDWINADH_O0AH_SHIFT 0
+#define OSD_OSDWINADH_O0AH (0x7f << 0)
+
+#define OSD_OSDWIN0ADL_O0AL (0xffff << 0)
+
+#define OSD_OSDWIN1ADL_O1AL (0xffff << 0)
+
+#define OSD_BASEPX_BPX (0x3ff << 0)
+
+#define OSD_BASEPY_BPY (0x1ff << 0)
+
+#define OSD_VIDWIN0XP_V0X (0x7ff << 0)
+
+#define OSD_VIDWIN0YP_V0Y (0x7ff << 0)
+
+#define OSD_VIDWIN0XL_V0W (0x7ff << 0)
+
+#define OSD_VIDWIN0YL_V0H (0x7ff << 0)
+
+#define OSD_VIDWIN1XP_V1X (0x7ff << 0)
+
+#define OSD_VIDWIN1YP_V1Y (0x7ff << 0)
+
+#define OSD_VIDWIN1XL_V1W (0x7ff << 0)
+
+#define OSD_VIDWIN1YL_V1H (0x7ff << 0)
+
+#define OSD_OSDWIN0XP_W0X (0x7ff << 0)
+
+#define OSD_OSDWIN0YP_W0Y (0x7ff << 0)
+
+#define OSD_OSDWIN0XL_W0W (0x7ff << 0)
+
+#define OSD_OSDWIN0YL_W0H (0x7ff << 0)
+
+#define OSD_OSDWIN1XP_W1X (0x7ff << 0)
+
+#define OSD_OSDWIN1YP_W1Y (0x7ff << 0)
+
+#define OSD_OSDWIN1XL_W1W (0x7ff << 0)
+
+#define OSD_OSDWIN1YL_W1H (0x7ff << 0)
+
+#define OSD_CURXP_RCSX (0x7ff << 0)
+
+#define OSD_CURYP_RCSY (0x7ff << 0)
+
+#define OSD_CURXL_RCSW (0x7ff << 0)
+
+#define OSD_CURYL_RCSH (0x7ff << 0)
+
+#define OSD_EXTMODE_EXPMDSEL (1 << 15)
+#define OSD_EXTMODE_SCRNHEXP_SHIFT 13
+#define OSD_EXTMODE_SCRNHEXP (3 << 13)
+#define OSD_EXTMODE_SCRNVEXP (1 << 12)
+#define OSD_EXTMODE_OSD1BLDCHR (1 << 11)
+#define OSD_EXTMODE_OSD0BLDCHR (1 << 10)
+#define OSD_EXTMODE_ATNOSD1EN (1 << 9)
+#define OSD_EXTMODE_ATNOSD0EN (1 << 8)
+#define OSD_EXTMODE_OSDHRSZ15 (1 << 7)
+#define OSD_EXTMODE_VIDHRSZ15 (1 << 6)
+#define OSD_EXTMODE_ZMFILV1HEN (1 << 5)
+#define OSD_EXTMODE_ZMFILV1VEN (1 << 4)
+#define OSD_EXTMODE_ZMFILV0HEN (1 << 3)
+#define OSD_EXTMODE_ZMFILV0VEN (1 << 2)
+#define OSD_EXTMODE_EXPFILHEN (1 << 1)
+#define OSD_EXTMODE_EXPFILVEN (1 << 0)
+
+#define OSD_MISCCTL_BLDSEL (1 << 15)
+#define OSD_MISCCTL_S420D (1 << 14)
+#define OSD_MISCCTL_BMAPT (1 << 13)
+#define OSD_MISCCTL_DM365M (1 << 12)
+#define OSD_MISCCTL_RGBEN (1 << 7)
+#define OSD_MISCCTL_RGBWIN (1 << 6)
+#define OSD_MISCCTL_DMANG (1 << 6)
+#define OSD_MISCCTL_TMON (1 << 5)
+#define OSD_MISCCTL_RSEL (1 << 4)
+#define OSD_MISCCTL_CPBSY (1 << 3)
+#define OSD_MISCCTL_PPSW (1 << 2)
+#define OSD_MISCCTL_PPRV (1 << 1)
+
+#define OSD_CLUTRAMYCB_Y_SHIFT 8
+#define OSD_CLUTRAMYCB_Y (0xff << 8)
+#define OSD_CLUTRAMYCB_CB_SHIFT 0
+#define OSD_CLUTRAMYCB_CB (0xff << 0)
+
+#define OSD_CLUTRAMCR_CR_SHIFT 8
+#define OSD_CLUTRAMCR_CR (0xff << 8)
+#define OSD_CLUTRAMCR_CADDR_SHIFT 0
+#define OSD_CLUTRAMCR_CADDR (0xff << 0)
+
+#define OSD_TRANSPVAL_RGBTRANS (0xffff << 0)
+
+#define OSD_TRANSPVALL_RGBL (0xffff << 0)
+
+#define OSD_TRANSPVALU_Y_SHIFT 8
+#define OSD_TRANSPVALU_Y (0xff << 8)
+#define OSD_TRANSPVALU_RGBU_SHIFT 0
+#define OSD_TRANSPVALU_RGBU (0xff << 0)
+
+#define OSD_TRANSPBMPIDX_BMP1_SHIFT 8
+#define OSD_TRANSPBMPIDX_BMP1 (0xff << 8)
+#define OSD_TRANSPBMPIDX_BMP0_SHIFT 0
+#define OSD_TRANSPBMPIDX_BMP0 0xff
+
+#endif /* _VPBE_OSD_REGS_H */
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
new file mode 100644
index 00000000000..14a023a75d2
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <mach/hardware.h>
+#include <mach/mux.h>
+#include <linux/platform_data/i2c-davinci.h>
+
+#include <linux/io.h>
+
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+
+#include "vpbe_venc_regs.h"
+
+#define MODULE_NAME "davinci-vpbe-venc"
+
+static struct platform_device_id vpbe_venc_devtype[] = {
+ {
+ .name = DM644X_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+ {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_venc_devtype);
+
+static int debug = 2;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-2");
+
+struct venc_state {
+ struct v4l2_subdev sd;
+ struct venc_callback *callback;
+ struct venc_platform_data *pdata;
+ struct device *pdev;
+ u32 output;
+ v4l2_std_id std;
+ spinlock_t lock;
+ void __iomem *venc_base;
+ void __iomem *vdaccfg_reg;
+ enum vpbe_version venc_type;
+};
+
+static inline struct venc_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct venc_state, sd);
+}
+
+static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset)
+{
+ struct venc_state *venc = to_state(sd);
+
+ return readl(venc->venc_base + offset);
+}
+
+static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, (venc->venc_base + offset));
+
+ return val;
+}
+
+static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset,
+ u32 val, u32 mask)
+{
+ u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask);
+
+ venc_write(sd, offset, new_val);
+
+ return new_val;
+}
+
+static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, venc->vdaccfg_reg);
+
+ val = readl(venc->vdaccfg_reg);
+
+ return val;
+}
+
+#define VDAC_COMPONENT 0x543
+#define VDAC_S_VIDEO 0x210
+/*
+ * This function sets the VPBE DACs for the various outputs.
+ */
+static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
+{
+ switch (out_index) {
+ case 0:
+ v4l2_dbg(debug, 1, sd, "Setting output to Composite\n");
+ venc_write(sd, VENC_DACSEL, 0);
+ break;
+ case 1:
+ v4l2_dbg(debug, 1, sd, "Setting output to Component\n");
+ venc_write(sd, VENC_DACSEL, VDAC_COMPONENT);
+ break;
+ case 2:
+ v4l2_dbg(debug, 1, sd, "Setting output to S-video\n");
+ venc_write(sd, VENC_DACSEL, VDAC_S_VIDEO);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
+{
+ struct venc_state *venc = to_state(sd);
+
+ v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
+
+ if (benable) {
+ venc_write(sd, VENC_VMOD, 0);
+ venc_write(sd, VENC_CVBS, 0);
+ venc_write(sd, VENC_LCDOUT, 0);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_DACSEL, 0);
+
+ } else {
+ venc_write(sd, VENC_VMOD, 0);
+ /* disable VCLK output pin enable */
+ venc_write(sd, VENC_VIDCTL, 0x141);
+
+ /* Disable output sync pins */
+ venc_write(sd, VENC_SYNCCTL, 0);
+
+ /* Disable DCLOCK */
+ venc_write(sd, VENC_DCLKCTL, 0);
+ venc_write(sd, VENC_DRGBX1, 0x0000057C);
+
+ /* Disable LCD output control (accepting default polarity) */
+ venc_write(sd, VENC_LCDOUT, 0);
+ if (venc->venc_type != VPBE_VERSION_3)
+ venc_write(sd, VENC_CMPNT, 0x100);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+
+ venc_write(sd, VENC_HSDLY, 0);
+ venc_write(sd, VENC_VSDLY, 0);
+
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_VSTARTA, 0);
+
+ /* Set OSD clock and OSD Sync Advance registers */
+ venc_write(sd, VENC_OSDCLK0, 1);
+ venc_write(sd, VENC_OSDCLK1, 2);
+ }
+}
+
+static void
+venc_enable_vpss_clock(int venc_type,
+ enum vpbe_enc_timings_type type,
+ unsigned int pclock)
+{
+ if (venc_type == VPBE_VERSION_1)
+ return;
+
+ if (venc_type == VPBE_VERSION_2 && (type == VPBE_ENC_STD || (type ==
+ VPBE_ENC_DV_TIMINGS && pclock <= 27000000))) {
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
+ return;
+ }
+
+ if (venc_type == VPBE_VERSION_3 && type == VPBE_ENC_STD)
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 0);
+}
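+
+/*
+ * Illustrative summary of the clock selection above (VPBE_VERSION_1/2/3
+ * correspond to DM644x/DM365/DM355 per the device table at the top of
+ * this file):
+ *
+ *	VPBE_VERSION_1: no VPSS clock setup is needed
+ *	VPBE_VERSION_2: SD standards and DV timings up to 27 MHz enable
+ *	                the VENC clock select and the VPBE clock
+ *	VPBE_VERSION_3: SD standards deselect the VENC clock
+ */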
+
+#define VDAC_CONFIG_SD_V3 0x0E21A6B6
+#define VDAC_CONFIG_SD_V2 0x081141CF
+/*
+ * setting NTSC mode
+ */
+static int venc_set_ntsc(struct v4l2_subdev *sd)
+{
+ u32 val;
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_525_60);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_3) {
+ venc_write(sd, VENC_CLKCTL, 0x01);
+ venc_write(sd, VENC_VIDCTL, 0);
+ val = vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+ } else if (venc->venc_type == VPBE_VERSION_2) {
+ venc_write(sd, VENC_CLKCTL, 0x01);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+ } else {
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
+ }
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * setting PAL mode
+ */
+static int venc_set_pal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+
+ v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_625_50);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_3) {
+ venc_write(sd, VENC_CLKCTL, 0x1);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+ } else if (venc->venc_type == VPBE_VERSION_2) {
+ venc_write(sd, VENC_CLKCTL, 0x1);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+ } else {
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ }
+
+ venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
+ VENC_SYNCCTL_OVD);
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD,
+ (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+#define VDAC_CONFIG_HD_V2 0x081141EF
+/*
+ * venc_set_480p59_94
+ *
+ * This function configures the video encoder to EDTV(525p) component setting.
+ */
+static int venc_set_480p59_94(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
+ return -EINVAL;
+
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_2)
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ if (venc->venc_type == VPBE_VERSION_1) {
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ }
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_576p50
+ *
+ * This function configures the video encoder to the EDTV (625p/576p50)
+ * component setting.
+ */
+static int venc_set_576p50(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
+
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
+ return -EINVAL;
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_2)
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ if (venc->venc_type == VPBE_VERSION_1) {
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ }
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_720p60_internal - Setup 720p60 in venc for dm365 only
+ */
+static int venc_set_720p60_internal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000);
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ venc_write(sd, VENC_VMOD, 0);
+ /* DM365 component HD mode */
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_720P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+ venc_write(sd, VENC_XHINTVL, 0);
+ return 0;
+}
+
+/*
+ * venc_set_1080i30_internal - Setup 1080i30 in venc for dm365 only
+ */
+static int venc_set_1080i30_internal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000);
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ venc_write(sd, VENC_VMOD, 0);
+ /* DM365 component HD mode */
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_1080I << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+ venc_write(sd, VENC_XHINTVL, 0);
+ return 0;
+}
+
+static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+ v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
+
+ if (norm & V4L2_STD_525_60)
+ return venc_set_ntsc(sd);
+ else if (norm & V4L2_STD_625_50)
+ return venc_set_pal(sd);
+
+ return -EINVAL;
+}
+
+static int venc_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct venc_state *venc = to_state(sd);
+ u32 height = dv_timings->bt.height;
+ int ret;
+
+ v4l2_dbg(debug, 1, sd, "venc_s_dv_timings\n");
+
+ if (height == 576)
+ return venc_set_576p50(sd);
+ else if (height == 480)
+ return venc_set_480p59_94(sd);
+ else if ((height == 720) &&
+ (venc->venc_type == VPBE_VERSION_2)) {
+		/* set up internal 720p mode (DM365 only) */
+ ret = venc_set_720p60_internal(sd);
+ /* for DM365 VPBE, there is DAC inside */
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ return ret;
+ } else if ((height == 1080) &&
+ (venc->venc_type == VPBE_VERSION_2)) {
+		/* set up internal 1080i mode (DM365 only) */
+ ret = venc_set_1080i30_internal(sd);
+ /* for DM365 VPBE, there is DAC inside */
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ return ret;
+ }
+ return -EINVAL;
+}
+
+static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
+ u32 config)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+ v4l2_dbg(debug, 1, sd, "venc_s_routing\n");
+
+ ret = venc_set_dac(sd, output);
+ if (!ret)
+ venc->output = output;
+
+ return ret;
+}
+
+static long venc_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd,
+ void *arg)
+{
+ u32 val;
+
+ switch (cmd) {
+ case VENC_GET_FLD:
+ val = venc_read(sd, VENC_VSTAT);
+ *((int *)arg) = ((val & VENC_VSTAT_FIDST) ==
+ VENC_VSTAT_FIDST);
+ break;
+ default:
+ v4l2_err(sd, "Wrong IOCTL cmd\n");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops venc_core_ops = {
+ .ioctl = venc_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops venc_video_ops = {
+ .s_routing = venc_s_routing,
+ .s_std_output = venc_s_std_output,
+ .s_dv_timings = venc_s_dv_timings,
+};
+
+static const struct v4l2_subdev_ops venc_ops = {
+ .core = &venc_core_ops,
+ .video = &venc_video_ops,
+};
+
+static int venc_initialize(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+	/* Set the default output to composite and std to NTSC */
+ venc->output = 0;
+ venc->std = V4L2_STD_525_60;
+
+ ret = venc_s_routing(sd, 0, venc->output, 0);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting output during init\n");
+ return -EINVAL;
+ }
+
+ ret = venc_s_std_output(sd, venc->std);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting std during init\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int venc_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct venc_state **venc = data;
+
+ if (strstr(pdev->name, "vpbe-venc") != NULL)
+ *venc = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
+ const char *venc_name)
+{
+	struct venc_state *venc = NULL;
+ int err;
+
+ err = bus_for_each_dev(&platform_bus_type, NULL, &venc,
+ venc_device_get);
+ if (venc == NULL)
+ return NULL;
+
+ v4l2_subdev_init(&venc->sd, &venc_ops);
+
+ strcpy(venc->sd.name, venc_name);
+ if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) {
+ v4l2_err(v4l2_dev,
+ "vpbe unable to register venc sub device\n");
+ return NULL;
+ }
+ if (venc_initialize(&venc->sd)) {
+ v4l2_err(v4l2_dev,
+ "vpbe venc initialization failed\n");
+ return NULL;
+ }
+
+ return &venc->sd;
+}
+EXPORT_SYMBOL(venc_sub_dev_init);
+
+static int venc_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *pdev_id;
+ struct venc_state *venc;
+ struct resource *res;
+
+ if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "No platform data for VENC sub device\n");
+ return -EINVAL;
+ }
+
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id)
+ return -EINVAL;
+
+ venc = devm_kzalloc(&pdev->dev, sizeof(struct venc_state), GFP_KERNEL);
+ if (venc == NULL)
+ return -ENOMEM;
+
+ venc->venc_type = pdev_id->driver_data;
+ venc->pdev = &pdev->dev;
+ venc->pdata = pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ venc->venc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(venc->venc_base))
+ return PTR_ERR(venc->venc_base);
+
+ if (venc->venc_type != VPBE_VERSION_1) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(venc->vdaccfg_reg))
+ return PTR_ERR(venc->vdaccfg_reg);
+ }
+ spin_lock_init(&venc->lock);
+ platform_set_drvdata(pdev, venc);
+ dev_notice(venc->pdev, "VENC sub device probe success\n");
+
+ return 0;
+}
+
+static int venc_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver venc_driver = {
+ .probe = venc_probe,
+ .remove = venc_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .id_table = vpbe_venc_devtype
+};
+
+module_platform_driver(venc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPBE VENC Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_venc_regs.h b/drivers/media/platform/davinci/vpbe_venc_regs.h
new file mode 100644
index 00000000000..947cb151077
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_venc_regs.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_VENC_REGS_H
+#define _VPBE_VENC_REGS_H
+
+/* VPBE Video Encoder / Digital LCD Subsystem Registers (VENC) */
+#define VENC_VMOD 0x00
+#define VENC_VIDCTL 0x04
+#define VENC_VDPRO 0x08
+#define VENC_SYNCCTL 0x0C
+#define VENC_HSPLS 0x10
+#define VENC_VSPLS 0x14
+#define VENC_HINT 0x18
+#define VENC_HSTART 0x1C
+#define VENC_HVALID 0x20
+#define VENC_VINT 0x24
+#define VENC_VSTART 0x28
+#define VENC_VVALID 0x2C
+#define VENC_HSDLY 0x30
+#define VENC_VSDLY 0x34
+#define VENC_YCCCTL 0x38
+#define VENC_RGBCTL 0x3C
+#define VENC_RGBCLP 0x40
+#define VENC_LINECTL 0x44
+#define VENC_CULLLINE 0x48
+#define VENC_LCDOUT 0x4C
+#define VENC_BRTS 0x50
+#define VENC_BRTW 0x54
+#define VENC_ACCTL 0x58
+#define VENC_PWMP 0x5C
+#define VENC_PWMW 0x60
+#define VENC_DCLKCTL 0x64
+#define VENC_DCLKPTN0 0x68
+#define VENC_DCLKPTN1 0x6C
+#define VENC_DCLKPTN2 0x70
+#define VENC_DCLKPTN3 0x74
+#define VENC_DCLKPTN0A 0x78
+#define VENC_DCLKPTN1A 0x7C
+#define VENC_DCLKPTN2A 0x80
+#define VENC_DCLKPTN3A 0x84
+#define VENC_DCLKHS 0x88
+#define VENC_DCLKHSA 0x8C
+#define VENC_DCLKHR 0x90
+#define VENC_DCLKVS 0x94
+#define VENC_DCLKVR 0x98
+#define VENC_CAPCTL 0x9C
+#define VENC_CAPDO 0xA0
+#define VENC_CAPDE 0xA4
+#define VENC_ATR0 0xA8
+#define VENC_ATR1 0xAC
+#define VENC_ATR2 0xB0
+#define VENC_VSTAT 0xB8
+#define VENC_RAMADR 0xBC
+#define VENC_RAMPORT 0xC0
+#define VENC_DACTST 0xC4
+#define VENC_YCOLVL 0xC8
+#define VENC_SCPROG 0xCC
+#define VENC_CVBS 0xDC
+#define VENC_CMPNT 0xE0
+#define VENC_ETMG0 0xE4
+#define VENC_ETMG1 0xE8
+#define VENC_ETMG2 0xEC
+#define VENC_ETMG3 0xF0
+#define VENC_DACSEL 0xF4
+#define VENC_ARGBX0 0x100
+#define VENC_ARGBX1 0x104
+#define VENC_ARGBX2 0x108
+#define VENC_ARGBX3 0x10C
+#define VENC_ARGBX4 0x110
+#define VENC_DRGBX0 0x114
+#define VENC_DRGBX1 0x118
+#define VENC_DRGBX2 0x11C
+#define VENC_DRGBX3 0x120
+#define VENC_DRGBX4 0x124
+#define VENC_VSTARTA 0x128
+#define VENC_OSDCLK0 0x12C
+#define VENC_OSDCLK1 0x130
+#define VENC_HVLDCL0 0x134
+#define VENC_HVLDCL1 0x138
+#define VENC_OSDHADV 0x13C
+#define VENC_CLKCTL 0x140
+#define VENC_GAMCTL 0x144
+#define VENC_XHINTVL 0x174
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VENC_VMOD_VDMD_SHIFT 12
+#define VENC_VMOD_VDMD_YCBCR16 0
+#define VENC_VMOD_VDMD_YCBCR8 1
+#define VENC_VMOD_VDMD_RGB666 2
+#define VENC_VMOD_VDMD_RGB8 3
+#define VENC_VMOD_VDMD_EPSON 4
+#define VENC_VMOD_VDMD_CASIO 5
+#define VENC_VMOD_VDMD_UDISPQVGA 6
+#define VENC_VMOD_VDMD_STNLCD 7
+#define VENC_VMOD_VIE_SHIFT 1
+#define VENC_VMOD_VDMD (7 << 12)
+#define VENC_VMOD_ITLCL (1 << 11)
+#define VENC_VMOD_ITLC (1 << 10)
+#define VENC_VMOD_NSIT (1 << 9)
+#define VENC_VMOD_HDMD (1 << 8)
+#define VENC_VMOD_TVTYP_SHIFT 6
+#define VENC_VMOD_TVTYP (3 << 6)
+#define VENC_VMOD_SLAVE (1 << 5)
+#define VENC_VMOD_VMD (1 << 4)
+#define VENC_VMOD_BLNK (1 << 3)
+#define VENC_VMOD_VIE (1 << 1)
+#define VENC_VMOD_VENC (1 << 0)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define VENC_VIDCTL_VCLKP (1 << 14)
+#define VENC_VIDCTL_VCLKE_SHIFT 13
+#define VENC_VIDCTL_VCLKE (1 << 13)
+#define VENC_VIDCTL_VCLKZ_SHIFT 12
+#define VENC_VIDCTL_VCLKZ (1 << 12)
+#define VENC_VIDCTL_SYDIR_SHIFT 8
+#define VENC_VIDCTL_SYDIR (1 << 8)
+#define VENC_VIDCTL_DOMD_SHIFT 4
+#define VENC_VIDCTL_DOMD (3 << 4)
+#define VENC_VIDCTL_YCDIR_SHIFT 0
+#define VENC_VIDCTL_YCDIR (1 << 0)
+
+#define VENC_VDPRO_ATYCC_SHIFT 5
+#define VENC_VDPRO_ATYCC (1 << 5)
+#define VENC_VDPRO_ATCOM_SHIFT 4
+#define VENC_VDPRO_ATCOM (1 << 4)
+#define VENC_VDPRO_DAFRQ (1 << 3)
+#define VENC_VDPRO_DAUPS (1 << 2)
+#define VENC_VDPRO_CUPS (1 << 1)
+#define VENC_VDPRO_YUPS (1 << 0)
+
+#define VENC_SYNCCTL_VPL_SHIFT 3
+#define VENC_SYNCCTL_VPL (1 << 3)
+#define VENC_SYNCCTL_HPL_SHIFT 2
+#define VENC_SYNCCTL_HPL (1 << 2)
+#define VENC_SYNCCTL_SYEV_SHIFT 1
+#define VENC_SYNCCTL_SYEV (1 << 1)
+#define VENC_SYNCCTL_SYEH_SHIFT 0
+#define VENC_SYNCCTL_SYEH (1 << 0)
+#define VENC_SYNCCTL_OVD_SHIFT 14
+#define VENC_SYNCCTL_OVD (1 << 14)
+
+#define VENC_DCLKCTL_DCKEC_SHIFT 11
+#define VENC_DCLKCTL_DCKEC (1 << 11)
+#define VENC_DCLKCTL_DCKPW_SHIFT 0
+#define VENC_DCLKCTL_DCKPW (0x3f << 0)
+
+#define VENC_VSTAT_FIDST (1 << 4)
+
+#define VENC_CMPNT_MRGB_SHIFT 14
+#define VENC_CMPNT_MRGB (1 << 14)
+
+#endif /* _VPBE_VENC_REGS_H */
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
new file mode 100644
index 00000000000..a51bda2fb63
--- /dev/null
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -0,0 +1,2044 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Driver name : VPFE Capture driver
+ * VPFE Capture driver allows applications to capture and stream video
+ * frames on DaVinci SoCs (DM6446, DM355 etc) from a YUV source such as
+ * TVP5146 or Raw Bayer RGB image data from an image sensor
+ * such as Micron's MT9T001, MT9T031 etc.
+ *
+ * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
+ * consists of a Video Processing Front End (VPFE) for capturing
+ * video/raw image data and Video Processing Back End (VPBE) for displaying
+ * YUV data through an in-built analog encoder or Digital LCD port. This
+ * driver is for capture through VPFE. A typical EVM using these SoCs has
+ * the following high-level configuration.
+ *
+ *
+ * decoder(TVP5146/ YUV/
+ * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
+ * data input | |
+ * V |
+ * SDRAM |
+ * V
+ * Image Processor
+ * |
+ * V
+ * SDRAM
+ * The data flow happens from a decoder connected to the VPFE over a
+ * YUV embedded (BT.656/BT.1120), separate sync, or raw Bayer RGB interface
+ * and to the input of VPFE through an optional MUX (if more inputs are
+ * to be interfaced on the EVM). The input data is first passed through
+ * the CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
+ * does very little or no processing on YUV data, and pre-processes Raw
+ * Bayer RGB data through modules such as Defect Pixel Correction (DFC),
+ * Color Space Conversion (CSC), data gain/offset etc. After this, data
+ * can be written to SDRAM or can be connected to the image processing
+ * block such as IPIPE (on DM355 only).
+ *
+ * Features supported
+ * - MMAP IO
+ * - Capture using TVP5146 over BT.656
+ * - Support for interfacing decoders using the sub device model
+ * - Work with DM355 or DM6446 CCDC to do Raw Bayer RGB/YUV
+ * data capture to SDRAM.
+ * TODO list
+ * - Support multiple REQBUF after open
+ * - Support for de-allocating buffers through REQBUF
+ * - Support for Raw Bayer RGB capture
+ * - Support for chaining Image Processor
+ * - Support for static allocation of buffers
+ * - Support for USERPTR IO
+ * - Support for STREAMON before QBUF
+ * - Support for control ioctls
+ */
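+
+/*
+ * Illustrative note (not part of the original description): a typical
+ * user-space capture sequence against this driver is
+ *   open() -> VIDIOC_S_INPUT -> VIDIOC_S_STD -> VIDIOC_S_FMT ->
+ *   VIDIOC_REQBUFS (MMAP) -> VIDIOC_QBUF -> VIDIOC_STREAMON ->
+ *   VIDIOC_DQBUF/VIDIOC_QBUF loop -> VIDIOC_STREAMOFF -> close()
+ * Buffers must be queued before STREAMON (see vpfe_streamon()) and
+ * REQBUFS is honoured only for the first IO user (see vpfe_reqbufs()).
+ */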
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-common.h>
+#include <linux/io.h>
+#include <media/davinci/vpfe_capture.h>
+#include "ccdc_hw_device.h"
+
+static int debug;
+static u32 numbuffers = 3;
+static u32 bufsize = (720 * 576 * 2);
+
+module_param(numbuffers, uint, S_IRUGO);
+module_param(bufsize, uint, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(numbuffers, "buffer count (default:3)");
+MODULE_PARM_DESC(bufsize, "buffer size in bytes (default:720 x 576 x 2)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/* standard information */
+struct vpfe_standard {
+ v4l2_std_id std_id;
+ unsigned int width;
+ unsigned int height;
+ struct v4l2_fract pixelaspect;
+ /* 0 - progressive, 1 - interlaced */
+ int frame_format;
+};
+
+/* ccdc configuration */
+struct ccdc_config {
+	/* This makes sure vpfe is probed and ready to go */
+ int vpfe_probed;
+ /* name of ccdc device */
+ char name[32];
+};
+
+/* data structures */
+static struct vpfe_config_params config_params = {
+ .min_numbuffers = 3,
+ .numbuffers = 3,
+ .min_bufsize = 720 * 480 * 2,
+ .device_bufsize = 720 * 576 * 2,
+};
+
+/* ccdc device registered */
+static struct ccdc_hw_device *ccdc_dev;
+/* lock for accessing ccdc information */
+static DEFINE_MUTEX(ccdc_lock);
+/* ccdc configuration */
+static struct ccdc_config *ccdc_cfg;
+
+const struct vpfe_standard vpfe_standards[] = {
+ {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
+ {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
+};
+
+/* Used when raw Bayer image from ccdc is directly captured to SDRAM */
+static const struct vpfe_pixel_format vpfe_pix_fmts[] = {
+ {
+ .fmtdesc = {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb 8bit A-Law compr.",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ },
+ .bpp = 1,
+ },
+ {
+ .fmtdesc = {
+ .index = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb - 16bit",
+ .pixelformat = V4L2_PIX_FMT_SBGGR16,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb 8bit DPCM compr.",
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+ },
+ .bpp = 1,
+ },
+ {
+ .fmtdesc = {
+ .index = 3,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "YCbCr 4:2:2 Interleaved UYVY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 4,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "YCbCr 4:2:2 Interleaved YUYV",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 5,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Y/CbCr 4:2:0 - Semi planar",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ },
+ .bpp = 1,
+ },
+};
+
+/*
+ * vpfe_lookup_pix_format()
+ * lookup an entry in the vpfe pix format table based on pix_format
+ */
+static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) {
+ if (pix_format == vpfe_pix_fmts[i].fmtdesc.pixelformat)
+ return &vpfe_pix_fmts[i];
+ }
+ return NULL;
+}
+
+/*
+ * vpfe_register_ccdc_device. CCDC module calls this to
+ * register with vpfe capture
+ */
+int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
+{
+ int ret = 0;
+ printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
+
+ BUG_ON(!dev->hw_ops.open);
+ BUG_ON(!dev->hw_ops.enable);
+ BUG_ON(!dev->hw_ops.set_hw_if_params);
+ BUG_ON(!dev->hw_ops.configure);
+ BUG_ON(!dev->hw_ops.set_buftype);
+ BUG_ON(!dev->hw_ops.get_buftype);
+ BUG_ON(!dev->hw_ops.enum_pix);
+ BUG_ON(!dev->hw_ops.set_frame_format);
+ BUG_ON(!dev->hw_ops.get_frame_format);
+ BUG_ON(!dev->hw_ops.get_pixel_format);
+ BUG_ON(!dev->hw_ops.set_pixel_format);
+ BUG_ON(!dev->hw_ops.set_image_window);
+ BUG_ON(!dev->hw_ops.get_image_window);
+ BUG_ON(!dev->hw_ops.get_line_length);
+ BUG_ON(!dev->hw_ops.getfid);
+
+ mutex_lock(&ccdc_lock);
+ if (NULL == ccdc_cfg) {
+ /*
+		 * TODO. Will this ever happen? If so, we need to fix it.
+		 * Probably we need to add the request to a linked list and
+		 * walk through it during vpfe probe
+ */
+ printk(KERN_ERR "vpfe capture not initialized\n");
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ if (strcmp(dev->name, ccdc_cfg->name)) {
+ /* ignore this ccdc */
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (ccdc_dev) {
+ printk(KERN_ERR "ccdc already registered\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ccdc_dev = dev;
+unlock:
+ mutex_unlock(&ccdc_lock);
+ return ret;
+}
+EXPORT_SYMBOL(vpfe_register_ccdc_device);
+
+/*
+ * vpfe_unregister_ccdc_device. CCDC module calls this to
+ * unregister with vpfe capture
+ */
+void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
+{
+ if (NULL == dev) {
+ printk(KERN_ERR "invalid ccdc device ptr\n");
+ return;
+ }
+
+ printk(KERN_NOTICE "vpfe_unregister_ccdc_device, dev->name = %s\n",
+ dev->name);
+
+ if (strcmp(dev->name, ccdc_cfg->name)) {
+ /* ignore this ccdc */
+ return;
+ }
+
+ mutex_lock(&ccdc_lock);
+ ccdc_dev = NULL;
+ mutex_unlock(&ccdc_lock);
+ return;
+}
+EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
+
+/*
+ * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
+ */
+static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev,
+ struct v4l2_format *f)
+{
+ struct v4l2_rect image_win;
+ enum ccdc_buftype buf_type;
+ enum ccdc_frmfmt frm_fmt;
+
+ memset(f, 0, sizeof(*f));
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ccdc_dev->hw_ops.get_image_window(&image_win);
+ f->fmt.pix.width = image_win.width;
+ f->fmt.pix.height = image_win.height;
+ f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length();
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ buf_type = ccdc_dev->hw_ops.get_buftype();
+ f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format();
+ frm_fmt = ccdc_dev->hw_ops.get_frame_format();
+ if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED)
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+ else {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n");
+ return -EINVAL;
+ }
+ } else {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * vpfe_config_ccdc_image_format()
+ * For a pix format, configure ccdc to setup the capture
+ */
+static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ int ret = 0;
+
+ if (ccdc_dev->hw_ops.set_pixel_format(
+ vpfe_dev->fmt.fmt.pix.pixelformat) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "couldn't set pix format in ccdc\n");
+ return -EINVAL;
+ }
+ /* configure the image window */
+ ccdc_dev->hw_ops.set_image_window(&vpfe_dev->crop);
+
+ switch (vpfe_dev->fmt.fmt.pix.field) {
+ case V4L2_FIELD_INTERLACED:
+ /* do nothing, since it is default */
+ ret = ccdc_dev->hw_ops.set_buftype(
+ CCDC_BUFTYPE_FLD_INTERLEAVED);
+ break;
+ case V4L2_FIELD_NONE:
+ frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ ret = ccdc_dev->hw_ops.set_buftype(
+ CCDC_BUFTYPE_FLD_SEPARATED);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set the frame format */
+ if (!ret)
+ ret = ccdc_dev->hw_ops.set_frame_format(frm_fmt);
+ return ret;
+}
+
+/*
+ * vpfe_config_image_format()
+ * For a given standard, this function sets up the default
+ * pix format & crop values in the vpfe device and ccdc. It first
+ * starts with default values from the standard table.
+ * It then checks if the sub device supports g_mbus_fmt and overrides the
+ * values based on that. Crop values are set to match the scan resolution
+ * starting at 0,0. It calls vpfe_config_ccdc_image_format() to set the
+ * values in the ccdc.
+ */
+static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
+ v4l2_std_id std_id)
+{
+ struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
+ if (vpfe_standards[i].std_id & std_id) {
+ vpfe_dev->std_info.active_pixels =
+ vpfe_standards[i].width;
+ vpfe_dev->std_info.active_lines =
+ vpfe_standards[i].height;
+ vpfe_dev->std_info.frame_format =
+ vpfe_standards[i].frame_format;
+ vpfe_dev->std_index = i;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vpfe_standards)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "standard not supported\n");
+ return -EINVAL;
+ }
+
+ vpfe_dev->crop.top = 0;
+ vpfe_dev->crop.left = 0;
+ vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
+ vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
+ pix->width = vpfe_dev->crop.width;
+ pix->height = vpfe_dev->crop.height;
+
+ /* first field and frame format based on standard frame format */
+ if (vpfe_dev->std_info.frame_format) {
+ pix->field = V4L2_FIELD_INTERLACED;
+ /* assume V4L2_PIX_FMT_UYVY as default */
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ v4l2_fill_mbus_format(&mbus_fmt, pix,
+ V4L2_MBUS_FMT_YUYV10_2X10);
+ } else {
+ pix->field = V4L2_FIELD_NONE;
+ /* assume V4L2_PIX_FMT_SBGGR8 */
+ pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ v4l2_fill_mbus_format(&mbus_fmt, pix,
+ V4L2_MBUS_FMT_SBGGR8_1X8);
+ }
+
+ /* if sub device supports g_mbus_fmt, override the defaults */
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id, video, g_mbus_fmt, &mbus_fmt);
+
+ if (ret && ret != -ENOIOCTLCMD) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "error in getting g_mbus_fmt from sub device\n");
+ return ret;
+ }
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ /* Sets the values in CCDC */
+ ret = vpfe_config_ccdc_image_format(vpfe_dev);
+ if (ret)
+ return ret;
+
+	/* Update the values of sizeimage and bytesperline */
+	pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
+	pix->sizeimage = pix->bytesperline * pix->height;
+
+	return ret;
+}
+
+static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
+{
+ int ret = 0;
+
+ /* set first input of current subdevice as the current input */
+ vpfe_dev->current_input = 0;
+
+ /* set default standard */
+ vpfe_dev->std_index = 0;
+
+ /* Configure the default format information */
+ ret = vpfe_config_image_format(vpfe_dev,
+ vpfe_standards[vpfe_dev->std_index].std_id);
+ if (ret)
+ return ret;
+
+ /* now open the ccdc device to initialize it */
+ mutex_lock(&ccdc_lock);
+ if (NULL == ccdc_dev) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (!try_module_get(ccdc_dev->owner)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Couldn't lock ccdc module\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+ ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
+ if (!ret)
+ vpfe_dev->initialized = 1;
+
+ /* Clear all VPFE/CCDC interrupts */
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(-1);
+
+unlock:
+ mutex_unlock(&ccdc_lock);
+ return ret;
+}
+
+/*
+ * vpfe_open : It creates an object of the file handle structure and
+ * stores it in the private_data member of the file pointer
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+ struct vpfe_fh *fh;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_open\n");
+
+ if (!vpfe_dev->cfg->num_subdevs) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "No decoder registered\n");
+ return -ENODEV;
+ }
+
+ /* Allocate memory for the file handle object */
+ fh = kmalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
+ if (NULL == fh) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "unable to allocate memory for file handle object\n");
+ return -ENOMEM;
+ }
+ /* store pointer to fh in private_data member of file */
+ file->private_data = fh;
+ fh->vpfe_dev = vpfe_dev;
+ v4l2_fh_init(&fh->fh, vdev);
+ mutex_lock(&vpfe_dev->lock);
+	/* If the device is not initialized, initialize it */
+ if (!vpfe_dev->initialized) {
+ if (vpfe_initialize_device(vpfe_dev)) {
+ mutex_unlock(&vpfe_dev->lock);
+ return -ENODEV;
+ }
+ }
+ /* Increment device usrs counter */
+ vpfe_dev->usrs++;
+ /* Set io_allowed member to false */
+ fh->io_allowed = 0;
+ v4l2_fh_add(&fh->fh);
+ mutex_unlock(&vpfe_dev->lock);
+ return 0;
+}
+
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vpfe_dev->next_frm->queue);
+ vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
+
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+ addr += vpfe_dev->field_off;
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
+{
+ v4l2_get_timestamp(&vpfe_dev->cur_frm->ts);
+ vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
+ vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ wake_up_interruptible(&vpfe_dev->cur_frm->done);
+ vpfe_dev->cur_frm = vpfe_dev->next_frm;
+}
+
+/* ISR for VINT0*/
+static irqreturn_t vpfe_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+ enum v4l2_field field;
+ int fid;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
+ field = vpfe_dev->fmt.fmt.pix.field;
+
+ /* if streaming not started, don't do anything */
+ if (!vpfe_dev->started)
+ goto clear_intr;
+
+	/* this is applicable only to DM6446 */
+ if (NULL != ccdc_dev->hw_ops.reset)
+ ccdc_dev->hw_ops.reset();
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "frame format is progressive...\n");
+ if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+ vpfe_process_buffer_complete(vpfe_dev);
+ goto clear_intr;
+ }
+
+ /* interlaced or TB capture check which field we are in hardware */
+ fid = ccdc_dev->hw_ops.getfid();
+
+ /* switch the software maintained field id */
+ vpfe_dev->field_id ^= 1;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "field id = %x:%x.\n",
+ fid, vpfe_dev->field_id);
+ if (fid == vpfe_dev->field_id) {
+		/* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the next frame
+ * is available, release the current frame and move on
+ */
+ if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+ vpfe_process_buffer_complete(vpfe_dev);
+ /*
+			 * based on whether the two fields are stored
+			 * interleaved or separately in memory, reconfigure
+			 * the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB) {
+ vpfe_schedule_bottom_field(vpfe_dev);
+ }
+ goto clear_intr;
+ }
+ /*
+		 * if one field has just been captured, configure
+		 * the next frame: get the next frame from the dma
+		 * queue; if no frame is available, hold on to the
+		 * current buffer
+ */
+ spin_lock(&vpfe_dev->dma_queue_lock);
+ if (!list_empty(&vpfe_dev->dma_queue) &&
+ vpfe_dev->cur_frm == vpfe_dev->next_frm)
+ vpfe_schedule_next_buffer(vpfe_dev);
+ spin_unlock(&vpfe_dev->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+		 * May lose one frame
+ */
+ vpfe_dev->field_id = fid;
+ }
+clear_intr:
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
+ return IRQ_HANDLED;
+}
+
+/* vdint1_isr - isr handler for VINT1 interrupt */
+static irqreturn_t vdint1_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
+
+ /* if streaming not started, don't do anything */
+ if (!vpfe_dev->started) {
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&vpfe_dev->dma_queue_lock);
+ if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
+ !list_empty(&vpfe_dev->dma_queue) &&
+ vpfe_dev->cur_frm == vpfe_dev->next_frm)
+ vpfe_schedule_next_buffer(vpfe_dev);
+ spin_unlock(&vpfe_dev->dma_queue_lock);
+
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
+ return IRQ_HANDLED;
+}
+
+static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = ccdc_dev->hw_ops.get_frame_format();
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
+}
+
+static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = ccdc_dev->hw_ops.get_frame_format();
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
+ return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
+ 0, "vpfe_capture1",
+ vpfe_dev);
+ }
+ return 0;
+}
+
+/* vpfe_stop_ccdc_capture: stop streaming in ccdc/isif */
+static void vpfe_stop_ccdc_capture(struct vpfe_device *vpfe_dev)
+{
+ vpfe_dev->started = 0;
+ ccdc_dev->hw_ops.enable(0);
+ if (ccdc_dev->hw_ops.enable_out_to_sdram)
+ ccdc_dev->hw_ops.enable_out_to_sdram(0);
+}
+
+/*
+ * vpfe_release : This function deletes the buffer queue, frees the
+ * buffers and the vpfe file handle
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
+
+ /* Get the device lock */
+ mutex_lock(&vpfe_dev->lock);
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ if (vpfe_dev->started) {
+ sdinfo = vpfe_dev->current_subdev;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id,
+ video, s_stream, 0);
+ if (ret && (ret != -ENOIOCTLCMD))
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "stream off failed in subdev\n");
+ vpfe_stop_ccdc_capture(vpfe_dev);
+ vpfe_detach_irq(vpfe_dev);
+ videobuf_streamoff(&vpfe_dev->buffer_queue);
+ }
+ vpfe_dev->io_usrs = 0;
+ vpfe_dev->numbuffers = config_params.numbuffers;
+ videobuf_stop(&vpfe_dev->buffer_queue);
+ videobuf_mmap_free(&vpfe_dev->buffer_queue);
+ }
+
+ /* Decrement device usrs counter */
+ vpfe_dev->usrs--;
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+ /* If this is the last file handle */
+ if (!vpfe_dev->usrs) {
+ vpfe_dev->initialized = 0;
+ if (ccdc_dev->hw_ops.close)
+ ccdc_dev->hw_ops.close(vpfe_dev->pdev);
+ module_put(ccdc_dev->owner);
+ }
+ mutex_unlock(&vpfe_dev->lock);
+ file->private_data = NULL;
+ /* Free memory allocated to file handle object */
+ kfree(fh);
+ return 0;
+}
+
+/*
+ * vpfe_mmap : It is used to map kernel space buffers
+ * into user space
+ */
+static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* Get the device object and file handle object */
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
+
+ return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma);
+}
+
+/*
+ * vpfe_poll: It is used for select/poll system call
+ */
+static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
+
+ if (vpfe_dev->started)
+ return videobuf_poll_stream(file,
+ &vpfe_dev->buffer_queue, wait);
+ return 0;
+}
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpfe_mmap,
+ .poll = vpfe_poll
+};
+
+/*
+ * vpfe_check_format()
+ * This function adjusts the input pixel format as per hardware
+ * capabilities and updates the same in pixfmt.
+ * The following algorithm is used:
+ *
+ * If the given pixformat is not in the vpfe list of pix formats or not
+ * supported by the hardware, the current value of pixformat in the device
+ * is used.
+ * If the given field is not supported, then the current field is used. If
+ * the field is different from the current one, then it is matched with that
+ * from the sub device.
+ * Minimum height is 2 lines for interlaced or tb fields and 1 line for
+ * progressive. Maximum height is clamped to the active lines of the scan.
+ * Minimum width is 32 bytes in memory and width is clamped to the active
+ * pixels of the scan.
+ * bytesperline is a multiple of 32.
+ */
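+/*
+ * Worked example (illustrative, not part of the original comment): with
+ * V4L2_PIX_FMT_UYVY (bpp = 2) on the 525/60 standard, a requested width of
+ * 721 is clamped to the 720 active pixels, bytesperline becomes
+ * ((720 * 2) + 31) & ~31 = 1440, and sizeimage = 1440 * height.
+ */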
+static const struct vpfe_pixel_format *
+ vpfe_check_format(struct vpfe_device *vpfe_dev,
+ struct v4l2_pix_format *pixfmt)
+{
+ u32 min_height = 1, min_width = 32, max_width, max_height;
+ const struct vpfe_pixel_format *vpfe_pix_fmt;
+ u32 pix;
+ int temp, found;
+
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ if (NULL == vpfe_pix_fmt) {
+ /*
+ * use current pixel format in the vpfe device. We
+ * will find this pix format in the table
+ */
+ pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ }
+
+ /* check if hw supports it */
+ temp = 0;
+ found = 0;
+ while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) {
+ if (vpfe_pix_fmt->fmtdesc.pixelformat == pix) {
+ found = 1;
+ break;
+ }
+ temp++;
+ }
+
+ if (!found) {
+ /* use current pixel format */
+ pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+ /*
+ * Since this is currently used in the vpfe device, we
+ * will find this pix format in the table
+ */
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ }
+
+ /* check what field format is supported */
+ if (pixfmt->field == V4L2_FIELD_ANY) {
+ /* if field is any, use current value as default */
+ pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+ }
+
+ /*
+ * if field is not same as current field in the vpfe device
+ * try matching the field with the sub device field
+ */
+ if (vpfe_dev->fmt.fmt.pix.field != pixfmt->field) {
+ /*
+ * If field value is not in the supported fields, use current
+ * field used in the device as default
+ */
+ switch (pixfmt->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ /* if sub device is supporting progressive, use that */
+ if (!vpfe_dev->std_info.frame_format)
+ pixfmt->field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_NONE:
+ if (vpfe_dev->std_info.frame_format)
+ pixfmt->field = V4L2_FIELD_INTERLACED;
+ break;
+
+ default:
+ /* use current field as default */
+ pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+ break;
+ }
+ }
+
+ /* Now adjust image resolutions supported */
+ if (pixfmt->field == V4L2_FIELD_INTERLACED ||
+ pixfmt->field == V4L2_FIELD_SEQ_TB)
+ min_height = 2;
+
+ max_width = vpfe_dev->std_info.active_pixels;
+ max_height = vpfe_dev->std_info.active_lines;
+ min_width /= vpfe_pix_fmt->bpp;
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "width = %d, height = %d, bpp = %d\n",
+ pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp);
+
+ pixfmt->width = clamp((pixfmt->width), min_width, max_width);
+ pixfmt->height = clamp((pixfmt->height), min_height, max_height);
+
+ /* If interlaced, adjust height to be a multiple of 2 */
+ if (pixfmt->field == V4L2_FIELD_INTERLACED)
+ pixfmt->height &= (~1);
+ /*
+ * recalculate bytesperline and sizeimage since width
+ * and height might have changed
+ */
+ pixfmt->bytesperline = (((pixfmt->width * vpfe_pix_fmt->bpp) + 31)
+ & ~31);
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ pixfmt->sizeimage =
+ pixfmt->bytesperline * pixfmt->height +
+ ((pixfmt->bytesperline * pixfmt->height) >> 1);
+ else
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height ="
+ " %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
+ pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
+ pixfmt->bytesperline, pixfmt->sizeimage);
+ return vpfe_pix_fmt;
+}
+
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
+
+ cap->version = VPFE_CAPTURE_VERSION_CODE;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
+ strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
+ return 0;
+}
+
+static int vpfe_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n");
+ /* Fill in the information about format */
+ *fmt = vpfe_dev->fmt;
+ return ret;
+}
+
+static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmt;
+ int temp_index;
+ u32 pix;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n");
+
+ if (ccdc_dev->hw_ops.enum_pix(&pix, fmt->index) < 0)
+ return -EINVAL;
+
+ /* Fill in the information about format */
+ pix_fmt = vpfe_lookup_pix_format(pix);
+ if (NULL != pix_fmt) {
+ temp_index = fmt->index;
+ *fmt = pix_fmt->fmtdesc;
+ fmt->index = temp_index;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmts;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
+
+ /* If streaming is started, return error */
+ if (vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+
+ /* Check for valid frame format */
+ pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
+
+ if (NULL == pix_fmts)
+ return -EINVAL;
+
+ /* store the pixel format in the device object */
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ /* First detach any IRQ if currently attached */
+ vpfe_detach_irq(vpfe_dev);
+ vpfe_dev->fmt = *fmt;
+ /* set image capture parameters in the ccdc */
+ ret = vpfe_config_ccdc_image_format(vpfe_dev);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmts;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
+
+ pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
+ if (NULL == pix_fmts)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
+ * given app input index
+ */
+static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe_dev,
+ int *subdev_index,
+ int *subdev_input_index,
+ int app_input_index)
+{
+ struct vpfe_config *cfg = vpfe_dev->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < cfg->num_subdevs; i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (app_input_index < (j + sdinfo->num_inputs)) {
+ *subdev_index = i;
+ *subdev_input_index = app_input_index - j;
+ return 0;
+ }
+ j += sdinfo->num_inputs;
+ }
+ return -EINVAL;
+}
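+
+/*
+ * Illustrative example (not from the original comments): if sub_devs[0]
+ * exposes 2 inputs and sub_devs[1] exposes 3, application input index 3
+ * maps to subdev_index 1 / subdev_input_index 1 above, and conversely
+ * current_input 1 on sub_devs[1] is reported back to the application as
+ * input index 3 by vpfe_get_app_input_index() below.
+ */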
+
+/*
+ * vpfe_get_app_input_index - Get app input index for a given subdev input
+ * index. The driver stores the input index of the current sub device and
+ * translates it when the application requests the current input.
+ */
+static int vpfe_get_app_input_index(struct vpfe_device *vpfe_dev,
+ int *app_input_index)
+{
+ struct vpfe_config *cfg = vpfe_dev->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < cfg->num_subdevs; i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (!strcmp(sdinfo->name, vpfe_dev->current_subdev->name)) {
+ if (vpfe_dev->current_input >= sdinfo->num_inputs)
+ return -1;
+ *app_input_index = j + vpfe_dev->current_input;
+ return 0;
+ }
+ j += sdinfo->num_inputs;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+	int subdev, index;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
+
+ if (vpfe_get_subdev_input_index(vpfe_dev,
+ &subdev,
+ &index,
+ inp->index) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "input information not found"
+ " for the subdev\n");
+ return -EINVAL;
+ }
+ sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
+ memcpy(inp, &sdinfo->inputs[index], sizeof(struct v4l2_input));
+ return 0;
+}
+
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
+
+ return vpfe_get_app_input_index(vpfe_dev, index);
+}
+
+
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ struct vpfe_subdev_info *sdinfo;
+ int subdev_index, inp_index;
+ struct vpfe_route *route;
+ u32 input = 0, output = 0;
+ int ret = -EINVAL;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ /*
+ * If streaming is started return device busy
+ * error
+ */
+ if (vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+ ret = vpfe_get_subdev_input_index(vpfe_dev,
+ &subdev_index,
+ &inp_index,
+ index);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "invalid input index\n");
+ goto unlock_out;
+ }
+
+ sdinfo = &vpfe_dev->cfg->sub_devs[subdev_index];
+ sd = vpfe_dev->sd[subdev_index];
+ route = &sdinfo->routes[inp_index];
+ if (route && sdinfo->can_route) {
+ input = route->input;
+ output = route->output;
+ }
+
+ if (sd)
+ ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0);
+
+ if (ret) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+			"vpfe_s_input: error in setting input in decoder\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ vpfe_dev->current_subdev = sdinfo;
+ if (sd)
+ vpfe_dev->v4l2_dev.ctrl_handler = sd->ctrl_handler;
+ vpfe_dev->current_input = index;
+ vpfe_dev->std_index = 0;
+
+ /* set the bus/interface parameter for the sub device in ccdc */
+ ret = ccdc_dev->hw_ops.set_hw_if_params(&sdinfo->ccdc_if_params);
+ if (ret)
+ goto unlock_out;
+
+ /* set the default image parameters in the device */
+ ret = vpfe_config_image_format(vpfe_dev,
+ vpfe_standards[vpfe_dev->std_index].std_id);
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ sdinfo = vpfe_dev->current_subdev;
+ if (ret)
+ return ret;
+ /* Call querystd function of decoder device */
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, querystd, std_id);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
+
+ /* Call decoder driver function to set the standard */
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ sdinfo = vpfe_dev->current_subdev;
+ /* If streaming is started, return device busy error */
+ if (vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, s_std, std_id);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
+ goto unlock_out;
+ }
+ ret = vpfe_config_image_format(vpfe_dev, std_id);
+
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
+
+ *std_id = vpfe_standards[vpfe_dev->std_index].std_id;
+ return 0;
+}
+
+/*
+ * Videobuf operations
+ */
+static int vpfe_videobuf_setup(struct videobuf_queue *vq,
+ unsigned int *count,
+ unsigned int *size)
+{
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
+ *size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
+ vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
+ *size = config_params.device_bufsize;
+
+ if (*count < config_params.min_numbuffers)
+ *count = config_params.min_numbuffers;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "count=%d, size=%d\n", *count, *size);
+ return 0;
+}
+
+static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long addr;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
+
+ /* If buffer is not initialized, initialize it */
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vpfe_dev->fmt.fmt.pix.width;
+ vb->height = vpfe_dev->fmt.fmt.pix.height;
+ vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ vb->field = field;
+
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0)
+ return ret;
+
+ addr = videobuf_to_dma_contig(vb);
+		/* Make sure the buffer address is aligned to 32 bytes */
+		if (!IS_ALIGNED(addr, 32))
+			return -EINVAL;
+
+ vb->state = VIDEOBUF_PREPARED;
+ }
+ return 0;
+}
+
+static void vpfe_videobuf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ /* Get the file handle object and device object */
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue\n");
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
+ list_add_tail(&vb->queue, &vpfe_dev->dma_queue);
+ spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
+
+ /* Change state of the buffer */
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+static void vpfe_videobuf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_videobuf_release\n");
+
+ /*
+ * We need to flush the buffer from the dma queue since
+ * they are de-allocated
+ */
+ spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
+ INIT_LIST_HEAD(&vpfe_dev->dma_queue);
+ spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops vpfe_videobuf_qops = {
+ .buf_setup = vpfe_videobuf_setup,
+ .buf_prepare = vpfe_videobuf_prepare,
+ .buf_queue = vpfe_videobuf_queue,
+ .buf_release = vpfe_videobuf_release,
+};
+
+/*
+ * vpfe_reqbufs. Currently supports REQBUFS only once after opening
+ * the device.
+ */
+static int vpfe_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ if (vpfe_dev->io_usrs != 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ vpfe_dev->memory = req_buf->memory;
+ videobuf_queue_dma_contig_init(&vpfe_dev->buffer_queue,
+ &vpfe_videobuf_qops,
+ vpfe_dev->pdev,
+ &vpfe_dev->irqlock,
+ req_buf->type,
+ vpfe_dev->fmt.fmt.pix.field,
+ sizeof(struct videobuf_buffer),
+ fh, NULL);
+
+ fh->io_allowed = 1;
+ vpfe_dev->io_usrs = 1;
+ INIT_LIST_HEAD(&vpfe_dev->dma_queue);
+ ret = videobuf_reqbufs(&vpfe_dev->buffer_queue, req_buf);
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ if (vpfe_dev->memory != V4L2_MEMORY_MMAP) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
+ return -EINVAL;
+ }
+ /* Call videobuf_querybuf to get information */
+ return videobuf_querybuf(&vpfe_dev->buffer_queue, buf);
+}
+
+static int vpfe_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If this file handle is not allowed to do IO,
+ * return error
+ */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+ return videobuf_qbuf(&vpfe_dev->buffer_queue, p);
+}
+
+static int vpfe_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+ return videobuf_dqbuf(&vpfe_dev->buffer_queue,
+ buf, file->f_flags & O_NONBLOCK);
+}
+
+/*
+ * vpfe_calculate_offsets : This function calculates the buffer offsets
+ * for the top and bottom fields
+ */
+static void vpfe_calculate_offsets(struct vpfe_device *vpfe_dev)
+{
+ struct v4l2_rect image_win;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_calculate_offsets\n");
+
+ ccdc_dev->hw_ops.get_image_window(&image_win);
+ vpfe_dev->field_off = image_win.height * image_win.width;
+}
+
+/* vpfe_start_ccdc_capture: start streaming in ccdc/isif */
+static void vpfe_start_ccdc_capture(struct vpfe_device *vpfe_dev)
+{
+ ccdc_dev->hw_ops.enable(1);
+ if (ccdc_dev->hw_ops.enable_out_to_sdram)
+ ccdc_dev->hw_ops.enable_out_to_sdram(1);
+ vpfe_dev->started = 1;
+}
+
+/*
+ * vpfe_streamon. Assumes the DMA queue is not empty: the
+ * application is expected to call QBUF before calling
+ * this ioctl. If not, the driver returns an error
+ */
+static int vpfe_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long addr;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ /* If file handle is not allowed IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ sdinfo = vpfe_dev->current_subdev;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, s_stream, 1);
+
+ if (ret && (ret != -ENOIOCTLCMD)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "stream on failed in subdev\n");
+ return -EINVAL;
+ }
+
+ /* If buffer queue is empty, return error */
+ if (list_empty(&vpfe_dev->buffer_queue.stream)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
+ return -EIO;
+ }
+
+	/* Call videobuf_streamon to start streaming in videobuf */
+ ret = videobuf_streamon(&vpfe_dev->buffer_queue);
+ if (ret)
+ return ret;
+
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ goto streamoff;
+ /* Get the next frame from the buffer queue */
+ vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
+ struct videobuf_buffer, queue);
+ vpfe_dev->cur_frm = vpfe_dev->next_frm;
+ /* Remove buffer from the buffer queue */
+ list_del(&vpfe_dev->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vpfe_dev->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ vpfe_dev->field_id = 0;
+ addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+
+ /* Calculate field offset */
+ vpfe_calculate_offsets(vpfe_dev);
+
+ if (vpfe_attach_irq(vpfe_dev) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error in attaching interrupt handle\n");
+ ret = -EFAULT;
+ goto unlock_out;
+ }
+ if (ccdc_dev->hw_ops.configure() < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error in configuring ccdc\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ ccdc_dev->hw_ops.setfbaddr((unsigned long)(addr));
+ vpfe_start_ccdc_capture(vpfe_dev);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+streamoff:
+ ret = videobuf_streamoff(&vpfe_dev->buffer_queue);
+ return ret;
+}
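
The comments above vpfe_reqbufs and vpfe_streamon spell out the call order the driver expects from user space: REQBUFS once per open, then at least one QBUF before STREAMON. The following is a minimal user-space sketch of that sequence using the standard V4L2 MMAP ioctls; the helper name, device node and buffer count are assumptions, and mapping the buffers for access (VIDIOC_QUERYBUF + mmap) is omitted for brevity.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hypothetical helper, not part of the driver */
static int start_capture(const char *dev_node)	/* e.g. "/dev/video0" (assumed) */
{
	struct v4l2_requestbuffers req;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	unsigned int i;
	int fd;

	fd = open(dev_node, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.count = 3;			/* the driver clamps this up to its minimum */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)	/* only once per open */
		goto err;

	for (i = 0; i < req.count; i++) {
		struct v4l2_buffer buf;

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)	/* queue before STREAMON */
			goto err;
	}

	/* the DMA queue is now non-empty, so STREAMON will not fail with -EIO */
	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		goto err;

	return fd;	/* caller streams with DQBUF/QBUF and closes fd */
err:
	close(fd);
	return -1;
}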
+
+static int vpfe_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_subdev_info *sdinfo;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+	/* If I/O is not allowed for this file handle, return an error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ /* If streaming is not started, return error */
+ if (!vpfe_dev->started) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "device is not started\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ vpfe_stop_ccdc_capture(vpfe_dev);
+ vpfe_detach_irq(vpfe_dev);
+
+ sdinfo = vpfe_dev->current_subdev;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, s_stream, 0);
+
+ if (ret && (ret != -ENOIOCTLCMD))
+ v4l2_err(&vpfe_dev->v4l2_dev, "stream off failed in subdev\n");
+ ret = videobuf_streamoff(&vpfe_dev->buffer_queue);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *crop)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_cropcap\n");
+
+ if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
+ return -EINVAL;
+
+ memset(crop, 0, sizeof(struct v4l2_cropcap));
+ crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ crop->bounds.width = crop->defrect.width =
+ vpfe_standards[vpfe_dev->std_index].width;
+ crop->bounds.height = crop->defrect.height =
+ vpfe_standards[vpfe_dev->std_index].height;
+ crop->pixelaspect = vpfe_standards[vpfe_dev->std_index].pixelaspect;
+ return 0;
+}
+
+static int vpfe_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_crop\n");
+
+ crop->c = vpfe_dev->crop;
+ return 0;
+}
+
+static int vpfe_s_crop(struct file *file, void *priv,
+ const struct v4l2_crop *crop)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct v4l2_rect rect = crop->c;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_crop\n");
+
+ if (vpfe_dev->started) {
+ /* make sure streaming is not started */
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Cannot change crop when streaming is ON\n");
+ return -EBUSY;
+ }
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ if (rect.top < 0 || rect.left < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "doesn't support negative values for top & left\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ /* adjust the width to 16 pixel boundary */
+ rect.width = ((rect.width + 15) & ~0xf);
+
+ /* make sure parameters are valid */
+ if ((rect.left + rect.width >
+ vpfe_dev->std_info.active_pixels) ||
+ (rect.top + rect.height >
+ vpfe_dev->std_info.active_lines)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_CROP params\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ ccdc_dev->hw_ops.set_image_window(&rect);
+ vpfe_dev->fmt.fmt.pix.width = rect.width;
+ vpfe_dev->fmt.fmt.pix.height = rect.height;
+ vpfe_dev->fmt.fmt.pix.bytesperline =
+ ccdc_dev->hw_ops.get_line_length();
+ vpfe_dev->fmt.fmt.pix.sizeimage =
+ vpfe_dev->fmt.fmt.pix.bytesperline *
+ vpfe_dev->fmt.fmt.pix.height;
+ vpfe_dev->crop = rect;
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+
+static long vpfe_param_handler(struct file *file, void *priv,
+ bool valid_prio, unsigned int cmd, void *param)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ int ret = 0;
+
+ v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
+
+ if (vpfe_dev->started) {
+ /* only allowed if streaming is not started */
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "device already started\n");
+ return -EBUSY;
+ }
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ switch (cmd) {
+ case VPFE_CMD_S_CCDC_RAW_PARAMS:
+ v4l2_warn(&vpfe_dev->v4l2_dev,
+ "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+ if (ccdc_dev->hw_ops.set_params) {
+ ret = ccdc_dev->hw_ops.set_params(param);
+ if (ret) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "Error setting parameters in CCDC\n");
+ goto unlock_out;
+ }
+ ret = vpfe_get_ccdc_image_format(vpfe_dev,
+ &vpfe_dev->fmt);
+ if (ret < 0) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "Invalid image format at CCDC\n");
+ goto unlock_out;
+ }
+ } else {
+ ret = -EINVAL;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+ }
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+ .vidioc_querycap = vpfe_querycap,
+ .vidioc_g_fmt_vid_cap = vpfe_g_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vpfe_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vpfe_try_fmt_vid_cap,
+ .vidioc_enum_input = vpfe_enum_input,
+ .vidioc_g_input = vpfe_g_input,
+ .vidioc_s_input = vpfe_s_input,
+ .vidioc_querystd = vpfe_querystd,
+ .vidioc_s_std = vpfe_s_std,
+ .vidioc_g_std = vpfe_g_std,
+ .vidioc_reqbufs = vpfe_reqbufs,
+ .vidioc_querybuf = vpfe_querybuf,
+ .vidioc_qbuf = vpfe_qbuf,
+ .vidioc_dqbuf = vpfe_dqbuf,
+ .vidioc_streamon = vpfe_streamon,
+ .vidioc_streamoff = vpfe_streamoff,
+ .vidioc_cropcap = vpfe_cropcap,
+ .vidioc_g_crop = vpfe_g_crop,
+ .vidioc_s_crop = vpfe_s_crop,
+ .vidioc_default = vpfe_param_handler,
+};
+
+static struct vpfe_device *vpfe_initialize(void)
+{
+ struct vpfe_device *vpfe_dev;
+
+	/* Clamp the buffer count to the configured minimum (default 3) */
+ if ((numbuffers > 0) &&
+ (numbuffers < config_params.min_numbuffers))
+ numbuffers = config_params.min_numbuffers;
+
+ /*
+ * Set buffer size to min buffers size if invalid buffer size is
+ * given
+ */
+ if (bufsize < config_params.min_bufsize)
+ bufsize = config_params.min_bufsize;
+
+ config_params.numbuffers = numbuffers;
+
+ if (numbuffers)
+ config_params.device_bufsize = bufsize;
+
+ /* Allocate memory for device objects */
+ vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
+
+ return vpfe_dev;
+}
+
+/*
+ * vpfe_probe : This function creates device entries by registering
+ * itself with the V4L2 framework and initializes the fields of each
+ * device object
+ */
+static int vpfe_probe(struct platform_device *pdev)
+{
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_config *vpfe_cfg;
+ struct resource *res1;
+ struct vpfe_device *vpfe_dev;
+ struct i2c_adapter *i2c_adap;
+ struct video_device *vfd;
+ int ret = -ENOMEM, i, j;
+ int num_subdevs = 0;
+
+ /* Get the pointer to the device object */
+ vpfe_dev = vpfe_initialize();
+
+ if (!vpfe_dev) {
+ v4l2_err(pdev->dev.driver,
+ "Failed to allocate memory for vpfe_dev\n");
+ return ret;
+ }
+
+ vpfe_dev->pdev = &pdev->dev;
+
+ if (NULL == pdev->dev.platform_data) {
+ v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
+ ret = -ENODEV;
+ goto probe_free_dev_mem;
+ }
+
+ vpfe_cfg = pdev->dev.platform_data;
+ vpfe_dev->cfg = vpfe_cfg;
+ if (NULL == vpfe_cfg->ccdc ||
+ NULL == vpfe_cfg->card_name ||
+ NULL == vpfe_cfg->sub_devs) {
+ v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+
+ /* Allocate memory for ccdc configuration */
+ ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
+ if (NULL == ccdc_cfg) {
+ v4l2_err(pdev->dev.driver,
+ "Memory allocation failed for ccdc_cfg\n");
+ goto probe_free_dev_mem;
+ }
+
+ mutex_lock(&ccdc_lock);
+
+ strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
+ /* Get VINT0 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT0\n");
+ ret = -ENODEV;
+ goto probe_free_ccdc_cfg_mem;
+ }
+ vpfe_dev->ccdc_irq0 = res1->start;
+
+ /* Get VINT1 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT1\n");
+ ret = -ENODEV;
+ goto probe_free_ccdc_cfg_mem;
+ }
+ vpfe_dev->ccdc_irq1 = res1->start;
+
+ ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
+ "vpfe_capture0", vpfe_dev);
+
+ if (0 != ret) {
+ v4l2_err(pdev->dev.driver, "Unable to request interrupt\n");
+ goto probe_free_ccdc_cfg_mem;
+ }
+
+ /* Allocate memory for video device */
+ vfd = video_device_alloc();
+ if (NULL == vfd) {
+ ret = -ENOMEM;
+ v4l2_err(pdev->dev.driver, "Unable to alloc video device\n");
+ goto probe_out_release_irq;
+ }
+
+	/* Initialize fields of the video device */
+ vfd->release = video_device_release;
+ vfd->fops = &vpfe_fops;
+ vfd->ioctl_ops = &vpfe_ioctl_ops;
+ vfd->tvnorms = 0;
+ vfd->v4l2_dev = &vpfe_dev->v4l2_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "%s_V%d.%d.%d",
+ CAPTURE_DRV_NAME,
+ (VPFE_CAPTURE_VERSION_CODE >> 16) & 0xff,
+ (VPFE_CAPTURE_VERSION_CODE >> 8) & 0xff,
+ (VPFE_CAPTURE_VERSION_CODE) & 0xff);
+ /* Set video_dev to the video device */
+ vpfe_dev->video_dev = vfd;
+
+ ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to register v4l2 device.\n");
+ goto probe_out_video_release;
+ }
+ v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
+ spin_lock_init(&vpfe_dev->irqlock);
+ spin_lock_init(&vpfe_dev->dma_queue_lock);
+ mutex_init(&vpfe_dev->lock);
+
+	/* Initialize fields of the device object */
+ vpfe_dev->numbuffers = config_params.numbuffers;
+
+ /* register video device */
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "trying to register vpfe device.\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "video_dev=%x\n", (int)&vpfe_dev->video_dev);
+ vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vpfe_dev->video_dev->flags);
+ ret = video_register_device(vpfe_dev->video_dev,
+ VFL_TYPE_GRABBER, -1);
+
+ if (ret) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to register video device.\n");
+ goto probe_out_v4l2_unregister;
+ }
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "video device registered\n");
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpfe_dev);
+ /* set driver private data */
+ video_set_drvdata(vpfe_dev->video_dev, vpfe_dev);
+ i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
+ num_subdevs = vpfe_cfg->num_subdevs;
+ vpfe_dev->sd = kmalloc(sizeof(struct v4l2_subdev *) * num_subdevs,
+ GFP_KERNEL);
+ if (NULL == vpfe_dev->sd) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "unable to allocate memory for subdevice pointers\n");
+ ret = -ENOMEM;
+ goto probe_out_video_unregister;
+ }
+
+ for (i = 0; i < num_subdevs; i++) {
+ struct v4l2_input *inps;
+
+ sdinfo = &vpfe_cfg->sub_devs[i];
+
+ /* Load up the subdevice */
+ vpfe_dev->sd[i] =
+ v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
+ i2c_adap,
+ &sdinfo->board_info,
+ NULL);
+ if (vpfe_dev->sd[i]) {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ sdinfo->name);
+ vpfe_dev->sd[i]->grp_id = sdinfo->grp_id;
+ /* update tvnorms from the sub devices */
+ for (j = 0; j < sdinfo->num_inputs; j++) {
+ inps = &sdinfo->inputs[j];
+ vfd->tvnorms |= inps->std;
+ }
+ } else {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s register fails\n",
+ sdinfo->name);
+ goto probe_sd_out;
+ }
+ }
+
+ /* set first sub device as current one */
+ vpfe_dev->current_subdev = &vpfe_cfg->sub_devs[0];
+ vpfe_dev->v4l2_dev.ctrl_handler = vpfe_dev->sd[0]->ctrl_handler;
+
+ /* We have at least one sub device to work with */
+ mutex_unlock(&ccdc_lock);
+ return 0;
+
+probe_sd_out:
+ kfree(vpfe_dev->sd);
+probe_out_video_unregister:
+ video_unregister_device(vpfe_dev->video_dev);
+probe_out_v4l2_unregister:
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+probe_out_video_release:
+ if (!video_is_registered(vpfe_dev->video_dev))
+ video_device_release(vpfe_dev->video_dev);
+probe_out_release_irq:
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+probe_free_ccdc_cfg_mem:
+ kfree(ccdc_cfg);
+ mutex_unlock(&ccdc_lock);
+probe_free_dev_mem:
+ kfree(vpfe_dev);
+ return ret;
+}
+
+/*
+ * vpfe_remove : Unregisters the device from the V4L2 framework
+ */
+static int vpfe_remove(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
+
+ v4l2_info(pdev->dev.driver, "vpfe_remove\n");
+
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ kfree(vpfe_dev->sd);
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+ video_unregister_device(vpfe_dev->video_dev);
+ kfree(vpfe_dev);
+ kfree(ccdc_cfg);
+ return 0;
+}
+
+static int vpfe_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int vpfe_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops vpfe_dev_pm_ops = {
+ .suspend = vpfe_suspend,
+ .resume = vpfe_resume,
+};
+
+static struct platform_driver vpfe_driver = {
+ .driver = {
+ .name = CAPTURE_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &vpfe_dev_pm_ops,
+ },
+ .probe = vpfe_probe,
+ .remove = vpfe_remove,
+};
+
+module_platform_driver(vpfe_driver);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
new file mode 100644
index 00000000000..cd08e524838
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif.c
@@ -0,0 +1,487 @@
+/*
+ * vpif - Video Port Interface driver
+ * VPIF is a receiver and transmitter for video data. It has two channels (0, 1)
+ * that receive the video byte stream and two channels (2, 3) for video output.
+ * The hardware supports SDTV and HDTV formats as well as raw data capture.
+ * Currently, the driver supports NTSC and PAL standards.
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/v4l2-dv-timings.h>
+
+#include "vpif.h"
+
+MODULE_DESCRIPTION("TI DaVinci Video Port Interface driver");
+MODULE_LICENSE("GPL");
+
+#define VPIF_CH0_MAX_MODES 22
+#define VPIF_CH1_MAX_MODES 2
+#define VPIF_CH2_MAX_MODES 15
+#define VPIF_CH3_MAX_MODES 2
+
+spinlock_t vpif_lock;
+
+void __iomem *vpif_base;
+EXPORT_SYMBOL_GPL(vpif_base);
+
+/**
+ * vpif_ch_params: video standard configuration parameters for vpif
+ * The table must include all presets from supported subdevices.
+ */
+const struct vpif_channel_config_params vpif_ch_params[] = {
+ /* HDTV formats */
+ {
+ .name = "480p59_94",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 138-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 43,
+ .l5 = 523,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_720X480P59_94,
+ },
+ {
+ .name = "576p50",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 144-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 45,
+ .l5 = 621,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_720X576P50,
+ },
+ {
+ .name = "720p50",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 700-8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1280X720P50,
+ },
+ {
+ .name = "720p60",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 370 - 8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1280X720P60,
+ },
+ {
+ .name = "1080I50",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 720 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1920X1080I50,
+ },
+ {
+ .name = "1080I60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1920X1080I60,
+ },
+ {
+ .name = "1080p60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 42,
+ .l5 = 1122,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1920X1080P60,
+ },
+
+ /* SDTV formats */
+ {
+ .name = "NTSC_M",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 268,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 263,
+ .l7 = 266,
+ .l9 = 286,
+ .l11 = 525,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_525_60,
+ },
+ {
+ .name = "PAL_BDGHIK",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 280,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 311,
+ .l7 = 313,
+ .l9 = 336,
+ .l11 = 624,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_625_50,
+ },
+};
+EXPORT_SYMBOL_GPL(vpif_ch_params);
+
+const unsigned int vpif_ch_params_count = ARRAY_SIZE(vpif_ch_params);
+EXPORT_SYMBOL_GPL(vpif_ch_params_count);
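
vpif_ch_params is the one table the VPIF capture and display drivers consult to translate a selected standard or DV preset into the timing values programmed by vpif_set_mode_info() below. A hedged sketch of how a caller might pick an SDTV entry by v4l2_std_id follows; the helper name is an assumption, not part of the driver.

/* Hypothetical lookup helper (not part of the driver): SDTV entries are
 * matched through stdid, HDTV entries through dv_timings instead. */
static const struct vpif_channel_config_params *
vpif_lookup_std(v4l2_std_id std)
{
	unsigned int i;

	for (i = 0; i < vpif_ch_params_count; i++) {
		const struct vpif_channel_config_params *cfg = &vpif_ch_params[i];

		if (!cfg->hd_sd && (cfg->stdid & std))
			return cfg;	/* e.g. V4L2_STD_NTSC matches "NTSC_M" */
	}
	return NULL;
}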
+
+static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
+{
+ if (val)
+ vpif_set_bit(reg, bit);
+ else
+ vpif_clr_bit(reg, bit);
+}
+
+/* This structure keeps track of the VPIF register offsets for a channel */
+struct vpif_registers {
+ u32 h_cfg, v_cfg_00, v_cfg_01, v_cfg_02, v_cfg, ch_ctrl;
+ u32 line_offset, vanc0_strt, vanc0_size, vanc1_strt;
+ u32 vanc1_size, width_mask, len_mask;
+ u8 max_modes;
+};
+
+static const struct vpif_registers vpifregs[VPIF_NUM_CHANNELS] = {
+ /* Channel0 */
+ {
+ VPIF_CH0_H_CFG, VPIF_CH0_V_CFG_00, VPIF_CH0_V_CFG_01,
+ VPIF_CH0_V_CFG_02, VPIF_CH0_V_CFG_03, VPIF_CH0_CTRL,
+ VPIF_CH0_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
+ VPIF_CH0_MAX_MODES,
+ },
+ /* Channel1 */
+ {
+ VPIF_CH1_H_CFG, VPIF_CH1_V_CFG_00, VPIF_CH1_V_CFG_01,
+ VPIF_CH1_V_CFG_02, VPIF_CH1_V_CFG_03, VPIF_CH1_CTRL,
+ VPIF_CH1_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
+ VPIF_CH1_MAX_MODES,
+ },
+ /* Channel2 */
+ {
+ VPIF_CH2_H_CFG, VPIF_CH2_V_CFG_00, VPIF_CH2_V_CFG_01,
+ VPIF_CH2_V_CFG_02, VPIF_CH2_V_CFG_03, VPIF_CH2_CTRL,
+ VPIF_CH2_IMG_ADD_OFST, VPIF_CH2_VANC0_STRT, VPIF_CH2_VANC0_SIZE,
+ VPIF_CH2_VANC1_STRT, VPIF_CH2_VANC1_SIZE, 0x7FF, 0x7FF,
+ VPIF_CH2_MAX_MODES
+ },
+ /* Channel3 */
+ {
+ VPIF_CH3_H_CFG, VPIF_CH3_V_CFG_00, VPIF_CH3_V_CFG_01,
+ VPIF_CH3_V_CFG_02, VPIF_CH3_V_CFG_03, VPIF_CH3_CTRL,
+ VPIF_CH3_IMG_ADD_OFST, VPIF_CH3_VANC0_STRT, VPIF_CH3_VANC0_SIZE,
+ VPIF_CH3_VANC1_STRT, VPIF_CH3_VANC1_SIZE, 0x7FF, 0x7FF,
+ VPIF_CH3_MAX_MODES
+ },
+};
+
+/* vpif_set_mode_info:
+ * This function sets the horizontal and vertical config parameters.
+ * As per the standard selected for the channel, it configures the values
+ * of L1, L3, L5, L7, L9 and L11 in the VPIF registers and also writes
+ * the width and height
+ */
+static void vpif_set_mode_info(const struct vpif_channel_config_params *config,
+ u8 channel_id, u8 config_channel_id)
+{
+ u32 value;
+
+ value = (config->eav2sav & vpifregs[config_channel_id].width_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->sav2eav & vpifregs[config_channel_id].width_mask);
+ regw(value, vpifregs[channel_id].h_cfg);
+
+ value = (config->l1 & vpifregs[config_channel_id].len_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->l3 & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg_00);
+
+ value = (config->l5 & vpifregs[config_channel_id].len_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->l7 & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg_01);
+
+ value = (config->l9 & vpifregs[config_channel_id].len_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->l11 & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg_02);
+
+ value = (config->vsize & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg);
+}
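
A worked example of the packing above, using the "720p60" entry from vpif_ch_params (the register values are computed here from the table, not taken from a datasheet):

/*
 * Worked example for the "720p60" entry on channel 0
 * (eav2sav = 370 - 8 = 362, sav2eav = 1280, l1 = 1, l3 = 26):
 *
 *   h_cfg    = (362 & 0x1FFF) << 16 | (1280 & 0x1FFF) = 0x016A0500
 *   v_cfg_00 = (  1 & 0x0FFF) << 16 | (  26 & 0x0FFF) = 0x0001001A
 *
 * 0x1FFF and 0x0FFF are the channel 0/1 width_mask and len_mask from
 * vpifregs[], and VPIF_CH_LEN_SHIFT is 16.
 */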
+
+/* config_vpif_params
+ * Function to set the parameters of a channel
+ * Mainly modifies the channel control register.
+ * It sets the frame format and Y/C mux mode
+ */
+static void config_vpif_params(struct vpif_params *vpifparams,
+ u8 channel_id, u8 found)
+{
+ const struct vpif_channel_config_params *config = &vpifparams->std_info;
+ u32 value, ch_nip, reg;
+ u8 start, end;
+ int i;
+
+ start = channel_id;
+ end = channel_id + found;
+
+ for (i = start; i < end; i++) {
+ reg = vpifregs[i].ch_ctrl;
+ if (channel_id < 2)
+ ch_nip = VPIF_CAPTURE_CH_NIP;
+ else
+ ch_nip = VPIF_DISPLAY_CH_NIP;
+
+ vpif_wr_bit(reg, ch_nip, config->frm_fmt);
+ vpif_wr_bit(reg, VPIF_CH_YC_MUX_BIT, config->ycmux_mode);
+ vpif_wr_bit(reg, VPIF_CH_INPUT_FIELD_FRAME_BIT,
+ vpifparams->video_params.storage_mode);
+
+ /* Set raster scanning SDR Format */
+ vpif_clr_bit(reg, VPIF_CH_SDR_FMT_BIT);
+ vpif_wr_bit(reg, VPIF_CH_DATA_MODE_BIT, config->capture_format);
+
+ if (channel_id > 1) /* Set the Pixel enable bit */
+ vpif_set_bit(reg, VPIF_DISPLAY_PIX_EN_BIT);
+ else if (config->capture_format) {
+ /* Set the polarity of various pins */
+ vpif_wr_bit(reg, VPIF_CH_FID_POLARITY_BIT,
+ vpifparams->iface.fid_pol);
+ vpif_wr_bit(reg, VPIF_CH_V_VALID_POLARITY_BIT,
+ vpifparams->iface.vd_pol);
+ vpif_wr_bit(reg, VPIF_CH_H_VALID_POLARITY_BIT,
+ vpifparams->iface.hd_pol);
+
+ value = regr(reg);
+ /* Set data width */
+ value &= ~(0x3u <<
+ VPIF_CH_DATA_WIDTH_BIT);
+ value |= ((vpifparams->params.data_sz) <<
+ VPIF_CH_DATA_WIDTH_BIT);
+ regw(value, reg);
+ }
+
+ /* Write the pitch in the driver */
+ regw((vpifparams->video_params.hpitch),
+ vpifregs[i].line_offset);
+ }
+}
+
+/* vpif_set_video_params
+ * This function is used to set the video parameters in the VPIF registers
+ */
+int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id)
+{
+ const struct vpif_channel_config_params *config = &vpifparams->std_info;
+ int found = 1;
+
+ vpif_set_mode_info(config, channel_id, channel_id);
+ if (!config->ycmux_mode) {
+ /* YC are on separate channels (HDTV formats) */
+ vpif_set_mode_info(config, channel_id + 1, channel_id);
+ found = 2;
+ }
+
+ config_vpif_params(vpifparams, channel_id, found);
+
+ regw(0x80, VPIF_REQ_SIZE);
+ regw(0x01, VPIF_EMULATION_CTRL);
+
+ return found;
+}
+EXPORT_SYMBOL(vpif_set_video_params);
+
+void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams,
+ u8 channel_id)
+{
+ u32 value;
+
+ value = 0x3F8 & (vbiparams->hstart0);
+ value |= 0x3FFFFFF & ((vbiparams->vstart0) << 16);
+ regw(value, vpifregs[channel_id].vanc0_strt);
+
+ value = 0x3F8 & (vbiparams->hstart1);
+ value |= 0x3FFFFFF & ((vbiparams->vstart1) << 16);
+ regw(value, vpifregs[channel_id].vanc1_strt);
+
+ value = 0x3F8 & (vbiparams->hsize0);
+ value |= 0x3FFFFFF & ((vbiparams->vsize0) << 16);
+ regw(value, vpifregs[channel_id].vanc0_size);
+
+ value = 0x3F8 & (vbiparams->hsize1);
+ value |= 0x3FFFFFF & ((vbiparams->vsize1) << 16);
+ regw(value, vpifregs[channel_id].vanc1_size);
+
+}
+EXPORT_SYMBOL(vpif_set_vbi_display_params);
+
+int vpif_channel_getfid(u8 channel_id)
+{
+ return (regr(vpifregs[channel_id].ch_ctrl) & VPIF_CH_FID_MASK)
+ >> VPIF_CH_FID_SHIFT;
+}
+EXPORT_SYMBOL(vpif_channel_getfid);
+
+static int vpif_probe(struct platform_device *pdev)
+{
+ static struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vpif_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vpif_base))
+ return PTR_ERR(vpif_base);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get(&pdev->dev);
+
+ spin_lock_init(&vpif_lock);
+ dev_info(&pdev->dev, "vpif probe success\n");
+ return 0;
+}
+
+static int vpif_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int vpif_suspend(struct device *dev)
+{
+ pm_runtime_put(dev);
+ return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+ pm_runtime_get(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops vpif_pm = {
+ .suspend = vpif_suspend,
+ .resume = vpif_resume,
+};
+
+#define vpif_pm_ops (&vpif_pm)
+#else
+#define vpif_pm_ops NULL
+#endif
+
+static struct platform_driver vpif_driver = {
+ .driver = {
+ .name = "vpif",
+ .owner = THIS_MODULE,
+ .pm = vpif_pm_ops,
+ },
+ .remove = vpif_remove,
+ .probe = vpif_probe,
+};
+
+static void vpif_exit(void)
+{
+ platform_driver_unregister(&vpif_driver);
+}
+
+static int __init vpif_init(void)
+{
+ return platform_driver_register(&vpif_driver);
+}
+subsys_initcall(vpif_init);
+module_exit(vpif_exit);
+
diff --git a/drivers/media/platform/davinci/vpif.h b/drivers/media/platform/davinci/vpif.h
new file mode 100644
index 00000000000..9956e678869
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif.h
@@ -0,0 +1,688 @@
+/*
+ * VPIF header file
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef VPIF_H
+#define VPIF_H
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <media/davinci/vpif_types.h>
+
+/* Maximum channel allowed */
+#define VPIF_NUM_CHANNELS (4)
+#define VPIF_CAPTURE_NUM_CHANNELS (2)
+#define VPIF_DISPLAY_NUM_CHANNELS (2)
+
+/* Macros to read/write registers */
+extern void __iomem *vpif_base;
+extern spinlock_t vpif_lock;
+
+#define regr(reg) readl((reg) + vpif_base)
+#define regw(value, reg) writel(value, (reg + vpif_base))
+
+/* Register Address Offsets */
+#define VPIF_PID (0x0000)
+#define VPIF_CH0_CTRL (0x0004)
+#define VPIF_CH1_CTRL (0x0008)
+#define VPIF_CH2_CTRL (0x000C)
+#define VPIF_CH3_CTRL (0x0010)
+
+#define VPIF_INTEN (0x0020)
+#define VPIF_INTEN_SET (0x0024)
+#define VPIF_INTEN_CLR (0x0028)
+#define VPIF_STATUS (0x002C)
+#define VPIF_STATUS_CLR (0x0030)
+#define VPIF_EMULATION_CTRL (0x0034)
+#define VPIF_REQ_SIZE (0x0038)
+
+#define VPIF_CH0_TOP_STRT_ADD_LUMA (0x0040)
+#define VPIF_CH0_BTM_STRT_ADD_LUMA (0x0044)
+#define VPIF_CH0_TOP_STRT_ADD_CHROMA (0x0048)
+#define VPIF_CH0_BTM_STRT_ADD_CHROMA (0x004c)
+#define VPIF_CH0_TOP_STRT_ADD_HANC (0x0050)
+#define VPIF_CH0_BTM_STRT_ADD_HANC (0x0054)
+#define VPIF_CH0_TOP_STRT_ADD_VANC (0x0058)
+#define VPIF_CH0_BTM_STRT_ADD_VANC (0x005c)
+#define VPIF_CH0_SP_CFG (0x0060)
+#define VPIF_CH0_IMG_ADD_OFST (0x0064)
+#define VPIF_CH0_HANC_ADD_OFST (0x0068)
+#define VPIF_CH0_H_CFG (0x006c)
+#define VPIF_CH0_V_CFG_00 (0x0070)
+#define VPIF_CH0_V_CFG_01 (0x0074)
+#define VPIF_CH0_V_CFG_02 (0x0078)
+#define VPIF_CH0_V_CFG_03 (0x007c)
+
+#define VPIF_CH1_TOP_STRT_ADD_LUMA (0x0080)
+#define VPIF_CH1_BTM_STRT_ADD_LUMA (0x0084)
+#define VPIF_CH1_TOP_STRT_ADD_CHROMA (0x0088)
+#define VPIF_CH1_BTM_STRT_ADD_CHROMA (0x008c)
+#define VPIF_CH1_TOP_STRT_ADD_HANC (0x0090)
+#define VPIF_CH1_BTM_STRT_ADD_HANC (0x0094)
+#define VPIF_CH1_TOP_STRT_ADD_VANC (0x0098)
+#define VPIF_CH1_BTM_STRT_ADD_VANC (0x009c)
+#define VPIF_CH1_SP_CFG (0x00a0)
+#define VPIF_CH1_IMG_ADD_OFST (0x00a4)
+#define VPIF_CH1_HANC_ADD_OFST (0x00a8)
+#define VPIF_CH1_H_CFG (0x00ac)
+#define VPIF_CH1_V_CFG_00 (0x00b0)
+#define VPIF_CH1_V_CFG_01 (0x00b4)
+#define VPIF_CH1_V_CFG_02 (0x00b8)
+#define VPIF_CH1_V_CFG_03 (0x00bc)
+
+#define VPIF_CH2_TOP_STRT_ADD_LUMA (0x00c0)
+#define VPIF_CH2_BTM_STRT_ADD_LUMA (0x00c4)
+#define VPIF_CH2_TOP_STRT_ADD_CHROMA (0x00c8)
+#define VPIF_CH2_BTM_STRT_ADD_CHROMA (0x00cc)
+#define VPIF_CH2_TOP_STRT_ADD_HANC (0x00d0)
+#define VPIF_CH2_BTM_STRT_ADD_HANC (0x00d4)
+#define VPIF_CH2_TOP_STRT_ADD_VANC (0x00d8)
+#define VPIF_CH2_BTM_STRT_ADD_VANC (0x00dc)
+#define VPIF_CH2_SP_CFG (0x00e0)
+#define VPIF_CH2_IMG_ADD_OFST (0x00e4)
+#define VPIF_CH2_HANC_ADD_OFST (0x00e8)
+#define VPIF_CH2_H_CFG (0x00ec)
+#define VPIF_CH2_V_CFG_00 (0x00f0)
+#define VPIF_CH2_V_CFG_01 (0x00f4)
+#define VPIF_CH2_V_CFG_02 (0x00f8)
+#define VPIF_CH2_V_CFG_03 (0x00fc)
+#define VPIF_CH2_HANC0_STRT (0x0100)
+#define VPIF_CH2_HANC0_SIZE (0x0104)
+#define VPIF_CH2_HANC1_STRT (0x0108)
+#define VPIF_CH2_HANC1_SIZE (0x010c)
+#define VPIF_CH2_VANC0_STRT (0x0110)
+#define VPIF_CH2_VANC0_SIZE (0x0114)
+#define VPIF_CH2_VANC1_STRT (0x0118)
+#define VPIF_CH2_VANC1_SIZE (0x011c)
+
+#define VPIF_CH3_TOP_STRT_ADD_LUMA (0x0140)
+#define VPIF_CH3_BTM_STRT_ADD_LUMA (0x0144)
+#define VPIF_CH3_TOP_STRT_ADD_CHROMA (0x0148)
+#define VPIF_CH3_BTM_STRT_ADD_CHROMA (0x014c)
+#define VPIF_CH3_TOP_STRT_ADD_HANC (0x0150)
+#define VPIF_CH3_BTM_STRT_ADD_HANC (0x0154)
+#define VPIF_CH3_TOP_STRT_ADD_VANC (0x0158)
+#define VPIF_CH3_BTM_STRT_ADD_VANC (0x015c)
+#define VPIF_CH3_SP_CFG (0x0160)
+#define VPIF_CH3_IMG_ADD_OFST (0x0164)
+#define VPIF_CH3_HANC_ADD_OFST (0x0168)
+#define VPIF_CH3_H_CFG (0x016c)
+#define VPIF_CH3_V_CFG_00 (0x0170)
+#define VPIF_CH3_V_CFG_01 (0x0174)
+#define VPIF_CH3_V_CFG_02 (0x0178)
+#define VPIF_CH3_V_CFG_03 (0x017c)
+#define VPIF_CH3_HANC0_STRT (0x0180)
+#define VPIF_CH3_HANC0_SIZE (0x0184)
+#define VPIF_CH3_HANC1_STRT (0x0188)
+#define VPIF_CH3_HANC1_SIZE (0x018c)
+#define VPIF_CH3_VANC0_STRT (0x0190)
+#define VPIF_CH3_VANC0_SIZE (0x0194)
+#define VPIF_CH3_VANC1_STRT (0x0198)
+#define VPIF_CH3_VANC1_SIZE (0x019c)
+
+#define VPIF_IODFT_CTRL (0x01c0)
+
+/* Functions for bit Manipulation */
+static inline void vpif_set_bit(u32 reg, u32 bit)
+{
+ regw((regr(reg)) | (0x01 << bit), reg);
+}
+
+static inline void vpif_clr_bit(u32 reg, u32 bit)
+{
+ regw(((regr(reg)) & ~(0x01 << bit)), reg);
+}
+
+/* Macro for Generating mask */
+#ifdef GENERATE_MASK
+#undef GENERATE_MASK
+#endif
+
+#define GENERATE_MASK(bits, pos) \
+ ((((0xFFFFFFFF) << (32 - bits)) >> (32 - bits)) << pos)
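
GENERATE_MASK(bits, pos) builds a field of "bits" ones starting at bit "pos"; a few computed expansions make the mask constants below easier to check:

/*
 * Computed expansion examples:
 *   GENERATE_MASK(13, 0) == 0x00001FFF   (VPIF_CH_EAVSAV_MASK, VPIF_CH_WIDTH_MASK)
 *   GENERATE_MASK(12, 0) == 0x00000FFF   (VPIF_CH_LEN_MASK)
 *   GENERATE_MASK(6, 5)  == 0x000007E0   (six ones starting at bit 5)
 */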
+
+/* Bit positions in the channel control registers */
+#define VPIF_CH_DATA_MODE_BIT (2)
+#define VPIF_CH_YC_MUX_BIT (3)
+#define VPIF_CH_SDR_FMT_BIT (4)
+#define VPIF_CH_HANC_EN_BIT (8)
+#define VPIF_CH_VANC_EN_BIT (9)
+
+#define VPIF_CAPTURE_CH_NIP (10)
+#define VPIF_DISPLAY_CH_NIP (11)
+
+#define VPIF_DISPLAY_PIX_EN_BIT (10)
+
+#define VPIF_CH_INPUT_FIELD_FRAME_BIT (12)
+
+#define VPIF_CH_FID_POLARITY_BIT (15)
+#define VPIF_CH_V_VALID_POLARITY_BIT (14)
+#define VPIF_CH_H_VALID_POLARITY_BIT (13)
+#define VPIF_CH_DATA_WIDTH_BIT (28)
+
+#define VPIF_CH_CLK_EDGE_CTRL_BIT (31)
+
+/* Mask various length */
+#define VPIF_CH_EAVSAV_MASK GENERATE_MASK(13, 0)
+#define VPIF_CH_LEN_MASK GENERATE_MASK(12, 0)
+#define VPIF_CH_WIDTH_MASK GENERATE_MASK(13, 0)
+#define VPIF_CH_LEN_SHIFT (16)
+
+/* VPIF masks for registers */
+#define VPIF_REQ_SIZE_MASK (0x1ff)
+
+/* bit positions of interrupts in the vpif_ch_intr register */
+#define VPIF_INTEN_FRAME_CH0 (0x00000001)
+#define VPIF_INTEN_FRAME_CH1 (0x00000002)
+#define VPIF_INTEN_FRAME_CH2 (0x00000004)
+#define VPIF_INTEN_FRAME_CH3 (0x00000008)
+
+/* bit position of clock and channel enable in vpif_chn_ctrl register */
+
+#define VPIF_CH0_CLK_EN (0x00000002)
+#define VPIF_CH0_EN (0x00000001)
+#define VPIF_CH1_CLK_EN (0x00000002)
+#define VPIF_CH1_EN (0x00000001)
+#define VPIF_CH2_CLK_EN (0x00000002)
+#define VPIF_CH2_EN (0x00000001)
+#define VPIF_CH3_CLK_EN (0x00000002)
+#define VPIF_CH3_EN (0x00000001)
+#define VPIF_CH_CLK_EN (0x00000002)
+#define VPIF_CH_EN (0x00000001)
+
+#define VPIF_INT_TOP (0x00)
+#define VPIF_INT_BOTTOM (0x01)
+#define VPIF_INT_BOTH (0x02)
+
+#define VPIF_CH0_INT_CTRL_SHIFT (6)
+#define VPIF_CH1_INT_CTRL_SHIFT (6)
+#define VPIF_CH2_INT_CTRL_SHIFT (6)
+#define VPIF_CH3_INT_CTRL_SHIFT (6)
+#define VPIF_CH_INT_CTRL_SHIFT (6)
+
+#define VPIF_CH2_CLIP_ANC_EN 14
+#define VPIF_CH2_CLIP_ACTIVE_EN 13
+
+#define VPIF_CH3_CLIP_ANC_EN 14
+#define VPIF_CH3_CLIP_ACTIVE_EN 13
+
+/* enable interrupt on both fields in the vpif_ch0_ctrl register */
+#define channel0_intr_assert() (regw((regr(VPIF_CH0_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH0_INT_CTRL_SHIFT)), VPIF_CH0_CTRL))
+
+/* enable interrupt on both fields in the vpif_ch1_ctrl register */
+#define channel1_intr_assert() (regw((regr(VPIF_CH1_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH1_INT_CTRL_SHIFT)), VPIF_CH1_CTRL))
+
+/* enable interrupt on both fields in the vpif_ch2_ctrl register */
+#define channel2_intr_assert() (regw((regr(VPIF_CH2_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH2_INT_CTRL_SHIFT)), VPIF_CH2_CTRL))
+
+/* enable interrupt on both fields in the vpif_ch3_ctrl register */
+#define channel3_intr_assert() (regw((regr(VPIF_CH3_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH3_INT_CTRL_SHIFT)), VPIF_CH3_CTRL))
+
+#define VPIF_CH_FID_MASK (0x20)
+#define VPIF_CH_FID_SHIFT (5)
+
+#define VPIF_NTSC_VBI_START_FIELD0 (1)
+#define VPIF_NTSC_VBI_START_FIELD1 (263)
+#define VPIF_PAL_VBI_START_FIELD0 (624)
+#define VPIF_PAL_VBI_START_FIELD1 (311)
+
+#define VPIF_NTSC_HBI_START_FIELD0 (1)
+#define VPIF_NTSC_HBI_START_FIELD1 (263)
+#define VPIF_PAL_HBI_START_FIELD0 (624)
+#define VPIF_PAL_HBI_START_FIELD1 (311)
+
+#define VPIF_NTSC_VBI_COUNT_FIELD0 (20)
+#define VPIF_NTSC_VBI_COUNT_FIELD1 (19)
+#define VPIF_PAL_VBI_COUNT_FIELD0 (24)
+#define VPIF_PAL_VBI_COUNT_FIELD1 (25)
+
+#define VPIF_NTSC_HBI_COUNT_FIELD0 (263)
+#define VPIF_NTSC_HBI_COUNT_FIELD1 (262)
+#define VPIF_PAL_HBI_COUNT_FIELD0 (312)
+#define VPIF_PAL_HBI_COUNT_FIELD1 (313)
+
+#define VPIF_NTSC_VBI_SAMPLES_PER_LINE (720)
+#define VPIF_PAL_VBI_SAMPLES_PER_LINE (720)
+#define VPIF_NTSC_HBI_SAMPLES_PER_LINE (268)
+#define VPIF_PAL_HBI_SAMPLES_PER_LINE (280)
+
+#define VPIF_CH_VANC_EN (0x20)
+#define VPIF_DMA_REQ_SIZE (0x080)
+#define VPIF_EMULATION_DISABLE (0x01)
+
+extern u8 irq_vpif_capture_channel[VPIF_NUM_CHANNELS];
+
+/* inline function to enable/disable channel0 */
+static inline void enable_channel0(int enable)
+{
+ if (enable)
+ regw((regr(VPIF_CH0_CTRL) | (VPIF_CH0_EN)), VPIF_CH0_CTRL);
+ else
+ regw((regr(VPIF_CH0_CTRL) & (~VPIF_CH0_EN)), VPIF_CH0_CTRL);
+}
+
+/* inline function to enable/disable channel1 */
+static inline void enable_channel1(int enable)
+{
+ if (enable)
+ regw((regr(VPIF_CH1_CTRL) | (VPIF_CH1_EN)), VPIF_CH1_CTRL);
+ else
+ regw((regr(VPIF_CH1_CTRL) & (~VPIF_CH1_EN)), VPIF_CH1_CTRL);
+}
+
+/* inline function to enable interrupt for channel0 */
+static inline void channel0_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH0), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH0)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable interrupt for channel1 */
+static inline void channel1_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH1), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH1)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to set buffer addresses in case of Y/C non mux mode */
+static inline void ch0_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for video data */
+static inline void ch0_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH0_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH0_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch1_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+
+ regw(top_strt_luma, VPIF_CH1_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH1_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch0_set_vbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_VANC);
+ regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch0_set_hbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_HANC);
+ regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_HANC);
+}
+
+static inline void ch1_set_vbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_VANC);
+ regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch1_set_hbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_HANC);
+ regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_HANC);
+}
+
+/* Inline function to disable raw vbi in the given channel */
+static inline void disable_raw_feature(u8 channel_id, u8 index)
+{
+ u32 ctrl_reg;
+ if (0 == channel_id)
+ ctrl_reg = VPIF_CH0_CTRL;
+ else
+ ctrl_reg = VPIF_CH1_CTRL;
+
+ if (1 == index)
+ vpif_clr_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+ else
+ vpif_clr_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+static inline void enable_raw_feature(u8 channel_id, u8 index)
+{
+ u32 ctrl_reg;
+ if (0 == channel_id)
+ ctrl_reg = VPIF_CH0_CTRL;
+ else
+ ctrl_reg = VPIF_CH1_CTRL;
+
+ if (1 == index)
+ vpif_set_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+ else
+ vpif_set_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+/* inline function to enable/disable channel2 */
+static inline void enable_channel2(int enable)
+{
+ if (enable) {
+ regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+ regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_EN)), VPIF_CH2_CTRL);
+ } else {
+ regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+ regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_EN)), VPIF_CH2_CTRL);
+ }
+}
+
+/* inline function to enable/disable channel3 */
+static inline void enable_channel3(int enable)
+{
+ if (enable) {
+ regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL);
+ regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_EN)), VPIF_CH3_CTRL);
+ } else {
+ regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL);
+ regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_EN)), VPIF_CH3_CTRL);
+ }
+}
+
+/* inline function to enable interrupt for channel2 */
+static inline void channel2_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH2), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH2)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable interrupt for channel3 */
+static inline void channel3_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH3), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH3)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable raw vbi data for channel2 */
+static inline void channel2_raw_enable(int enable, u8 index)
+{
+ u32 mask;
+
+ if (1 == index)
+ mask = VPIF_CH_VANC_EN_BIT;
+ else
+ mask = VPIF_CH_HANC_EN_BIT;
+
+ if (enable)
+ vpif_set_bit(VPIF_CH2_CTRL, mask);
+ else
+ vpif_clr_bit(VPIF_CH2_CTRL, mask);
+}
+
+/* inline function to enable raw vbi data for channel3 */
+static inline void channel3_raw_enable(int enable, u8 index)
+{
+ u32 mask;
+
+ if (1 == index)
+ mask = VPIF_CH_VANC_EN_BIT;
+ else
+ mask = VPIF_CH_HANC_EN_BIT;
+
+ if (enable)
+ vpif_set_bit(VPIF_CH3_CTRL, mask);
+ else
+ vpif_clr_bit(VPIF_CH3_CTRL, mask);
+}
+
+/* function to enable clipping (for both active and blanking regions) on ch 2 */
+static inline void channel2_clipping_enable(int enable)
+{
+ if (enable) {
+ vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+ vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+ } else {
+ vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+ vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+ }
+}
+
+/* function to enable clipping (for both active and blanking regions) on ch 3 */
+static inline void channel3_clipping_enable(int enable)
+{
+ if (enable) {
+ vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+ vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+ } else {
+ vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+ vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+ }
+}
+
+/* inline function to set buffer addresses in case of Y/C non mux mode */
+static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for video data */
+static inline void ch2_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH2_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH2_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch3_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for vbi data */
+static inline void ch2_set_vbi_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_VANC);
+ regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch3_set_vbi_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_VANC);
+ regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_VANC);
+}
+
+static inline int vpif_intr_status(int channel)
+{
+ int status = 0;
+ int mask;
+
+ if (channel < 0 || channel > 3)
+ return 0;
+
+ mask = 1 << channel;
+ status = regr(VPIF_STATUS) & mask;
+ regw(status, VPIF_STATUS_CLR);
+
+ return status;
+}
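
vpif_intr_status() both reports and clears the frame interrupt for a channel, so an interrupt handler normally calls it once and bails out when the bit was not set, as the capture/display drivers' ISRs would. The following is a hedged sketch of that pattern; the handler name and the way the channel number is passed through dev_id are assumptions, not the driver's actual ISR.

#include <linux/interrupt.h>
#include <linux/printk.h>

static irqreturn_t example_vpif_isr(int irq, void *dev_id)
{
	int channel_id = *(int *)dev_id;	/* assumed cookie layout */

	/* Reading the status also clears it via VPIF_STATUS_CLR, so call
	 * vpif_intr_status() exactly once per interrupt. */
	if (!vpif_intr_status(channel_id))
		return IRQ_NONE;		/* raised by another channel */

	/* The FID bit distinguishes the two fields of an interlaced frame */
	pr_debug("vpif: ch%d: field %d complete\n", channel_id,
		 vpif_channel_getfid(channel_id));

	return IRQ_HANDLED;
}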
+
+#define VPIF_MAX_NAME (30)
+
+/* This structure will store size parameters as per the mode selected by user */
+struct vpif_channel_config_params {
+ char name[VPIF_MAX_NAME]; /* Name of the mode */
+ u16 width; /* Indicates width of the image */
+ u16 height; /* Indicates height of the image */
+ u8 frm_fmt; /* Interlaced (0) or progressive (1) */
+ u8 ycmux_mode; /* This mode requires one (0) or two (1)
+ channels */
+ u16 eav2sav; /* length of eav 2 sav */
+ u16 sav2eav; /* length of sav 2 eav */
+ u16 l1, l3, l5, l7, l9, l11; /* Other parameter configurations */
+ u16 vsize; /* Vertical size of the image */
+ u8 capture_format; /* Indicates whether capture format
+ * is in BT or in CCD/CMOS */
+ u8 vbi_supported; /* Indicates whether this mode
+ * supports capturing vbi or not */
+ u8 hd_sd; /* HDTV (1) or SDTV (0) format */
+ v4l2_std_id stdid; /* SDTV format */
+ struct v4l2_dv_timings dv_timings; /* HDTV format */
+};
+
+extern const unsigned int vpif_ch_params_count;
+extern const struct vpif_channel_config_params vpif_ch_params[];
+
+struct vpif_video_params;
+struct vpif_params;
+struct vpif_vbi_params;
+
+int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id);
+void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams,
+ u8 channel_id);
+int vpif_channel_getfid(u8 channel_id);
+
+enum data_size {
+ _8BITS = 0,
+ _10BITS,
+ _12BITS,
+};
+
+/* Structure for vpif parameters for raw vbi data */
+struct vpif_vbi_params {
+ __u32 hstart0; /* Horizontal start of raw vbi data for first field */
+ __u32 vstart0; /* Vertical start of raw vbi data for first field */
+ __u32 hsize0; /* Horizontal size of raw vbi data for first field */
+ __u32 vsize0; /* Vertical size of raw vbi data for first field */
+ __u32 hstart1; /* Horizontal start of raw vbi data for second field */
+ __u32 vstart1; /* Vertical start of raw vbi data for second field */
+ __u32 hsize1; /* Horizontal size of raw vbi data for second field */
+ __u32 vsize1; /* Vertical size of raw vbi data for second field */
+};
+
+/* structure for vpif parameters */
+struct vpif_video_params {
+ __u8 storage_mode; /* Indicates field or frame mode */
+ unsigned long hpitch;
+ v4l2_std_id stdid;
+};
+
+struct vpif_params {
+ struct vpif_interface iface;
+ struct vpif_video_params video_params;
+ struct vpif_channel_config_params std_info;
+ union param {
+ struct vpif_vbi_params vbi_params;
+ enum data_size data_sz;
+ } params;
+};
+
+#endif /* End of #ifndef VPIF_H */
+
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
new file mode 100644
index 00000000000..1e4ec697fb1
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -0,0 +1,1809 @@
+/*
+ * Copyright (C) 2009 Texas Instruments Inc
+ * Copyright (C) 2014 Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * TODO : add support for VBI & HBI data service
+ * add static buffer allocation
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "vpif.h"
+#include "vpif_capture.h"
+
+MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_CAPTURE_VERSION);
+
+#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
+#define vpif_dbg(level, debug, fmt, arg...) \
+ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)
+
+static int debug = 1;
+static u32 ch0_numbuffers = 3;
+static u32 ch1_numbuffers = 3;
+static u32 ch0_bufsize = 1920 * 1080 * 2;
+static u32 ch1_bufsize = 720 * 576 * 2;
+
+module_param(debug, int, 0644);
+module_param(ch0_numbuffers, uint, S_IRUGO);
+module_param(ch1_numbuffers, uint, S_IRUGO);
+module_param(ch0_bufsize, uint, S_IRUGO);
+module_param(ch1_bufsize, uint, S_IRUGO);
+
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+MODULE_PARM_DESC(ch0_numbuffers, "Channel0 buffer count (default:3)");
+MODULE_PARM_DESC(ch1_numbuffers, "Channel1 buffer count (default:3)");
+MODULE_PARM_DESC(ch0_bufsize, "Channel0 buffer size (default:1920 x 1080 x 2)");
+MODULE_PARM_DESC(ch1_bufsize, "Channel1 buffer size (default:720 x 576 x 2)");
+
+static struct vpif_config_params config_params = {
+ .min_numbuffers = 3,
+ .numbuffers[0] = 3,
+ .numbuffers[1] = 3,
+ .min_bufsize[0] = 720 * 480 * 2,
+ .min_bufsize[1] = 720 * 480 * 2,
+ .channel_bufsize[0] = 1920 * 1080 * 2,
+ .channel_bufsize[1] = 720 * 576 * 2,
+};
+
+#define VPIF_DRIVER_NAME "vpif_capture"
+
+/* global variables */
+static struct vpif_device vpif_obj = { {NULL} };
+static struct device *vpif_dev;
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
+
+static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} };
+
+/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
+static int ycmux_mode;
+
+static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct vpif_cap_buffer, vb);
+}
+
+/**
+ * vpif_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the buffer-prepare callback, invoked when vb2_qbuf() is called.
+ * The buffer is prepared and the user-space virtual address is converted
+ * into a physical (DMA) address
+ */
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct channel_obj *ch = vb2_get_drv_priv(q);
+ struct common_obj *common;
+ unsigned long addr;
+
+ vpif_dbg(2, debug, "vpif_buffer_prepare\n");
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vb->v4l2_buf.field = common->fmt.fmt.pix.field;
+
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
+ !IS_ALIGNED((addr + common->ybtm_off), 8) ||
+ !IS_ALIGNED((addr + common->ctop_off), 8) ||
+ !IS_ALIGNED((addr + common->cbtm_off), 8)) {
+ vpif_dbg(1, debug, "offset is not aligned\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpif_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains number of distinct video planes needed to hold a frame
+ * @sizes: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
+ *
+ * This callback function is called when reqbuf() is called to adjust
+ * the buffer count and buffer size
+ */
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vpif_dbg(2, debug, "vpif_buffer_setup\n");
+
+ if (fmt && fmt->fmt.pix.sizeimage < common->fmt.fmt.pix.sizeimage)
+ return -EINVAL;
+
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ *nplanes = 1;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : common->fmt.fmt.pix.sizeimage;
+ alloc_ctxs[0] = common->alloc_ctx;
+
+ /* Calculate the offset for Y and C data in the buffer */
+ vpif_calculate_offsets(ch);
+
+ return 0;
+}
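+/*
+ * For illustration: with an empty queue, a VIDIOC_REQBUFS request for a
+ * single buffer ends up allocating three, since queue_setup() above raises
+ * *nbuffers so that vq->num_buffers + *nbuffers is at least 3.
+ */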
+
+/**
+ * vpif_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ */
+static void vpif_buffer_queue(struct vb2_buffer *vb)
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpif_cap_buffer *buf = to_vpif_buffer(vb);
+ struct common_obj *common;
+ unsigned long flags;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vpif_dbg(2, debug, "vpif_buffer_queue\n");
+
+ spin_lock_irqsave(&common->irqlock, flags);
+ /* add the buffer to the DMA queue */
+ list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+/**
+ * vpif_start_streaming : Starts the DMA engine for streaming
+ * @vq: ptr to vb2_queue
+ * @count: number of buffers
+ */
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpif_capture_config *vpif_config_data =
+ vpif_dev->platform_data;
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpif = &ch->vpifparams;
+ struct vpif_cap_buffer *buf, *tmp;
+ unsigned long addr, flags;
+ int ret;
+
+ spin_lock_irqsave(&common->irqlock, flags);
+
+ /* Initialize field_id */
+ ch->field_id = 0;
+
+ /* configure 1 or 2 channel mode */
+ if (vpif_config_data->setup_input_channel_mode) {
+ ret = vpif_config_data->
+ setup_input_channel_mode(vpif->std_info.ycmux_mode);
+ if (ret < 0) {
+ vpif_dbg(1, debug, "can't set vpif channel mode\n");
+ goto err;
+ }
+ }
+
+ ret = v4l2_subdev_call(ch->sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+ vpif_dbg(1, debug, "stream on failed in subdev\n");
+ goto err;
+ }
+
+ /* Call vpif_set_params function to set the parameters and addresses */
+ ret = vpif_set_video_params(vpif, ch->channel_id);
+ if (ret < 0) {
+ vpif_dbg(1, debug, "can't set video params\n");
+ goto err;
+ }
+
+ ycmux_mode = ret;
+ vpif_config_addr(ch, ret);
+
+ /* Get the next frame from the buffer queue */
+ common->cur_frm = common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+ /* Mark state of the current frame to active */
+ common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+
+ common->set_addr(addr + common->ytop_off,
+ addr + common->ybtm_off,
+ addr + common->ctop_off,
+ addr + common->cbtm_off);
+
+ /*
+ * Enable interrupts for both fields and enable the channel in the
+ * VPIF registers
+ */
+ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+ channel0_intr_assert();
+ channel0_intr_enable(1);
+ enable_channel0(1);
+ }
+ if (VPIF_CHANNEL1_VIDEO == ch->channel_id ||
+ ycmux_mode == 2) {
+ channel1_intr_assert();
+ channel1_intr_enable(1);
+ enable_channel1(1);
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+
+ return ret;
+}
+
+/**
+ * vpif_stop_streaming : Stop the DMA engine
+ * @vq: ptr to vb2_queue
+ *
+ * This callback stops the DMA engine and any remaining buffers
+ * in the DMA queue are released.
+ */
+static void vpif_stop_streaming(struct vb2_queue *vq)
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common;
+ unsigned long flags;
+ int ret;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* Disable channel as per its device type and channel id */
+ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+ enable_channel0(0);
+ channel0_intr_enable(0);
+ }
+ if (VPIF_CHANNEL1_VIDEO == ch->channel_id ||
+ ycmux_mode == 2) {
+ enable_channel1(0);
+ channel1_intr_enable(0);
+ }
+
+ ycmux_mode = 0;
+
+ ret = v4l2_subdev_call(ch->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ vpif_dbg(1, debug, "stream off failed in subdev\n");
+
+ /* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
+ if (common->cur_frm == common->next_frm) {
+ vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (common->cur_frm != NULL)
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (common->next_frm != NULL)
+ vb2_buffer_done(&common->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&common->dma_queue)) {
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ list_del(&common->next_frm->list);
+ vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpif_buffer_queue_setup,
+ .buf_prepare = vpif_buffer_prepare,
+ .start_streaming = vpif_start_streaming,
+ .stop_streaming = vpif_stop_streaming,
+ .buf_queue = vpif_buffer_queue,
+};
+
+/**
+ * vpif_process_buffer_complete: process a completed buffer
+ * @common: ptr to common channel object
+ *
+ * This function time stamps the buffer and marks it as DONE. It also
+ * wakes up any process waiting on the QUEUE and sets the next buffer
+ * as the current one
+ */
+static void vpif_process_buffer_complete(struct common_obj *common)
+{
+ v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_DONE);
+ /* Make curFrm pointing to nextFrm */
+ common->cur_frm = common->next_frm;
+}
+
+/**
+ * vpif_schedule_next_buffer: set next buffer address for capture
+ * @common : ptr to common channel object
+ *
+ * This function will get the next buffer from the DMA queue and
+ * set the buffer address in the VPIF registers for capture.
+ * The buffer is marked active.
+ */
+static void vpif_schedule_next_buffer(struct common_obj *common)
+{
+ unsigned long addr = 0;
+
+ spin_lock(&common->irqlock);
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ /* Remove that buffer from the buffer queue */
+ list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
+ common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+
+ /* Set top and bottom field addresses in VPIF registers */
+ common->set_addr(addr + common->ytop_off,
+ addr + common->ybtm_off,
+ addr + common->ctop_off,
+ addr + common->cbtm_off);
+}
+
+/**
+ * vpif_channel_isr : ISR handler for vpif capture
+ * @irq: irq number
+ * @dev_id: dev_id ptr
+ *
+ * It changes status of the captured buffer, takes next buffer from the queue
+ * and sets its address in VPIF registers
+ */
+static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
+{
+ struct vpif_device *dev = &vpif_obj;
+ struct common_obj *common;
+ struct channel_obj *ch;
+ enum v4l2_field field;
+ int channel_id = 0;
+ int fid = -1, i;
+
+ channel_id = *(int *)(dev_id);
+ if (!vpif_intr_status(channel_id))
+ return IRQ_NONE;
+
+ ch = dev->dev[channel_id];
+
+ field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
+
+ for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) {
+ common = &ch->common[i];
+ /* skip if streaming is not started in this channel */
+ /* Check the field format */
+ if (1 == ch->vpifparams.std_info.frm_fmt) {
+ /* Progressive mode */
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
+ continue;
+ }
+ spin_unlock(&common->irqlock);
+
+ if (!channel_first_int[i][channel_id])
+ vpif_process_buffer_complete(common);
+
+ channel_first_int[i][channel_id] = 0;
+
+ vpif_schedule_next_buffer(common);
+ } else {
+ /**
+ * Interlaced mode. If it is first interrupt, ignore
+ * it
+ */
+ if (channel_first_int[i][channel_id]) {
+ channel_first_int[i][channel_id] = 0;
+ continue;
+ }
+ if (0 == i) {
+ ch->field_id ^= 1;
+ /* Get field id from VPIF registers */
+ fid = vpif_channel_getfid(ch->channel_id);
+ if (fid != ch->field_id) {
+ /**
+ * If field id does not match stored
+ * field id, make them in sync
+ */
+ if (0 == fid)
+ ch->field_id = fid;
+ return IRQ_HANDLED;
+ }
+ }
+ /* device field id and local field id are in sync */
+ if (0 == fid) {
+ /* this is even field */
+ if (common->cur_frm == common->next_frm)
+ continue;
+
+ /* mark the current buffer as done */
+ vpif_process_buffer_complete(common);
+ } else if (1 == fid) {
+ /* odd field */
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue) ||
+ (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
+ continue;
+ }
+ spin_unlock(&common->irqlock);
+
+ vpif_schedule_next_buffer(common);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
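+/*
+ * Note on the interlaced path above: a frame completes over two interrupts.
+ * On the even-field interrupt (fid == 0) the just-captured buffer is marked
+ * done, and on the following odd-field interrupt the next buffer from
+ * dma_queue is programmed into the VPIF, so cur_frm and next_frm advance in
+ * lock step with the field id.
+ */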
+
+/**
+ * vpif_update_std_info() - update standard related info
+ * @ch: ptr to channel object
+ *
+ * For a given standard selected by application, update values
+ * in the device data structures
+ */
+static int vpif_update_std_info(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ const struct vpif_channel_config_params *config;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ int index;
+
+ vpif_dbg(2, debug, "vpif_update_std_info\n");
+
+ for (index = 0; index < vpif_ch_params_count; index++) {
+ config = &vpif_ch_params[index];
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ } else {
+ vpif_dbg(2, debug, "HD format\n");
+ if (!memcmp(&config->dv_timings, &vid_ch->dv_timings,
+ sizeof(vid_ch->dv_timings))) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ }
+ }
+
+ /* standard not found */
+ if (index == vpif_ch_params_count)
+ return -EINVAL;
+
+ common->fmt.fmt.pix.width = std_info->width;
+ common->width = std_info->width;
+ common->fmt.fmt.pix.height = std_info->height;
+ common->height = std_info->height;
+ common->fmt.fmt.pix.bytesperline = std_info->width;
+ vpifparams->video_params.hpitch = std_info->width;
+ vpifparams->video_params.storage_mode = std_info->frm_fmt;
+
+ return 0;
+}
+
+/**
+ * vpif_calculate_offsets : This function calculates buffers offsets
+ * @ch : ptr to channel object
+ *
+ * This function calculates buffer offsets for Y and C in the top and
+ * bottom field
+ */
+static void vpif_calculate_offsets(struct channel_obj *ch)
+{
+ unsigned int hpitch, vpitch, sizeimage;
+ struct video_obj *vid_ch = &(ch->video);
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ enum v4l2_field field = common->fmt.fmt.pix.field;
+
+ vpif_dbg(2, debug, "vpif_calculate_offsets\n");
+
+ if (V4L2_FIELD_ANY == field) {
+ if (vpifparams->std_info.frm_fmt)
+ vid_ch->buf_field = V4L2_FIELD_NONE;
+ else
+ vid_ch->buf_field = V4L2_FIELD_INTERLACED;
+ } else
+ vid_ch->buf_field = common->fmt.fmt.pix.field;
+
+ sizeimage = common->fmt.fmt.pix.sizeimage;
+
+ hpitch = common->fmt.fmt.pix.bytesperline;
+ vpitch = sizeimage / (hpitch * 2);
+
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+ /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+ common->ytop_off = 0;
+ common->ybtm_off = hpitch;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = sizeimage / 2 + hpitch;
+ } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
+ /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+ common->ytop_off = 0;
+ common->ybtm_off = sizeimage / 4;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = common->ctop_off + sizeimage / 4;
+ } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
+ /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+ common->ybtm_off = 0;
+ common->ytop_off = sizeimage / 4;
+ common->cbtm_off = sizeimage / 2;
+ common->ctop_off = common->cbtm_off + sizeimage / 4;
+ }
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field))
+ vpifparams->video_params.storage_mode = 1;
+ else
+ vpifparams->video_params.storage_mode = 0;
+
+ if (1 == vpifparams->std_info.frm_fmt)
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ else {
+ if ((field == V4L2_FIELD_ANY)
+ || (field == V4L2_FIELD_INTERLACED))
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline * 2;
+ else
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ }
+
+ ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid;
+}
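+/*
+ * Worked example (assuming the PAL defaults above): with
+ * bytesperline = 720, sizeimage = 720 * 576 * 2 = 829440 and
+ * V4L2_FIELD_INTERLACED, the offsets come out as ytop_off = 0,
+ * ybtm_off = 720, ctop_off = 414720 and cbtm_off = 415440, i.e. the two
+ * fields are interleaved line by line within each of the Y and C halves
+ * of the buffer.
+ */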
+
+/**
+ * vpif_config_format: configure default frame format in the device
+ * @ch: ptr to channel object
+ */
+static void vpif_config_format(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vpif_dbg(2, debug, "vpif_config_format\n");
+
+ common->fmt.fmt.pix.field = V4L2_FIELD_ANY;
+ common->fmt.fmt.pix.sizeimage
+ = config_params.channel_bufsize[ch->channel_id];
+
+ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
+ common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
+ else
+ common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+ common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+}
+
+/**
+ * vpif_get_default_field() - Get default field type based on interface
+ * @iface - ptr to vpif interface
+ */
+static inline enum v4l2_field vpif_get_default_field(
+ struct vpif_interface *iface)
+{
+ return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE :
+ V4L2_FIELD_INTERLACED;
+}
+
+/**
+ * vpif_check_format() - check given pixel format for compatibility
+ * @ch - channel ptr
+ * @pixfmt - Given pixel format
+ * @update - update the values as per hardware requirement
+ *
+ * Check the application pixel format for S_FMT and update the input
+ * values as per hardware limits for TRY_FMT. The default pixel and
+ * field format is selected based on interface type.
+ */
+static int vpif_check_format(struct channel_obj *ch,
+ struct v4l2_pix_format *pixfmt,
+ int update)
+{
+ struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+ struct vpif_params *vpif_params = &ch->vpifparams;
+ enum v4l2_field field = pixfmt->field;
+ u32 sizeimage, hpitch, vpitch;
+ int ret = -EINVAL;
+
+ vpif_dbg(2, debug, "vpif_check_format\n");
+ /**
+ * first check for the pixel format. If if_type is Raw bayer,
+ * only V4L2_PIX_FMT_SBGGR8 format is supported. Otherwise only
+ * V4L2_PIX_FMT_YUV422P is supported
+ */
+ if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) {
+ if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8) {
+ if (!update) {
+ vpif_dbg(2, debug, "invalid pix format\n");
+ goto exit;
+ }
+ pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ }
+ } else {
+ if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) {
+ if (!update) {
+ vpif_dbg(2, debug, "invalid pixel format\n");
+ goto exit;
+ }
+ pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+ }
+ }
+
+ if (!(VPIF_VALID_FIELD(field))) {
+ if (!update) {
+ vpif_dbg(2, debug, "invalid field format\n");
+ goto exit;
+ }
+ /**
+ * By default use FIELD_NONE for RAW Bayer capture
+ * and FIELD_INTERLACED for other interfaces
+ */
+ field = vpif_get_default_field(&vpif_params->iface);
+ } else if (field == V4L2_FIELD_ANY)
+ /* unsupported field. Use default */
+ field = vpif_get_default_field(&vpif_params->iface);
+
+ /* validate the hpitch */
+ hpitch = pixfmt->bytesperline;
+ if (hpitch < vpif_params->std_info.width) {
+ if (!update) {
+ vpif_dbg(2, debug, "invalid hpitch\n");
+ goto exit;
+ }
+ hpitch = vpif_params->std_info.width;
+ }
+
+ sizeimage = pixfmt->sizeimage;
+
+ vpitch = sizeimage / (hpitch * 2);
+
+ /* validate the vpitch */
+ if (vpitch < vpif_params->std_info.height) {
+ if (!update) {
+ vpif_dbg(2, debug, "Invalid vpitch\n");
+ goto exit;
+ }
+ vpitch = vpif_params->std_info.height;
+ }
+
+ /* Check for 8 byte alignment */
+ if (!IS_ALIGNED(hpitch, 8)) {
+ if (!update) {
+ vpif_dbg(2, debug, "invalid pitch alignment\n");
+ goto exit;
+ }
+ /* adjust to next 8 byte boundary */
+ hpitch = (((hpitch + 7) / 8) * 8);
+ }
+ /* if update is set, modify the bytesperline and sizeimage */
+ if (update) {
+ pixfmt->bytesperline = hpitch;
+ pixfmt->sizeimage = hpitch * vpitch * 2;
+ }
+ /**
+ * Image width and height is always based on current standard width and
+ * height
+ */
+ pixfmt->width = common->fmt.fmt.pix.width;
+ pixfmt->height = common->fmt.fmt.pix.height;
+ return 0;
+exit:
+ return ret;
+}
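+/*
+ * Worked example (assuming an SD standard such as PAL, 720x576, is
+ * selected): a TRY_FMT with bytesperline = 640 and
+ * sizeimage = 640 * 480 * 2 gets bytesperline raised to the standard
+ * width (720), vpitch (614400 / 1440 = 426) raised to 576, and the
+ * returned sizeimage becomes 720 * 576 * 2 = 829440.
+ */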
+
+/**
+ * vpif_config_addr() - function to configure buffer address in vpif
+ * @ch - channel ptr
+ * @muxmode - channel mux mode
+ */
+static void vpif_config_addr(struct channel_obj *ch, int muxmode)
+{
+ struct common_obj *common;
+
+ vpif_dbg(2, debug, "vpif_config_addr\n");
+
+ common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+ if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
+ common->set_addr = ch1_set_videobuf_addr;
+ else if (2 == muxmode)
+ common->set_addr = ch0_set_videobuf_addr_yc_nmux;
+ else
+ common->set_addr = ch0_set_videobuf_addr;
+}
+
+/**
+ * vpif_input_to_subdev() - Maps input to sub device
+ * @vpif_cfg - global config ptr
+ * @chan_cfg - channel config ptr
+ * @input_index - Given input index from application
+ *
+ * Lookup the sub device information for a given input index.
+ * We report all the inputs to the application. The inputs table also
+ * has the sub device name for each input.
+ */
+static int vpif_input_to_subdev(
+ struct vpif_capture_config *vpif_cfg,
+ struct vpif_capture_chan_config *chan_cfg,
+ int input_index)
+{
+ struct vpif_subdev_info *subdev_info;
+ const char *subdev_name;
+ int i;
+
+ vpif_dbg(2, debug, "vpif_input_to_subdev\n");
+
+ subdev_name = chan_cfg->inputs[input_index].subdev_name;
+ if (subdev_name == NULL)
+ return -1;
+
+ /* loop through the sub device list to get the sub device info */
+ for (i = 0; i < vpif_cfg->subdev_count; i++) {
+ subdev_info = &vpif_cfg->subdev_info[i];
+ if (!strcmp(subdev_info->name, subdev_name))
+ return i;
+ }
+ return -1;
+}
+
+/**
+ * vpif_set_input() - Select an input
+ * @vpif_cfg - global config ptr
+ * @ch - channel
+ * @index - Given input index from application
+ *
+ * Select the given input.
+ */
+static int vpif_set_input(
+ struct vpif_capture_config *vpif_cfg,
+ struct channel_obj *ch,
+ int index)
+{
+ struct vpif_capture_chan_config *chan_cfg =
+ &vpif_cfg->chan_config[ch->channel_id];
+ struct vpif_subdev_info *subdev_info = NULL;
+ struct v4l2_subdev *sd = NULL;
+ u32 input = 0, output = 0;
+ int sd_index;
+ int ret;
+
+ sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index);
+ if (sd_index >= 0) {
+ sd = vpif_obj.sd[sd_index];
+ subdev_info = &vpif_cfg->subdev_info[sd_index];
+ }
+
+ /* first setup input path from sub device to vpif */
+ if (sd && vpif_cfg->setup_input_path) {
+ ret = vpif_cfg->setup_input_path(ch->channel_id,
+ subdev_info->name);
+ if (ret < 0) {
+ vpif_dbg(1, debug, "couldn't setup input path for the" \
+ " sub device %s, for input index %d\n",
+ subdev_info->name, index);
+ return ret;
+ }
+ }
+
+ if (sd) {
+ input = chan_cfg->inputs[index].input_route;
+ output = chan_cfg->inputs[index].output_route;
+ ret = v4l2_subdev_call(sd, video, s_routing,
+ input, output, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ vpif_dbg(1, debug, "Failed to set input\n");
+ return ret;
+ }
+ }
+ ch->input_idx = index;
+ ch->sd = sd;
+ /* copy interface parameters to vpif */
+ ch->vpifparams.iface = chan_cfg->vpif_if;
+
+ /* update tvnorms from the sub device input info */
+ ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
+ return 0;
+}
+
+/**
+ * vpif_querystd() - querystd handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std_id: ptr to std id
+ *
+ * This function is called to detect standard at the selected input
+ */
+static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ int ret = 0;
+
+ vpif_dbg(2, debug, "vpif_querystd\n");
+
+ /* Call querystd function of decoder device */
+ ret = v4l2_subdev_call(ch->sd, video, querystd, std_id);
+
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -ENODATA;
+ if (ret) {
+ vpif_dbg(1, debug, "Failed to query standard for sub devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vpif_g_std() - get STD handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std: ptr to std id
+ */
+static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+
+ vpif_dbg(2, debug, "vpif_g_std\n");
+
+ if (config->chan_config[ch->channel_id].inputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_STD)
+ return -ENODATA;
+
+ *std = ch->video.stdid;
+ return 0;
+}
+
+/**
+ * vpif_s_std() - set STD handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std_id: v4l2 std id to be set
+ */
+static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ vpif_dbg(2, debug, "vpif_s_std\n");
+
+ if (config->chan_config[ch->channel_id].inputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_STD)
+ return -ENODATA;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ /* Store the new standard in the channel object */
+ ch->video.stdid = std_id;
+ memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+
+ /* Get the information about the standard */
+ if (vpif_update_std_info(ch)) {
+ vpif_err("Error getting the standard info\n");
+ return -EINVAL;
+ }
+
+ /* Configure the default format information */
+ vpif_config_format(ch);
+
+ /* set standard in the sub device */
+ ret = v4l2_subdev_call(ch->sd, video, s_std, std_id);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+ vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * vpif_enum_input() - ENUMINPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @input: ptr to input structure
+ */
+static int vpif_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+
+ if (input->index >= chan_cfg->input_count) {
+ vpif_dbg(1, debug, "Invalid input index\n");
+ return -EINVAL;
+ }
+
+ memcpy(input, &chan_cfg->inputs[input->index].input,
+ sizeof(*input));
+ return 0;
+}
+
+/**
+ * vpif_g_input() - Get INPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @index: ptr to input index
+ */
+static int vpif_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+
+ *index = ch->input_idx;
+ return 0;
+}
+
+/**
+ * vpif_s_input() - Set INPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @index: input index
+ */
+static int vpif_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_capture_chan_config *chan_cfg;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+
+ if (index >= chan_cfg->input_count)
+ return -EINVAL;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ return vpif_set_input(config, ch, index);
+}
+
+/**
+ * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format descriptor structure
+ */
+static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+
+ if (fmt->index != 0) {
+ vpif_dbg(1, debug, "Invalid format index\n");
+ return -EINVAL;
+ }
+
+ /* Fill in the information about format */
+ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) {
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb");
+ fmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ } else {
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ strcpy(fmt->description, "YCbCr4:2:2 YC Planar");
+ fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+ }
+ return 0;
+}
+
+/**
+ * vpif_try_fmt_vid_cap() - TRY_FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ return vpif_check_format(ch, pixfmt, 1);
+}
+
+
+/**
+ * vpif_g_fmt_vid_cap() - G_FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* Check the validity of the buffer type */
+ if (common->fmt.type != fmt->type)
+ return -EINVAL;
+
+ /* Fill in the information about format */
+ *fmt = common->fmt;
+ return 0;
+}
+
+/**
+ * vpif_s_fmt_vid_cap() - Set FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct v4l2_pix_format *pixfmt;
+ int ret = 0;
+
+ vpif_dbg(2, debug, "%s\n", __func__);
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ pixfmt = &fmt->fmt.pix;
+ /* Check for valid field format */
+ ret = vpif_check_format(ch, pixfmt, 0);
+
+ if (ret)
+ return ret;
+ /* store the format in the channel object */
+ common->fmt = *fmt;
+ return 0;
+}
+
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
+static int vpif_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ strlcpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpif_dev));
+ strlcpy(cap->card, config->card_name, sizeof(cap->card));
+
+ return 0;
+}
+
+/**
+ * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: input timings
+ */
+static int
+vpif_enum_dv_timings(struct file *file, void *priv,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ if (config->chan_config[ch->channel_id].inputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ timings->pad = 0;
+
+ ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -EINVAL;
+
+ return ret;
+}
+
+/**
+ * vpif_query_dv_timings() - QUERY_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: input timings
+ */
+static int
+vpif_query_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ if (config->chan_config[ch->channel_id].inputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -ENODATA;
+
+ return ret;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt;
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ if (config->chan_config[ch->channel_id].inputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ ret = 0;
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, "
+ "horizontal back porch, horizontal sync, "
+ "horizontal front porch, vertical back porch, "
+ "vertical sync and vertical back porch "
+ "must be defined\n");
+ return -EINVAL;
+ }
+
+ vid_ch->dv_timings = *timings;
+
+ /* Configure video port timings */
+
+ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for "
+ "interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+
+ vid_ch->stdid = 0;
+ return 0;
+}
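+/*
+ * The derivation above follows the BT.656/1120 timing fields directly:
+ * eav2sav is the total horizontal blanking (hfrontporch + hsync +
+ * hbackporch) minus 8, sav2eav is the active width, and l1/l3/l5 (plus
+ * l7/l9/l11 for interlaced timings) are line numbers computed from the
+ * vertical sync and porch values.
+ */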
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+
+ if (config->chan_config[ch->channel_id].inputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ *timings = vid_ch->dv_timings;
+
+ return 0;
+}
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
+/* vpif capture ioctl operations */
+static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
+ .vidioc_querycap = vpif_querycap,
+ .vidioc_enum_fmt_vid_cap = vpif_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vpif_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vpif_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vpif_try_fmt_vid_cap,
+
+ .vidioc_enum_input = vpif_enum_input,
+ .vidioc_s_input = vpif_s_input,
+ .vidioc_g_input = vpif_g_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_querystd = vpif_querystd,
+ .vidioc_s_std = vpif_s_std,
+ .vidioc_g_std = vpif_g_std,
+
+ .vidioc_enum_dv_timings = vpif_enum_dv_timings,
+ .vidioc_query_dv_timings = vpif_query_dv_timings,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+
+ .vidioc_log_status = vpif_log_status,
+};
+
+/* vpif file operations */
+static struct v4l2_file_operations vpif_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll
+};
+
+/**
+ * initialize_vpif() - Initialize vpif data structures
+ *
+ * Allocate memory for data structures and initialize them
+ */
+static int initialize_vpif(void)
+{
+ int err = 0, i, j;
+ int free_channel_objects_index;
+
+ /* Default number of buffers should be 3 */
+ if ((ch0_numbuffers > 0) &&
+ (ch0_numbuffers < config_params.min_numbuffers))
+ ch0_numbuffers = config_params.min_numbuffers;
+ if ((ch1_numbuffers > 0) &&
+ (ch1_numbuffers < config_params.min_numbuffers))
+ ch1_numbuffers = config_params.min_numbuffers;
+
+ /* Set buffer size to min buffers size if it is invalid */
+ if (ch0_bufsize < config_params.min_bufsize[VPIF_CHANNEL0_VIDEO])
+ ch0_bufsize =
+ config_params.min_bufsize[VPIF_CHANNEL0_VIDEO];
+ if (ch1_bufsize < config_params.min_bufsize[VPIF_CHANNEL1_VIDEO])
+ ch1_bufsize =
+ config_params.min_bufsize[VPIF_CHANNEL1_VIDEO];
+
+ config_params.numbuffers[VPIF_CHANNEL0_VIDEO] = ch0_numbuffers;
+ config_params.numbuffers[VPIF_CHANNEL1_VIDEO] = ch1_numbuffers;
+ if (ch0_numbuffers) {
+ config_params.channel_bufsize[VPIF_CHANNEL0_VIDEO]
+ = ch0_bufsize;
+ }
+ if (ch1_numbuffers) {
+ config_params.channel_bufsize[VPIF_CHANNEL1_VIDEO]
+ = ch1_bufsize;
+ }
+
+ /* Allocate memory for the channel objects */
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ vpif_obj.dev[i] =
+ kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL);
+ /* If memory allocation fails, return error */
+ if (!vpif_obj.dev[i]) {
+ free_channel_objects_index = i;
+ err = -ENOMEM;
+ goto vpif_init_free_channel_objects;
+ }
+ }
+ return 0;
+
+vpif_init_free_channel_objects:
+ for (j = 0; j < free_channel_objects_index; j++)
+ kfree(vpif_obj.dev[j]);
+ return err;
+}
+
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int i;
+
+ for (i = 0; i < vpif_obj.config->subdev_count; i++)
+ if (!strcmp(vpif_obj.config->subdev_info[i].name,
+ subdev->name)) {
+ vpif_obj.sd[i] = subdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+ struct common_obj *common;
+ struct video_device *vdev;
+ struct channel_obj *ch;
+ struct vb2_queue *q;
+ int i, j, err, k;
+
+ for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ ch->channel_id = j;
+ common = &(ch->common[VPIF_VIDEO_INDEX]);
+ spin_lock_init(&common->irqlock);
+ mutex_init(&common->lock);
+
+ /* select input 0 */
+ err = vpif_set_input(vpif_obj.config, ch, 0);
+ if (err)
+ goto probe_out;
+
+ /* Initialize vb2 queue */
+ q = &common->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = ch;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpif_cap_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &common->lock;
+
+ err = vb2_queue_init(q);
+ if (err) {
+ vpif_err("vpif_capture: vb2_queue_init() failed\n");
+ goto probe_out;
+ }
+
+ common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
+ if (IS_ERR(common->alloc_ctx)) {
+ vpif_err("Failed to get the context\n");
+ err = PTR_ERR(common->alloc_ctx);
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&common->dma_queue);
+
+ /* Initialize the video_device structure */
+ vdev = ch->video_dev;
+ strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vpif_fops;
+ vdev->ioctl_ops = &vpif_ioctl_ops;
+ vdev->v4l2_dev = &vpif_obj.v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &common->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags);
+ video_set_drvdata(ch->video_dev, ch);
+ err = video_register_device(vdev,
+ VFL_TYPE_GRABBER, (j ? 1 : 0));
+ if (err)
+ goto probe_out;
+ }
+
+ v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
+ return 0;
+
+probe_out:
+ for (k = 0; k < j; k++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[k];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+ /* Unregister video device */
+ video_unregister_device(ch->video_dev);
+ }
+ kfree(vpif_obj.sd);
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ ch = vpif_obj.dev[i];
+ /* Note: does nothing if ch->video_dev == NULL */
+ video_device_release(ch->video_dev);
+ }
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+ return vpif_probe_complete();
+}
+
+/**
+ * vpif_probe : This function probes the vpif capture driver
+ * @pdev: platform device pointer
+ *
+ * This creates device entries by registering itself with the V4L2
+ * framework and initializes the fields of each channel object
+ */
+static __init int vpif_probe(struct platform_device *pdev)
+{
+ struct vpif_subdev_info *subdevdata;
+ int i, j, err;
+ int res_idx = 0;
+ struct i2c_adapter *i2c_adap;
+ struct channel_obj *ch;
+ struct video_device *vfd;
+ struct resource *res;
+ int subdev_count;
+
+ vpif_dev = &pdev->dev;
+
+ err = initialize_vpif();
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
+ return err;
+ }
+
+ err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+ return err;
+ }
+
+ while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
+ err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr,
+ IRQF_SHARED, VPIF_DRIVER_NAME,
+ (void *)(&vpif_obj.dev[res_idx]->
+ channel_id));
+ if (err) {
+ err = -EINVAL;
+ goto vpif_unregister;
+ }
+ res_idx++;
+ }
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ /* Allocate memory for video device */
+ vfd = video_device_alloc();
+ if (NULL == vfd) {
+ for (j = 0; j < i; j++) {
+ ch = vpif_obj.dev[j];
+ video_device_release(ch->video_dev);
+ }
+ err = -ENOMEM;
+ goto vpif_unregister;
+ }
+
+ /* Set video_dev to the video device */
+ ch->video_dev = vfd;
+ }
+
+ vpif_obj.config = pdev->dev.platform_data;
+
+ subdev_count = vpif_obj.config->subdev_count;
+ vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+ GFP_KERNEL);
+ if (vpif_obj.sd == NULL) {
+ vpif_err("unable to allocate memory for subdevice pointers\n");
+ err = -ENOMEM;
+ goto vpif_sd_error;
+ }
+
+ if (!vpif_obj.config->asd_sizes) {
+ i2c_adap = i2c_get_adapter(1);
+ for (i = 0; i < subdev_count; i++) {
+ subdevdata = &vpif_obj.config->subdev_info[i];
+ vpif_obj.sd[i] =
+ v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+ i2c_adap,
+ &subdevdata->
+ board_info,
+ NULL);
+
+ if (!vpif_obj.sd[i]) {
+ vpif_err("Error registering v4l2 subdevice\n");
+ err = -ENODEV;
+ goto probe_subdev_out;
+ }
+ v4l2_info(&vpif_obj.v4l2_dev,
+ "registered sub device %s\n",
+ subdevdata->name);
+ }
+ vpif_probe_complete();
+ } else {
+ vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+ vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+ vpif_obj.notifier.bound = vpif_async_bound;
+ vpif_obj.notifier.complete = vpif_async_complete;
+ err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
+ if (err) {
+ vpif_err("Error registering async notifier\n");
+ err = -EINVAL;
+ goto probe_subdev_out;
+ }
+ }
+
+ return 0;
+
+probe_subdev_out:
+ /* free sub devices memory */
+ kfree(vpif_obj.sd);
+
+vpif_sd_error:
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ ch = vpif_obj.dev[i];
+ /* Note: does nothing if ch->video_dev == NULL */
+ video_device_release(ch->video_dev);
+ }
+vpif_unregister:
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ return err;
+}
+
+/**
+ * vpif_remove() - driver remove handler
+ * @device: ptr to platform device structure
+ *
+ * The video devices are unregistered
+ */
+static int vpif_remove(struct platform_device *device)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ kfree(vpif_obj.sd);
+ /* un-register device */
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+ /* Unregister video device */
+ video_unregister_device(ch->video_dev);
+ kfree(vpif_obj.dev[i]);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * vpif_suspend: vpif device suspend
+ */
+static int vpif_suspend(struct device *dev)
+{
+
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_is_streaming(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Disable channel */
+ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+ enable_channel0(0);
+ channel0_intr_enable(0);
+ }
+ if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel1(0);
+ channel1_intr_enable(0);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+/*
+ * vpif_resume: vpif device resume
+ */
+static int vpif_resume(struct device *dev)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_is_streaming(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Enable channel */
+ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+ enable_channel0(1);
+ channel0_intr_enable(1);
+ }
+ if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel1(1);
+ channel1_intr_enable(1);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
+
+static __refdata struct platform_driver vpif_driver = {
+ .driver = {
+ .name = VPIF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &vpif_pm_ops,
+ },
+ .probe = vpif_probe,
+ .remove = vpif_remove,
+};
+
+module_platform_driver(vpif_driver);
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
new file mode 100644
index 00000000000..1ee17824f48
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef VPIF_CAPTURE_H
+#define VPIF_CAPTURE_H
+
+/* Header files */
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include "vpif.h"
+
+/* Macros */
+#define VPIF_CAPTURE_VERSION "0.0.2"
+
+#define VPIF_VALID_FIELD(field) (((V4L2_FIELD_ANY == field) || \
+ (V4L2_FIELD_NONE == field)) || \
+ (((V4L2_FIELD_INTERLACED == field) || \
+ (V4L2_FIELD_SEQ_TB == field)) || \
+ (V4L2_FIELD_SEQ_BT == field)))
+
+#define VPIF_CAPTURE_MAX_DEVICES 2
+#define VPIF_VIDEO_INDEX 0
+#define VPIF_NUMBER_OF_OBJECTS 1
+
+/* Enumerated data type to give id to each device per channel */
+enum vpif_channel_id {
+ VPIF_CHANNEL0_VIDEO = 0,
+ VPIF_CHANNEL1_VIDEO,
+};
+
+struct video_obj {
+ enum v4l2_field buf_field;
+ /* Currently selected or default standard */
+ v4l2_std_id stdid;
+ struct v4l2_dv_timings dv_timings;
+};
+
+struct vpif_cap_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+struct common_obj {
+ /* Pointer pointing to current v4l2_buffer */
+ struct vpif_cap_buffer *cur_frm;
+ /* Pointer pointing to next v4l2_buffer */
+ struct vpif_cap_buffer *next_frm;
+ /* Used to store pixel format */
+ struct v4l2_format fmt;
+ /* Buffer queue used in video-buf */
+ struct vb2_queue buffer_queue;
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ /* Spinlock protecting dma_queue */
+ spinlock_t irqlock;
+ /* lock used to access this structure */
+ struct mutex lock;
+ /* Function pointer to set the addresses */
+ void (*set_addr) (unsigned long, unsigned long, unsigned long,
+ unsigned long);
+ /* offset where Y top starts from the starting of the buffer */
+ u32 ytop_off;
+ /* offset where Y bottom starts from the starting of the buffer */
+ u32 ybtm_off;
+ /* offset where C top starts from the starting of the buffer */
+ u32 ctop_off;
+ /* offset where C bottom starts from the starting of the buffer */
+ u32 cbtm_off;
+ /* Indicates width of the image data */
+ u32 width;
+ /* Indicates height of the image data */
+ u32 height;
+};
+
+struct channel_obj {
+ /* Identifies video device for this channel */
+ struct video_device *video_dev;
+ /* Indicates id of the field which is being captured */
+ u32 field_id;
+ /* flag to indicate whether decoder is initialized */
+ u8 initialized;
+ /* Identifies channel */
+ enum vpif_channel_id channel_id;
+ /* Current input */
+ u32 input_idx;
+ /* subdev corresponding to the current input, may be NULL */
+ struct v4l2_subdev *sd;
+ /* vpif configuration params */
+ struct vpif_params vpifparams;
+ /* common object array */
+ struct common_obj common[VPIF_NUMBER_OF_OBJECTS];
+ /* video object */
+ struct video_obj video;
+};
+
+struct vpif_device {
+ struct v4l2_device v4l2_dev;
+ struct channel_obj *dev[VPIF_CAPTURE_NUM_CHANNELS];
+ struct v4l2_subdev **sd;
+ struct v4l2_async_notifier notifier;
+ struct vpif_capture_config *config;
+};
+
+struct vpif_config_params {
+ u8 min_numbuffers;
+ u8 numbuffers[VPIF_CAPTURE_NUM_CHANNELS];
+ s8 device_type;
+ u32 min_bufsize[VPIF_CAPTURE_NUM_CHANNELS];
+ u32 channel_bufsize[VPIF_CAPTURE_NUM_CHANNELS];
+ u8 default_device[VPIF_CAPTURE_NUM_CHANNELS];
+ u32 video_limit[VPIF_CAPTURE_NUM_CHANNELS];
+ u8 max_device_type;
+};
+
+#endif /* VPIF_CAPTURE_H */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
new file mode 100644
index 00000000000..b431b58f39e
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -0,0 +1,1481 @@
+/*
+ * vpif-display - VPIF display driver
+ * Display driver for TI DaVinci VPIF
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "vpif.h"
+#include "vpif_display.h"
+
+MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_DISPLAY_VERSION);
+
+#define VPIF_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
+
+#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
+#define vpif_dbg(level, debug, fmt, arg...) \
+ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)
+
+static int debug = 1;
+
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+#define VPIF_DRIVER_NAME "vpif_display"
+
+/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
+static int ycmux_mode;
+
+static u8 channel_first_int[VPIF_NUMOBJECTS][2] = { {1, 1} };
+
+static struct vpif_device vpif_obj = { {NULL} };
+static struct device *vpif_dev;
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
+
+static inline struct vpif_disp_buffer *to_vpif_buffer(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct vpif_disp_buffer, vb);
+}
+
+/**
+ * vpif_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the callback function for buffer prepare when vb2_qbuf() is
+ * called. It sets the plane payload and verifies that the DMA addresses
+ * derived from the buffer are aligned as required by the VPIF
+ */
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
+ struct common_obj *common;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vb->v4l2_buf.field = common->fmt.fmt.pix.field;
+
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (!ISALIGNED(addr + common->ytop_off) ||
+ !ISALIGNED(addr + common->ybtm_off) ||
+ !ISALIGNED(addr + common->ctop_off) ||
+ !ISALIGNED(addr + common->cbtm_off)) {
+ vpif_err("buffer offset not aligned to 8 bytes\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vpif_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains number of distinct video planes needed to hold a frame
+ * @sizes: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
+ *
+ * This callback function is called when reqbuf() is called to adjust
+ * the buffer count and buffer size
+ */
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (fmt && fmt->fmt.pix.sizeimage < common->fmt.fmt.pix.sizeimage)
+ return -EINVAL;
+
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ *nplanes = 1;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : common->fmt.fmt.pix.sizeimage;
+ alloc_ctxs[0] = common->alloc_ctx;
+
+ /* Calculate the offset for Y and C data in the buffer */
+ vpif_calculate_offsets(ch);
+
+ return 0;
+}
+
+/**
+ * vpif_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ *
+ * This callback function queues the buffer to the DMA engine
+ */
+static void vpif_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vpif_disp_buffer *buf = to_vpif_buffer(vb);
+ struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
+ struct common_obj *common;
+ unsigned long flags;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&common->irqlock, flags);
+ list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+/**
+ * vpif_start_streaming : Starts the DMA engine for streaming
+ * @vq: ptr to vb2_queue
+ * @count: number of buffers
+ */
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpif_display_config *vpif_config_data =
+ vpif_dev->platform_data;
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpif = &ch->vpifparams;
+ struct vpif_disp_buffer *buf, *tmp;
+ unsigned long addr, flags;
+ int ret;
+
+ spin_lock_irqsave(&common->irqlock, flags);
+
+ /* Initialize field_id */
+ ch->field_id = 0;
+
+ /* clock settings */
+ if (vpif_config_data->set_clock) {
+ ret = vpif_config_data->set_clock(ch->vpifparams.std_info.
+ ycmux_mode, ch->vpifparams.std_info.hd_sd);
+ if (ret < 0) {
+ vpif_err("can't set clock\n");
+ goto err;
+ }
+ }
+
+ /* set the parameters and addresses */
+ ret = vpif_set_video_params(vpif, ch->channel_id + 2);
+ if (ret < 0)
+ goto err;
+
+ ycmux_mode = ret;
+ vpif_config_addr(ch, ret);
+ /* Get the next frame from the buffer queue */
+ common->next_frm = common->cur_frm =
+ list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+
+ list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+ /* Mark state of the current frame to active */
+ common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+ common->set_addr((addr + common->ytop_off),
+ (addr + common->ybtm_off),
+ (addr + common->ctop_off),
+ (addr + common->cbtm_off));
+
+ /*
+ * Set the interrupt for both fields and enable the
+ * channel in the VPIF registers
+ */
+ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+ if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+ channel2_intr_assert();
+ channel2_intr_enable(1);
+ enable_channel2(1);
+ if (vpif_config_data->chan_config[VPIF_CHANNEL2_VIDEO].clip_en)
+ channel2_clipping_enable(1);
+ }
+
+ if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) {
+ channel3_intr_assert();
+ channel3_intr_enable(1);
+ enable_channel3(1);
+ if (vpif_config_data->chan_config[VPIF_CHANNEL3_VIDEO].clip_en)
+ channel3_clipping_enable(1);
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+
+ return ret;
+}
+
+/**
+ * vpif_stop_streaming : Stop the DMA engine
+ * @vq: ptr to vb2_queue
+ *
+ * This callback stops the DMA engine and releases any remaining
+ * buffers in the DMA queue.
+ */
+static void vpif_stop_streaming(struct vb2_queue *vq)
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common;
+ unsigned long flags;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* Disable channel */
+ if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+ enable_channel2(0);
+ channel2_intr_enable(0);
+ }
+ if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) {
+ enable_channel3(0);
+ channel3_intr_enable(0);
+ }
+
+ /* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
+ if (common->cur_frm == common->next_frm) {
+ vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (common->cur_frm != NULL)
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (common->next_frm != NULL)
+ vb2_buffer_done(&common->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&common->dma_queue)) {
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+ list_del(&common->next_frm->list);
+ vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpif_buffer_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = vpif_buffer_prepare,
+ .start_streaming = vpif_start_streaming,
+ .stop_streaming = vpif_stop_streaming,
+ .buf_queue = vpif_buffer_queue,
+};
+
+static void process_progressive_mode(struct common_obj *common)
+{
+ unsigned long addr = 0;
+
+ spin_lock(&common->irqlock);
+ /* Get the next buffer from buffer queue */
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+ /* Remove that buffer from the buffer queue */
+ list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
+ /* Mark status of the buffer as active */
+ common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+
+ /* Set top and bottom field addrs in VPIF registers */
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+ common->set_addr(addr + common->ytop_off,
+ addr + common->ybtm_off,
+ addr + common->ctop_off,
+ addr + common->cbtm_off);
+}
+
+static void process_interlaced_mode(int fid, struct common_obj *common)
+{
+ /* device field id and local field id are in sync */
+ /* If this is even field */
+ if (0 == fid) {
+ if (common->cur_frm == common->next_frm)
+ return;
+
+ /* One frame has been displayed. If the next frame is
+ * available, release cur_frm and move on. */
+ /* Copy frame display time */
+ v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
+ /* Change status of the cur_frm */
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_DONE);
+ /* Make cur_frm pointing to next_frm */
+ common->cur_frm = common->next_frm;
+
+ } else if (1 == fid) { /* odd field */
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)
+ || (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
+ return;
+ }
+ spin_unlock(&common->irqlock);
+ /* One field has been displayed; configure the next
+ * frame if it is available, else hold on to the
+ * current frame */
+ /* Get next from the buffer queue */
+ process_progressive_mode(common);
+ }
+}
+
+/*
+ * vpif_channel_isr: It changes status of the displayed buffer, takes next
+ * buffer from the queue and sets its address in VPIF registers
+ */
+static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
+{
+ struct vpif_device *dev = &vpif_obj;
+ struct channel_obj *ch;
+ struct common_obj *common;
+ enum v4l2_field field;
+ int fid = -1, i;
+ int channel_id = 0;
+
+ channel_id = *(int *)(dev_id);
+ if (!vpif_intr_status(channel_id + 2))
+ return IRQ_NONE;
+
+ ch = dev->dev[channel_id];
+ field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
+ for (i = 0; i < VPIF_NUMOBJECTS; i++) {
+ common = &ch->common[i];
+ /* If streaming is started in this channel */
+
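+ /* frm_fmt is 1 for progressive and 0 for interlaced formats */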
+ if (1 == ch->vpifparams.std_info.frm_fmt) {
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
+ continue;
+ }
+ spin_unlock(&common->irqlock);
+
+ /* Progressive mode */
+ if (!channel_first_int[i][channel_id]) {
+ /* Mark the status of cur_frm as done and
+ * release the buffer */
+ v4l2_get_timestamp(&common->cur_frm->vb.
+ v4l2_buf.timestamp);
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_DONE);
+ /* Make cur_frm pointing to next_frm */
+ common->cur_frm = common->next_frm;
+ }
+
+ channel_first_int[i][channel_id] = 0;
+ process_progressive_mode(common);
+ } else {
+ /* Interlaced mode */
+ /* If it is first interrupt, ignore it */
+
+ if (channel_first_int[i][channel_id]) {
+ channel_first_int[i][channel_id] = 0;
+ continue;
+ }
+
+ if (0 == i) {
+ ch->field_id ^= 1;
+ /* Get field id from VPIF registers */
+ fid = vpif_channel_getfid(ch->channel_id + 2);
+ /* If fid does not match with stored field id */
+ if (fid != ch->field_id) {
+ /* Make them in sync */
+ if (0 == fid)
+ ch->field_id = fid;
+
+ return IRQ_HANDLED;
+ }
+ }
+ process_interlaced_mode(fid, common);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int vpif_update_std_info(struct channel_obj *ch)
+{
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ const struct vpif_channel_config_params *config;
+
+ int i;
+
+ for (i = 0; i < vpif_ch_params_count; i++) {
+ config = &vpif_ch_params[i];
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ }
+ }
+
+ if (i == vpif_ch_params_count) {
+ vpif_dbg(1, debug, "Format not found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpif_update_resolution(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+
+ if (!vid_ch->stdid && !vid_ch->dv_timings.bt.height)
+ return -EINVAL;
+
+ if (vid_ch->stdid) {
+ if (vpif_update_std_info(ch))
+ return -EINVAL;
+ }
+
+ common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+ common->fmt.fmt.pix.width = std_info->width;
+ common->fmt.fmt.pix.height = std_info->height;
+ vpif_dbg(1, debug, "Pixel details: Width = %d,Height = %d\n",
+ common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
+
+ /* Set height and width parameters */
+ common->height = std_info->height;
+ common->width = std_info->width;
+ common->fmt.fmt.pix.sizeimage = common->height * common->width * 2;
+
+ if (vid_ch->stdid)
+ common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ if (ch->vpifparams.std_info.frm_fmt)
+ common->fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ else
+ common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ return 0;
+}
+
+/*
+ * vpif_calculate_offsets: This function calculates the buffer offsets for Y
+ * and C in the top and bottom fields
+ */
+static void vpif_calculate_offsets(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ enum v4l2_field field = common->fmt.fmt.pix.field;
+ struct video_obj *vid_ch = &ch->video;
+ unsigned int hpitch, vpitch, sizeimage;
+
+ if (V4L2_FIELD_ANY == common->fmt.fmt.pix.field) {
+ if (ch->vpifparams.std_info.frm_fmt)
+ vid_ch->buf_field = V4L2_FIELD_NONE;
+ else
+ vid_ch->buf_field = V4L2_FIELD_INTERLACED;
+ } else {
+ vid_ch->buf_field = common->fmt.fmt.pix.field;
+ }
+
+ sizeimage = common->fmt.fmt.pix.sizeimage;
+
+ hpitch = common->fmt.fmt.pix.bytesperline;
+ vpitch = sizeimage / (hpitch * 2);
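+ /*
+ * Field-interleaved buffers (NONE/INTERLACED) keep both fields in one
+ * plane: the bottom field starts one line (hpitch) below the top field
+ * and the C plane occupies the second half of the buffer. For
+ * SEQ_TB/SEQ_BT the fields are stored back to back, each taking a
+ * quarter of the buffer per component.
+ */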
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+ common->ytop_off = 0;
+ common->ybtm_off = hpitch;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = sizeimage / 2 + hpitch;
+ } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
+ common->ytop_off = 0;
+ common->ybtm_off = sizeimage / 4;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = common->ctop_off + sizeimage / 4;
+ } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
+ common->ybtm_off = 0;
+ common->ytop_off = sizeimage / 4;
+ common->cbtm_off = sizeimage / 2;
+ common->ctop_off = common->cbtm_off + sizeimage / 4;
+ }
+
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+ vpifparams->video_params.storage_mode = 1;
+ } else {
+ vpifparams->video_params.storage_mode = 0;
+ }
+
+ if (ch->vpifparams.std_info.frm_fmt == 1) {
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ } else {
+ if ((field == V4L2_FIELD_ANY) ||
+ (field == V4L2_FIELD_INTERLACED))
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline * 2;
+ else
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ }
+
+ ch->vpifparams.video_params.stdid = ch->vpifparams.std_info.stdid;
+}
+
+static void vpif_config_addr(struct channel_obj *ch, int muxmode)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (VPIF_CHANNEL3_VIDEO == ch->channel_id) {
+ common->set_addr = ch3_set_videobuf_addr;
+ } else {
+ if (2 == muxmode)
+ common->set_addr = ch2_set_videobuf_addr_yc_nmux;
+ else
+ common->set_addr = ch2_set_videobuf_addr;
+ }
+}
+
+/* functions implementing ioctls */
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
+static int vpif_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ strlcpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpif_dev));
+ strlcpy(cap->card, config->card_name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vpif_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index != 0)
+ return -EINVAL;
+
+ /* Fill in the information about format */
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ strcpy(fmt->description, "YCbCr4:2:2 YC Planar");
+ fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+ fmt->flags = 0;
+ return 0;
+}
+
+static int vpif_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* Check the validity of the buffer type */
+ if (common->fmt.type != fmt->type)
+ return -EINVAL;
+
+ if (vpif_update_resolution(ch))
+ return -EINVAL;
+ *fmt = common->fmt;
+ return 0;
+}
+
+static int vpif_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ /*
+ * to suppress v4l2-compliance warnings, silently correct
+ * the pixelformat
+ */
+ if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P)
+ pixfmt->pixelformat = common->fmt.fmt.pix.pixelformat;
+
+ if (vpif_update_resolution(ch))
+ return -EINVAL;
+
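+ /* Only the fixed geometry of the current standard/DV timings is
+ * supported, so force the remaining format fields to match it */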
+ pixfmt->colorspace = common->fmt.fmt.pix.colorspace;
+ pixfmt->field = common->fmt.fmt.pix.field;
+ pixfmt->bytesperline = common->fmt.fmt.pix.width;
+ pixfmt->width = common->fmt.fmt.pix.width;
+ pixfmt->height = common->fmt.fmt.pix.height;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2;
+ pixfmt->priv = 0;
+
+ return 0;
+}
+
+static int vpif_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ int ret;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ ret = vpif_try_fmt_vid_out(file, priv, fmt);
+ if (ret)
+ return ret;
+
+ /* store the pix format in the channel object */
+ common->fmt.fmt.pix = *pixfmt;
+
+ /* store the format in the channel object */
+ common->fmt = *fmt;
+ return 0;
+}
+
+static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+ int ret;
+
+ if (config->chan_config[ch->channel_id].outputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_STD)
+ return -ENODATA;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+
+ if (!(std_id & VPIF_V4L2_STD))
+ return -EINVAL;
+
+ /* Call encoder subdevice function to set the standard */
+ ch->video.stdid = std_id;
+ memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+ /* Get the information about the standard */
+ if (vpif_update_resolution(ch))
+ return -EINVAL;
+
+ common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
+
+ ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
+ s_std_output, std_id);
+ if (ret < 0) {
+ vpif_err("Failed to set output standard\n");
+ return ret;
+ }
+
+ ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
+ s_std, std_id);
+ if (ret < 0)
+ vpif_err("Failed to set standard for sub devices\n");
+ return ret;
+}
+
+static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+
+ if (config->chan_config[ch->channel_id].outputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_STD)
+ return -ENODATA;
+
+ *std = ch->video.stdid;
+ return 0;
+}
+
+static int vpif_enum_output(struct file *file, void *fh,
+ struct v4l2_output *output)
+{
+
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ if (output->index >= chan_cfg->output_count) {
+ vpif_dbg(1, debug, "Invalid output index\n");
+ return -EINVAL;
+ }
+
+ *output = chan_cfg->outputs[output->index].output;
+ return 0;
+}
+
+/**
+ * vpif_output_to_subdev() - Maps output to sub device
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @index: output index given by the application
+ *
+ * Look up the sub device information for a given output index.
+ * All outputs are reported to the application; the output table
+ * also holds the sub device name for each output.
+ */
+static int
+vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
+ struct vpif_display_chan_config *chan_cfg, int index)
+{
+ struct vpif_subdev_info *subdev_info;
+ const char *subdev_name;
+ int i;
+
+ vpif_dbg(2, debug, "vpif_output_to_subdev\n");
+
+ if (chan_cfg->outputs == NULL)
+ return -1;
+
+ subdev_name = chan_cfg->outputs[index].subdev_name;
+ if (subdev_name == NULL)
+ return -1;
+
+ /* loop through the sub device list to get the sub device info */
+ for (i = 0; i < vpif_cfg->subdev_count; i++) {
+ subdev_info = &vpif_cfg->subdevinfo[i];
+ if (!strcmp(subdev_info->name, subdev_name))
+ return i;
+ }
+ return -1;
+}
+
+/**
+ * vpif_set_output() - Select an output
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: output index given by the application
+ *
+ * Select the given output.
+ */
+static int vpif_set_output(struct vpif_display_config *vpif_cfg,
+ struct channel_obj *ch, int index)
+{
+ struct vpif_display_chan_config *chan_cfg =
+ &vpif_cfg->chan_config[ch->channel_id];
+ struct vpif_subdev_info *subdev_info = NULL;
+ struct v4l2_subdev *sd = NULL;
+ u32 input = 0, output = 0;
+ int sd_index;
+ int ret;
+
+ sd_index = vpif_output_to_subdev(vpif_cfg, chan_cfg, index);
+ if (sd_index >= 0) {
+ sd = vpif_obj.sd[sd_index];
+ subdev_info = &vpif_cfg->subdevinfo[sd_index];
+ }
+
+ if (sd) {
+ input = chan_cfg->outputs[index].input_route;
+ output = chan_cfg->outputs[index].output_route;
+ ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ vpif_err("Failed to set output\n");
+ return ret;
+ }
+
+ }
+ ch->output_idx = index;
+ ch->sd = sd;
+ if (chan_cfg->outputs != NULL)
+ /* update tvnorms from the sub device output info */
+ ch->video_dev->tvnorms = chan_cfg->outputs[index].output.std;
+ return 0;
+}
+
+static int vpif_s_output(struct file *file, void *priv, unsigned int i)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+
+ if (i >= chan_cfg->output_count)
+ return -EINVAL;
+
+ return vpif_set_output(config, ch, i);
+}
+
+static int vpif_g_output(struct file *file, void *priv, unsigned int *i)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+
+ *i = ch->output_idx;
+
+ return 0;
+}
+
+/**
+ * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: DV timings enumeration entry
+ */
+static int
+vpif_enum_dv_timings(struct file *file, void *priv,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+ int ret;
+
+ if (config->chan_config[ch->channel_id].outputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ timings->pad = 0;
+
+ ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -EINVAL;
+ return ret;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt;
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+ int ret;
+
+ if (config->chan_config[ch->channel_id].outputs == NULL)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ ret = 0;
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, "
+ "horizontal back porch, horizontal sync, "
+ "horizontal front porch, vertical back porch, "
+ "vertical sync and vertical back porch "
+ "must be defined\n");
+ return -EINVAL;
+ }
+
+ vid_ch->dv_timings = *timings;
+
+ /* Configure video port timings */
+
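+ /* eav2sav: horizontal blanking width minus the 8 bytes of EAV/SAV
+ * codes; sav2eav: active line width */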
+ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for "
+ "interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120",
+ VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+ vid_ch->stdid = 0;
+
+ return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_output output;
+
+ if (config->chan_config[ch->channel_id].outputs == NULL)
+ goto error;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+
+ if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS)
+ goto error;
+
+ *timings = vid_ch->dv_timings;
+
+ return 0;
+error:
+ return -ENODATA;
+}
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
+/* vpif display ioctl operations */
+static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
+ .vidioc_querycap = vpif_querycap,
+ .vidioc_enum_fmt_vid_out = vpif_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vpif_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vpif_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vpif_try_fmt_vid_out,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_s_std = vpif_s_std,
+ .vidioc_g_std = vpif_g_std,
+
+ .vidioc_enum_output = vpif_enum_output,
+ .vidioc_s_output = vpif_s_output,
+ .vidioc_g_output = vpif_g_output,
+
+ .vidioc_enum_dv_timings = vpif_enum_dv_timings,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+
+ .vidioc_log_status = vpif_log_status,
+};
+
+static const struct v4l2_file_operations vpif_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll
+};
+
+/* Allocate the channel objects used by the driver */
+static int initialize_vpif(void)
+{
+ int free_channel_objects_index;
+ int err, i, j;
+
+ /* Allocate memory for each channel object */
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ vpif_obj.dev[i] =
+ kzalloc(sizeof(struct channel_obj), GFP_KERNEL);
+ /* If memory allocation fails, return error */
+ if (!vpif_obj.dev[i]) {
+ free_channel_objects_index = i;
+ err = -ENOMEM;
+ goto vpif_init_free_channel_objects;
+ }
+ }
+
+ return 0;
+
+vpif_init_free_channel_objects:
+ for (j = 0; j < free_channel_objects_index; j++)
+ kfree(vpif_obj.dev[j]);
+ return err;
+}
+
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int i;
+
+ for (i = 0; i < vpif_obj.config->subdev_count; i++)
+ if (!strcmp(vpif_obj.config->subdevinfo[i].name,
+ subdev->name)) {
+ vpif_obj.sd[i] = subdev;
+ vpif_obj.sd[i]->grp_id = 1 << i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+ struct common_obj *common;
+ struct video_device *vdev;
+ struct channel_obj *ch;
+ struct vb2_queue *q;
+ int j, err, k;
+
+ for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ /* Initialize field of the channel objects */
+ for (k = 0; k < VPIF_NUMOBJECTS; k++) {
+ common = &ch->common[k];
+ spin_lock_init(&common->irqlock);
+ mutex_init(&common->lock);
+ common->set_addr = NULL;
+ common->ytop_off = 0;
+ common->ybtm_off = 0;
+ common->ctop_off = 0;
+ common->cbtm_off = 0;
+ common->cur_frm = NULL;
+ common->next_frm = NULL;
+ memset(&common->fmt, 0, sizeof(common->fmt));
+ }
+ ch->initialized = 0;
+ if (vpif_obj.config->subdev_count)
+ ch->sd = vpif_obj.sd[0];
+ ch->channel_id = j;
+
+ memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
+
+ ch->common[VPIF_VIDEO_INDEX].fmt.type =
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ /* select output 0 */
+ err = vpif_set_output(vpif_obj.config, ch, 0);
+ if (err)
+ goto probe_out;
+
+ /* set initial format */
+ ch->video.stdid = V4L2_STD_525_60;
+ memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+ vpif_update_resolution(ch);
+
+ /* Initialize vb2 queue */
+ q = &common->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = ch;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpif_disp_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &common->lock;
+ err = vb2_queue_init(q);
+ if (err) {
+ vpif_err("vpif_display: vb2_queue_init() failed\n");
+ goto probe_out;
+ }
+
+ common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
+ if (IS_ERR(common->alloc_ctx)) {
+ vpif_err("Failed to get the context\n");
+ err = PTR_ERR(common->alloc_ctx);
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&common->dma_queue);
+
+ /* register video device */
+ vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
+ (int)ch, (int)&ch->video_dev);
+
+ /* Initialize the video_device structure */
+ vdev = ch->video_dev;
+ strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vpif_fops;
+ vdev->ioctl_ops = &vpif_ioctl_ops;
+ vdev->v4l2_dev = &vpif_obj.v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_TX;
+ vdev->queue = q;
+ vdev->lock = &common->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags);
+ video_set_drvdata(ch->video_dev, ch);
+ err = video_register_device(vdev, VFL_TYPE_GRABBER,
+ (j ? 3 : 2));
+ if (err < 0)
+ goto probe_out;
+ }
+
+ return 0;
+
+probe_out:
+ for (k = 0; k < j; k++) {
+ ch = vpif_obj.dev[k];
+ common = &ch->common[k];
+ vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+ video_unregister_device(ch->video_dev);
+ video_device_release(ch->video_dev);
+ ch->video_dev = NULL;
+ }
+ return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+ return vpif_probe_complete();
+}
+
+/*
+ * vpif_probe: This function creates device entries by registering itself with
+ * the V4L2 driver and initializes the fields of each channel object
+ */
+static __init int vpif_probe(struct platform_device *pdev)
+{
+ struct vpif_subdev_info *subdevdata;
+ int i, j = 0, err = 0;
+ int res_idx = 0;
+ struct i2c_adapter *i2c_adap;
+ struct channel_obj *ch;
+ struct video_device *vfd;
+ struct resource *res;
+ int subdev_count;
+
+ vpif_dev = &pdev->dev;
+ err = initialize_vpif();
+
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
+ return err;
+ }
+
+ err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+ return err;
+ }
+
+ while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
+ err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr,
+ IRQF_SHARED, VPIF_DRIVER_NAME,
+ (void *)(&vpif_obj.dev[res_idx]->
+ channel_id));
+ if (err) {
+ err = -EINVAL;
+ vpif_err("VPIF IRQ request failed\n");
+ goto vpif_unregister;
+ }
+ res_idx++;
+ }
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+
+ /* Allocate memory for video device */
+ vfd = video_device_alloc();
+ if (vfd == NULL) {
+ for (j = 0; j < i; j++) {
+ ch = vpif_obj.dev[j];
+ video_device_release(ch->video_dev);
+ }
+ err = -ENOMEM;
+ goto vpif_unregister;
+ }
+
+ /* Set video_dev to the video device */
+ ch->video_dev = vfd;
+ }
+
+ vpif_obj.config = pdev->dev.platform_data;
+ subdev_count = vpif_obj.config->subdev_count;
+ subdevdata = vpif_obj.config->subdevinfo;
+ vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+ GFP_KERNEL);
+ if (vpif_obj.sd == NULL) {
+ vpif_err("unable to allocate memory for subdevice pointers\n");
+ err = -ENOMEM;
+ goto vpif_sd_error;
+ }
+
+ if (!vpif_obj.config->asd_sizes) {
+ i2c_adap = i2c_get_adapter(1);
+ for (i = 0; i < subdev_count; i++) {
+ vpif_obj.sd[i] =
+ v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+ i2c_adap,
+ &subdevdata[i].
+ board_info,
+ NULL);
+ if (!vpif_obj.sd[i]) {
+ vpif_err("Error registering v4l2 subdevice\n");
+ err = -ENODEV;
+ goto probe_subdev_out;
+ }
+
+ if (vpif_obj.sd[i])
+ vpif_obj.sd[i]->grp_id = 1 << i;
+ }
+ vpif_probe_complete();
+ } else {
+ vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+ vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+ vpif_obj.notifier.bound = vpif_async_bound;
+ vpif_obj.notifier.complete = vpif_async_complete;
+ err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
+ if (err) {
+ vpif_err("Error registering async notifier\n");
+ err = -EINVAL;
+ goto probe_subdev_out;
+ }
+ }
+
+ return 0;
+
+probe_subdev_out:
+ kfree(vpif_obj.sd);
+vpif_sd_error:
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ ch = vpif_obj.dev[i];
+ /* Note: does nothing if ch->video_dev == NULL */
+ video_device_release(ch->video_dev);
+ }
+vpif_unregister:
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ return err;
+}
+
+/*
+ * vpif_remove: Unregisters the channels from the V4L2 driver
+ */
+static int vpif_remove(struct platform_device *device)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ kfree(vpif_obj.sd);
+ /* un-register device */
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[i];
+ vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+ /* Unregister video device */
+ video_unregister_device(ch->video_dev);
+
+ ch->video_dev = NULL;
+ kfree(vpif_obj.dev[i]);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vpif_suspend(struct device *dev)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_is_streaming(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Disable channel */
+ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+ enable_channel2(0);
+ channel2_intr_enable(0);
+ }
+ if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel3(0);
+ channel3_intr_enable(0);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_is_streaming(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Enable channel */
+ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+ enable_channel2(1);
+ channel2_intr_enable(1);
+ }
+ if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel3(1);
+ channel3_intr_enable(1);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
+
+static __refdata struct platform_driver vpif_driver = {
+ .driver = {
+ .name = VPIF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &vpif_pm_ops,
+ },
+ .probe = vpif_probe,
+ .remove = vpif_remove,
+};
+
+module_platform_driver(vpif_driver);
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
new file mode 100644
index 00000000000..7b21a760767
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -0,0 +1,127 @@
+/*
+ * VPIF display header file
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef VPIF_DISPLAY_H
+#define VPIF_DISPLAY_H
+
+/* Header files */
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include "vpif.h"
+
+/* Macros */
+#define VPIF_DISPLAY_VERSION "0.0.2"
+
+#define VPIF_VALID_FIELD(field) \
+ (((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \
+ (((V4L2_FIELD_INTERLACED == field) || (V4L2_FIELD_SEQ_TB == field)) || \
+ (V4L2_FIELD_SEQ_BT == field)))
+
+#define VPIF_DISPLAY_MAX_DEVICES (2)
+#define VPIF_SLICED_BUF_SIZE (256)
+#define VPIF_SLICED_MAX_SERVICES (3)
+#define VPIF_VIDEO_INDEX (0)
+#define VPIF_VBI_INDEX (1)
+#define VPIF_HBI_INDEX (2)
+
+/* Set to 1 as HBI/VBI support is yet to be added; otherwise it would be 3 */
+#define VPIF_NUMOBJECTS (1)
+
+/* Macros */
+#define ISALIGNED(a) (0 == ((a) & 7))
+
+/* enumerated data types */
+/* Enumerated data type to give id to each device per channel */
+enum vpif_channel_id {
+ VPIF_CHANNEL2_VIDEO = 0, /* Channel2 Video */
+ VPIF_CHANNEL3_VIDEO, /* Channel3 Video */
+};
+
+/* structures */
+
+struct video_obj {
+ enum v4l2_field buf_field;
+ u32 latest_only; /* indicates whether to return the
+ * most recently displayed frame only */
+ v4l2_std_id stdid; /* Currently selected or default
+ * standard */
+ struct v4l2_dv_timings dv_timings;
+};
+
+struct vpif_disp_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+struct common_obj {
+ struct vpif_disp_buffer *cur_frm; /* Pointer to the current
+ * vb2_buffer */
+ struct vpif_disp_buffer *next_frm; /* Pointer to the next
+ * vb2_buffer */
+ struct v4l2_format fmt; /* Used to store the format */
+ struct vb2_queue buffer_queue; /* vb2 buffer queue */
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ struct list_head dma_queue; /* Queue of filled frames */
+ spinlock_t irqlock; /* Protects dma_queue */
+
+ /* channel specific parameters */
+ struct mutex lock; /* lock used to access this
+ * structure */
+ u32 ytop_off; /* offset of Y top from the
+ * starting of the buffer */
+ u32 ybtm_off; /* offset of Y bottom from the
+ * starting of the buffer */
+ u32 ctop_off; /* offset of C top from the
+ * starting of the buffer */
+ u32 cbtm_off; /* offset of C bottom from the
+ * starting of the buffer */
+ /* Function pointer to set the addresses */
+ void (*set_addr)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+ u32 height;
+ u32 width;
+};
+
+struct channel_obj {
+ /* V4l2 specific parameters */
+ struct video_device *video_dev; /* Identifies video device for
+ * this channel */
+ u32 field_id; /* Indicates id of the field
+ * which is being displayed */
+ u8 initialized; /* flag to indicate whether
+ * encoder is initialized */
+ u32 output_idx; /* Current output index */
+ struct v4l2_subdev *sd; /* Current output subdev(may be NULL) */
+
+ enum vpif_channel_id channel_id;/* Identifies channel */
+ struct vpif_params vpifparams;
+ struct common_obj common[VPIF_NUMOBJECTS];
+ struct video_obj video;
+};
+
+/* vpif device structure */
+struct vpif_device {
+ struct v4l2_device v4l2_dev;
+ struct channel_obj *dev[VPIF_DISPLAY_NUM_CHANNELS];
+ struct v4l2_subdev **sd;
+ struct v4l2_async_notifier notifier;
+ struct vpif_display_config *config;
+};
+
+#endif /* VPIF_DISPLAY_H */
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
new file mode 100644
index 00000000000..31120b4a4a3
--- /dev/null
+++ b/drivers/media/platform/davinci/vpss.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * common vpss system module platform driver for all video drivers.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+
+#include <media/davinci/vpss.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPSS Driver");
+MODULE_AUTHOR("Texas Instruments");
+
+/* DM644x defines */
+#define DM644X_SBL_PCR_VPSS (4)
+
+#define DM355_VPSSBL_INTSEL 0x10
+#define DM355_VPSSBL_EVTSEL 0x14
+/* vpss BL register offsets */
+#define DM355_VPSSBL_CCDCMUX 0x1c
+/* vpss CLK register offsets */
+#define DM355_VPSSCLK_CLKCTRL 0x04
+/* masks and shifts */
+#define VPSS_HSSISEL_SHIFT 4
+/*
+ * VDINT0 - vpss_int0, VDINT1 - vpss_int1, H3A - vpss_int4,
+ * IPIPE_INT1_SDR - vpss_int5
+ */
+#define DM355_VPSSBL_INTSEL_DEFAULT 0xff83ff10
+/* VENCINT - vpss_int8 */
+#define DM355_VPSSBL_EVTSEL_DEFAULT 0x4
+
+#define DM365_ISP5_PCCR 0x04
+#define DM365_ISP5_PCCR_BL_CLK_ENABLE BIT(0)
+#define DM365_ISP5_PCCR_ISIF_CLK_ENABLE BIT(1)
+#define DM365_ISP5_PCCR_H3A_CLK_ENABLE BIT(2)
+#define DM365_ISP5_PCCR_RSZ_CLK_ENABLE BIT(3)
+#define DM365_ISP5_PCCR_IPIPE_CLK_ENABLE BIT(4)
+#define DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE BIT(5)
+#define DM365_ISP5_PCCR_RSV BIT(6)
+
+#define DM365_ISP5_BCR 0x08
+#define DM365_ISP5_BCR_ISIF_OUT_ENABLE BIT(1)
+
+#define DM365_ISP5_INTSEL1 0x10
+#define DM365_ISP5_INTSEL2 0x14
+#define DM365_ISP5_INTSEL3 0x18
+#define DM365_ISP5_CCDCMUX 0x20
+#define DM365_ISP5_PG_FRAME_SIZE 0x28
+#define DM365_VPBE_CLK_CTRL 0x00
+
+#define VPSS_CLK_CTRL 0x01c40044
+#define VPSS_CLK_CTRL_VENCCLKEN BIT(3)
+#define VPSS_CLK_CTRL_DACCLKEN BIT(4)
+
+/*
+ * vpss interrupts. VDINT0 - vpss_int0, VDINT1 - vpss_int1,
+ * AF - vpss_int3
+ */
+#define DM365_ISP5_INTSEL1_DEFAULT 0x0b1f0100
+/* AEW - vpss_int6, RSZ_INT_DMA - vpss_int5 */
+#define DM365_ISP5_INTSEL2_DEFAULT 0x1f0a0f1f
+/* VENC - vpss_int8 */
+#define DM365_ISP5_INTSEL3_DEFAULT 0x00000015
+
+/* masks and shifts for DM365*/
+#define DM365_CCDC_PG_VD_POL_SHIFT 0
+#define DM365_CCDC_PG_HD_POL_SHIFT 1
+
+#define CCD_SRC_SEL_MASK (BIT_MASK(5) | BIT_MASK(4))
+#define CCD_SRC_SEL_SHIFT 4
+
+/* Different SoC platforms supported by this driver */
+enum vpss_platform_type {
+ DM644X,
+ DM355,
+ DM365,
+};
+
+/*
+ * vpss operations. Depends on platform. Not all functions are available
+ * on all platforms. The API first checks whether a function is available
+ * before invoking it. In probe, the function pointers are initialized based
+ * on the vpss name, which can be "dm355_vpss", "dm644x_vpss", etc.
+ */
+struct vpss_hw_ops {
+ /* enable clock */
+ int (*enable_clock)(enum vpss_clock_sel clock_sel, int en);
+ /* select input to ccdc */
+ void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel);
+ /* clear wbl overflow bit */
+ int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel);
+ /* set sync polarity */
+ void (*set_sync_pol)(struct vpss_sync_pol);
+ /* set the PG_FRAME_SIZE register*/
+ void (*set_pg_frame_size)(struct vpss_pg_frame_size);
+ /* check and clear interrupt if occurred */
+ int (*dma_complete_interrupt)(void);
+};
+
+/* vpss configuration */
+struct vpss_oper_config {
+ __iomem void *vpss_regs_base0;
+ __iomem void *vpss_regs_base1;
+ resource_size_t *vpss_regs_base2;
+ enum vpss_platform_type platform;
+ spinlock_t vpss_lock;
+ struct vpss_hw_ops hw_ops;
+};
+
+static struct vpss_oper_config oper_cfg;
+
+/* register access routines */
+static inline u32 bl_regr(u32 offset)
+{
+ return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
+}
+
+static inline void bl_regw(u32 val, u32 offset)
+{
+ __raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
+}
+
+static inline u32 vpss_regr(u32 offset)
+{
+ return __raw_readl(oper_cfg.vpss_regs_base1 + offset);
+}
+
+static inline void vpss_regw(u32 val, u32 offset)
+{
+ __raw_writel(val, oper_cfg.vpss_regs_base1 + offset);
+}
+
+/* For DM365 only */
+static inline u32 isp5_read(u32 offset)
+{
+ return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
+}
+
+/* For DM365 only */
+static inline void isp5_write(u32 val, u32 offset)
+{
+ __raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
+}
+
+static void dm365_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+ u32 temp = isp5_read(DM365_ISP5_CCDCMUX) & ~CCD_SRC_SEL_MASK;
+
+ /* if we are using pattern generator, enable it */
+ if (src_sel == VPSS_PGLPBK || src_sel == VPSS_CCDCPG)
+ temp |= 0x08;
+
+ temp |= (src_sel << CCD_SRC_SEL_SHIFT);
+ isp5_write(temp, DM365_ISP5_CCDCMUX);
+}
+
+static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+ bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX);
+}
+
+int vpss_dma_complete_interrupt(void)
+{
+ if (!oper_cfg.hw_ops.dma_complete_interrupt)
+ return 2;
+ return oper_cfg.hw_ops.dma_complete_interrupt();
+}
+EXPORT_SYMBOL(vpss_dma_complete_interrupt);
+
+int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+ if (!oper_cfg.hw_ops.select_ccdc_source)
+ return -EINVAL;
+
+ oper_cfg.hw_ops.select_ccdc_source(src_sel);
+ return 0;
+}
+EXPORT_SYMBOL(vpss_select_ccdc_source);
+
+static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
+{
+ u32 mask = 1, val;
+
+ if (wbl_sel < VPSS_PCR_AEW_WBL_0 ||
+ wbl_sel > VPSS_PCR_CCDC_WBL_O)
+ return -EINVAL;
+
+ /* writing a 0 clears the overflow */
+ mask = ~(mask << wbl_sel);
+ val = bl_regr(DM644X_SBL_PCR_VPSS) & mask;
+ bl_regw(val, DM644X_SBL_PCR_VPSS);
+ return 0;
+}
+
+void vpss_set_sync_pol(struct vpss_sync_pol sync)
+{
+ if (!oper_cfg.hw_ops.set_sync_pol)
+ return;
+
+ oper_cfg.hw_ops.set_sync_pol(sync);
+}
+EXPORT_SYMBOL(vpss_set_sync_pol);
+
+int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
+{
+ if (!oper_cfg.hw_ops.clear_wbl_overflow)
+ return -EINVAL;
+
+ return oper_cfg.hw_ops.clear_wbl_overflow(wbl_sel);
+}
+EXPORT_SYMBOL(vpss_clear_wbl_overflow);
+
+/*
+ * dm355_enable_clock - Enable VPSS Clock
+ * @clock_sel: Clock to be enabled/disabled
+ * @en: enable/disable flag
+ *
+ * This is called to enable or disable a vpss clock
+ */
+static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+ unsigned long flags;
+ u32 utemp, mask = 0x1, shift = 0;
+
+ switch (clock_sel) {
+ case VPSS_VPBE_CLOCK:
+ /* nothing since lsb */
+ break;
+ case VPSS_VENC_CLOCK_SEL:
+ shift = 2;
+ break;
+ case VPSS_CFALD_CLOCK:
+ shift = 3;
+ break;
+ case VPSS_H3A_CLOCK:
+ shift = 4;
+ break;
+ case VPSS_IPIPE_CLOCK:
+ shift = 5;
+ break;
+ case VPSS_CCDC_CLOCK:
+ shift = 6;
+ break;
+ default:
+ printk(KERN_ERR "dm355_enable_clock:"
+ " Invalid selector: %d\n", clock_sel);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
+ utemp = vpss_regr(DM355_VPSSCLK_CLKCTRL);
+ if (!en)
+ utemp &= ~(mask << shift);
+ else
+ utemp |= (mask << shift);
+
+ vpss_regw(utemp, DM355_VPSSCLK_CLKCTRL);
+ spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
+ return 0;
+}
+
+static int dm365_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+ unsigned long flags;
+ u32 utemp, mask = 0x1, shift = 0, offset = DM365_ISP5_PCCR;
+ u32 (*read)(u32 offset) = isp5_read;
+ void(*write)(u32 val, u32 offset) = isp5_write;
+
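+ /* ISP5 clocks are gated via DM365_ISP5_PCCR by default; the VPBE
+ * domain clocks below switch the accessors to the second register
+ * space (DM365_VPBE_CLK_CTRL) */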
+ switch (clock_sel) {
+ case VPSS_BL_CLOCK:
+ break;
+ case VPSS_CCDC_CLOCK:
+ shift = 1;
+ break;
+ case VPSS_H3A_CLOCK:
+ shift = 2;
+ break;
+ case VPSS_RSZ_CLOCK:
+ shift = 3;
+ break;
+ case VPSS_IPIPE_CLOCK:
+ shift = 4;
+ break;
+ case VPSS_IPIPEIF_CLOCK:
+ shift = 5;
+ break;
+ case VPSS_PCLK_INTERNAL:
+ shift = 6;
+ break;
+ case VPSS_PSYNC_CLOCK_SEL:
+ shift = 7;
+ break;
+ case VPSS_VPBE_CLOCK:
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_VENC_CLOCK_SEL:
+ shift = 2;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_LDC_CLOCK:
+ shift = 3;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_FDIF_CLOCK:
+ shift = 4;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_OSD_CLOCK_SEL:
+ shift = 6;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_LDC_CLOCK_SEL:
+ shift = 7;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ default:
+ printk(KERN_ERR "dm365_enable_clock: Invalid selector: %d\n",
+ clock_sel);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
+ utemp = read(offset);
+ if (!en) {
+ mask = ~mask;
+ utemp &= (mask << shift);
+ } else
+ utemp |= (mask << shift);
+
+ write(utemp, offset);
+ spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
+
+ return 0;
+}
+
+int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+ if (!oper_cfg.hw_ops.enable_clock)
+ return -EINVAL;
+
+ return oper_cfg.hw_ops.enable_clock(clock_sel, en);
+}
+EXPORT_SYMBOL(vpss_enable_clock);
+
+void dm365_vpss_set_sync_pol(struct vpss_sync_pol sync)
+{
+ int val = 0;
+ val = isp5_read(DM365_ISP5_CCDCMUX);
+
+ val |= (sync.ccdpg_hdpol << DM365_CCDC_PG_HD_POL_SHIFT);
+ val |= (sync.ccdpg_vdpol << DM365_CCDC_PG_VD_POL_SHIFT);
+
+ isp5_write(val, DM365_ISP5_CCDCMUX);
+}
+EXPORT_SYMBOL(dm365_vpss_set_sync_pol);
+
+void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
+{
+ if (!oper_cfg.hw_ops.set_pg_frame_size)
+ return;
+
+ oper_cfg.hw_ops.set_pg_frame_size(frame_size);
+}
+EXPORT_SYMBOL(vpss_set_pg_frame_size);
+
+void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
+{
+ int current_reg = ((frame_size.hlpfr >> 1) - 1) << 16;
+
+ current_reg |= (frame_size.pplen - 1);
+ isp5_write(current_reg, DM365_ISP5_PG_FRAME_SIZE);
+}
+EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
+
+static int vpss_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ char *platform_name;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENOENT;
+ }
+
+ platform_name = pdev->dev.platform_data;
+ if (!strcmp(platform_name, "dm355_vpss"))
+ oper_cfg.platform = DM355;
+ else if (!strcmp(platform_name, "dm365_vpss"))
+ oper_cfg.platform = DM365;
+ else if (!strcmp(platform_name, "dm644x_vpss"))
+ oper_cfg.platform = DM644X;
+ else {
+ dev_err(&pdev->dev, "vpss driver not supported on"
+ " this platform\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oper_cfg.vpss_regs_base0))
+ return PTR_ERR(oper_cfg.vpss_regs_base0);
+
+ if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(oper_cfg.vpss_regs_base1))
+ return PTR_ERR(oper_cfg.vpss_regs_base1);
+ }
+
+ if (oper_cfg.platform == DM355) {
+ oper_cfg.hw_ops.enable_clock = dm355_enable_clock;
+ oper_cfg.hw_ops.select_ccdc_source = dm355_select_ccdc_source;
+ /* Setup vpss interrupts */
+ bl_regw(DM355_VPSSBL_INTSEL_DEFAULT, DM355_VPSSBL_INTSEL);
+ bl_regw(DM355_VPSSBL_EVTSEL_DEFAULT, DM355_VPSSBL_EVTSEL);
+ } else if (oper_cfg.platform == DM365) {
+ oper_cfg.hw_ops.enable_clock = dm365_enable_clock;
+ oper_cfg.hw_ops.select_ccdc_source = dm365_select_ccdc_source;
+ /* Setup vpss interrupts */
+ isp5_write((isp5_read(DM365_ISP5_PCCR) |
+ DM365_ISP5_PCCR_BL_CLK_ENABLE |
+ DM365_ISP5_PCCR_ISIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_H3A_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSZ_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPE_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSV), DM365_ISP5_PCCR);
+ isp5_write((isp5_read(DM365_ISP5_BCR) |
+ DM365_ISP5_BCR_ISIF_OUT_ENABLE), DM365_ISP5_BCR);
+ isp5_write(DM365_ISP5_INTSEL1_DEFAULT, DM365_ISP5_INTSEL1);
+ isp5_write(DM365_ISP5_INTSEL2_DEFAULT, DM365_ISP5_INTSEL2);
+ isp5_write(DM365_ISP5_INTSEL3_DEFAULT, DM365_ISP5_INTSEL3);
+ } else
+ oper_cfg.hw_ops.clear_wbl_overflow = dm644x_clear_wbl_overflow;
+
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get(&pdev->dev);
+
+ spin_lock_init(&oper_cfg.vpss_lock);
+ dev_info(&pdev->dev, "%s vpss probe success\n", platform_name);
+
+ return 0;
+}
+
+static int vpss_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int vpss_suspend(struct device *dev)
+{
+ pm_runtime_put(dev);
+ return 0;
+}
+
+static int vpss_resume(struct device *dev)
+{
+ pm_runtime_get(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops vpss_pm_ops = {
+ .suspend = vpss_suspend,
+ .resume = vpss_resume,
+};
+
+static struct platform_driver vpss_driver = {
+ .driver = {
+ .name = "vpss",
+ .owner = THIS_MODULE,
+ .pm = &vpss_pm_ops,
+ },
+ .remove = vpss_remove,
+ .probe = vpss_probe,
+};
+
+static void vpss_exit(void)
+{
+ iounmap(oper_cfg.vpss_regs_base2);
+ release_mem_region(VPSS_CLK_CTRL, 4);
+ platform_driver_unregister(&vpss_driver);
+}
+
+static int __init vpss_init(void)
+{
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
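+ /* Enable the VENC and DAC clocks before registering the driver */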
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
+
+ return platform_driver_register(&vpss_driver);
+}
+subsys_initcall(vpss_init);
+module_exit(vpss_exit);
diff --git a/drivers/media/platform/exynos-gsc/Makefile b/drivers/media/platform/exynos-gsc/Makefile
new file mode 100644
index 00000000000..6d1411c6d49
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/Makefile
@@ -0,0 +1,3 @@
+exynos-gsc-objs := gsc-core.o gsc-m2m.o gsc-regs.o
+
+obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc.o
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
new file mode 100644
index 00000000000..9d0cc04d7ab
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -0,0 +1,1266 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <media/v4l2-ioctl.h>
+
+#include "gsc-core.h"
+
+#define GSC_CLOCK_GATE_NAME "gscl"
+
+static const struct gsc_fmt gsc_formats[] = {
+ {
+ .name = "RGB565",
+ .pixelformat = V4L2_PIX_FMT_RGB565X,
+ .depth = { 16 },
+ .color = GSC_RGB,
+ .num_planes = 1,
+ .num_comp = 1,
+ }, {
+ .name = "XRGB-8-8-8-8, 32 bpp",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .depth = { 32 },
+ .color = GSC_RGB,
+ .num_planes = 1,
+ .num_comp = 1,
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_C,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .pixelformat = V4L2_PIX_FMT_VYUY,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_C,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ }, {
+ .name = "YUV 4:4:4 planar, YCbYCr",
+ .pixelformat = V4L2_PIX_FMT_YUV32,
+ .depth = { 32 },
+ .color = GSC_YUV444,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 1,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV61,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 planar, YCrCb",
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 3,
+
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .depth = { 8, 2, 2 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 3,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cr/Cb",
+ .pixelformat = V4L2_PIX_FMT_YVU420M,
+ .depth = { 8, 2, 2 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 3,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 n.c. 2p, Y/CbCr tiled",
+ .pixelformat = V4L2_PIX_FMT_NV12MT_16X16,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
+ }
+};
+
+const struct gsc_fmt *get_format(int index)
+{
+ if (index >= ARRAY_SIZE(gsc_formats))
+ return NULL;
+
+ return &gsc_formats[index];
+}
+
+const struct gsc_fmt *find_fmt(u32 *pixelformat, u32 *mbus_code, u32 index)
+{
+ const struct gsc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+
+ if (index >= ARRAY_SIZE(gsc_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(gsc_formats); ++i) {
+ fmt = get_format(i);
+ if (pixelformat && fmt->pixelformat == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == i)
+ def_fmt = fmt;
+ }
+ return def_fmt;
+
+}
+
+void gsc_set_frame_size(struct gsc_frame *frame, int width, int height)
+{
+ frame->f_width = width;
+ frame->f_height = height;
+ frame->crop.width = width;
+ frame->crop.height = height;
+ frame->crop.left = 0;
+ frame->crop.top = 0;
+}
+
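+/*
+ * Select the prescaler downscaling ratio: 1 when the polyphase scaler alone
+ * can handle the ratio, otherwise 2 or 4; fail if the combined downscaling
+ * would exceed 1/(poly_sc_down_max * pre_sc_down_max), i.e. 1/16.
+ */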
+int gsc_cal_prescaler_ratio(struct gsc_variant *var, u32 src, u32 dst,
+ u32 *ratio)
+{
+ if ((dst > src) || (dst >= src / var->poly_sc_down_max)) {
+ *ratio = 1;
+ return 0;
+ }
+
+ if ((src / var->poly_sc_down_max / var->pre_sc_down_max) > dst) {
+ pr_err("Exceeded maximum downscaling ratio (1/16))");
+ return -EINVAL;
+ }
+
+ *ratio = (dst > (src / 8)) ? 2 : 4;
+
+ return 0;
+}
+
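+/* The prescaler shift factor is log2(hratio * vratio). */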
+void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh)
+{
+ if (hratio == 4 && vratio == 4)
+ *sh = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *sh = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *sh = 2;
+ else if (hratio == 1 && vratio == 1)
+ *sh = 0;
+ else
+ *sh = 1;
+}
+
+void gsc_check_src_scale_info(struct gsc_variant *var,
+ struct gsc_frame *s_frame, u32 *wratio,
+ u32 tx, u32 ty, u32 *hratio)
+{
+ int remainder = 0, walign, halign;
+
+ if (is_yuv420(s_frame->fmt->color)) {
+ walign = GSC_SC_ALIGN_4;
+ halign = GSC_SC_ALIGN_4;
+ } else if (is_yuv422(s_frame->fmt->color)) {
+ walign = GSC_SC_ALIGN_4;
+ halign = GSC_SC_ALIGN_2;
+ } else {
+ walign = GSC_SC_ALIGN_2;
+ halign = GSC_SC_ALIGN_2;
+ }
+
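+ /*
+ * Shrink the crop so it is a multiple of the prescaler ratio times the
+ * required alignment, then recompute the ratio for the adjusted size.
+ */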
+ remainder = s_frame->crop.width % (*wratio * walign);
+ if (remainder) {
+ s_frame->crop.width -= remainder;
+ gsc_cal_prescaler_ratio(var, s_frame->crop.width, tx, wratio);
+ pr_info("cropped src width size is recalculated from %d to %d",
+ s_frame->crop.width + remainder, s_frame->crop.width);
+ }
+
+ remainder = s_frame->crop.height % (*hratio * halign);
+ if (remainder) {
+ s_frame->crop.height -= remainder;
+ gsc_cal_prescaler_ratio(var, s_frame->crop.height, ty, hratio);
+ pr_info("cropped src height size is recalculated from %d to %d",
+ s_frame->crop.height + remainder, s_frame->crop.height);
+ }
+}
+
+int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f)
+{
+ const struct gsc_fmt *fmt;
+
+ fmt = find_fmt(NULL, NULL, f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->pixelformat;
+
+ return 0;
+}
+
+static u32 get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index)
+{
+ if (frm->addr.y == addr) {
+ *index = 0;
+ return frm->addr.y;
+ } else if (frm->addr.cb == addr) {
+ *index = 1;
+ return frm->addr.cb;
+ } else if (frm->addr.cr == addr) {
+ *index = 2;
+ return frm->addr.cr;
+ } else {
+ pr_err("Plane address is wrong");
+ return -EINVAL;
+ }
+}
+
+void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
+{
+ u32 f_chk_addr, f_chk_len, s_chk_addr, s_chk_len;
+ f_chk_addr = f_chk_len = s_chk_addr = s_chk_len = 0;
+
+ f_chk_addr = frm->addr.y;
+ f_chk_len = frm->payload[0];
+ if (frm->fmt->num_planes == 2) {
+ s_chk_addr = frm->addr.cb;
+ s_chk_len = frm->payload[1];
+ } else if (frm->fmt->num_planes == 3) {
+ u32 low_addr, low_plane, mid_addr, mid_plane;
+ u32 high_addr, high_plane;
+ u32 t_min, t_max;
+
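+ /*
+ * Order the planes by start address and merge them into two
+ * contiguous regions, pairing the middle plane with whichever
+ * neighbour yields the smaller combined span.
+ */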
+ t_min = min3(frm->addr.y, frm->addr.cb, frm->addr.cr);
+ low_addr = get_plane_info(frm, t_min, &low_plane);
+ t_max = max3(frm->addr.y, frm->addr.cb, frm->addr.cr);
+ high_addr = get_plane_info(frm, t_max, &high_plane);
+
+ mid_plane = 3 - (low_plane + high_plane);
+ if (mid_plane == 0)
+ mid_addr = frm->addr.y;
+ else if (mid_plane == 1)
+ mid_addr = frm->addr.cb;
+ else if (mid_plane == 2)
+ mid_addr = frm->addr.cr;
+ else
+ return;
+
+ f_chk_addr = low_addr;
+ if (mid_addr + frm->payload[mid_plane] - low_addr >
+ high_addr + frm->payload[high_plane] - mid_addr) {
+ f_chk_len = frm->payload[low_plane];
+ s_chk_addr = mid_addr;
+ s_chk_len = high_addr +
+ frm->payload[high_plane] - mid_addr;
+ } else {
+ f_chk_len = mid_addr +
+ frm->payload[mid_plane] - low_addr;
+ s_chk_addr = high_addr;
+ s_chk_len = frm->payload[high_plane];
+ }
+ }
+ pr_debug("f_addr = 0x%08x, f_len = %d, s_addr = 0x%08x, s_len = %d\n",
+ f_chk_addr, f_chk_len, s_chk_addr, s_chk_len);
+}
+
+int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
+{
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ struct gsc_variant *variant = gsc->variant;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct gsc_fmt *fmt;
+ u32 max_w, max_h, mod_x, mod_y;
+ u32 min_w, min_h, tmp_w, tmp_h;
+ int i;
+
+ pr_debug("user put w: %d, h: %d", pix_mp->width, pix_mp->height);
+
+ fmt = find_fmt(&pix_mp->pixelformat, NULL, 0);
+ if (!fmt) {
+ pr_err("pixelformat format (0x%X) invalid\n",
+ pix_mp->pixelformat);
+ return -EINVAL;
+ }
+
+ if (pix_mp->field == V4L2_FIELD_ANY)
+ pix_mp->field = V4L2_FIELD_NONE;
+ else if (pix_mp->field != V4L2_FIELD_NONE) {
+ pr_err("Not supported field order(%d)\n", pix_mp->field);
+ return -EINVAL;
+ }
+
+ max_w = variant->pix_max->target_rot_dis_w;
+ max_h = variant->pix_max->target_rot_dis_h;
+
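+ /*
+ * v4l_bound_align_image() takes alignments as power-of-two exponents
+ * (ffs(n) - 1 == log2(n)); 4:2:0 formats need the full org_h height
+ * alignment, other formats only half of it.
+ */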
+ mod_x = ffs(variant->pix_align->org_w) - 1;
+ if (is_yuv420(fmt->color))
+ mod_y = ffs(variant->pix_align->org_h) - 1;
+ else
+ mod_y = ffs(variant->pix_align->org_h) - 2;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ min_w = variant->pix_min->org_w;
+ min_h = variant->pix_min->org_h;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+
+ pr_debug("mod_x: %d, mod_y: %d, max_w: %d, max_h = %d",
+ mod_x, mod_y, max_w, max_h);
+
+ /* Check whether the image size has to be adjusted to match the
+ hardware capabilities */
+ tmp_w = pix_mp->width;
+ tmp_h = pix_mp->height;
+
+ v4l_bound_align_image(&pix_mp->width, min_w, max_w, mod_x,
+ &pix_mp->height, min_h, max_h, mod_y, 0);
+ if (tmp_w != pix_mp->width || tmp_h != pix_mp->height)
+ pr_info("Image size has been modified from %dx%d to %dx%d",
+ tmp_w, tmp_h, pix_mp->width, pix_mp->height);
+
+ pix_mp->num_planes = fmt->num_planes;
+
+ if (pix_mp->width >= 1280) /* HD */
+ pix_mp->colorspace = V4L2_COLORSPACE_REC709;
+ else /* SD */
+ pix_mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ int bpl = (pix_mp->width * fmt->depth[i]) >> 3;
+ pix_mp->plane_fmt[i].bytesperline = bpl;
+ pix_mp->plane_fmt[i].sizeimage = bpl * pix_mp->height;
+
+ pr_debug("[%d]: bpl: %d, sizeimage: %d",
+ i, bpl, pix_mp->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
+{
+ struct gsc_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ int i;
+
+ frame = ctx_get_frame(ctx, f->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ pix_mp = &f->fmt.pix_mp;
+
+ pix_mp->width = frame->f_width;
+ pix_mp->height = frame->f_height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = frame->fmt->pixelformat;
+ pix_mp->colorspace = V4L2_COLORSPACE_REC709;
+ pix_mp->num_planes = frame->fmt->num_planes;
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ pix_mp->plane_fmt[i].bytesperline = (frame->f_width *
+ frame->fmt->depth[i]) / 8;
+ pix_mp->plane_fmt[i].sizeimage =
+ pix_mp->plane_fmt[i].bytesperline * frame->f_height;
+ }
+
+ return 0;
+}
+
+void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h)
+{
+ if (tmp_w != *w || tmp_h != *h) {
+ pr_info("Cropped size has been modified from %dx%d to %dx%d",
+ *w, *h, tmp_w, tmp_h);
+ *w = tmp_w;
+ *h = tmp_h;
+ }
+}
+
+int gsc_g_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct gsc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c = frame->crop;
+
+ return 0;
+}
+
+int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct gsc_frame *f;
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ struct gsc_variant *variant = gsc->variant;
+ u32 mod_x = 0, mod_y = 0, tmp_w, tmp_h;
+ u32 min_w, min_h, max_w, max_h;
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ pr_err("doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+ pr_debug("user put w: %d, h: %d", cr->c.width, cr->c.height);
+
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ f = &ctx->d_frame;
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ max_w = f->f_width;
+ max_h = f->f_height;
+ tmp_w = cr->c.width;
+ tmp_h = cr->c.height;
+
+ if (V4L2_TYPE_IS_OUTPUT(cr->type)) {
+ if ((is_yuv422(f->fmt->color) && f->fmt->num_comp == 1) ||
+ is_rgb(f->fmt->color))
+ min_w = 32;
+ else
+ min_w = 64;
+ if ((is_yuv422(f->fmt->color) && f->fmt->num_comp == 3) ||
+ is_yuv420(f->fmt->color))
+ min_h = 32;
+ else
+ min_h = 16;
+ } else {
+ if (is_yuv420(f->fmt->color) || is_yuv422(f->fmt->color))
+ mod_x = ffs(variant->pix_align->target_w) - 1;
+ if (is_yuv420(f->fmt->color))
+ mod_y = ffs(variant->pix_align->target_h) - 1;
+ if (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270) {
+ max_w = f->f_height;
+ max_h = f->f_width;
+ min_w = variant->pix_min->target_rot_en_w;
+ min_h = variant->pix_min->target_rot_en_h;
+ tmp_w = cr->c.height;
+ tmp_h = cr->c.width;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+ }
+ pr_debug("mod_x: %d, mod_y: %d, min_w: %d, min_h = %d",
+ mod_x, mod_y, min_w, min_h);
+ pr_debug("tmp_w : %d, tmp_h : %d", tmp_w, tmp_h);
+
+ v4l_bound_align_image(&tmp_w, min_w, max_w, mod_x,
+ &tmp_h, min_h, max_h, mod_y, 0);
+
+ if (!V4L2_TYPE_IS_OUTPUT(cr->type) &&
+ (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270))
+ gsc_check_crop_change(tmp_h, tmp_w,
+ &cr->c.width, &cr->c.height);
+ else
+ gsc_check_crop_change(tmp_w, tmp_h,
+ &cr->c.width, &cr->c.height);
+
+
+ /* adjust left/top if the cropping rectangle is out of bounds */
+ /* TODO: align the left value to a multiple of 2 */
+ if (cr->c.left + tmp_w > max_w)
+ cr->c.left = max_w - tmp_w;
+ if (cr->c.top + tmp_h > max_h)
+ cr->c.top = max_h - tmp_h;
+
+ if ((is_yuv420(f->fmt->color) || is_yuv422(f->fmt->color)) &&
+ cr->c.left & 1)
+ cr->c.left -= 1;
+
+ pr_debug("Aligned l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height, max_w, max_h);
+
+ return 0;
+}
+
+int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw,
+ int dh, int rot, int out_path)
+{
+ int tmp_w, tmp_h, sc_down_max;
+
+ if (out_path == GSC_DMA)
+ sc_down_max = var->sc_down_max;
+ else
+ sc_down_max = var->local_sc_down;
+
+ if (rot == 90 || rot == 270) {
+ tmp_w = dh;
+ tmp_h = dw;
+ } else {
+ tmp_w = dw;
+ tmp_h = dh;
+ }
+
+ if ((sw / tmp_w) > sc_down_max ||
+ (sh / tmp_h) > sc_down_max ||
+ (tmp_w / sw) > var->sc_up_max ||
+ (tmp_h / sh) > var->sc_up_max)
+ return -EINVAL;
+
+ return 0;
+}
+
+int gsc_set_scaler_info(struct gsc_ctx *ctx)
+{
+ struct gsc_scaler *sc = &ctx->scaler;
+ struct gsc_frame *s_frame = &ctx->s_frame;
+ struct gsc_frame *d_frame = &ctx->d_frame;
+ struct gsc_variant *variant = ctx->gsc_dev->variant;
+ struct device *dev = &ctx->gsc_dev->pdev->dev;
+ int tx, ty;
+ int ret;
+
+ ret = gsc_check_scaler_ratio(variant, s_frame->crop.width,
+ s_frame->crop.height, d_frame->crop.width, d_frame->crop.height,
+ ctx->gsc_ctrls.rotate->val, ctx->out_path);
+ if (ret) {
+ pr_err("out of scaler range");
+ return ret;
+ }
+
+ if (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270) {
+ ty = d_frame->crop.width;
+ tx = d_frame->crop.height;
+ } else {
+ tx = d_frame->crop.width;
+ ty = d_frame->crop.height;
+ }
+
+ if (tx <= 0 || ty <= 0) {
+ dev_err(dev, "Invalid target size: %dx%d", tx, ty);
+ return -EINVAL;
+ }
+
+ ret = gsc_cal_prescaler_ratio(variant, s_frame->crop.width,
+ tx, &sc->pre_hratio);
+ if (ret) {
+ pr_err("Horizontal scale ratio is out of range");
+ return ret;
+ }
+
+ ret = gsc_cal_prescaler_ratio(variant, s_frame->crop.height,
+ ty, &sc->pre_vratio);
+ if (ret) {
+ pr_err("Vertical scale ratio is out of range");
+ return ret;
+ }
+
+ gsc_check_src_scale_info(variant, s_frame, &sc->pre_hratio,
+ tx, ty, &sc->pre_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
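+ /* The main scaler ratios are source/target quotients in 16.16 fixed point. */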
+ sc->main_hratio = (s_frame->crop.width << 16) / tx;
+ sc->main_vratio = (s_frame->crop.height << 16) / ty;
+
+ pr_debug("scaler input/output size : sx = %d, sy = %d, tx = %d, ty = %d",
+ s_frame->crop.width, s_frame->crop.height, tx, ty);
+ pr_debug("scaler ratio info : pre_shfactor : %d, pre_h : %d",
+ sc->pre_shfactor, sc->pre_hratio);
+ pr_debug("pre_v :%d, main_h : %d, main_v : %d",
+ sc->pre_vratio, sc->main_hratio, sc->main_vratio);
+
+ return 0;
+}
+
+static int __gsc_s_ctrl(struct gsc_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ struct gsc_variant *variant = gsc->variant;
+ unsigned int flags = GSC_DST_FMT | GSC_SRC_FMT;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+
+ case V4L2_CID_ROTATE:
+ if ((ctx->state & flags) == flags) {
+ ret = gsc_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height,
+ ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->gsc_ctrls.rotate->val,
+ ctx->out_path);
+
+ if (ret)
+ return -EINVAL;
+ }
+
+ ctx->rotation = ctrl->val;
+ break;
+
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
+ }
+
+ ctx->state |= GSC_PARAMS;
+ return 0;
+}
+
+static int gsc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gsc_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ret = __gsc_s_ctrl(ctx, ctrl);
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops gsc_ctrl_ops = {
+ .s_ctrl = gsc_s_ctrl,
+};
+
+int gsc_ctrls_create(struct gsc_ctx *ctx)
+{
+ if (ctx->ctrls_rdy) {
+ pr_err("Control handler of this context was created already");
+ return 0;
+ }
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, GSC_MAX_CTRL_NUM);
+
+ ctx->gsc_ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctx->gsc_ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctx->gsc_ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ ctx->gsc_ctrls.global_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
+
+ ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ pr_err("Failed to create G-Scaler control handlers");
+ return err;
+ }
+
+ return 0;
+}
+
+void gsc_ctrls_delete(struct gsc_ctx *ctx)
+{
+ if (ctx->ctrls_rdy) {
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ ctx->ctrls_rdy = false;
+ }
+}
+
+/* The color format (num_comp, num_planes) must be already configured. */
+int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
+ struct gsc_frame *frame, struct gsc_addr *addr)
+{
+ int ret = 0;
+ u32 pix_size;
+
+ if ((vb == NULL) || (frame == NULL))
+ return -EINVAL;
+
+ pix_size = frame->f_width * frame->f_height;
+
+ pr_debug("num_planes= %d, num_comp= %d, pix_size= %d",
+ frame->fmt->num_planes, frame->fmt->num_comp, pix_size);
+
+ addr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (frame->fmt->num_planes == 1) {
+ switch (frame->fmt->num_comp) {
+ case 1:
+ addr->cb = 0;
+ addr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ addr->cb = (dma_addr_t)(addr->y + pix_size);
+ addr->cr = 0;
+ break;
+ case 3:
+ /* decompose Y into Y/Cb/Cr */
+ addr->cb = (dma_addr_t)(addr->y + pix_size);
+ if (GSC_YUV420 == frame->fmt->color)
+ addr->cr = (dma_addr_t)(addr->cb
+ + (pix_size >> 2));
+ else /* 422 */
+ addr->cr = (dma_addr_t)(addr->cb
+ + (pix_size >> 1));
+ break;
+ default:
+ pr_err("Invalid the number of color planes");
+ return -EINVAL;
+ }
+ } else {
+ if (frame->fmt->num_planes >= 2)
+ addr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
+
+ if (frame->fmt->num_planes == 3)
+ addr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
+ }
+
+ if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
+ swap(addr->cb, addr->cr);
+
+ pr_debug("ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
+ addr->y, addr->cb, addr->cr, ret);
+
+ return ret;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *priv)
+{
+ struct gsc_dev *gsc = priv;
+ struct gsc_ctx *ctx;
+ int gsc_irq;
+
+ gsc_irq = gsc_hw_get_irq_status(gsc);
+ gsc_hw_clear_irq(gsc, gsc_irq);
+
+ if (gsc_irq == GSC_IRQ_OVERRUN) {
+ pr_err("Local path input over-run interrupt has occurred!\n");
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&gsc->slock);
+
+ if (test_and_clear_bit(ST_M2M_PEND, &gsc->state)) {
+
+ gsc_hw_enable_control(gsc, false);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDING, &gsc->state)) {
+ set_bit(ST_M2M_SUSPENDED, &gsc->state);
+ wake_up(&gsc->irq_queue);
+ goto isr_unlock;
+ }
+ ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
+
+ if (!ctx || !ctx->m2m_ctx)
+ goto isr_unlock;
+
+ spin_unlock(&gsc->slock);
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
+
+ /* wake_up job_abort, stop_streaming */
+ if (ctx->state & GSC_CTX_STOP_REQ) {
+ ctx->state &= ~GSC_CTX_STOP_REQ;
+ wake_up(&gsc->irq_queue);
+ }
+ return IRQ_HANDLED;
+ }
+
+isr_unlock:
+ spin_unlock(&gsc->slock);
+ return IRQ_HANDLED;
+}
+
+static struct gsc_pix_max gsc_v_100_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2047,
+ .real_rot_en_h = 2047,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_min gsc_v_100_min = {
+ .org_w = 64,
+ .org_h = 32,
+ .real_w = 64,
+ .real_h = 32,
+ .target_rot_dis_w = 64,
+ .target_rot_dis_h = 32,
+ .target_rot_en_w = 32,
+ .target_rot_en_h = 16,
+};
+
+static struct gsc_pix_align gsc_v_100_align = {
+ .org_h = 16,
+ .org_w = 16, /* yuv420 : 16, others : 8 */
+ .offset_h = 2, /* yuv420/422 : 2, others : 1 */
+ .real_w = 16, /* yuv420/422 : 4~16, others : 2~8 */
+ .real_h = 16, /* yuv420 : 4~16, others : 1 */
+ .target_w = 2, /* yuv420/422 : 2, others : 1 */
+ .target_h = 2, /* yuv420 : 2, others : 1 */
+};
+
+static struct gsc_variant gsc_v_100_variant = {
+ .pix_max = &gsc_v_100_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_driverdata gsc_v_100_drvdata = {
+ .variant = {
+ [0] = &gsc_v_100_variant,
+ [1] = &gsc_v_100_variant,
+ [2] = &gsc_v_100_variant,
+ [3] = &gsc_v_100_variant,
+ },
+ .num_entities = 4,
+ .lclk_frequency = 266000000UL,
+};
+
+static struct platform_device_id gsc_driver_ids[] = {
+ {
+ .name = "exynos-gsc",
+ .driver_data = (unsigned long)&gsc_v_100_drvdata,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, gsc_driver_ids);
+
+static const struct of_device_id exynos_gsc_match[] = {
+ {
+ .compatible = "samsung,exynos5-gsc",
+ .data = &gsc_v_100_drvdata,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_gsc_match);
+
+static void *gsc_get_drv_data(struct platform_device *pdev)
+{
+ struct gsc_driverdata *driver_data = NULL;
+
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(exynos_gsc_match,
+ pdev->dev.of_node);
+ if (match)
+ driver_data = (struct gsc_driverdata *)match->data;
+ } else {
+ driver_data = (struct gsc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+
+ return driver_data;
+}
+
+static void gsc_clk_put(struct gsc_dev *gsc)
+{
+ if (!IS_ERR(gsc->clock))
+ clk_unprepare(gsc->clock);
+}
+
+static int gsc_clk_get(struct gsc_dev *gsc)
+{
+ int ret;
+
+ dev_dbg(&gsc->pdev->dev, "gsc_clk_get Called\n");
+
+ gsc->clock = devm_clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
+ if (IS_ERR(gsc->clock)) {
+ dev_err(&gsc->pdev->dev, "failed to get clock~~~: %s\n",
+ GSC_CLOCK_GATE_NAME);
+ return PTR_ERR(gsc->clock);
+ }
+
+ ret = clk_prepare(gsc->clock);
+ if (ret < 0) {
+ dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
+ GSC_CLOCK_GATE_NAME);
+ gsc->clock = ERR_PTR(-EINVAL);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gsc_m2m_suspend(struct gsc_dev *gsc)
+{
+ unsigned long flags;
+ int timeout;
+
+ spin_lock_irqsave(&gsc->slock, flags);
+ if (!gsc_m2m_pending(gsc)) {
+ spin_unlock_irqrestore(&gsc->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &gsc->state);
+ set_bit(ST_M2M_SUSPENDING, &gsc->state);
+ spin_unlock_irqrestore(&gsc->slock, flags);
+
+ timeout = wait_event_timeout(gsc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &gsc->state),
+ GSC_SHUTDOWN_TIMEOUT);
+
+ clear_bit(ST_M2M_SUSPENDING, &gsc->state);
+ return timeout == 0 ? -EAGAIN : 0;
+}
+
+static int gsc_m2m_resume(struct gsc_dev *gsc)
+{
+ struct gsc_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsc->slock, flags);
+ /* Clear for full H/W setup in first run after resume */
+ ctx = gsc->m2m.ctx;
+ gsc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&gsc->slock, flags);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ return 0;
+}
+
+static int gsc_probe(struct platform_device *pdev)
+{
+ struct gsc_dev *gsc;
+ struct resource *res;
+ struct gsc_driverdata *drv_data = gsc_get_drv_data(pdev);
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
+ if (!gsc)
+ return -ENOMEM;
+
+ if (dev->of_node)
+ gsc->id = of_alias_get_id(pdev->dev.of_node, "gsc");
+ else
+ gsc->id = pdev->id;
+
+ if (gsc->id < 0 || gsc->id >= drv_data->num_entities) {
+ dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
+ return -EINVAL;
+ }
+
+ gsc->variant = drv_data->variant[gsc->id];
+ gsc->pdev = pdev;
+ gsc->pdata = dev->platform_data;
+
+ init_waitqueue_head(&gsc->irq_queue);
+ spin_lock_init(&gsc->slock);
+ mutex_init(&gsc->lock);
+ gsc->clock = ERR_PTR(-EINVAL);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gsc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gsc->regs))
+ return PTR_ERR(gsc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ ret = gsc_clk_get(gsc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, res->start, gsc_irq_handler,
+ 0, pdev->name, gsc);
+ if (ret) {
+ dev_err(dev, "failed to install irq (%d)\n", ret);
+ goto err_clk;
+ }
+
+ ret = v4l2_device_register(dev, &gsc->v4l2_dev);
+ if (ret)
+ goto err_clk;
+
+ ret = gsc_register_m2m_device(gsc);
+ if (ret)
+ goto err_v4l2;
+
+ platform_set_drvdata(pdev, gsc);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_m2m;
+
+ /* Initialize contiguous memory allocator */
+ gsc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
+ if (IS_ERR(gsc->alloc_ctx)) {
+ ret = PTR_ERR(gsc->alloc_ctx);
+ goto err_pm;
+ }
+
+ dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
+
+ pm_runtime_put(dev);
+ return 0;
+err_pm:
+ pm_runtime_put(dev);
+err_m2m:
+ gsc_unregister_m2m_device(gsc);
+err_v4l2:
+ v4l2_device_unregister(&gsc->v4l2_dev);
+err_clk:
+ gsc_clk_put(gsc);
+ return ret;
+}
+
+static int gsc_remove(struct platform_device *pdev)
+{
+ struct gsc_dev *gsc = platform_get_drvdata(pdev);
+
+ gsc_unregister_m2m_device(gsc);
+ v4l2_device_unregister(&gsc->v4l2_dev);
+
+ vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
+ pm_runtime_disable(&pdev->dev);
+ gsc_clk_put(gsc);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ return 0;
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+
+ ret = clk_enable(gsc->clock);
+ if (ret)
+ return ret;
+
+ gsc_hw_set_sw_reset(gsc);
+ gsc_wait_reset(gsc);
+
+ return gsc_m2m_resume(gsc);
+}
+
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = gsc_m2m_suspend(gsc);
+ if (!ret)
+ clk_disable(gsc->clock);
+
+ pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+ return ret;
+}
+
+static int gsc_resume(struct device *dev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+
+ /* Do not resume if the device was idle before system suspend */
+ spin_lock_irqsave(&gsc->slock, flags);
+ if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
+ !gsc_m2m_opened(gsc)) {
+ spin_unlock_irqrestore(&gsc->slock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&gsc->slock, flags);
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_runtime_resume(dev);
+
+ return 0;
+}
+
+static int gsc_suspend(struct device *dev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+
+ pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+
+ if (test_and_set_bit(ST_SUSPEND, &gsc->state))
+ return 0;
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_runtime_suspend(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops gsc_pm_ops = {
+ .suspend = gsc_suspend,
+ .resume = gsc_resume,
+ .runtime_suspend = gsc_runtime_suspend,
+ .runtime_resume = gsc_runtime_resume,
+};
+
+static struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = gsc_remove,
+ .id_table = gsc_driver_ids,
+ .driver = {
+ .name = GSC_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &gsc_pm_ops,
+ .of_match_table = exynos_gsc_match,
+ }
+};
+
+module_platform_driver(gsc_driver);
+
+MODULE_AUTHOR("Hyunwong Kim <khw0178.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung EXYNOS5 Soc series G-Scaler driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
new file mode 100644
index 00000000000..ef0a6564cef
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * header file for Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef GSC_CORE_H_
+#define GSC_CORE_H_
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "gsc-regs.h"
+
+#define CONFIG_VB2_GSC_DMA_CONTIG 1
+#define GSC_MODULE_NAME "exynos-gsc"
+
+#define GSC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
+#define GSC_MAX_DEVS 4
+#define GSC_M2M_BUF_NUM 0
+#define GSC_MAX_CTRL_NUM 10
+#define GSC_SC_ALIGN_4 4
+#define GSC_SC_ALIGN_2 2
+#define DEFAULT_CSC_EQ 1
+#define DEFAULT_CSC_RANGE 1
+
+#define GSC_PARAMS (1 << 0)
+#define GSC_SRC_FMT (1 << 1)
+#define GSC_DST_FMT (1 << 2)
+#define GSC_CTX_M2M (1 << 3)
+#define GSC_CTX_STOP_REQ (1 << 6)
+#define GSC_CTX_ABORT (1 << 7)
+
+enum gsc_dev_flags {
+ /* for global */
+ ST_SUSPEND,
+
+ /* for m2m node */
+ ST_M2M_OPEN,
+ ST_M2M_RUN,
+ ST_M2M_PEND,
+ ST_M2M_SUSPENDED,
+ ST_M2M_SUSPENDING,
+};
+
+enum gsc_irq {
+ GSC_IRQ_DONE,
+ GSC_IRQ_OVERRUN
+};
+
+/**
+ * enum gsc_datapath - the path of data used for G-Scaler
+ * @GSC_CAMERA: from camera
+ * @GSC_DMA: from/to DMA
+ * @GSC_MIXER: to the local mixer path
+ * @GSC_FIMD: to the local FIMD path
+ * @GSC_WRITEBACK: from FIMD
+ */
+enum gsc_datapath {
+ GSC_CAMERA = 0x1,
+ GSC_DMA,
+ GSC_MIXER,
+ GSC_FIMD,
+ GSC_WRITEBACK,
+};
+
+enum gsc_color_fmt {
+ GSC_RGB = 0x1,
+ GSC_YUV420 = 0x2,
+ GSC_YUV422 = 0x4,
+ GSC_YUV444 = 0x8,
+};
+
+enum gsc_yuv_fmt {
+ GSC_LSB_Y = 0x10,
+ GSC_LSB_C,
+ GSC_CBCR = 0x20,
+ GSC_CRCB,
+};
+
+#define fh_to_ctx(__fh) container_of(__fh, struct gsc_ctx, fh)
+#define is_rgb(x) (!!((x) & 0x1))
+#define is_yuv420(x) (!!((x) & 0x2))
+#define is_yuv422(x) (!!((x) & 0x4))
+
+#define gsc_m2m_active(dev) test_bit(ST_M2M_RUN, &(dev)->state)
+#define gsc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+#define gsc_m2m_opened(dev) test_bit(ST_M2M_OPEN, &(dev)->state)
+
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct gsc_ctx, ctrl_handler)
+/**
+ * struct gsc_fmt - the driver's internal color format data
+ * @mbus_code: Media Bus pixel code, -1 if not applicable
+ * @name: format description
+ * @pixelformat: the fourcc code for this format, 0 if not applicable
+ * @yorder: Y/C order
+ * @corder: Chrominance order control
+ * @num_planes: number of physically non-contiguous data planes
+ * @num_comp: number of physically contiguous data planes
+ * @depth: per-plane number of bits per pixel (driver private)
+ * @flags: flags indicating which operation modes the format applies to
+ */
+struct gsc_fmt {
+ enum v4l2_mbus_pixelcode mbus_code;
+ char *name;
+ u32 pixelformat;
+ u32 color;
+ u32 yorder;
+ u32 corder;
+ u16 num_planes;
+ u16 num_comp;
+ u8 depth[VIDEO_MAX_PLANES];
+ u32 flags;
+};
+
+/**
+ * struct gsc_input_buf - the driver's video buffer
+ * @vb: videobuf2 buffer
+ * @list : linked list structure for buffer queue
+ * @idx : index of G-Scaler input buffer
+ */
+struct gsc_input_buf {
+ struct vb2_buffer vb;
+ struct list_head list;
+ int idx;
+};
+
+/**
+ * struct gsc_addr - the G-Scaler physical address set
+ * @y: luminance plane address
+ * @cb: Cb plane address
+ * @cr: Cr plane address
+ */
+struct gsc_addr {
+ dma_addr_t y;
+ dma_addr_t cb;
+ dma_addr_t cr;
+};
+
+/**
+ * struct gsc_ctrls - the G-Scaler control set
+ * @rotate: rotation degree
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @global_alpha: the alpha value of current frame
+ */
+struct gsc_ctrls {
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *global_alpha;
+};
+
+/**
+ * struct gsc_scaler - the configuration data for the G-Scaler internal scaler
+ * @pre_shfactor: prescaler shift factor
+ * @pre_hratio: horizontal ratio of the prescaler
+ * @pre_vratio: vertical ratio of the prescaler
+ * @main_hratio: the main scaler's horizontal ratio
+ * @main_vratio: the main scaler's vertical ratio
+ */
+struct gsc_scaler {
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ u32 main_hratio;
+ u32 main_vratio;
+};
+
+struct gsc_dev;
+
+struct gsc_ctx;
+
+/**
+ * struct gsc_frame - source/target frame properties
+ * @f_width: SRC : SRCIMG_WIDTH, DST : OUTPUTDMA_WHOLE_IMG_WIDTH
+ * @f_height: SRC : SRCIMG_HEIGHT, DST : OUTPUTDMA_WHOLE_IMG_HEIGHT
+ * @crop: cropped(source)/scaled(destination) size
+ * @payload: image size in bytes (w x h x bpp)
+ * @addr: image frame buffer physical addresses
+ * @fmt: G-Scaler color format pointer
+ * @colorspace: value indicating v4l2_colorspace
+ * @alpha: frame's alpha value
+ */
+struct gsc_frame {
+ u32 f_width;
+ u32 f_height;
+ struct v4l2_rect crop;
+ unsigned long payload[VIDEO_MAX_PLANES];
+ struct gsc_addr addr;
+ const struct gsc_fmt *fmt;
+ u32 colorspace;
+ u8 alpha;
+};
+
+/**
+ * struct gsc_m2m_device - v4l2 memory-to-memory device data
+ * @vfd: the video device node for v4l2 m2m mode
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx: hardware context data
+ * @refcnt: the reference counter
+ */
+struct gsc_m2m_device {
+ struct video_device *vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct gsc_ctx *ctx;
+ int refcnt;
+};
+
+/**
+ * struct gsc_pix_max - image pixel size limits in various IP configurations
+ *
+ * @org_scaler_bypass_w: max pixel width when the scaler is disabled
+ * @org_scaler_bypass_h: max pixel height when the scaler is disabled
+ * @org_scaler_input_w: max pixel width when the scaler is enabled
+ * @org_scaler_input_h: max pixel height when the scaler is enabled
+ * @real_rot_dis_w: max pixel src cropped width when the rotator is off
+ * @real_rot_dis_h: max pixel src cropped height when the rotator is off
+ * @real_rot_en_w: max pixel src cropped width when the rotator is on
+ * @real_rot_en_h: max pixel src cropped height when the rotator is on
+ * @target_rot_dis_w: max pixel dst scaled width when the rotator is off
+ * @target_rot_dis_h: max pixel dst scaled height when the rotator is off
+ * @target_rot_en_w: max pixel dst scaled width when the rotator is on
+ * @target_rot_en_h: max pixel dst scaled height when the rotator is on
+ */
+struct gsc_pix_max {
+ u16 org_scaler_bypass_w;
+ u16 org_scaler_bypass_h;
+ u16 org_scaler_input_w;
+ u16 org_scaler_input_h;
+ u16 real_rot_dis_w;
+ u16 real_rot_dis_h;
+ u16 real_rot_en_w;
+ u16 real_rot_en_h;
+ u16 target_rot_dis_w;
+ u16 target_rot_dis_h;
+ u16 target_rot_en_w;
+ u16 target_rot_en_h;
+};
+
+/**
+ * struct gsc_pix_min - image pixel size limits in various IP configurations
+ *
+ * @org_w: minimum source pixel width
+ * @org_h: minimum source pixel height
+ * @real_w: minimum input crop pixel width
+ * @real_h: minimum input crop pixel height
+ * @target_rot_dis_w: minimum output scaled pixel width when the rotator is off
+ * @target_rot_dis_h: minimum output scaled pixel height when the rotator is off
+ * @target_rot_en_w: minimum output scaled pixel width when the rotator is on
+ * @target_rot_en_h: minimum output scaled pixel height when the rotator is on
+ */
+struct gsc_pix_min {
+ u16 org_w;
+ u16 org_h;
+ u16 real_w;
+ u16 real_h;
+ u16 target_rot_dis_w;
+ u16 target_rot_dis_h;
+ u16 target_rot_en_w;
+ u16 target_rot_en_h;
+};
+
+struct gsc_pix_align {
+ u16 org_h;
+ u16 org_w;
+ u16 offset_h;
+ u16 real_w;
+ u16 real_h;
+ u16 target_w;
+ u16 target_h;
+};
+
+/**
+ * struct gsc_variant - G-Scaler variant information
+ */
+struct gsc_variant {
+ struct gsc_pix_max *pix_max;
+ struct gsc_pix_min *pix_min;
+ struct gsc_pix_align *pix_align;
+ u16 in_buf_cnt;
+ u16 out_buf_cnt;
+ u16 sc_up_max;
+ u16 sc_down_max;
+ u16 poly_sc_down_max;
+ u16 pre_sc_down_max;
+ u16 local_sc_down;
+};
+
+/**
+ * struct gsc_driverdata - per device type driver data for init time.
+ *
+ * @variant: the variant information for this driver.
+ * @lclk_frequency: G-Scaler clock frequency
+ * @num_entities: the number of g-scalers
+ */
+struct gsc_driverdata {
+ struct gsc_variant *variant[GSC_MAX_DEVS];
+ unsigned long lclk_frequency;
+ int num_entities;
+};
+
+/**
+ * struct gsc_dev - abstraction for G-Scaler entity
+ * @slock: the spinlock protecting this data structure
+ * @lock: the mutex protecting this data structure
+ * @pdev: pointer to the G-Scaler platform device
+ * @variant: the IP variant information
+ * @id: G-Scaler device index (0..GSC_MAX_DEVS)
+ * @clock: clocks required for G-Scaler operation
+ * @regs: the mapped hardware registers
+ * @irq_queue: interrupt handler waitqueue
+ * @m2m: memory-to-memory V4L2 device information
+ * @state: flags used to synchronize m2m and capture mode operation
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @vdev: video device for G-Scaler instance
+ */
+struct gsc_dev {
+ spinlock_t slock;
+ struct mutex lock;
+ struct platform_device *pdev;
+ struct gsc_variant *variant;
+ u16 id;
+ struct clk *clock;
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+ struct gsc_m2m_device m2m;
+ struct exynos_platform_gscaler *pdata;
+ unsigned long state;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+};
+
+/**
+ * struct gsc_ctx - the device context data
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @in_path: input mode (DMA or camera)
+ * @out_path: output mode (DMA or FIFO)
+ * @scaler: image scaler properties
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of user configuration
+ * @gsc_dev: the G-Scaler device this context applies to
+ * @m2m_ctx: memory-to-memory device context
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @gsc_ctrls: G-Scaler control set
+ * @ctrls_rdy: true if the control handler is initialized
+ */
+struct gsc_ctx {
+ struct gsc_frame s_frame;
+ struct gsc_frame d_frame;
+ enum gsc_datapath in_path;
+ enum gsc_datapath out_path;
+ struct gsc_scaler scaler;
+ u32 flags;
+ u32 state;
+ int rotation;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ struct gsc_dev *gsc_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct gsc_ctrls gsc_ctrls;
+ bool ctrls_rdy;
+};
+
+void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm);
+int gsc_register_m2m_device(struct gsc_dev *gsc);
+void gsc_unregister_m2m_device(struct gsc_dev *gsc);
+void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state);
+
+u32 get_plane_size(struct gsc_frame *fr, unsigned int plane);
+const struct gsc_fmt *get_format(int index);
+const struct gsc_fmt *find_fmt(u32 *pixelformat, u32 *mbus_code, u32 index);
+int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f);
+int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
+void gsc_set_frame_size(struct gsc_frame *frame, int width, int height);
+int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
+void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h);
+int gsc_g_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr);
+int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr);
+int gsc_cal_prescaler_ratio(struct gsc_variant *var, u32 src, u32 dst,
+ u32 *ratio);
+void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh);
+void gsc_check_src_scale_info(struct gsc_variant *var,
+ struct gsc_frame *s_frame,
+ u32 *wratio, u32 tx, u32 ty, u32 *hratio);
+int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw,
+ int dh, int rot, int out_path);
+int gsc_set_scaler_info(struct gsc_ctx *ctx);
+int gsc_ctrls_create(struct gsc_ctx *ctx);
+void gsc_ctrls_delete(struct gsc_ctx *ctx);
+int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
+ struct gsc_frame *frame, struct gsc_addr *addr);
+
+static inline void gsc_ctx_state_lock_set(u32 state, struct gsc_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ctx->state |= state;
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+}
+
+static inline void gsc_ctx_state_lock_clear(u32 state, struct gsc_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ctx->state &= ~state;
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+}
+
+static inline int is_tiled(const struct gsc_fmt *fmt)
+{
+ return fmt->pixelformat == V4L2_PIX_FMT_NV12MT_16X16;
+}
+
+static inline void gsc_hw_enable_control(struct gsc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + GSC_ENABLE);
+
+ if (on)
+ cfg |= GSC_ENABLE_ON;
+ else
+ cfg &= ~GSC_ENABLE_ON;
+
+ writel(cfg, dev->regs + GSC_ENABLE);
+}
+
+static inline int gsc_hw_get_irq_status(struct gsc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + GSC_IRQ);
+ if (cfg & GSC_IRQ_STATUS_OR_IRQ)
+ return GSC_IRQ_OVERRUN;
+ else
+ return GSC_IRQ_DONE;
+
+}
+
+static inline void gsc_hw_clear_irq(struct gsc_dev *dev, int irq)
+{
+ u32 cfg = readl(dev->regs + GSC_IRQ);
+ if (irq == GSC_IRQ_OVERRUN)
+ cfg |= GSC_IRQ_STATUS_OR_IRQ;
+ else if (irq == GSC_IRQ_DONE)
+ cfg |= GSC_IRQ_STATUS_FRM_DONE_IRQ;
+ writel(cfg, dev->regs + GSC_IRQ);
+}
+
+static inline void gsc_lock(struct vb2_queue *vq)
+{
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_lock(&ctx->gsc_dev->lock);
+}
+
+static inline void gsc_unlock(struct vb2_queue *vq)
+{
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_unlock(&ctx->gsc_dev->lock);
+}
+
+static inline bool gsc_ctx_state_is_set(u32 mask, struct gsc_ctx *ctx)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ret = (ctx->state & mask) == mask;
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+ return ret;
+}
+
+static inline struct gsc_frame *ctx_get_frame(struct gsc_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ struct gsc_frame *frame;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == type) {
+ frame = &ctx->s_frame;
+ } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
+ frame = &ctx->d_frame;
+ } else {
+ pr_err("Wrong buffer/video queue type (%d)", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return frame;
+}
+
+void gsc_hw_set_sw_reset(struct gsc_dev *dev);
+int gsc_wait_reset(struct gsc_dev *dev);
+
+void gsc_hw_set_frm_done_irq_mask(struct gsc_dev *dev, bool mask);
+void gsc_hw_set_gsc_irq_enable(struct gsc_dev *dev, bool mask);
+void gsc_hw_set_input_buf_masking(struct gsc_dev *dev, u32 shift, bool enable);
+void gsc_hw_set_output_buf_masking(struct gsc_dev *dev, u32 shift, bool enable);
+void gsc_hw_set_input_addr(struct gsc_dev *dev, struct gsc_addr *addr,
+ int index);
+void gsc_hw_set_output_addr(struct gsc_dev *dev, struct gsc_addr *addr,
+ int index);
+void gsc_hw_set_input_path(struct gsc_ctx *ctx);
+void gsc_hw_set_in_size(struct gsc_ctx *ctx);
+void gsc_hw_set_in_image_rgb(struct gsc_ctx *ctx);
+void gsc_hw_set_in_image_format(struct gsc_ctx *ctx);
+void gsc_hw_set_output_path(struct gsc_ctx *ctx);
+void gsc_hw_set_out_size(struct gsc_ctx *ctx);
+void gsc_hw_set_out_image_rgb(struct gsc_ctx *ctx);
+void gsc_hw_set_out_image_format(struct gsc_ctx *ctx);
+void gsc_hw_set_prescaler(struct gsc_ctx *ctx);
+void gsc_hw_set_mainscaler(struct gsc_ctx *ctx);
+void gsc_hw_set_rotation(struct gsc_ctx *ctx);
+void gsc_hw_set_global_alpha(struct gsc_ctx *ctx);
+void gsc_hw_set_sfr_update(struct gsc_ctx *ctx);
+
+#endif /* GSC_CORE_H_ */
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
new file mode 100644
index 00000000000..e434f1f03d7
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "gsc-core.h"
+
+static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
+{
+ struct gsc_ctx *curr_ctx;
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ int ret;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
+ if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
+ return 0;
+
+ gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
+ ret = wait_event_timeout(gsc->irq_queue,
+ !gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
+ GSC_SHUTDOWN_TIMEOUT);
+
+ return ret == 0 ? -ETIMEDOUT : ret;
+}
+
+static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
+{
+ int ret;
+
+ ret = gsc_m2m_ctx_stop_req(ctx);
+ if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
+ gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct gsc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
+ return ret > 0 ? 0 : ret;
+}
+
+static void gsc_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct gsc_ctx *ctx = q->drv_priv;
+
+ __gsc_m2m_job_abort(ctx);
+
+ pm_runtime_put(&ctx->gsc_dev->pdev->dev);
+}
+
+void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
+{
+ struct vb2_buffer *src_vb, *dst_vb;
+
+ if (!ctx || !ctx->m2m_ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+ dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+
+ v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
+ ctx->m2m_ctx);
+ }
+}
+
+static void gsc_m2m_job_abort(void *priv)
+{
+ __gsc_m2m_job_abort((struct gsc_ctx *)priv);
+}
+
+static int gsc_get_bufs(struct gsc_ctx *ctx)
+{
+ struct gsc_frame *s_frame, *d_frame;
+ struct vb2_buffer *src_vb, *dst_vb;
+ int ret;
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+ if (ret)
+ return ret;
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+ if (ret)
+ return ret;
+
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+
+ return 0;
+}
+
+static void gsc_m2m_device_run(void *priv)
+{
+ struct gsc_ctx *ctx = priv;
+ struct gsc_dev *gsc;
+ unsigned long flags;
+ int ret;
+ bool is_set = false;
+
+ if (WARN(!ctx, "null hardware context\n"))
+ return;
+
+ gsc = ctx->gsc_dev;
+ spin_lock_irqsave(&gsc->slock, flags);
+
+ set_bit(ST_M2M_PEND, &gsc->state);
+
+ /* Reconfigure hardware if the context has changed. */
+ if (gsc->m2m.ctx != ctx) {
+ pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
+ gsc->m2m.ctx, ctx);
+ ctx->state |= GSC_PARAMS;
+ gsc->m2m.ctx = ctx;
+ }
+
+ is_set = ctx->state & GSC_CTX_STOP_REQ;
+ if (is_set) {
+ ctx->state &= ~GSC_CTX_STOP_REQ;
+ ctx->state |= GSC_CTX_ABORT;
+ wake_up(&gsc->irq_queue);
+ goto put_device;
+ }
+
+ ret = gsc_get_bufs(ctx);
+ if (ret) {
+ pr_err("Wrong address");
+ goto put_device;
+ }
+
+ gsc_set_prefbuf(gsc, &ctx->s_frame);
+ gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
+ gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);
+
+ if (ctx->state & GSC_PARAMS) {
+ gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
+ gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
+ gsc_hw_set_frm_done_irq_mask(gsc, false);
+ gsc_hw_set_gsc_irq_enable(gsc, true);
+
+ if (gsc_set_scaler_info(ctx)) {
+ pr_err("Scaler setup error");
+ goto put_device;
+ }
+
+ gsc_hw_set_input_path(ctx);
+ gsc_hw_set_in_size(ctx);
+ gsc_hw_set_in_image_format(ctx);
+
+ gsc_hw_set_output_path(ctx);
+ gsc_hw_set_out_size(ctx);
+ gsc_hw_set_out_image_format(ctx);
+
+ gsc_hw_set_prescaler(ctx);
+ gsc_hw_set_mainscaler(ctx);
+ gsc_hw_set_rotation(ctx);
+ gsc_hw_set_global_alpha(ctx);
+ }
+
+ /* update shadow registers */
+ gsc_hw_set_sfr_update(ctx);
+
+ ctx->state &= ~GSC_PARAMS;
+ gsc_hw_enable_control(gsc, true);
+
+ spin_unlock_irqrestore(&gsc->slock, flags);
+ return;
+
+put_device:
+ ctx->state &= ~GSC_PARAMS;
+ spin_unlock_irqrestore(&gsc->slock, flags);
+}
+
+static int gsc_m2m_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct gsc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ if (!frame->fmt)
+ return -EINVAL;
+
+ *num_planes = frame->fmt->num_planes;
+ for (i = 0; i < frame->fmt->num_planes; i++) {
+ sizes[i] = frame->payload[i];
+ allocators[i] = ctx->gsc_dev->alloc_ctx;
+ }
+ return 0;
+}
+
+static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct gsc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+ }
+
+ return 0;
+}
+
+static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
+
+ if (ctx->m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static struct vb2_ops gsc_m2m_qops = {
+ .queue_setup = gsc_m2m_queue_setup,
+ .buf_prepare = gsc_m2m_buf_prepare,
+ .buf_queue = gsc_m2m_buf_queue,
+ .wait_prepare = gsc_unlock,
+ .wait_finish = gsc_lock,
+ .stop_streaming = gsc_m2m_stop_streaming,
+ .start_streaming = gsc_m2m_start_streaming,
+};
+
+static int gsc_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+
+ strlcpy(cap->driver, gsc->pdev->name, sizeof(cap->driver));
+ strlcpy(cap->card, gsc->pdev->name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return gsc_enum_fmt_mplane(f);
+}
+
+static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ return gsc_g_fmt_mplane(ctx, f);
+}
+
+static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ return gsc_try_fmt_mplane(ctx, f);
+}
+
+static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *vq;
+ struct gsc_frame *frame;
+ struct v4l2_pix_format_mplane *pix;
+ int i, ret = 0;
+
+ ret = gsc_m2m_try_fmt_mplane(file, fh, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+
+ if (vb2_is_streaming(vq)) {
+ pr_err("queue (%d) busy", f->type);
+ return -EBUSY;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ pix = &f->fmt.pix_mp;
+ frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
+ frame->colorspace = pix->colorspace;
+ if (!frame->fmt)
+ return -EINVAL;
+
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ frame->payload[i] = pix->plane_fmt[i].sizeimage;
+
+ gsc_set_frame_size(frame, pix->width, pix->height);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
+ else
+ gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);
+
+ pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
+
+ return 0;
+}
+
+static int gsc_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ struct gsc_frame *frame;
+ u32 max_cnt;
+
+ max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+ gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
+ if (reqbufs->count > max_cnt) {
+ return -EINVAL;
+ } else if (reqbufs->count == 0) {
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx);
+ else
+ gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
+ }
+
+ frame = ctx_get_frame(ctx, reqbufs->type);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int gsc_m2m_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
+static int gsc_m2m_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int gsc_m2m_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int gsc_m2m_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int gsc_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ /* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
+ return -EINVAL;
+ } else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int gsc_m2m_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int gsc_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct gsc_frame *frame;
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
+ return -EINVAL;
+
+ frame = ctx_get_frame(ctx, s->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->f_width;
+ s->r.height = frame->f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = frame->crop.left;
+ s->r.top = frame->crop.top;
+ s->r.width = frame->crop.width;
+ s->r.height = frame->crop.height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int gsc_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct gsc_frame *frame;
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_crop cr;
+ struct gsc_variant *variant = ctx->gsc_dev->variant;
+ int ret;
+
+ cr.type = s->type;
+ cr.c = s->r;
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
+ return -EINVAL;
+
+ ret = gsc_try_crop(ctx, &cr);
+ if (ret)
+ return ret;
+
+ if (s->flags & V4L2_SEL_FLAG_LE &&
+ !is_rectangle_enclosed(&cr.c, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE &&
+ !is_rectangle_enclosed(&s->r, &cr.c))
+ return -ERANGE;
+
+ s->r = cr.c;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ frame = &ctx->s_frame;
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ frame = &ctx->d_frame;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Check to see if scaling ratio is within supported range */
+ if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = gsc_check_scaler_ratio(variant, cr.c.width,
+ cr.c.height, ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->gsc_ctrls.rotate->val, ctx->out_path);
+ } else {
+ ret = gsc_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height, cr.c.width,
+ cr.c.height, ctx->gsc_ctrls.rotate->val,
+ ctx->out_path);
+ }
+
+ if (ret) {
+ pr_err("Out of scaler range");
+ return -EINVAL;
+ }
+ }
+
+ frame->crop = cr.c;
+
+ gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
+ return 0;
+}
+
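The V4L2_SEL_FLAG_LE/GE handling above lets userspace bound how the driver may
adjust a requested rectangle. A minimal userspace sketch of that contract
follows; the device node path is hypothetical and the geometry is illustrative.

/* Ask the driver for a compose rectangle no larger than requested
 * (V4L2_SEL_FLAG_LE); gsc_m2m_s_selection() returns -ERANGE when the
 * adjusted rectangle cannot satisfy that constraint. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
		.target = V4L2_SEL_TGT_COMPOSE,
		.flags = V4L2_SEL_FLAG_LE,
		.r = { .left = 0, .top = 0, .width = 1280, .height = 720 },
	};
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) < 0) {
		perror("VIDIOC_S_SELECTION");
		return 1;
	}
	printf("compose: %ux%u@(%d,%d)\n",
	       sel.r.width, sel.r.height, sel.r.left, sel.r.top);
	return 0;
}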
+static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
+ .vidioc_querycap = gsc_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = gsc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = gsc_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = gsc_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = gsc_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = gsc_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = gsc_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = gsc_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = gsc_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = gsc_m2m_reqbufs,
+ .vidioc_expbuf = gsc_m2m_expbuf,
+ .vidioc_querybuf = gsc_m2m_querybuf,
+ .vidioc_qbuf = gsc_m2m_qbuf,
+ .vidioc_dqbuf = gsc_m2m_dqbuf,
+ .vidioc_streamon = gsc_m2m_streamon,
+ .vidioc_streamoff = gsc_m2m_streamoff,
+ .vidioc_g_selection = gsc_m2m_g_selection,
+ .vidioc_s_selection = gsc_m2m_s_selection
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct gsc_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &gsc_m2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &gsc_m2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int gsc_m2m_open(struct file *file)
+{
+ struct gsc_dev *gsc = video_drvdata(file);
+ struct gsc_ctx *ctx = NULL;
+ int ret;
+
+ pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);
+
+ if (mutex_lock_interruptible(&gsc->lock))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
+ ret = gsc_ctrls_create(ctx);
+ if (ret)
+ goto error_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->gsc_dev = gsc;
+ /* Default color format */
+ ctx->s_frame.fmt = get_format(0);
+ ctx->d_frame.fmt = get_format(0);
+ /* Setup the device context for mem2mem mode. */
+ ctx->state = GSC_CTX_M2M;
+ ctx->flags = 0;
+ ctx->in_path = GSC_DMA;
+ ctx->out_path = GSC_DMA;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ pr_err("Failed to initialize m2m context");
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_ctrls;
+ }
+
+ if (gsc->m2m.refcnt++ == 0)
+ set_bit(ST_M2M_OPEN, &gsc->state);
+
+ pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);
+
+ mutex_unlock(&gsc->lock);
+ return 0;
+
+error_ctrls:
+ gsc_ctrls_delete(ctx);
+error_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+unlock:
+ mutex_unlock(&gsc->lock);
+ return ret;
+}
+
+static int gsc_m2m_release(struct file *file)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+
+ pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), gsc->state, gsc->m2m.refcnt);
+
+ mutex_lock(&gsc->lock);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ gsc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--gsc->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_OPEN, &gsc->state);
+ kfree(ctx);
+
+ mutex_unlock(&gsc->lock);
+ return 0;
+}
+
+static unsigned int gsc_m2m_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ int ret;
+
+ if (mutex_lock_interruptible(&gsc->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&gsc->lock);
+
+ return ret;
+}
+
+static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ int ret;
+
+ if (mutex_lock_interruptible(&gsc->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ mutex_unlock(&gsc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations gsc_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = gsc_m2m_open,
+ .release = gsc_m2m_release,
+ .poll = gsc_m2m_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = gsc_m2m_mmap,
+};
+
+static struct v4l2_m2m_ops gsc_m2m_ops = {
+ .device_run = gsc_m2m_device_run,
+ .job_abort = gsc_m2m_job_abort,
+};
+
+int gsc_register_m2m_device(struct gsc_dev *gsc)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ if (!gsc)
+ return -ENODEV;
+
+ pdev = gsc->pdev;
+
+ gsc->vdev.fops = &gsc_m2m_fops;
+ gsc->vdev.ioctl_ops = &gsc_m2m_ioctl_ops;
+ gsc->vdev.release = video_device_release_empty;
+ gsc->vdev.lock = &gsc->lock;
+ gsc->vdev.vfl_dir = VFL_DIR_M2M;
+ gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
+ snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
+ GSC_MODULE_NAME, gsc->id);
+
+ video_set_drvdata(&gsc->vdev, gsc);
+
+ gsc->m2m.vfd = &gsc->vdev;
+ gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
+ if (IS_ERR(gsc->m2m.m2m_dev)) {
+ dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(gsc->m2m.m2m_dev);
+ goto err_m2m_r1;
+ }
+
+ ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s(): failed to register video device\n", __func__);
+ goto err_m2m_r2;
+ }
+
+ pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
+ return 0;
+
+err_m2m_r2:
+ v4l2_m2m_release(gsc->m2m.m2m_dev);
+err_m2m_r1:
+ video_device_release(gsc->m2m.vfd);
+
+ return ret;
+}
+
+void gsc_unregister_m2m_device(struct gsc_dev *gsc)
+{
+ if (gsc)
+ v4l2_m2m_release(gsc->m2m.m2m_dev);
+}
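The ioctl table above implements the standard V4L2 memory-to-memory model:
each queue gets a format, buffers are requested per queue, and streaming may
start only once both formats are set. A hedged userspace sketch of that order
(the pixel format, sizes and buffer counts are illustrative, not mandated by
the driver):

/* Typical mem2mem setup sequence against the ioctl ops registered above. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int gsc_m2m_setup(int fd)
{
	struct v4l2_format fmt;
	struct v4l2_requestbuffers req;
	enum v4l2_buf_type type;

	/* Source queue: what userspace feeds in (gsc_m2m_s_fmt_mplane). */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	fmt.fmt.pix_mp.width = 1920;
	fmt.fmt.pix_mp.height = 1080;
	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return -1;

	/* Destination queue: the scaled/converted result. */
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	fmt.fmt.pix_mp.width = 1280;
	fmt.fmt.pix_mp.height = 720;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return -1;

	/* Buffer counts are bounded by variant->in_buf_cnt/out_buf_cnt
	 * in gsc_m2m_reqbufs(). */
	memset(&req, 0, sizeof(req));
	req.count = 2;
	req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;

	/* gsc_m2m_streamon() returns -EINVAL unless both formats are set. */
	type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return -1;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	return ioctl(fd, VIDIOC_STREAMON, &type);
}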
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.c b/drivers/media/platform/exynos-gsc/gsc-regs.c
new file mode 100644
index 00000000000..e22d147a694
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "gsc-core.h"
+
+void gsc_hw_set_sw_reset(struct gsc_dev *dev)
+{
+ writel(GSC_SW_RESET_SRESET, dev->regs + GSC_SW_RESET);
+}
+
+int gsc_wait_reset(struct gsc_dev *dev)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(50);
+ u32 cfg;
+
+ while (time_before(jiffies, end)) {
+ cfg = readl(dev->regs + GSC_SW_RESET);
+ if (!cfg)
+ return 0;
+ usleep_range(10, 20);
+ }
+
+ return -EBUSY;
+}
+
+void gsc_hw_set_frm_done_irq_mask(struct gsc_dev *dev, bool mask)
+{
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IRQ);
+ if (mask)
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ writel(cfg, dev->regs + GSC_IRQ);
+}
+
+void gsc_hw_set_gsc_irq_enable(struct gsc_dev *dev, bool mask)
+{
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IRQ);
+ if (mask)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+ writel(cfg, dev->regs + GSC_IRQ);
+}
+
+void gsc_hw_set_input_buf_masking(struct gsc_dev *dev, u32 shift,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + GSC_IN_BASE_ADDR_Y_MASK);
+ u32 mask = 1 << shift;
+
+ cfg &= ~mask;
+ cfg |= enable << shift;
+
+ writel(cfg, dev->regs + GSC_IN_BASE_ADDR_Y_MASK);
+ writel(cfg, dev->regs + GSC_IN_BASE_ADDR_CB_MASK);
+ writel(cfg, dev->regs + GSC_IN_BASE_ADDR_CR_MASK);
+}
+
+void gsc_hw_set_output_buf_masking(struct gsc_dev *dev, u32 shift,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + GSC_OUT_BASE_ADDR_Y_MASK);
+ u32 mask = 1 << shift;
+
+ cfg &= ~mask;
+ cfg |= enable << shift;
+
+ writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_Y_MASK);
+ writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_CB_MASK);
+ writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_CR_MASK);
+}
+
+void gsc_hw_set_input_addr(struct gsc_dev *dev, struct gsc_addr *addr,
+ int index)
+{
+ pr_debug("src_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X", index,
+ addr->y, addr->cb, addr->cr);
+ writel(addr->y, dev->regs + GSC_IN_BASE_ADDR_Y(index));
+ writel(addr->cb, dev->regs + GSC_IN_BASE_ADDR_CB(index));
+ writel(addr->cr, dev->regs + GSC_IN_BASE_ADDR_CR(index));
+}
+
+void gsc_hw_set_output_addr(struct gsc_dev *dev,
+ struct gsc_addr *addr, int index)
+{
+ pr_debug("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
+ index, addr->y, addr->cb, addr->cr);
+ writel(addr->y, dev->regs + GSC_OUT_BASE_ADDR_Y(index));
+ writel(addr->cb, dev->regs + GSC_OUT_BASE_ADDR_CB(index));
+ writel(addr->cr, dev->regs + GSC_OUT_BASE_ADDR_CR(index));
+}
+
+void gsc_hw_set_input_path(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+
+ u32 cfg = readl(dev->regs + GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+
+ if (ctx->in_path == GSC_DMA)
+ cfg |= GSC_IN_PATH_MEMORY;
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_in_size(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->s_frame;
+ u32 cfg;
+
+ /* Set input pixel offset */
+ cfg = GSC_SRCIMG_OFFSET_X(frame->crop.left);
+ cfg |= GSC_SRCIMG_OFFSET_Y(frame->crop.top);
+ writel(cfg, dev->regs + GSC_SRCIMG_OFFSET);
+
+ /* Set input original size */
+ cfg = GSC_SRCIMG_WIDTH(frame->f_width);
+ cfg |= GSC_SRCIMG_HEIGHT(frame->f_height);
+ writel(cfg, dev->regs + GSC_SRCIMG_SIZE);
+
+ /* Set input cropped size */
+ cfg = GSC_CROPPED_WIDTH(frame->crop.width);
+ cfg |= GSC_CROPPED_HEIGHT(frame->crop.height);
+ writel(cfg, dev->regs + GSC_CROPPED_SIZE);
+}
+
+void gsc_hw_set_in_image_rgb(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->s_frame;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IN_CON);
+ if (frame->colorspace == V4L2_COLORSPACE_REC709)
+ cfg |= GSC_IN_RGB_HD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_SD_WIDE;
+
+ if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB565X)
+ cfg |= GSC_IN_RGB565;
+ else if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB32)
+ cfg |= GSC_IN_XRGB8888;
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_in_image_format(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->s_frame;
+ u32 i, depth = 0;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE);
+ writel(cfg, dev->regs + GSC_IN_CON);
+
+ if (is_rgb(frame->fmt->color)) {
+ gsc_hw_set_in_image_rgb(ctx);
+ return;
+ }
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ depth += frame->fmt->depth[i];
+
+ switch (frame->fmt->num_comp) {
+ case 1:
+ cfg |= GSC_IN_YUV422_1P;
+ if (frame->fmt->yorder == GSC_LSB_Y)
+ cfg |= GSC_IN_YUV422_1P_ORDER_LSB_Y;
+ else
+ cfg |= GSC_IN_YUV422_1P_OEDER_LSB_C;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_IN_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_IN_CHROMA_ORDER_CRCB;
+ break;
+ case 2:
+ if (depth == 12)
+ cfg |= GSC_IN_YUV420_2P;
+ else
+ cfg |= GSC_IN_YUV422_2P;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_IN_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_IN_CHROMA_ORDER_CRCB;
+ break;
+ case 3:
+ if (depth == 12)
+ cfg |= GSC_IN_YUV420_3P;
+ else
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ }
+
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE;
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_output_path(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+
+ u32 cfg = readl(dev->regs + GSC_OUT_CON);
+ cfg &= ~GSC_OUT_PATH_MASK;
+
+ if (ctx->out_path == GSC_DMA)
+ cfg |= GSC_OUT_PATH_MEMORY;
+ else
+ cfg |= GSC_OUT_PATH_LOCAL;
+
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
+void gsc_hw_set_out_size(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ /* Set output original size */
+ if (ctx->out_path == GSC_DMA) {
+ cfg = GSC_DSTIMG_OFFSET_X(frame->crop.left);
+ cfg |= GSC_DSTIMG_OFFSET_Y(frame->crop.top);
+ writel(cfg, dev->regs + GSC_DSTIMG_OFFSET);
+
+ cfg = GSC_DSTIMG_WIDTH(frame->f_width);
+ cfg |= GSC_DSTIMG_HEIGHT(frame->f_height);
+ writel(cfg, dev->regs + GSC_DSTIMG_SIZE);
+ }
+
+ /* Set output scaled size */
+ if (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270) {
+ cfg = GSC_SCALED_WIDTH(frame->crop.height);
+ cfg |= GSC_SCALED_HEIGHT(frame->crop.width);
+ } else {
+ cfg = GSC_SCALED_WIDTH(frame->crop.width);
+ cfg |= GSC_SCALED_HEIGHT(frame->crop.height);
+ }
+ writel(cfg, dev->regs + GSC_SCALED_SIZE);
+}
+
+void gsc_hw_set_out_image_rgb(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_OUT_CON);
+ if (frame->colorspace == V4L2_COLORSPACE_REC709)
+ cfg |= GSC_OUT_RGB_HD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_SD_WIDE;
+
+ if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB565X)
+ cfg |= GSC_OUT_RGB565;
+ else if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB32)
+ cfg |= GSC_OUT_XRGB8888;
+
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
+void gsc_hw_set_out_image_format(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 i, depth = 0;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_TILE_TYPE_MASK | GSC_OUT_TILE_MODE);
+ writel(cfg, dev->regs + GSC_OUT_CON);
+
+ if (is_rgb(frame->fmt->color)) {
+ gsc_hw_set_out_image_rgb(ctx);
+ return;
+ }
+
+ if (ctx->out_path != GSC_DMA) {
+ cfg |= GSC_OUT_YUV444;
+ goto end_set;
+ }
+
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ depth += frame->fmt->depth[i];
+
+ switch (frame->fmt->num_comp) {
+ case 1:
+ cfg |= GSC_OUT_YUV422_1P;
+ if (frame->fmt->yorder == GSC_LSB_Y)
+ cfg |= GSC_OUT_YUV422_1P_ORDER_LSB_Y;
+ else
+ cfg |= GSC_OUT_YUV422_1P_OEDER_LSB_C;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_OUT_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_OUT_CHROMA_ORDER_CRCB;
+ break;
+ case 2:
+ if (depth == 12)
+ cfg |= GSC_OUT_YUV420_2P;
+ else
+ cfg |= GSC_OUT_YUV422_2P;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_OUT_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_OUT_CHROMA_ORDER_CRCB;
+ break;
+ case 3:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ }
+
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE;
+
+end_set:
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
+void gsc_hw_set_prescaler(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_scaler *sc = &ctx->scaler;
+ u32 cfg;
+
+ cfg = GSC_PRESC_SHFACTOR(sc->pre_shfactor);
+ cfg |= GSC_PRESC_H_RATIO(sc->pre_hratio);
+ cfg |= GSC_PRESC_V_RATIO(sc->pre_vratio);
+ writel(cfg, dev->regs + GSC_PRE_SCALE_RATIO);
+}
+
+void gsc_hw_set_mainscaler(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_scaler *sc = &ctx->scaler;
+ u32 cfg;
+
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ writel(cfg, dev->regs + GSC_MAIN_H_RATIO);
+
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ writel(cfg, dev->regs + GSC_MAIN_V_RATIO);
+}
+
+void gsc_hw_set_rotation(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (ctx->gsc_ctrls.rotate->val) {
+ case 270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ case 180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case 90:
+ if (ctx->gsc_ctrls.hflip->val)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (ctx->gsc_ctrls.vflip->val)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case 0:
+ if (ctx->gsc_ctrls.hflip->val)
+ cfg |= GSC_IN_ROT_XFLIP;
+ else if (ctx->gsc_ctrls.vflip->val)
+ cfg |= GSC_IN_ROT_YFLIP;
+ }
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_global_alpha(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ if (!is_rgb(frame->fmt->color)) {
+		pr_debug("Not an RGB format");
+ return;
+ }
+
+ cfg = readl(dev->regs + GSC_OUT_CON);
+ cfg &= ~GSC_OUT_GLOBAL_ALPHA_MASK;
+
+ cfg |= GSC_OUT_GLOBAL_ALPHA(ctx->gsc_ctrls.global_alpha->val);
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
+void gsc_hw_set_sfr_update(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_ENABLE);
+ cfg |= GSC_ENABLE_SFR_UPDATE;
+ writel(cfg, dev->regs + GSC_ENABLE);
+}
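Except for the base-address writes, every helper in this file updates a
register with the same read-modify-write idiom: read the current value, clear
the mask of the field being reprogrammed, OR in the new bits, and write the
result back. A sketch of that idiom with a hypothetical helper name (the
driver deliberately open-codes it per register):

/* Hypothetical helper illustrating the readl()/writel() idiom used by
 * gsc_hw_set_gsc_irq_enable() and friends; not part of the driver. */
#include <linux/io.h>
#include <linux/types.h>

static inline void gsc_modify(void __iomem *regs, u32 offset,
			      u32 clear_mask, u32 set_bits)
{
	u32 cfg = readl(regs + offset);

	cfg &= ~clear_mask;	/* drop the field being reprogrammed */
	cfg |= set_bits;	/* install the new value */
	writel(cfg, regs + offset);
}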
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.h b/drivers/media/platform/exynos-gsc/gsc-regs.h
new file mode 100644
index 00000000000..4678f9a6a4f
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef REGS_GSC_H_
+#define REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_FRM_DONE_IRQ (1 << 16)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (2 << 14)
+#define GSC_IN_RGB_SD_NARROW (1 << 14)
+#define GSC_IN_RGB_SD_WIDE (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1c
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (2 << 10)
+#define GSC_OUT_RGB_SD_WIDE (1 << 10)
+#define GSC_OUT_RGB_SD_NARROW (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2c
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4c
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7c
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xac
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xb0 + (n) * 0x4)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10c
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15c
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1ac
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1b0 + (n) * 0x4)
+
+#endif /* REGS_GSC_H_ */
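The *_WIDTH()/*_HEIGHT() and *_OFFSET_X()/*_OFFSET_Y() macros above pack two
16-bit fields into one 32-bit register, with height (or Y) in the upper
half-word and width (or X) in the lower. A worked example of the value
gsc_hw_set_in_size() writes to GSC_SRCIMG_SIZE, using the header above and an
illustrative 1920x1080 geometry:

/* height in bits 31..16, width in bits 15..0 */
u32 cfg = GSC_SRCIMG_WIDTH(1920) | GSC_SRCIMG_HEIGHT(1080);
/* cfg == (1080 << 16) | 1920 == 0x04380780 */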
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
new file mode 100644
index 00000000000..5dcaa0a8054
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -0,0 +1,77 @@
+
+config VIDEO_SAMSUNG_EXYNOS4_IS
+ bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on (PLAT_S5P || ARCH_EXYNOS)
+ depends on OF && COMMON_CLK
+ help
+ Say Y here to enable camera host interface devices for
+ Samsung S5P and EXYNOS SoC series.
+
+if VIDEO_SAMSUNG_EXYNOS4_IS
+
+config VIDEO_EXYNOS4_IS_COMMON
+ tristate
+
+config VIDEO_S5P_FIMC
+ tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver"
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select MFD_SYSCON
+ select VIDEO_EXYNOS4_IS_COMMON
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC camera host
+ interface and video postprocessor (FIMC) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-fimc.
+
+config VIDEO_S5P_MIPI_CSIS
+ tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
+ depends on REGULATOR
+ select GENERIC_PHY
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
+ receiver (MIPI-CSIS) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-csis.
+
+if SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
+
+config VIDEO_EXYNOS_FIMC_LITE
+ tristate "EXYNOS FIMC-LITE camera interface driver"
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_EXYNOS4_IS_COMMON
+ help
+ This is a V4L2 driver for Samsung EXYNOS4/5 SoC FIMC-LITE camera
+ host interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exynos-fimc-lite.
+endif
+
+config VIDEO_EXYNOS4_FIMC_IS
+ tristate "EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver"
+ select VIDEOBUF2_DMA_CONTIG
+ depends on OF
+ select FW_LOADER
+ help
+ This is a V4L2 driver for Samsung EXYNOS4x12 SoC series
+ FIMC-IS (Imaging Subsystem).
+
+ To compile this driver as a module, choose M here: the
+ module will be called exynos4-fimc-is.
+
+config VIDEO_EXYNOS4_ISP_DMA_CAPTURE
+ bool "EXYNOS4x12 FIMC-IS ISP Direct DMA capture support"
+ depends on VIDEO_EXYNOS4_FIMC_IS
+ select VIDEO_EXYNOS4_IS_COMMON
+ default y
+ help
+ This option enables an additional video device node exposing a V4L2
+ video capture interface for the FIMC-IS ISP raw (Bayer) capture DMA.
+
+endif # VIDEO_SAMSUNG_EXYNOS4_IS
diff --git a/drivers/media/platform/exynos4-is/Makefile b/drivers/media/platform/exynos4-is/Makefile
new file mode 100644
index 00000000000..eed1b185d81
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/Makefile
@@ -0,0 +1,17 @@
+s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-m2m.o fimc-capture.o media-dev.o
+exynos-fimc-lite-objs += fimc-lite-reg.o fimc-lite.o
+s5p-csis-objs := mipi-csis.o
+exynos4-is-common-objs := common.o
+
+exynos-fimc-is-objs := fimc-is.o fimc-isp.o fimc-is-sensor.o fimc-is-regs.o
+exynos-fimc-is-objs += fimc-is-param.o fimc-is-errno.o fimc-is-i2c.o
+
+ifeq ($(CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE),y)
+exynos-fimc-is-objs += fimc-isp-video.o
+endif
+
+obj-$(CONFIG_VIDEO_S5P_MIPI_CSIS) += s5p-csis.o
+obj-$(CONFIG_VIDEO_EXYNOS_FIMC_LITE) += exynos-fimc-lite.o
+obj-$(CONFIG_VIDEO_EXYNOS4_FIMC_IS) += exynos-fimc-is.o
+obj-$(CONFIG_VIDEO_S5P_FIMC) += s5p-fimc.o
+obj-$(CONFIG_VIDEO_EXYNOS4_IS_COMMON) += exynos4-is-common.o
diff --git a/drivers/media/platform/exynos4-is/common.c b/drivers/media/platform/exynos4-is/common.c
new file mode 100644
index 00000000000..0eb34ecb8ee
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/common.c
@@ -0,0 +1,53 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC Camera Subsystem driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <media/exynos-fimc.h>
+#include "common.h"
+
+/* Called with the media graph mutex held or entity->stream_count > 0. */
+struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity)
+{
+ struct media_pad *pad = &entity->pads[0];
+ struct v4l2_subdev *sd;
+
+ while (pad->flags & MEDIA_PAD_FL_SINK) {
+ /* source pad */
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (sd->grp_id == GRP_ID_FIMC_IS_SENSOR ||
+ sd->grp_id == GRP_ID_SENSOR)
+ return sd;
+ /* sink pad */
+ pad = &sd->entity.pads[0];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(fimc_find_remote_sensor);
+
+void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap,
+ unsigned int caps)
+{
+ strlcpy(cap->driver, dev->driver->name, sizeof(cap->driver));
+ strlcpy(cap->card, dev->driver->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev_name(dev));
+ cap->device_caps = caps;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+}
+EXPORT_SYMBOL(__fimc_vidioc_querycap);
+
+MODULE_LICENSE("GPL");
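fimc_find_remote_sensor() walks upstream over sink pads, so the media graph
must not change while it runs; the comment above requires either the graph
mutex or an active stream. A hedged caller sketch (the helper name and the
surrounding code are hypothetical, only the locking rule comes from the
comment above):

#include <linux/mutex.h>
#include <media/media-device.h>
#include <media/v4l2-subdev.h>
#include "common.h"

static struct v4l2_subdev *get_sensor_locked(struct media_device *mdev,
					     struct media_entity *entity)
{
	struct v4l2_subdev *sensor;

	mutex_lock(&mdev->graph_mutex);
	sensor = fimc_find_remote_sensor(entity);
	mutex_unlock(&mdev->graph_mutex);

	return sensor;		/* NULL if no sensor is linked upstream */
}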
diff --git a/drivers/media/platform/exynos4-is/common.h b/drivers/media/platform/exynos4-is/common.h
new file mode 100644
index 00000000000..75b9c71d941
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/common.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity);
+void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap,
+ unsigned int caps);
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
new file mode 100644
index 00000000000..3d2babd5067
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -0,0 +1,1917 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "common.h"
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "media-dev.h"
+
+static int fimc_capture_hw_init(struct fimc_dev *fimc)
+{
+ struct fimc_source_info *si = &fimc->vid_cap.source_config;
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ int ret;
+ unsigned long flags;
+
+ if (ctx == NULL || ctx->s_frame.fmt == NULL)
+ return -EINVAL;
+
+ if (si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK) {
+ ret = fimc_hw_camblk_cfg_writeback(fimc);
+ if (ret < 0)
+ return ret;
+ }
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_set_yuv_order(ctx);
+
+ fimc_hw_set_camera_polarity(fimc, si);
+ fimc_hw_set_camera_type(fimc, si);
+ fimc_hw_set_camera_source(fimc, si);
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+ ret = fimc_set_scaler_info(ctx);
+ if (!ret) {
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_hw_set_output_path(ctx);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->drv_data->alpha_color)
+ fimc_hw_set_rgb_alpha(ctx);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start the streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ streaming = fimc->state & (1 << ST_CAPT_ISP_STREAM);
+
+ fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT |
+ 1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM);
+ if (suspend)
+ fimc->state |= (1 << ST_CAPT_SUSPENDED);
+ else
+ fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED);
+
+ /* Release unused buffers */
+ while (!suspend && !list_empty(&cap->pending_buf_q)) {
+ buf = fimc_pending_queue_pop(cap);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ /* If suspending put unused buffers onto pending queue */
+ while (!list_empty(&cap->active_buf_q)) {
+ buf = fimc_active_queue_pop(cap);
+ if (suspend)
+ fimc_pending_queue_add(cap, buf);
+ else
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ fimc_hw_reset(fimc);
+ cap->buf_index = 0;
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (streaming)
+ return fimc_pipeline_call(&cap->ve, set_stream, 0);
+ else
+ return 0;
+}
+
+static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
+{
+ unsigned long flags;
+
+ if (!fimc_capture_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_CAPT_SHUT, &fimc->state);
+ fimc_deactivate_capture(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_CAPT_SHUT, &fimc->state),
+ (2*HZ/10)); /* 200 ms */
+
+ return fimc_capture_state_cleanup(fimc, suspend);
+}
+
+/**
+ * fimc_capture_config_update - apply the camera interface configuration
+ *
+ * To be called from within the interrupt handler with fimc.slock
+ * spinlock held. It updates the camera pixel crop, rotation and
+ * image flip in H/W.
+ */
+static int fimc_capture_config_update(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+ ret = fimc_set_scaler_info(ctx);
+ if (ret)
+ return ret;
+
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->drv_data->alpha_color)
+ fimc_hw_set_rgb_alpha(ctx);
+
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ return ret;
+}
+
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_pipeline *p = to_fimc_pipeline(cap->ve.pipe);
+ struct v4l2_subdev *csis = p->subdevs[IDX_CSIS];
+ struct fimc_frame *f = &cap->ctx->d_frame;
+ struct fimc_vid_buffer *v_buf;
+ struct timeval *tv;
+ struct timespec ts;
+
+ if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (!list_empty(&cap->active_buf_q) &&
+ test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
+ ktime_get_real_ts(&ts);
+
+ v_buf = fimc_active_queue_pop(cap);
+
+ tv = &v_buf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
+
+ vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (!list_empty(&cap->pending_buf_q)) {
+
+ v_buf = fimc_pending_queue_pop(cap);
+ fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
+ v_buf->index = cap->buf_index;
+
+ /* Move the buffer to the capture active queue */
+ fimc_active_queue_add(cap, v_buf);
+
+ dbg("next frame: %d, done frame: %d",
+ fimc_hw_get_frame_index(fimc), v_buf->index);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ }
+ /*
+	 * Set up a buffer at MIPI-CSIS if the current image format
+	 * requires embedded frame data capture.
+ */
+ if (f->fmt->mdataplanes && !list_empty(&cap->active_buf_q)) {
+ unsigned int plane = ffs(f->fmt->mdataplanes) - 1;
+ unsigned int size = f->payload[plane];
+ s32 index = fimc_hw_get_frame_index(fimc);
+ void *vaddr;
+
+ list_for_each_entry(v_buf, &cap->active_buf_q, list) {
+ if (v_buf->index != index)
+ continue;
+ vaddr = vb2_plane_vaddr(&v_buf->vb, plane);
+ v4l2_subdev_call(csis, video, s_rx_buffer,
+ vaddr, &size);
+ break;
+ }
+ }
+
+ if (cap->active_buf_cnt == 0) {
+ if (deq_buf)
+ clear_bit(ST_CAPT_RUN, &fimc->state);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ } else {
+ set_bit(ST_CAPT_RUN, &fimc->state);
+ }
+
+ if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ fimc_capture_config_update(cap->ctx);
+done:
+ if (cap->active_buf_cnt == 1) {
+ fimc_deactivate_capture(fimc);
+ clear_bit(ST_CAPT_STREAM, &fimc->state);
+ }
+
+ dbg("frame: %d, active_buf_cnt: %d",
+ fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ int min_bufs;
+ int ret;
+
+ vid_cap->frame_count = 0;
+
+ ret = fimc_capture_hw_init(fimc);
+ if (ret) {
+ fimc_capture_state_cleanup(fimc, false);
+ return ret;
+ }
+
+ set_bit(ST_CAPT_PEND, &fimc->state);
+
+ min_bufs = fimc->vid_cap.reqbufs_count > 1 ? 2 : 1;
+
+ if (vid_cap->active_buf_cnt >= min_bufs &&
+ !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+ fimc_activate_capture(ctx);
+
+ if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+ return fimc_pipeline_call(&vid_cap->ve, set_stream, 1);
+ }
+
+ return 0;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ if (!fimc_capture_active(fimc))
+ return;
+
+ fimc_stop_capture(fimc, false);
+}
+
+int fimc_capture_suspend(struct fimc_dev *fimc)
+{
+ bool suspend = fimc_capture_busy(fimc);
+
+ int ret = fimc_stop_capture(fimc, suspend);
+ if (ret)
+ return ret;
+ return fimc_pipeline_call(&fimc->vid_cap.ve, close);
+}
+
+static void buffer_queue(struct vb2_buffer *vb);
+
+int fimc_capture_resume(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vid_cap->ve;
+ struct fimc_vid_buffer *buf;
+ int i;
+
+ if (!test_and_clear_bit(ST_CAPT_SUSPENDED, &fimc->state))
+ return 0;
+
+ INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
+ vid_cap->buf_index = 0;
+ fimc_pipeline_call(ve, open, &ve->vdev.entity, false);
+ fimc_capture_hw_init(fimc);
+
+ clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ for (i = 0; i < vid_cap->reqbufs_count; i++) {
+ if (list_empty(&vid_cap->pending_buf_q))
+ break;
+ buf = fimc_pending_queue_pop(vid_cap);
+ buffer_queue(&buf->vb);
+ }
+ return 0;
+
+}
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ struct fimc_ctx *ctx = vq->drv_priv;
+ struct fimc_frame *frame = &ctx->d_frame;
+ struct fimc_fmt *fmt = frame->fmt;
+ unsigned long wh;
+ int i;
+
+ if (pfmt) {
+ pixm = &pfmt->fmt.pix_mp;
+ fmt = fimc_find_format(&pixm->pixelformat, NULL,
+ FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1);
+ wh = pixm->width * pixm->height;
+ } else {
+ wh = frame->f_width * frame->f_height;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+ if (pixm)
+ sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+ else if (fimc_fmt_is_user_defined(fmt->color))
+ sizes[i] = frame->payload[i];
+ else
+ sizes[i] = max_t(u32, size, frame->payload[i]);
+
+ allocators[i] = ctx->fimc_dev->alloc_ctx;
+ }
+
+ return 0;
+}
+
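The per-plane size computed in queue_setup() is width * height * depth / 8,
then raised to whatever userspace or the driver has already recorded for that
plane. A worked example for a 1280x720 frame in a two-plane 4:2:0 layout,
assuming the format table advertises depths of 8 and 4 bits per pixel for the
two planes (the exact depth values come from the driver's format table and are
only assumed here):

  wh       = 1280 * 720      = 921600 pixels
  sizes[0] = 921600 * 8 / 8  = 921600 bytes   (luma plane)
  sizes[1] = 921600 * 4 / 8  = 460800 bytes   (chroma plane)

When a v4l2_format is supplied, each plane is further raised to at least
plane_fmt[i].sizeimage by the max() calls above.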
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct fimc_ctx *ctx = vq->drv_priv;
+ int i;
+
+ if (ctx->d_frame.fmt == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) {
+ unsigned long size = ctx->d_frame.payload[i];
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(&ctx->fimc_dev->vid_cap.ve.vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct fimc_vid_buffer *buf
+ = container_of(vb, struct fimc_vid_buffer, vb);
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vid_cap->ve;
+ unsigned long flags;
+ int min_bufs;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr);
+
+ if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
+ !test_bit(ST_CAPT_STREAM, &fimc->state) &&
+ vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) {
+ /* Setup the buffer directly for processing. */
+ int buf_id = (vid_cap->reqbufs_count == 1) ? -1 :
+ vid_cap->buf_index;
+
+ fimc_hw_set_output_addr(fimc, &buf->paddr, buf_id);
+ buf->index = vid_cap->buf_index;
+ fimc_active_queue_add(vid_cap, buf);
+
+ if (++vid_cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ vid_cap->buf_index = 0;
+ } else {
+ fimc_pending_queue_add(vid_cap, buf);
+ }
+
+ min_bufs = vid_cap->reqbufs_count > 1 ? 2 : 1;
+
+ if (vb2_is_streaming(&vid_cap->vbq) &&
+ vid_cap->active_buf_cnt >= min_bufs &&
+ !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+ int ret;
+
+ fimc_activate_capture(ctx);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+ return;
+
+ ret = fimc_pipeline_call(ve, set_stream, 1);
+ if (ret < 0)
+ v4l2_err(&ve->vdev, "stream on failed: %d\n", ret);
+ return;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static struct vb2_ops fimc_capture_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static int fimc_capture_set_default_format(struct fimc_dev *fimc);
+
+static int fimc_capture_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vc->ve;
+ int ret = -EBUSY;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ mutex_lock(&fimc->lock);
+
+ if (fimc_m2m_active(fimc))
+ goto unlock;
+
+ set_bit(ST_CAPT_BUSY, &fimc->state);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ goto unlock;
+ }
+
+ if (v4l2_fh_is_singular_file(file)) {
+ fimc_md_graph_lock(ve);
+
+ ret = fimc_pipeline_call(ve, open, &ve->vdev.entity, true);
+
+ if (ret == 0 && vc->user_subdev_api && vc->inh_sensor_ctrls) {
+ /*
+			 * Recreate controls of the video node to drop
+ * any controls inherited from the sensor subdev.
+ */
+ fimc_ctrls_delete(vc->ctx);
+
+ ret = fimc_ctrls_create(vc->ctx);
+ if (ret == 0)
+ vc->inh_sensor_ctrls = false;
+ }
+ if (ret == 0)
+ ve->vdev.entity.use_count++;
+
+ fimc_md_graph_unlock(ve);
+
+ if (ret == 0)
+ ret = fimc_capture_set_default_format(fimc);
+
+ if (ret < 0) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ v4l2_fh_release(file);
+ }
+ }
+unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_capture_release(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ bool close = v4l2_fh_is_singular_file(file);
+ int ret;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ mutex_lock(&fimc->lock);
+
+ if (close && vc->streaming) {
+ media_entity_pipeline_stop(&vc->ve.vdev.entity);
+ vc->streaming = false;
+ }
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (close) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ fimc_pipeline_call(&vc->ve, close);
+ clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ fimc_md_graph_lock(&vc->ve);
+ vc->ve.vdev.entity.use_count--;
+ fimc_md_graph_unlock(&vc->ve);
+ }
+
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations fimc_capture_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_capture_open,
+ .release = fimc_capture_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
+ u32 *width, u32 *height,
+ u32 *code, u32 *fourcc, int pad)
+{
+ bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
+ struct fimc_frame *dst = &ctx->d_frame;
+ u32 depth, min_w, max_w, min_h, align_h = 3;
+ u32 mask = FMT_FLAGS_CAM;
+ struct fimc_fmt *ffmt;
+
+ /* Conversion from/to JPEG or User Defined format is not supported */
+ if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE &&
+ fimc_fmt_is_user_defined(ctx->s_frame.fmt->color))
+ *code = ctx->s_frame.fmt->mbus_code;
+
+ if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad == FIMC_SD_PAD_SOURCE)
+ mask |= FMT_FLAGS_M2M;
+
+ if (pad == FIMC_SD_PAD_SINK_FIFO)
+ mask = FMT_FLAGS_WRITEBACK;
+
+ ffmt = fimc_find_format(fourcc, code, mask, 0);
+ if (WARN_ON(!ffmt))
+ return NULL;
+
+ if (code)
+ *code = ffmt->mbus_code;
+ if (fourcc)
+ *fourcc = ffmt->fourcc;
+
+ if (pad != FIMC_SD_PAD_SOURCE) {
+ max_w = fimc_fmt_is_user_defined(ffmt->color) ?
+ pl->scaler_dis_w : pl->scaler_en_w;
+ /* Apply the camera input interface pixel constraints */
+ v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4,
+ height, max_t(u32, *height, 32),
+ FIMC_CAMIF_MAX_HEIGHT,
+ fimc_fmt_is_user_defined(ffmt->color) ?
+ 3 : 1,
+ 0);
+ return ffmt;
+ }
+ /* Can't scale or crop in transparent (JPEG) transfer mode */
+ if (fimc_fmt_is_user_defined(ffmt->color)) {
+ *width = ctx->s_frame.f_width;
+ *height = ctx->s_frame.f_height;
+ return ffmt;
+ }
+ /* Apply the scaler and the output DMA constraints */
+ max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
+ if (ctx->state & FIMC_COMPOSE) {
+ min_w = dst->offs_h + dst->width;
+ min_h = dst->offs_v + dst->height;
+ } else {
+ min_w = var->min_out_pixsize;
+ min_h = var->min_out_pixsize;
+ }
+ if (var->min_vsize_align == 1 && !rotation)
+ align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
+
+ depth = fimc_get_format_depth(ffmt);
+ v4l_bound_align_image(width, min_w, max_w,
+ ffs(var->min_out_pixsize) - 1,
+ height, min_h, FIMC_CAMIF_MAX_HEIGHT,
+ align_h,
+ 64/(ALIGN(depth, 8)));
+
+ dbg("pad%d: code: 0x%x, %dx%d. dst fmt: %dx%d",
+ pad, code ? *code : 0, *width, *height,
+ dst->f_width, dst->f_height);
+
+ return ffmt;
+}
+
+static void fimc_capture_try_selection(struct fimc_ctx *ctx,
+ struct v4l2_rect *r,
+ int target)
+{
+ bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
+ struct fimc_frame *sink = &ctx->s_frame;
+ u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
+ u32 align_sz = 0, align_h = 4;
+ u32 max_sc_h, max_sc_v;
+
+ /* In JPEG transparent transfer mode cropping is not supported */
+ if (fimc_fmt_is_user_defined(ctx->d_frame.fmt->color)) {
+ r->width = sink->f_width;
+ r->height = sink->f_height;
+ r->left = r->top = 0;
+ return;
+ }
+ if (target == V4L2_SEL_TGT_COMPOSE) {
+ if (ctx->rotation != 90 && ctx->rotation != 270)
+ align_h = 1;
+ max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3));
+ max_sc_v = min(SCALER_MAX_VRATIO, 1 << (ffs(sink->height) - 1));
+ min_sz = var->min_out_pixsize;
+ } else {
+ u32 depth = fimc_get_format_depth(sink->fmt);
+ align_sz = 64/ALIGN(depth, 8);
+ min_sz = var->min_inp_pixsize;
+ min_w = min_h = min_sz;
+ max_sc_h = max_sc_v = 1;
+ }
+ /*
+ * For the compose rectangle the following constraints must be met:
+ * - it must fit in the sink pad format rectangle (f_width/f_height);
+ * - maximum downscaling ratio is 64;
+	 * - maximum crop size depends on whether the rotator is used;
+ * - the sink pad format width/height must be 4 multiple of the
+ * prescaler ratios determined by sink pad size and source pad crop,
+ * the prescaler ratio is returned by fimc_get_scaler_factor().
+ */
+ max_w = min_t(u32,
+ rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
+ rotate ? sink->f_height : sink->f_width);
+ max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
+
+ if (target == V4L2_SEL_TGT_COMPOSE) {
+ min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
+ min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
+ if (rotate) {
+ swap(max_sc_h, max_sc_v);
+ swap(min_w, min_h);
+ }
+ }
+ v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1,
+ &r->height, min_h, max_h, align_h,
+ align_sz);
+ /* Adjust left/top if crop/compose rectangle is out of bounds */
+ r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width);
+ r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height);
+ r->left = round_down(r->left, var->hor_offs_align);
+
+ dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d",
+ target, r->left, r->top, r->width, r->height,
+ sink->f_width, sink->f_height);
+}
+
+/*
+ * The video node ioctl operations
+ */
+static int fimc_cap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ __fimc_vidioc_querycap(&fimc->pdev->dev, cap, V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE);
+ return 0;
+}
+
+static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M,
+ f->index);
+ if (!fmt)
+ return -EINVAL;
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ if (fmt->fourcc == V4L2_MBUS_FMT_JPEG_1X8)
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+ return 0;
+}
+
+static struct media_entity *fimc_pipeline_get_head(struct media_entity *me)
+{
+ struct media_pad *pad = &me->pads[0];
+
+ while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
+ pad = media_entity_remote_pad(pad);
+ if (!pad)
+ break;
+ me = pad->entity;
+ pad = &me->pads[0];
+ }
+
+ return me;
+}
+
+/**
+ * fimc_pipeline_try_format - negotiate and/or set formats at pipeline
+ * elements
+ * @ctx: FIMC capture context
+ * @tfmt: media bus format to try/set on subdevs
+ * @fmt_id: fimc pixel format id corresponding to returned @tfmt (output)
+ * @set: true to set format on subdevs, false to try only
+ */
+static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
+ struct v4l2_mbus_framefmt *tfmt,
+ struct fimc_fmt **fmt_id,
+ bool set)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_pipeline *p = to_fimc_pipeline(fimc->vid_cap.ve.pipe);
+ struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
+ struct v4l2_subdev_format sfmt;
+ struct v4l2_mbus_framefmt *mf = &sfmt.format;
+ struct media_entity *me;
+ struct fimc_fmt *ffmt;
+ struct media_pad *pad;
+ int ret, i = 1;
+ u32 fcc;
+
+ if (WARN_ON(!sd || !tfmt))
+ return -EINVAL;
+
+ memset(&sfmt, 0, sizeof(sfmt));
+ sfmt.format = *tfmt;
+ sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY;
+
+ me = fimc_pipeline_get_head(&sd->entity);
+
+ while (1) {
+ ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL,
+ FMT_FLAGS_CAM, i++);
+ if (ffmt == NULL) {
+ /*
+ * Notify user-space if common pixel code for
+ * host and sensor does not exist.
+ */
+ return -EINVAL;
+ }
+ mf->code = tfmt->code = ffmt->mbus_code;
+
+ /* set format on all pipeline subdevs */
+ while (me != &fimc->vid_cap.subdev.entity) {
+ sd = media_entity_to_v4l2_subdev(me);
+
+ sfmt.pad = 0;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
+ if (ret)
+ return ret;
+
+ if (me->pads[0].flags & MEDIA_PAD_FL_SINK) {
+ sfmt.pad = me->num_pads - 1;
+ mf->code = tfmt->code;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL,
+ &sfmt);
+ if (ret)
+ return ret;
+ }
+
+ pad = media_entity_remote_pad(&me->pads[sfmt.pad]);
+ if (!pad)
+ return -EINVAL;
+ me = pad->entity;
+ }
+
+ if (mf->code != tfmt->code)
+ continue;
+
+ fcc = ffmt->fourcc;
+ tfmt->width = mf->width;
+ tfmt->height = mf->height;
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SINK_CAM);
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SOURCE);
+ if (ffmt && ffmt->mbus_code)
+ mf->code = ffmt->mbus_code;
+ if (mf->width != tfmt->width || mf->height != tfmt->height)
+ continue;
+ tfmt->code = mf->code;
+ break;
+ }
+
+ if (fmt_id && ffmt)
+ *fmt_id = ffmt;
+ *tfmt = *mf;
+
+ return 0;
+}
+
+/**
+ * fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters
+ * @sensor: pointer to the sensor subdev
+ * @plane_fmt: provides plane sizes corresponding to the frame layout entries
+ * @try: true to set the frame parameters, false to query only
+ *
+ * This function is used by this driver only for compressed/blob data formats.
+ */
+static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor,
+ struct v4l2_plane_pix_format *plane_fmt,
+ unsigned int num_planes, bool try)
+{
+ struct v4l2_mbus_frame_desc fd;
+ int i, ret;
+ int pad;
+
+ for (i = 0; i < num_planes; i++)
+ fd.entry[i].length = plane_fmt[i].sizeimage;
+
+ pad = sensor->entity.num_pads - 1;
+ if (try)
+ ret = v4l2_subdev_call(sensor, pad, set_frame_desc, pad, &fd);
+ else
+ ret = v4l2_subdev_call(sensor, pad, get_frame_desc, pad, &fd);
+
+ if (ret < 0)
+ return ret;
+
+ if (num_planes != fd.num_entries)
+ return -EINVAL;
+
+ for (i = 0; i < num_planes; i++)
+ plane_fmt[i].sizeimage = fd.entry[i].length;
+
+ if (fd.entry[0].length > FIMC_MAX_JPEG_BUF_SIZE) {
+ v4l2_err(sensor->v4l2_dev, "Unsupported buffer size: %u\n",
+ fd.entry[0].length);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
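fimc_get_sensor_frame_desc() relies on the sensor subdev filling a
v4l2_mbus_frame_desc with one entry per memory plane. A hedged sketch of what
the sensor-side pad operation queried above might look like; the sensor
driver, its function name and the 3 MiB limit are hypothetical:

#include <media/v4l2-subdev.h>

#define SENSOR_MAX_JPEG_SIZE	(3 * 1024 * 1024)	/* illustrative */

static int sensor_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
				 struct v4l2_mbus_frame_desc *fd)
{
	/* One entry per memory plane; the host copies entry[i].length
	 * back into plane_fmt[i].sizeimage. */
	fd->num_entries = 1;
	fd->entry[0].length = SENSOR_MAX_JPEG_SIZE;

	return 0;
}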
+static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ __fimc_get_format(&fimc->vid_cap.ctx->d_frame, f);
+ return 0;
+}
+
+/*
+ * Try or set format on the fimc.X.capture video node and additionally
+ * on the whole pipeline if @try is false.
+ * Locking: the caller must _not_ hold the graph mutex.
+ */
+static int __video_try_or_set_format(struct fimc_dev *fimc,
+ struct v4l2_format *f, bool try,
+ struct fimc_fmt **inp_fmt,
+ struct fimc_fmt **out_fmt)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vc->ve;
+ struct fimc_ctx *ctx = vc->ctx;
+ unsigned int width = 0, height = 0;
+ int ret = 0;
+
+ /* Pre-configure format at the camera input interface, for JPEG only */
+ if (fimc_jpeg_fourcc(pix->pixelformat)) {
+ fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SINK_CAM);
+ if (try) {
+ width = pix->width;
+ height = pix->height;
+ } else {
+ ctx->s_frame.f_width = pix->width;
+ ctx->s_frame.f_height = pix->height;
+ }
+ }
+
+ /* Try the format at the scaler and the DMA output */
+ *out_fmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SOURCE);
+ if (*out_fmt == NULL)
+ return -EINVAL;
+
+ /* Restore image width/height for JPEG (no resizing supported). */
+ if (try && fimc_jpeg_fourcc(pix->pixelformat)) {
+ pix->width = width;
+ pix->height = height;
+ }
+
+ /* Try to match format at the host and the sensor */
+ if (!vc->user_subdev_api) {
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = try ? &mbus_fmt : &fimc->vid_cap.ci_fmt;
+
+ mf->code = (*out_fmt)->mbus_code;
+ mf->width = pix->width;
+ mf->height = pix->height;
+
+ fimc_md_graph_lock(ve);
+ ret = fimc_pipeline_try_format(ctx, mf, inp_fmt, try);
+ fimc_md_graph_unlock(ve);
+
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf->width;
+ pix->height = mf->height;
+ }
+
+ fimc_adjust_mplane_format(*out_fmt, pix->width, pix->height, pix);
+
+ if ((*out_fmt)->flags & FMT_FLAGS_COMPRESSED) {
+ struct v4l2_subdev *sensor;
+
+ fimc_md_graph_lock(ve);
+
+ sensor = __fimc_md_get_subdev(ve->pipe, IDX_SENSOR);
+ if (sensor)
+ fimc_get_sensor_frame_desc(sensor, pix->plane_fmt,
+ (*out_fmt)->memplanes, try);
+ else
+ ret = -EPIPE;
+
+ fimc_md_graph_unlock(ve);
+ }
+
+ return ret;
+}
+
+static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_fmt *out_fmt = NULL, *inp_fmt = NULL;
+
+ return __video_try_or_set_format(fimc, f, true, &inp_fmt, &out_fmt);
+}
+
+static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx,
+ enum fimc_color_fmt color)
+{
+ bool jpeg = fimc_fmt_is_user_defined(color);
+
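+ /* The scaler and the V4L2 controls are not available for JPEG (user-defined) formats */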
+ ctx->scaler.enabled = !jpeg;
+ fimc_ctrls_activate(ctx, !jpeg);
+
+ if (jpeg)
+ set_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
+ else
+ clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
+}
+
+static int __fimc_capture_set_format(struct fimc_dev *fimc,
+ struct v4l2_format *f)
+{
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct fimc_ctx *ctx = vc->ctx;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_frame *ff = &ctx->d_frame;
+ struct fimc_fmt *inp_fmt = NULL;
+ int ret, i;
+
+ if (vb2_is_busy(&fimc->vid_cap.vbq))
+ return -EBUSY;
+
+ ret = __video_try_or_set_format(fimc, f, false, &inp_fmt, &ff->fmt);
+ if (ret < 0)
+ return ret;
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ for (i = 0; i < ff->fmt->memplanes; i++) {
+ ff->bytesperline[i] = pix->plane_fmt[i].bytesperline;
+ ff->payload[i] = pix->plane_fmt[i].sizeimage;
+ }
+
+ set_frame_bounds(ff, pix->width, pix->height);
+ /* Reset the composition rectangle if not yet configured */
+ if (!(ctx->state & FIMC_COMPOSE))
+ set_frame_crop(ff, 0, 0, pix->width, pix->height);
+
+ fimc_capture_mark_jpeg_xfer(ctx, ff->fmt->color);
+
+ /* Reset cropping and set format at the camera interface input */
+ if (!vc->user_subdev_api) {
+ ctx->s_frame.fmt = inp_fmt;
+ set_frame_bounds(&ctx->s_frame, pix->width, pix->height);
+ set_frame_crop(&ctx->s_frame, 0, 0, pix->width, pix->height);
+ }
+
+ return ret;
+}
+
+static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ return __fimc_capture_set_format(fimc, f);
+}
+
+static int fimc_cap_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct exynos_video_entity *ve = &fimc->vid_cap.ve;
+ struct v4l2_subdev *sd;
+
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ fimc_md_graph_lock(ve);
+ sd = __fimc_md_get_subdev(ve->pipe, IDX_SENSOR);
+ fimc_md_graph_unlock(ve);
+
+ if (sd)
+ strlcpy(i->name, sd->name, sizeof(i->name));
+
+ return 0;
+}
+
+static int fimc_cap_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return i == 0 ? i : -EINVAL;
+}
+
+static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+/**
+ * fimc_pipeline_validate - check for format inconsistencies between the
+ * source and sink pads of each link
+ * @fimc: FIMC device whose capture pipeline is being validated
+ *
+ * Return 0 if all formats match or -EPIPE otherwise.
+ */
+static int fimc_pipeline_validate(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct v4l2_subdev *sd = &vc->subdev;
+ struct fimc_pipeline *p = to_fimc_pipeline(vc->ve.pipe);
+ struct media_pad *sink_pad, *src_pad;
+ int i, ret;
+
+ while (1) {
+ /*
+ * Find the current entity's sink pad and the remote source pad
+ * linked to it. Stop if the current entity has no sink pad or
+ * its sink pad is not linked to any remote entity.
+ */
+ src_pad = NULL;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct media_pad *pad = &sd->entity.pads[i];
+
+ if (pad->flags & MEDIA_PAD_FL_SINK) {
+ sink_pad = pad;
+ src_pad = media_entity_remote_pad(sink_pad);
+ if (src_pad)
+ break;
+ }
+ }
+
+ if (src_pad == NULL ||
+ media_entity_type(src_pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ /* Don't call FIMC subdev operation to avoid nested locking */
+ if (sd == &vc->subdev) {
+ struct fimc_frame *ff = &vc->ctx->s_frame;
+ sink_fmt.format.width = ff->f_width;
+ sink_fmt.format.height = ff->f_height;
+ sink_fmt.format.code = ff->fmt ? ff->fmt->mbus_code : 0;
+ } else {
+ sink_fmt.pad = sink_pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+ }
+
+ /* Retrieve format at the source pad */
+ sd = media_entity_to_v4l2_subdev(src_pad->entity);
+ src_fmt.pad = src_pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+
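+ /*
+ * For compressed (user-defined) sensor formats also check that the
+ * capture buffer planes are large enough for the plane sizes
+ * reported by the sensor's frame descriptor.
+ */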
+ if (sd == p->subdevs[IDX_SENSOR] &&
+ fimc_user_defined_mbus_fmt(src_fmt.format.code)) {
+ struct v4l2_plane_pix_format plane_fmt[FIMC_MAX_PLANES];
+ struct fimc_frame *frame = &vc->ctx->d_frame;
+ unsigned int i;
+
+ ret = fimc_get_sensor_frame_desc(sd, plane_fmt,
+ frame->fmt->memplanes,
+ false);
+ if (ret < 0)
+ return -EPIPE;
+
+ for (i = 0; i < frame->fmt->memplanes; i++)
+ if (frame->payload[i] < plane_fmt[i].sizeimage)
+ return -EPIPE;
+ }
+ }
+ return 0;
+}
+
+static int fimc_cap_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct media_entity *entity = &vc->ve.vdev.entity;
+ struct fimc_source_info *si = NULL;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (fimc_capture_active(fimc))
+ return -EBUSY;
+
+ ret = media_entity_pipeline_start(entity, &vc->ve.pipe->mp);
+ if (ret < 0)
+ return ret;
+
+ sd = __fimc_md_get_subdev(vc->ve.pipe, IDX_SENSOR);
+ if (sd)
+ si = v4l2_get_subdev_hostdata(sd);
+
+ if (si == NULL) {
+ ret = -EPIPE;
+ goto err_p_stop;
+ }
+ /*
+ * Save configuration data related to currently attached image
+ * sensor or other data source, e.g. FIMC-IS.
+ */
+ vc->source_config = *si;
+
+ if (vc->input == GRP_ID_FIMC_IS)
+ vc->source_config.fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
+
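+ /* With the user-configured subdev API, verify pipeline format consistency before streaming */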
+ if (vc->user_subdev_api) {
+ ret = fimc_pipeline_validate(fimc);
+ if (ret < 0)
+ goto err_p_stop;
+ }
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ if (!ret) {
+ vc->streaming = true;
+ return ret;
+ }
+
+err_p_stop:
+ media_entity_pipeline_stop(entity);
+ return ret;
+}
+
+static int fimc_cap_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ int ret;
+
+ ret = vb2_ioctl_streamoff(file, priv, type);
+ if (ret < 0)
+ return ret;
+
+ media_entity_pipeline_stop(&vc->ve.vdev.entity);
+ vc->streaming = false;
+ return 0;
+}
+
+static int fimc_cap_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ int ret;
+
+ ret = vb2_ioctl_reqbufs(file, priv, reqbufs);
+
+ if (!ret)
+ fimc->vid_cap.reqbufs_count = reqbufs->count;
+
+ return ret;
+}
+
+static int fimc_cap_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *f = &ctx->s_frame;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
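+ /* fall through */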
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->o_width;
+ s->r.height = f->o_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ f = &ctx->d_frame;
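+ /* fall through */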
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = f->offs_h;
+ s->r.top = f->offs_v;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int fimc_cap_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct v4l2_rect rect = s->r;
+ struct fimc_frame *f;
+ unsigned long flags;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ if (s->target == V4L2_SEL_TGT_COMPOSE)
+ f = &ctx->d_frame;
+ else if (s->target == V4L2_SEL_TGT_CROP)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ fimc_capture_try_selection(ctx, &rect, s->target);
+
+ if (s->flags & V4L2_SEL_FLAG_LE &&
+ !enclosed_rectangle(&rect, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE &&
+ !enclosed_rectangle(&s->r, &rect))
+ return -ERANGE;
+
+ s->r = rect;
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(f, s->r.left, s->r.top, s->r.width,
+ s->r.height);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
+ .vidioc_querycap = fimc_cap_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_cap_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_cap_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_cap_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_cap_g_fmt_mplane,
+
+ .vidioc_reqbufs = fimc_cap_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+
+ .vidioc_streamon = fimc_cap_streamon,
+ .vidioc_streamoff = fimc_cap_streamoff,
+
+ .vidioc_g_selection = fimc_cap_g_selection,
+ .vidioc_s_selection = fimc_cap_s_selection,
+
+ .vidioc_enum_input = fimc_cap_enum_input,
+ .vidioc_s_input = fimc_cap_s_input,
+ .vidioc_g_input = fimc_cap_g_input,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct v4l2_subdev *sensor;
+
+ if (media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EINVAL;
+
+ if (WARN_ON(fimc == NULL))
+ return 0;
+
+ dbg("%s --> %s, flags: 0x%x. input: 0x%x",
+ local->entity->name, remote->entity->name, flags,
+ fimc->vid_cap.input);
+
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ fimc->vid_cap.input = 0;
+ return 0;
+ }
+
+ if (vc->input != 0)
+ return -EBUSY;
+
+ vc->input = sd->grp_id;
+
+ if (vc->user_subdev_api || vc->inh_sensor_ctrls)
+ return 0;
+
+ /* Inherit V4L2 controls from the image sensor subdev. */
+ sensor = fimc_find_remote_sensor(&vc->subdev.entity);
+ if (sensor == NULL)
+ return 0;
+
+ return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler,
+ sensor->ctrl_handler, NULL);
+}
+
+static const struct media_entity_operations fimc_sd_media_ops = {
+ .link_setup = fimc_link_setup,
+};
+
+/**
+ * fimc_sensor_notify - v4l2_device notification from a sensor subdev
+ * @sd: pointer to a subdev generating the notification
+ * @notification: the notification type, must be S5P_FIMC_TX_END_NOTIFY
+ * @arg: pointer to a u32 integer that stores the frame payload value
+ *
+ * The End Of Frame notification is sent by a sensor subdev in its still
+ * capture mode. If the sensor generates only a single VSYNC at the beginning
+ * of a frame transmission, FIMC does not issue the LastIrq (end of frame)
+ * interrupt, so this notification is used to complete the frame capture and
+ * return a buffer to user-space. Subdev drivers should send this
+ * notification from their last 'End of frame capture' interrupt.
+ */
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct fimc_source_info *si;
+ struct fimc_vid_buffer *buf;
+ struct fimc_md *fmd;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+
+ if (sd == NULL)
+ return;
+
+ si = v4l2_get_subdev_hostdata(sd);
+ fmd = entity_to_fimc_mdev(&sd->entity);
+
+ spin_lock_irqsave(&fmd->slock, flags);
+
+ fimc = si ? source_to_sensor_info(si)->host : NULL;
+
+ if (fimc && arg && notification == S5P_FIMC_TX_END_NOTIFY &&
+ test_bit(ST_CAPT_PEND, &fimc->state)) {
+ unsigned long irq_flags;
+ spin_lock_irqsave(&fimc->slock, irq_flags);
+ if (!list_empty(&fimc->vid_cap.active_buf_q)) {
+ buf = list_entry(fimc->vid_cap.active_buf_q.next,
+ struct fimc_vid_buffer, list);
+ vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg));
+ }
+ fimc_capture_irq_handler(fimc, 1);
+ fimc_deactivate_capture(fimc);
+ spin_unlock_irqrestore(&fimc->slock, irq_flags);
+ }
+ spin_unlock_irqrestore(&fmd->slock, flags);
+}
+
+static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *ff = &ctx->s_frame;
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mf = &fmt->format;
+ mutex_lock(&fimc->lock);
+
+ switch (fmt->pad) {
+ case FIMC_SD_PAD_SOURCE:
+ if (!WARN_ON(ff->fmt == NULL))
+ mf->code = ff->fmt->mbus_code;
+ /* The source pad size mirrors the sink pad's crop rectangle size */
+ mf->width = ff->width;
+ mf->height = ff->height;
+ break;
+ case FIMC_SD_PAD_SINK_FIFO:
+ *mf = fimc->vid_cap.wb_fmt;
+ break;
+ case FIMC_SD_PAD_SINK_CAM:
+ default:
+ *mf = fimc->vid_cap.ci_fmt;
+ break;
+ }
+
+ mutex_unlock(&fimc->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ return 0;
+}
+
+static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct fimc_ctx *ctx = vc->ctx;
+ struct fimc_frame *ff;
+ struct fimc_fmt *ffmt;
+
+ dbg("pad%d: code: 0x%x, %dx%d",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ if (fmt->pad == FIMC_SD_PAD_SOURCE && vb2_is_busy(&vc->vbq))
+ return -EBUSY;
+
+ mutex_lock(&fimc->lock);
+ ffmt = fimc_capture_try_format(ctx, &mf->width, &mf->height,
+ &mf->code, NULL, fmt->pad);
+ mutex_unlock(&fimc->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ return 0;
+ }
+ /* There must be a bug in the driver if this happens */
+ if (WARN_ON(ffmt == NULL))
+ return -EINVAL;
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ fimc_capture_mark_jpeg_xfer(ctx, ffmt->color);
+ if (fmt->pad == FIMC_SD_PAD_SOURCE) {
+ ff = &ctx->d_frame;
+ /* Force the source pad size to the sink pad's crop rectangle size */
+ mf->width = ctx->s_frame.width;
+ mf->height = ctx->s_frame.height;
+ } else {
+ ff = &ctx->s_frame;
+ }
+
+ mutex_lock(&fimc->lock);
+ set_frame_bounds(ff, mf->width, mf->height);
+
+ if (fmt->pad == FIMC_SD_PAD_SINK_FIFO)
+ vc->wb_fmt = *mf;
+ else if (fmt->pad == FIMC_SD_PAD_SINK_CAM)
+ vc->ci_fmt = *mf;
+
+ ff->fmt = ffmt;
+
+ /* Reset the crop rectangle if required. */
+ if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_COMPOSE)))
+ set_frame_crop(ff, 0, 0, mf->width, mf->height);
+
+ if (fmt->pad != FIMC_SD_PAD_SOURCE)
+ ctx->state &= ~FIMC_COMPOSE;
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
+
+ if (sel->pad == FIMC_SD_PAD_SOURCE)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
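+ /* fall through */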
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ r->width = f->o_width;
+ r->height = f->o_height;
+ r->left = 0;
+ r->top = 0;
+ mutex_unlock(&fimc->lock);
+ return 0;
+
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *try_sel;
+ } else {
+ r->left = f->offs_h;
+ r->top = f->offs_v;
+ r->width = f->width;
+ r->height = f->height;
+ }
+
+ dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ sel->pad, r->left, r->top, r->width, r->height,
+ f->f_width, f->f_height);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
+ unsigned long flags;
+
+ if (sel->pad == FIMC_SD_PAD_SOURCE)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+ fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *try_sel = sel->r;
+ } else {
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(f, r->left, r->top, r->width, r->height);
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ if (sel->target == V4L2_SEL_TGT_COMPOSE)
+ ctx->state |= FIMC_COMPOSE;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+
+ dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
+ r->width, r->height);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
+ .enum_mbus_code = fimc_subdev_enum_mbus_code,
+ .get_selection = fimc_subdev_get_selection,
+ .set_selection = fimc_subdev_set_selection,
+ .get_fmt = fimc_subdev_get_fmt,
+ .set_fmt = fimc_subdev_set_fmt,
+};
+
+static struct v4l2_subdev_ops fimc_subdev_ops = {
+ .pad = &fimc_subdev_pad_ops,
+};
+
+/* Set default format at the sensor and host interface */
+static int fimc_capture_set_default_format(struct fimc_dev *fimc)
+{
+ struct v4l2_format fmt = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ .fmt.pix_mp = {
+ .width = FIMC_DEFAULT_WIDTH,
+ .height = FIMC_DEFAULT_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
+ };
+
+ return __fimc_capture_set_format(fimc, &fmt);
+}
+
+/* The fimc->lock mutex must already be initialized */
+static int fimc_register_capture_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vfd = &fimc->vid_cap.ve.vdev;
+ struct vb2_queue *q = &fimc->vid_cap.vbq;
+ struct fimc_ctx *ctx;
+ struct fimc_vid_cap *vid_cap;
+ struct fimc_fmt *fmt;
+ int ret = -ENOMEM;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->fimc_dev = fimc;
+ ctx->in_path = FIMC_IO_CAMERA;
+ ctx->out_path = FIMC_IO_DMA;
+ ctx->state = FIMC_CTX_CAP;
+ ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+ ctx->d_frame.fmt = ctx->s_frame.fmt;
+
+ memset(vfd, 0, sizeof(*vfd));
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.capture", fimc->id);
+
+ vfd->fops = &fimc_capture_fops;
+ vfd->ioctl_ops = &fimc_capture_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->queue = q;
+ vfd->lock = &fimc->lock;
+
+ video_set_drvdata(vfd, fimc);
+ vid_cap = &fimc->vid_cap;
+ vid_cap->active_buf_cnt = 0;
+ vid_cap->reqbufs_count = 0;
+ vid_cap->ctx = ctx;
+
+ INIT_LIST_HEAD(&vid_cap->pending_buf_q);
+ INIT_LIST_HEAD(&vid_cap->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = ctx;
+ q->ops = &fimc_capture_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct fimc_vid_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &fimc->lock;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_free_ctx;
+
+ /* Default format configuration */
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+ vid_cap->ci_fmt.width = FIMC_DEFAULT_WIDTH;
+ vid_cap->ci_fmt.height = FIMC_DEFAULT_HEIGHT;
+ vid_cap->ci_fmt.code = fmt->mbus_code;
+
+ ctx->s_frame.width = FIMC_DEFAULT_WIDTH;
+ ctx->s_frame.height = FIMC_DEFAULT_HEIGHT;
+ ctx->s_frame.fmt = fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_WRITEBACK, 0);
+ vid_cap->wb_fmt = vid_cap->ci_fmt;
+ vid_cap->wb_fmt.code = fmt->mbus_code;
+
+ vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0);
+ if (ret)
+ goto err_free_ctx;
+
+ ret = fimc_ctrls_create(ctx);
+ if (ret)
+ goto err_me_cleanup;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_ctrl_free;
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ vfd->ctrl_handler = &ctx->ctrls.handler;
+ return 0;
+
+err_ctrl_free:
+ fimc_ctrls_delete(ctx);
+err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+err_free_ctx:
+ kfree(ctx);
+ return ret;
+}
+
+static int fimc_capture_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (fimc == NULL)
+ return -ENXIO;
+
+ ret = fimc_register_m2m_device(fimc, sd->v4l2_dev);
+ if (ret)
+ return ret;
+
+ fimc->vid_cap.ve.pipe = v4l2_get_subdev_hostdata(sd);
+
+ ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
+ if (ret) {
+ fimc_unregister_m2m_device(fimc);
+ fimc->vid_cap.ve.pipe = NULL;
+ }
+
+ return ret;
+}
+
+static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct video_device *vdev;
+
+ if (fimc == NULL)
+ return;
+
+ mutex_lock(&fimc->lock);
+
+ fimc_unregister_m2m_device(fimc);
+ vdev = &fimc->vid_cap.ve.vdev;
+
+ if (video_is_registered(vdev)) {
+ video_unregister_device(vdev);
+ media_entity_cleanup(&vdev->entity);
+ fimc_ctrls_delete(fimc->vid_cap.ctx);
+ fimc->vid_cap.ve.pipe = NULL;
+ }
+ kfree(fimc->vid_cap.ctx);
+ fimc->vid_cap.ctx = NULL;
+
+ mutex_unlock(&fimc->lock);
+}
+
+static const struct v4l2_subdev_internal_ops fimc_capture_sd_internal_ops = {
+ .registered = fimc_capture_subdev_registered,
+ .unregistered = fimc_capture_subdev_unregistered,
+};
+
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->id);
+
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK_CAM].flags = MEDIA_PAD_FL_SINK;
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK_FIFO].flags = MEDIA_PAD_FL_SINK;
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->vid_cap.sd_pads, 0);
+ if (ret)
+ return ret;
+
+ sd->entity.ops = &fimc_sd_media_ops;
+ sd->internal_ops = &fimc_capture_sd_internal_ops;
+ v4l2_set_subdevdata(sd, fimc);
+ return 0;
+}
+
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_set_subdevdata(sd, NULL);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
new file mode 100644
index 00000000000..b70fd996d79
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -0,0 +1,1308 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (CAMIF) driver
+ *
+ * Copyright (C) 2010-2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/mfd/syscon.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "media-dev.h"
+
+static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
+ "sclk_fimc", "fimc"
+};
+
+static struct fimc_fmt fimc_formats[] = {
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = { 16 },
+ .color = FIMC_FMT_RGB565,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = { 32 },
+ .color = FIMC_FMT_RGB666,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "BGRA8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .depth = { 32 },
+ .color = FIMC_FMT_RGB888,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M | FMT_HAS_ALPHA,
+ }, {
+ .name = "ARGB1555",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .depth = { 16 },
+ .color = FIMC_FMT_RGB555,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
+ }, {
+ .name = "ARGB4444",
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .depth = { 16 },
+ .color = FIMC_FMT_RGB444,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
+ }, {
+ .name = "YUV 4:4:4",
+ .mbus_code = V4L2_MBUS_FMT_YUV10_1X30,
+ .flags = FMT_FLAGS_WRITEBACK,
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CBYCRY422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CRYCBY422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = { 12 },
+ .color = FIMC_FMT_YCBCR420,
+ .memplanes = 1,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = { 12 },
+ .color = FIMC_FMT_YCBCR420,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .color = FIMC_FMT_YCBCR420,
+ .depth = { 8, 4 },
+ .memplanes = 2,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .color = FIMC_FMT_YCBCR420,
+ .depth = { 8, 2, 2 },
+ .memplanes = 3,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 2p, tiled",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .color = FIMC_FMT_YCBCR420,
+ .depth = { 8, 4 },
+ .memplanes = 2,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "JPEG encoded data",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .color = FIMC_FMT_JPEG,
+ .depth = { 8 },
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
+ .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
+ }, {
+ .name = "S5C73MX interleaved UYVY/JPEG",
+ .fourcc = V4L2_PIX_FMT_S5C_UYVY_JPG,
+ .color = FIMC_FMT_YUYV_JPEG,
+ .depth = { 8 },
+ .memplanes = 2,
+ .colplanes = 1,
+ .mdataplanes = 0x2, /* plane 1 holds frame meta data */
+ .mbus_code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
+ .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
+ },
+};
+
+struct fimc_fmt *fimc_get_format(unsigned int index)
+{
+ if (index >= ARRAY_SIZE(fimc_formats))
+ return NULL;
+
+ return &fimc_formats[index];
+}
+
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+ int dw, int dh, int rotation)
+{
+ if (rotation == 90 || rotation == 270)
+ swap(dw, dh);
+
+ if (!ctx->scaler.enabled)
+ return (sw == dw && sh == dh) ? 0 : -EINVAL;
+
+ if ((sw >= SCALER_MAX_HRATIO * dw) || (sh >= SCALER_MAX_VRATIO * dh))
+ return -EINVAL;
+
+ return 0;
+}
+
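+/*
+ * Find the largest power-of-two pre-scaler ratio (at most 32) for which
+ * the source is still at least ratio times the target size; the main
+ * scaler handles the remaining scaling factor.
+ */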
+static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+ u32 sh = 6;
+
+ if (src >= 64 * tar)
+ return -EINVAL;
+
+ while (sh--) {
+ u32 tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
+ }
+ *shift = 0, *ratio = 1;
+ return 0;
+}
+
+int fimc_set_scaler_info(struct fimc_ctx *ctx)
+{
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
+ struct device *dev = &ctx->fimc_dev->pdev->dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ struct fimc_frame *s_frame = &ctx->s_frame;
+ struct fimc_frame *d_frame = &ctx->d_frame;
+ int tx, ty, sx, sy;
+ int ret;
+
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ ty = d_frame->width;
+ tx = d_frame->height;
+ } else {
+ tx = d_frame->width;
+ ty = d_frame->height;
+ }
+ if (tx <= 0 || ty <= 0) {
+ dev_err(dev, "Invalid target size: %dx%d\n", tx, ty);
+ return -EINVAL;
+ }
+
+ sx = s_frame->width;
+ sy = s_frame->height;
+ if (sx <= 0 || sy <= 0) {
+ dev_err(dev, "Invalid source size: %dx%d\n", sx, sy);
+ return -EINVAL;
+ }
+ sc->real_width = sx;
+ sc->real_height = sy;
+
+ ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
+ if (ret)
+ return ret;
+
+ ret = fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor);
+ if (ret)
+ return ret;
+
+ sc->pre_dst_width = sx / sc->pre_hratio;
+ sc->pre_dst_height = sy / sc->pre_vratio;
+
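+ /*
+ * The main scaler ratios are fixed-point values: 14-bit precision on
+ * variants with the extended main scaler, 8-bit precision otherwise.
+ */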
+ if (variant->has_mainscaler_ext) {
+ sc->main_hratio = (sx << 14) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 14) / (ty << sc->vfactor);
+ } else {
+ sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+
+ }
+
+ sc->scaleup_h = (tx >= sx) ? 1 : 0;
+ sc->scaleup_v = (ty >= sy) ? 1 : 0;
+
+ /* Use scaler copy mode when the input and output size and format are identical */
+ if (s_frame->fmt->color == d_frame->fmt->color
+ && s_frame->width == d_frame->width
+ && s_frame->height == d_frame->height)
+ sc->copy_mode = 1;
+ else
+ sc->copy_mode = 0;
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *priv)
+{
+ struct fimc_dev *fimc = priv;
+ struct fimc_ctx *ctx;
+
+ fimc_hw_clear_irq(fimc);
+
+ spin_lock(&fimc->slock);
+
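+ /*
+ * A mem-to-mem transfer has finished: either complete a pending
+ * suspend request or finish the current m2m job. Otherwise service
+ * a capture interrupt.
+ */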
+ if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
+ if (test_and_clear_bit(ST_M2M_SUSPENDING, &fimc->state)) {
+ set_bit(ST_M2M_SUSPENDED, &fimc->state);
+ wake_up(&fimc->irq_queue);
+ goto out;
+ }
+ ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
+ if (ctx != NULL) {
+ spin_unlock(&fimc->slock);
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
+
+ if (ctx->state & FIMC_CTX_SHUT) {
+ ctx->state &= ~FIMC_CTX_SHUT;
+ wake_up(&fimc->irq_queue);
+ }
+ return IRQ_HANDLED;
+ }
+ } else if (test_bit(ST_CAPT_PEND, &fimc->state)) {
+ int last_buf = test_bit(ST_CAPT_JPEG, &fimc->state) &&
+ fimc->vid_cap.reqbufs_count == 1;
+ fimc_capture_irq_handler(fimc, !last_buf);
+ }
+out:
+ spin_unlock(&fimc->slock);
+ return IRQ_HANDLED;
+}
+
+/* The color format (colplanes, memplanes) must already be configured. */
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
+ struct fimc_frame *frame, struct fimc_addr *paddr)
+{
+ int ret = 0;
+ u32 pix_size;
+
+ if (vb == NULL || frame == NULL)
+ return -EINVAL;
+
+ pix_size = frame->width * frame->height;
+
+ dbg("memplanes= %d, colplanes= %d, pix_size= %d",
+ frame->fmt->memplanes, frame->fmt->colplanes, pix_size);
+
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
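+ /*
+ * For single memory plane formats derive the chroma plane addresses
+ * from the luma base address and the pixel count.
+ */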
+ if (frame->fmt->memplanes == 1) {
+ switch (frame->fmt->colplanes) {
+ case 1:
+ paddr->cb = 0;
+ paddr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ paddr->cb = (u32)(paddr->y + pix_size);
+ paddr->cr = 0;
+ break;
+ case 3:
+ paddr->cb = (u32)(paddr->y + pix_size);
+ /* decompose Y into Y/Cb/Cr */
+ if (FIMC_FMT_YCBCR420 == frame->fmt->color)
+ paddr->cr = (u32)(paddr->cb
+ + (pix_size >> 2));
+ else /* 422 */
+ paddr->cr = (u32)(paddr->cb
+ + (pix_size >> 1));
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (!frame->fmt->mdataplanes) {
+ if (frame->fmt->memplanes >= 2)
+ paddr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
+
+ if (frame->fmt->memplanes == 3)
+ paddr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
+ }
+
+ dbg("PHYS_ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
+ paddr->y, paddr->cb, paddr->cr, ret);
+
+ return ret;
+}
+
+/* Set order for 1 and 2 plane YCBCR 4:2:2 formats. */
+void fimc_set_yuv_order(struct fimc_ctx *ctx)
+{
+ /* The only 2-plane order mode supported by the SoC. */
+ ctx->in_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+ ctx->out_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+
+ /* Set order for 1 plane input formats. */
+ switch (ctx->s_frame.fmt->color) {
+ case FIMC_FMT_YCRYCB422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case FIMC_FMT_CBYCRY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case FIMC_FMT_CRYCBY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case FIMC_FMT_YCBYCR422:
+ default:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCBYCR;
+ break;
+ }
+ dbg("ctx->in_order_1p= %d", ctx->in_order_1p);
+
+ switch (ctx->d_frame.fmt->color) {
+ case FIMC_FMT_YCRYCB422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case FIMC_FMT_CBYCRY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case FIMC_FMT_CRYCBY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case FIMC_FMT_YCBYCR422:
+ default:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ }
+ dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
+}
+
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
+{
+ bool pix_hoff = ctx->fimc_dev->drv_data->dma_pix_hoff;
+ u32 i, depth = 0;
+
+ for (i = 0; i < f->fmt->memplanes; i++)
+ depth += f->fmt->depth[i];
+
+ f->dma_offset.y_h = f->offs_h;
+ if (!pix_hoff)
+ f->dma_offset.y_h *= (depth >> 3);
+
+ f->dma_offset.y_v = f->offs_v;
+
+ f->dma_offset.cb_h = f->offs_h;
+ f->dma_offset.cb_v = f->offs_v;
+
+ f->dma_offset.cr_h = f->offs_h;
+ f->dma_offset.cr_v = f->offs_v;
+
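+ /*
+ * Without pixel-granular DMA offsets the chroma offsets must be
+ * halved for horizontally (3-plane) and vertically (YCbCr 4:2:0)
+ * subsampled planes.
+ */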
+ if (!pix_hoff) {
+ if (f->fmt->colplanes == 3) {
+ f->dma_offset.cb_h >>= 1;
+ f->dma_offset.cr_h >>= 1;
+ }
+ if (f->fmt->color == FIMC_FMT_YCBCR420) {
+ f->dma_offset.cb_v >>= 1;
+ f->dma_offset.cr_v >>= 1;
+ }
+ }
+
+ dbg("in_offset: color= %d, y_h= %d, y_v= %d",
+ f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
+}
+
+static int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx)
+{
+ struct fimc_effect *effect = &ctx->effect;
+
+ switch (colorfx) {
+ case V4L2_COLORFX_NONE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+ break;
+ case V4L2_COLORFX_BW:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 128;
+ effect->pat_cr = 128;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 115;
+ effect->pat_cr = 145;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_NEGATIVE;
+ break;
+ case V4L2_COLORFX_EMBOSS:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_EMBOSSING;
+ break;
+ case V4L2_COLORFX_ART_FREEZE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARTFREEZE;
+ break;
+ case V4L2_COLORFX_SILHOUETTE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_SILHOUETTE;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = ctx->ctrls.colorfx_cbcr->val >> 8;
+ effect->pat_cr = ctx->ctrls.colorfx_cbcr->val & 0xff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * V4L2 controls handling
+ */
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct fimc_ctx, ctrls.handler)
+
+static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *variant = fimc->variant;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+
+ case V4L2_CID_ROTATE:
+ if (fimc_capture_pending(fimc)) {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctrl->val);
+ if (ret)
+ return -EINVAL;
+ }
+ if ((ctrl->val == 90 || ctrl->val == 270) &&
+ !variant->has_out_rot)
+ return -EINVAL;
+
+ ctx->rotation = ctrl->val;
+ break;
+
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
+
+ case V4L2_CID_COLORFX:
+ ret = fimc_set_color_effect(ctx, ctrl->val);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ ctx->state |= FIMC_PARAMS;
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ return 0;
+}
+
+static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+ ret = __fimc_s_ctrl(ctx, ctrl);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
+ .s_ctrl = fimc_s_ctrl,
+};
+
+int fimc_ctrls_create(struct fimc_ctx *ctx)
+{
+ unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
+
+ if (ctx->ctrls.ready)
+ return 0;
+
+ v4l2_ctrl_handler_init(handler, 6);
+
+ ctrls->rotate = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctrls->hflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctx->fimc_dev->drv_data->alpha_color)
+ ctrls->alpha = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, max_alpha, 1, 0);
+ else
+ ctrls->alpha = NULL;
+
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x983f, V4L2_COLORFX_NONE);
+
+ ctrls->colorfx_cbcr = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+
+ if (!handler->error) {
+ v4l2_ctrl_cluster(2, &ctrls->colorfx);
+ ctrls->ready = true;
+ }
+
+ return handler->error;
+}
+
+void fimc_ctrls_delete(struct fimc_ctx *ctx)
+{
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+ if (ctrls->ready) {
+ v4l2_ctrl_handler_free(&ctrls->handler);
+ ctrls->ready = false;
+ ctrls->alpha = NULL;
+ }
+}
+
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
+{
+ unsigned int has_alpha = ctx->d_frame.fmt->flags & FMT_HAS_ALPHA;
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+ if (!ctrls->ready)
+ return;
+
+ mutex_lock(ctrls->handler.lock);
+ v4l2_ctrl_activate(ctrls->rotate, active);
+ v4l2_ctrl_activate(ctrls->hflip, active);
+ v4l2_ctrl_activate(ctrls->vflip, active);
+ v4l2_ctrl_activate(ctrls->colorfx, active);
+ if (ctrls->alpha)
+ v4l2_ctrl_activate(ctrls->alpha, active && has_alpha);
+
+ if (active) {
+ fimc_set_color_effect(ctx, ctrls->colorfx->cur.val);
+ ctx->rotation = ctrls->rotate->val;
+ ctx->hflip = ctrls->hflip->val;
+ ctx->vflip = ctrls->vflip->val;
+ } else {
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+ ctx->rotation = 0;
+ ctx->hflip = 0;
+ ctx->vflip = 0;
+ }
+ mutex_unlock(ctrls->handler.lock);
+}
+
+/* Update maximum value of the alpha color control */
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_ctrl *ctrl = ctx->ctrls.alpha;
+
+ if (ctrl == NULL || !fimc->drv_data->alpha_color)
+ return;
+
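+ /* Adjust the control's maximum to the new format's alpha mask and clamp the current value */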
+ v4l2_ctrl_lock(ctrl);
+ ctrl->maximum = fimc_get_alpha_mask(ctx->d_frame.fmt);
+
+ if (ctrl->cur.val > ctrl->maximum)
+ ctrl->cur.val = ctrl->maximum;
+
+ v4l2_ctrl_unlock(ctrl);
+}
+
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ int i;
+
+ pixm->width = frame->o_width;
+ pixm->height = frame->o_height;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->pixelformat = frame->fmt->fourcc;
+ pixm->colorspace = V4L2_COLORSPACE_JPEG;
+ pixm->num_planes = frame->fmt->memplanes;
+
+ for (i = 0; i < pixm->num_planes; ++i) {
+ pixm->plane_fmt[i].bytesperline = frame->bytesperline[i];
+ pixm->plane_fmt[i].sizeimage = frame->payload[i];
+ }
+}
+
+/**
+ * fimc_adjust_mplane_format - adjust bytesperline/sizeimage for each plane
+ * @fmt: fimc pixel format description (input)
+ * @width: requested pixel width
+ * @height: requested pixel height
+ * @pix: multi-plane format to adjust
+ */
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+ struct v4l2_pix_format_mplane *pix)
+{
+ u32 bytesperline = 0;
+ int i;
+
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->field = V4L2_FIELD_NONE;
+ pix->num_planes = fmt->memplanes;
+ pix->pixelformat = fmt->fourcc;
+ pix->height = height;
+ pix->width = width;
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
+
+ if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
+ bpl = pix->width; /* Planar */
+
+ if (fmt->colplanes == 1 && /* Packed */
+ (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
+ bpl = (pix->width * fmt->depth[0]) / 8;
+ /*
+ * Currently bytesperline is the same for each plane, except for
+ * the V4L2_PIX_FMT_YUV420M format. This calculation may need to
+ * change when other multi-planar formats are added to the
+ * fimc_formats[] array.
+ */
+ if (i == 0)
+ bytesperline = bpl;
+ else if (i == 1 && fmt->memplanes == 3)
+ bytesperline /= 2;
+
+ plane_fmt->bytesperline = bytesperline;
+ plane_fmt->sizeimage = max((pix->width * pix->height *
+ fmt->depth[i]) / 8, plane_fmt->sizeimage);
+ }
+}
+
+/**
+ * fimc_find_format - lookup fimc color format by fourcc or media bus format
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @mask: the color flags to match
+ * @index: index among the formats matching @mask, ignored if negative
+ */
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
+ unsigned int mask, int index)
+{
+ struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
+ fmt = &fimc_formats[i];
+ if (!(fmt->flags & mask))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+static void fimc_clk_put(struct fimc_dev *fimc)
+{
+ int i;
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
+ if (IS_ERR(fimc->clock[i]))
+ continue;
+ clk_unprepare(fimc->clock[i]);
+ clk_put(fimc->clock[i]);
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_clk_get(struct fimc_dev *fimc)
+{
+ int i, ret;
+
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++)
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
+ fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
+ if (IS_ERR(fimc->clock[i])) {
+ ret = PTR_ERR(fimc->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(fimc->clock[i]);
+ if (ret < 0) {
+ clk_put(fimc->clock[i]);
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ fimc_clk_put(fimc);
+ dev_err(&fimc->pdev->dev, "failed to get clock: %s\n",
+ fimc_clocks[i]);
+ return -ENXIO;
+}
+
+static int fimc_m2m_suspend(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+ int timeout;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!fimc_m2m_pending(fimc)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &fimc->state);
+ set_bit(ST_M2M_SUSPENDING, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
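+ /* Wait until the currently running job is completed by the interrupt handler */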
+ timeout = wait_event_timeout(fimc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ clear_bit(ST_M2M_SUSPENDING, &fimc->state);
+ return timeout == 0 ? -EAGAIN : 0;
+}
+
+static int fimc_m2m_resume(struct fimc_dev *fimc)
+{
+ struct fimc_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ /* Clear the context so that a full H/W setup is done on the first run after resume */
+ ctx = fimc->m2m.ctx;
+ fimc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ return 0;
+}
+
+static const struct of_device_id fimc_of_match[];
+
+static int fimc_parse_dt(struct fimc_dev *fimc, u32 *clk_freq)
+{
+ struct device *dev = &fimc->pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *of_id;
+ struct fimc_variant *v;
+ struct fimc_pix_limit *lim;
+ u32 args[FIMC_PIX_LIMITS_MAX];
+ int ret;
+
+ if (of_property_read_bool(node, "samsung,lcd-wb"))
+ return -ENODEV;
+
+ v = devm_kzalloc(dev, sizeof(*v) + sizeof(*lim), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ of_id = of_match_node(fimc_of_match, node);
+ if (!of_id)
+ return -EINVAL;
+ fimc->drv_data = of_id->data;
+ ret = of_property_read_u32_array(node, "samsung,pix-limits",
+ args, FIMC_PIX_LIMITS_MAX);
+ if (ret < 0)
+ return ret;
+
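+ /* The pixel limit table is stored right after the variant struct in the same allocation */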
+ lim = (struct fimc_pix_limit *)&v[1];
+
+ lim->scaler_en_w = args[0];
+ lim->scaler_dis_w = args[1];
+ lim->out_rot_en_w = args[2];
+ lim->out_rot_dis_w = args[3];
+ v->pix_limit = lim;
+
+ ret = of_property_read_u32_array(node, "samsung,min-pix-sizes",
+ args, 2);
+ v->min_inp_pixsize = ret ? FIMC_DEF_MIN_SIZE : args[0];
+ v->min_out_pixsize = ret ? FIMC_DEF_MIN_SIZE : args[1];
+ ret = of_property_read_u32_array(node, "samsung,min-pix-alignment",
+ args, 2);
+ v->min_vsize_align = ret ? FIMC_DEF_HEIGHT_ALIGN : args[0];
+ v->hor_offs_align = ret ? FIMC_DEF_HOR_OFFS_ALIGN : args[1];
+
+ ret = of_property_read_u32(node, "samsung,rotators", &args[1]);
+ v->has_inp_rot = ret ? 1 : args[1] & 0x01;
+ v->has_out_rot = ret ? 1 : args[1] & 0x10;
+ v->has_mainscaler_ext = of_property_read_bool(node,
+ "samsung,mainscaler-ext");
+
+ v->has_isp_wb = of_property_read_bool(node, "samsung,isp-wb");
+ v->has_cam_if = of_property_read_bool(node, "samsung,cam-if");
+ of_property_read_u32(node, "clock-frequency", clk_freq);
+ fimc->id = of_alias_get_id(node, "fimc");
+
+ fimc->variant = v;
+ return 0;
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 lclk_freq = 0;
+ struct fimc_dev *fimc;
+ struct resource *res;
+ int ret = 0;
+
+ fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL);
+ if (!fimc)
+ return -ENOMEM;
+
+ fimc->pdev = pdev;
+
+ if (dev->of_node) {
+ ret = fimc_parse_dt(fimc, &lclk_freq);
+ if (ret < 0)
+ return ret;
+ } else {
+ fimc->drv_data = fimc_get_drvdata(pdev);
+ fimc->id = pdev->id;
+ }
+ if (!fimc->drv_data || fimc->id >= fimc->drv_data->num_entities ||
+ fimc->id < 0) {
+ dev_err(dev, "Invalid driver data or device id (%d)\n",
+ fimc->id);
+ return -EINVAL;
+ }
+ if (!dev->of_node)
+ fimc->variant = fimc->drv_data->variant[fimc->id];
+
+ init_waitqueue_head(&fimc->irq_queue);
+ spin_lock_init(&fimc->slock);
+ mutex_init(&fimc->lock);
+
+ fimc->sysreg = fimc_get_sysreg_regmap(dev->of_node);
+ if (IS_ERR(fimc->sysreg))
+ return PTR_ERR(fimc->sysreg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fimc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fimc->regs))
+ return PTR_ERR(fimc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "Failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ ret = fimc_clk_get(fimc);
+ if (ret)
+ return ret;
+
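+ /* Fall back to the default local clock frequency if none was specified in DT */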
+ if (lclk_freq == 0)
+ lclk_freq = fimc->drv_data->lclk_frequency;
+
+ ret = clk_set_rate(fimc->clock[CLK_BUS], lclk_freq);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(fimc->clock[CLK_BUS]);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(dev, res->start, fimc_irq_handler,
+ 0, dev_name(dev), fimc);
+ if (ret < 0) {
+ dev_err(dev, "failed to install irq (%d)\n", ret);
+ goto err_sclk;
+ }
+
+ ret = fimc_initialize_capture_subdev(fimc);
+ if (ret < 0)
+ goto err_sclk;
+
+ platform_set_drvdata(pdev, fimc);
+ pm_runtime_enable(dev);
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_enable(fimc->clock[CLK_GATE]);
+ if (ret < 0)
+ goto err_sd;
+ }
+
+ /* Initialize contiguous memory allocator */
+ fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
+ if (IS_ERR(fimc->alloc_ctx)) {
+ ret = PTR_ERR(fimc->alloc_ctx);
+ goto err_gclk;
+ }
+
+ dev_dbg(dev, "FIMC.%d registered successfully\n", fimc->id);
+ return 0;
+
+err_gclk:
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock[CLK_GATE]);
+err_sd:
+ fimc_unregister_capture_subdev(fimc);
+err_sclk:
+ clk_disable(fimc->clock[CLK_BUS]);
+ fimc_clk_put(fimc);
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+ /* Enable clocks and perform basic initialization */
+ clk_enable(fimc->clock[CLK_GATE]);
+ fimc_hw_reset(fimc);
+
+ /* Resume the capture or mem-to-mem device */
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_resume(fimc);
+
+ return fimc_m2m_resume(fimc);
+}
+
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fimc_capture_busy(fimc))
+ ret = fimc_capture_suspend(fimc);
+ else
+ ret = fimc_m2m_suspend(fimc);
+ if (!ret)
+ clk_disable(fimc->clock[CLK_GATE]);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+ /* Do not resume if the device was idle before system suspend */
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+ (!fimc_m2m_active(fimc) && !fimc_capture_busy(fimc))) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ fimc_hw_reset(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_resume(fimc);
+
+ return fimc_m2m_resume(fimc);
+}
+
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+ if (test_and_set_bit(ST_LPM, &fimc->state))
+ return 0;
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_suspend(fimc);
+
+ return fimc_m2m_suspend(fimc);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int fimc_remove(struct platform_device *pdev)
+{
+ struct fimc_dev *fimc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ clk_disable(fimc->clock[CLK_GATE]);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ fimc_unregister_capture_subdev(fimc);
+ vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+
+ clk_disable(fimc->clock[CLK_BUS]);
+ fimc_clk_put(fimc);
+
+ dev_info(&pdev->dev, "driver unloaded\n");
+ return 0;
+}
+
+/* Image pixel limits, similar across several FIMC HW revisions. */
+static const struct fimc_pix_limit s5p_pix_limit[4] = {
+ [0] = {
+ .scaler_en_w = 3264,
+ .scaler_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [1] = {
+ .scaler_en_w = 4224,
+ .scaler_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [2] = {
+ .scaler_en_w = 1920,
+ .scaler_dis_w = 8192,
+ .out_rot_en_w = 1280,
+ .out_rot_dis_w = 1920,
+ },
+};
+
+static const struct fimc_variant fimc0_variant_s5p = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .has_cam_if = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .min_vsize_align = 16,
+ .pix_limit = &s5p_pix_limit[0],
+};
+
+static const struct fimc_variant fimc2_variant_s5p = {
+ .has_cam_if = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .min_vsize_align = 16,
+ .pix_limit = &s5p_pix_limit[1],
+};
+
+static const struct fimc_variant fimc0_variant_s5pv210 = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .has_cam_if = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .min_vsize_align = 16,
+ .pix_limit = &s5p_pix_limit[1],
+};
+
+static const struct fimc_variant fimc1_variant_s5pv210 = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .has_cam_if = 1,
+ .has_mainscaler_ext = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .min_vsize_align = 1,
+ .pix_limit = &s5p_pix_limit[2],
+};
+
+static const struct fimc_variant fimc2_variant_s5pv210 = {
+ .has_cam_if = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .min_vsize_align = 16,
+ .pix_limit = &s5p_pix_limit[2],
+};
+
+/* S5PC100 */
+static const struct fimc_drvdata fimc_drvdata_s5p = {
+ .variant = {
+ [0] = &fimc0_variant_s5p,
+ [1] = &fimc0_variant_s5p,
+ [2] = &fimc2_variant_s5p,
+ },
+ .num_entities = 3,
+ .lclk_frequency = 133000000UL,
+ .out_buf_count = 4,
+};
+
+/* S5PV210, S5PC110 */
+static const struct fimc_drvdata fimc_drvdata_s5pv210 = {
+ .variant = {
+ [0] = &fimc0_variant_s5pv210,
+ [1] = &fimc1_variant_s5pv210,
+ [2] = &fimc2_variant_s5pv210,
+ },
+ .num_entities = 3,
+ .lclk_frequency = 166000000UL,
+ .out_buf_count = 4,
+ .dma_pix_hoff = 1,
+};
+
+/* EXYNOS4210, S5PV310, S5PC210 */
+static const struct fimc_drvdata fimc_drvdata_exynos4210 = {
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
+ .dma_pix_hoff = 1,
+ .cistatus2 = 1,
+ .alpha_color = 1,
+ .out_buf_count = 32,
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static const struct fimc_drvdata fimc_drvdata_exynos4x12 = {
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
+ .dma_pix_hoff = 1,
+ .cistatus2 = 1,
+ .alpha_color = 1,
+ .out_buf_count = 32,
+};
+
+static const struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "s5p-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_s5p,
+ }, {
+ .name = "s5pv210-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_s5pv210,
+ }, {
+ .name = "exynos4-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_exynos4210,
+ }, {
+ .name = "exynos4x12-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_exynos4x12,
+ },
+ { },
+};
+
+static const struct of_device_id fimc_of_match[] = {
+ {
+ .compatible = "samsung,s5pv210-fimc",
+ .data = &fimc_drvdata_s5pv210,
+ }, {
+ .compatible = "samsung,exynos4210-fimc",
+ .data = &fimc_drvdata_exynos4210,
+ }, {
+ .compatible = "samsung,exynos4212-fimc",
+ .data = &fimc_drvdata_exynos4x12,
+ },
+ { /* sentinel */ },
+};
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+static struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = fimc_remove,
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .of_match_table = fimc_of_match,
+ .name = FIMC_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ }
+};
+
+int __init fimc_register_driver(void)
+{
+ return platform_driver_register(&fimc_driver);
+}
+
+void __exit fimc_unregister_driver(void)
+{
+ platform_driver_unregister(&fimc_driver);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
new file mode 100644
index 00000000000..6c75c6ced1f
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -0,0 +1,728 @@
+/*
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_CORE_H_
+#define FIMC_CORE_H_
+
+/*#define DEBUG*/
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/io.h>
+#include <linux/sizes.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/exynos-fimc.h>
+
+#define dbg(fmt, args...) \
+ pr_debug("%s:%d: " fmt "\n", __func__, __LINE__, ##args)
+
+/* Time to wait for next frame VSYNC interrupt while stopping operation. */
+#define FIMC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
+#define MAX_FIMC_CLOCKS 2
+#define FIMC_DRIVER_NAME "exynos4-fimc"
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_OUT_BUFS 4
+#define SCALER_MAX_HRATIO 64
+#define SCALER_MAX_VRATIO 64
+#define DMA_MIN_SIZE 8
+#define FIMC_CAMIF_MAX_HEIGHT 0x2000
+#define FIMC_MAX_JPEG_BUF_SIZE (10 * SZ_1M)
+#define FIMC_MAX_PLANES 3
+#define FIMC_PIX_LIMITS_MAX 4
+#define FIMC_DEF_MIN_SIZE 16
+#define FIMC_DEF_HEIGHT_ALIGN 2
+#define FIMC_DEF_HOR_OFFS_ALIGN 1
+#define FIMC_DEFAULT_WIDTH 640
+#define FIMC_DEFAULT_HEIGHT 480
+
+/* indices to the clocks array */
+enum {
+ CLK_BUS,
+ CLK_GATE,
+};
+
+enum fimc_dev_flags {
+ ST_LPM,
+ /* m2m node */
+ ST_M2M_RUN,
+ ST_M2M_PEND,
+ ST_M2M_SUSPENDING,
+ ST_M2M_SUSPENDED,
+ /* capture node */
+ ST_CAPT_PEND,
+ ST_CAPT_RUN,
+ ST_CAPT_STREAM,
+ ST_CAPT_ISP_STREAM,
+ ST_CAPT_SUSPENDED,
+ ST_CAPT_SHUT,
+ ST_CAPT_BUSY,
+ ST_CAPT_APPLY_CFG,
+ ST_CAPT_JPEG,
+};
+
+#define fimc_m2m_active(dev) test_bit(ST_M2M_RUN, &(dev)->state)
+#define fimc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+
+#define fimc_capture_running(dev) test_bit(ST_CAPT_RUN, &(dev)->state)
+#define fimc_capture_pending(dev) test_bit(ST_CAPT_PEND, &(dev)->state)
+#define fimc_capture_busy(dev) test_bit(ST_CAPT_BUSY, &(dev)->state)
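+
+/*
+ * Usage sketch (mirroring the system suspend path in fimc-core.c above);
+ * the argument is a struct fimc_dev pointer:
+ *
+ *	if (fimc_capture_busy(fimc))
+ *		return fimc_capture_suspend(fimc);
+ *	return fimc_m2m_suspend(fimc);
+ */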
+
+enum fimc_datapath {
+ FIMC_IO_NONE,
+ FIMC_IO_CAMERA,
+ FIMC_IO_DMA,
+ FIMC_IO_LCDFIFO,
+ FIMC_IO_WRITEBACK,
+ FIMC_IO_ISP,
+};
+
+enum fimc_color_fmt {
+ FIMC_FMT_RGB444 = 0x10,
+ FIMC_FMT_RGB555,
+ FIMC_FMT_RGB565,
+ FIMC_FMT_RGB666,
+ FIMC_FMT_RGB888,
+ FIMC_FMT_RGB30_LOCAL,
+ FIMC_FMT_YCBCR420 = 0x20,
+ FIMC_FMT_YCBYCR422,
+ FIMC_FMT_YCRYCB422,
+ FIMC_FMT_CBYCRY422,
+ FIMC_FMT_CRYCBY422,
+ FIMC_FMT_YCBCR444_LOCAL,
+ FIMC_FMT_RAW8 = 0x40,
+ FIMC_FMT_RAW10,
+ FIMC_FMT_RAW12,
+ FIMC_FMT_JPEG = 0x80,
+ FIMC_FMT_YUYV_JPEG = 0x100,
+};
+
+#define fimc_fmt_is_user_defined(x) (!!((x) & 0x180))
+#define fimc_fmt_is_rgb(x) (!!((x) & 0x10))
+
+#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \
+			(__strt) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+
+/* The hardware context state. */
+#define FIMC_PARAMS (1 << 0)
+#define FIMC_COMPOSE (1 << 1)
+#define FIMC_CTX_M2M (1 << 16)
+#define FIMC_CTX_CAP (1 << 17)
+#define FIMC_CTX_SHUT (1 << 18)
+
+/* Image conversion flags */
+#define FIMC_IN_DMA_ACCESS_TILED (1 << 0)
+#define FIMC_IN_DMA_ACCESS_LINEAR (0 << 0)
+#define FIMC_OUT_DMA_ACCESS_TILED (1 << 1)
+#define FIMC_OUT_DMA_ACCESS_LINEAR (0 << 1)
+#define FIMC_SCAN_MODE_PROGRESSIVE (0 << 2)
+#define FIMC_SCAN_MODE_INTERLACED (1 << 2)
+/*
+ * YCbCr data dynamic range for RGB-YUV color conversion.
+ * Y/Cb/Cr: (0 ~ 255)
+ */
+#define FIMC_COLOR_RANGE_WIDE (0 << 3)
+/* Y (16 ~ 235), Cb/Cr (16 ~ 240) */
+#define FIMC_COLOR_RANGE_NARROW (1 << 3)
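+
+/*
+ * The conversion flags above are OR-ed together into a fimc_ctx 'flags'
+ * word, for instance (hypothetical combination):
+ *
+ *	ctx->flags = FIMC_IN_DMA_ACCESS_LINEAR | FIMC_OUT_DMA_ACCESS_TILED |
+ *		     FIMC_SCAN_MODE_PROGRESSIVE | FIMC_COLOR_RANGE_NARROW;
+ */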
+
+/**
+ * struct fimc_dma_offset - pixel offset information for DMA
+ * @y_h: y value horizontal offset
+ * @y_v: y value vertical offset
+ * @cb_h: cb value horizontal offset
+ * @cb_v: cb value vertical offset
+ * @cr_h: cr value horizontal offset
+ * @cr_v: cr value vertical offset
+ */
+struct fimc_dma_offset {
+ int y_h;
+ int y_v;
+ int cb_h;
+ int cb_v;
+ int cr_h;
+ int cr_v;
+};
+
+/**
+ * struct fimc_effect - color effect information
+ * @type: effect type
+ * @pat_cb: cb value when type is "arbitrary"
+ * @pat_cr: cr value when type is "arbitrary"
+ */
+struct fimc_effect {
+ u32 type;
+ u8 pat_cb;
+ u8 pat_cr;
+};
+
+/**
+ * struct fimc_scaler - the configuration data for FIMC internal scaler
+ * @scaleup_h: flag indicating scaling up horizontally
+ * @scaleup_v: flag indicating scaling up vertically
+ * @copy_mode: flag indicating transparent DMA transfer (no scaling
+ * and color format conversion)
+ * @enabled: flag indicating if the scaler is used
+ * @hfactor: horizontal shift factor
+ * @vfactor: vertical shift factor
+ * @pre_hratio: horizontal ratio of the prescaler
+ * @pre_vratio: vertical ratio of the prescaler
+ * @pre_dst_width: the prescaler's destination width
+ * @pre_dst_height: the prescaler's destination height
+ * @main_hratio: the main scaler's horizontal ratio
+ * @main_vratio: the main scaler's vertical ratio
+ * @real_width: source pixel (width - offset)
+ * @real_height: source pixel (height - offset)
+ */
+struct fimc_scaler {
+ unsigned int scaleup_h:1;
+ unsigned int scaleup_v:1;
+ unsigned int copy_mode:1;
+ unsigned int enabled:1;
+ u32 hfactor;
+ u32 vfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 main_hratio;
+ u32 main_vratio;
+ u32 real_width;
+ u32 real_height;
+};
+
+/**
+ * struct fimc_addr - the FIMC physical address set for DMA
+ * @y: luminance plane physical address
+ * @cb: Cb plane physical address
+ * @cr: Cr plane physical address
+ */
+struct fimc_addr {
+ u32 y;
+ u32 cb;
+ u32 cr;
+};
+
+/**
+ * struct fimc_vid_buffer - the driver's video buffer
+ * @vb: v4l videobuf buffer
+ * @list: linked list structure for buffer queue
+ * @paddr: precalculated physical address set
+ * @index: buffer index for the output DMA engine
+ */
+struct fimc_vid_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ struct fimc_addr paddr;
+ int index;
+};
+
+/**
+ * struct fimc_frame - source/target frame properties
+ * @f_width: image full width (virtual screen size)
+ * @f_height: image full height (virtual screen size)
+ * @o_width: original image width as set by S_FMT
+ * @o_height: original image height as set by S_FMT
+ * @offs_h: image horizontal pixel offset
+ * @offs_v: image vertical pixel offset
+ * @width: image pixel width
+ * @height:	image pixel height
+ * @payload: image size in bytes (w x h x bpp)
+ * @bytesperline: bytesperline value for each plane
+ * @paddr: image frame buffer physical addresses
+ * @dma_offset: DMA offset in bytes
+ * @fmt:	fimc color format pointer
+ * @alpha:	alpha component value
+ */
+struct fimc_frame {
+ u32 f_width;
+ u32 f_height;
+ u32 o_width;
+ u32 o_height;
+ u32 offs_h;
+ u32 offs_v;
+ u32 width;
+ u32 height;
+ unsigned int payload[VIDEO_MAX_PLANES];
+ unsigned int bytesperline[VIDEO_MAX_PLANES];
+ struct fimc_addr paddr;
+ struct fimc_dma_offset dma_offset;
+ struct fimc_fmt *fmt;
+ u8 alpha;
+};
+
+/**
+ * struct fimc_m2m_device - v4l2 memory-to-memory device data
+ * @vfd: the video device node for v4l2 m2m mode
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx: hardware context data
+ * @refcnt: the reference counter
+ */
+struct fimc_m2m_device {
+ struct video_device vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct fimc_ctx *ctx;
+ int refcnt;
+};
+
+#define FIMC_SD_PAD_SINK_CAM 0
+#define FIMC_SD_PAD_SINK_FIFO 1
+#define FIMC_SD_PAD_SOURCE 2
+#define FIMC_SD_PADS_NUM 3
+
+/**
+ * struct fimc_vid_cap - camera capture device information
+ * @ctx: hardware context data
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @subdev: subdev exposing the FIMC processing block
+ * @ve: exynos video device entity structure
+ * @vd_pad: fimc video capture node pad
+ * @sd_pads: fimc video processing block pads
+ * @ci_fmt: image format at the FIMC camera input (and the scaler output)
+ * @wb_fmt: image format at the FIMC ISP Writeback input
+ * @source_config: external image source related configuration structure
+ * @pending_buf_q: the pending buffer queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vbq: the capture video buffer queue
+ * @active_buf_cnt: number of video buffers scheduled in hardware
+ * @buf_index: index for managing the output DMA buffers
+ * @frame_count: the frame counter for statistics
+ * @reqbufs_count: the number of buffers requested in REQBUFS ioctl
+ * @streaming: true if streaming is in progress
+ * @input_index: input (camera sensor) index
+ * @input: capture input type, grp_id of the attached subdev
+ * @user_subdev_api: true if subdevs are not configured by the host driver
+ * @inh_sensor_ctrls: a flag indicating v4l2 controls are inherited from
+ * an image sensor subdev
+ */
+struct fimc_vid_cap {
+ struct fimc_ctx *ctx;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct v4l2_subdev subdev;
+ struct exynos_video_entity ve;
+ struct media_pad vd_pad;
+ struct media_pad sd_pads[FIMC_SD_PADS_NUM];
+ struct v4l2_mbus_framefmt ci_fmt;
+ struct v4l2_mbus_framefmt wb_fmt;
+ struct fimc_source_info source_config;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vbq;
+ int active_buf_cnt;
+ int buf_index;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+ bool streaming;
+ int input_index;
+ u32 input;
+ bool user_subdev_api;
+ bool inh_sensor_ctrls;
+};
+
+/**
+ * struct fimc_pix_limit - image pixel size limits in various IP configurations
+ *
+ * @scaler_en_w: max input pixel width when the scaler is enabled
+ * @scaler_dis_w: max input pixel width when the scaler is disabled
+ * @in_rot_en_h: max input height when the input rotator is on
+ * @in_rot_dis_w: max input width when the input rotator is off
+ * @out_rot_en_w: max output width with the output rotator on
+ * @out_rot_dis_w: max output width with the output rotator off
+ */
+struct fimc_pix_limit {
+ u16 scaler_en_w;
+ u16 scaler_dis_w;
+ u16 in_rot_en_h;
+ u16 in_rot_dis_w;
+ u16 out_rot_en_w;
+ u16 out_rot_dis_w;
+};
+
+/**
+ * struct fimc_variant - FIMC device variant information
+ * @has_inp_rot: set if has input rotator
+ * @has_out_rot: set if has output rotator
+ * @has_mainscaler_ext: 1 if extended mainscaler ratios in CIEXTEN register
+ * are present in this IP revision
+ * @has_cam_if: set if this instance has a camera input interface
+ * @has_isp_wb: set if this instance has ISP writeback input
+ * @pix_limit: pixel size constraints for the scaler
+ * @min_inp_pixsize: minimum input pixel size
+ * @min_out_pixsize: minimum output pixel size
+ * @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
+ */
+struct fimc_variant {
+ unsigned int has_inp_rot:1;
+ unsigned int has_out_rot:1;
+ unsigned int has_mainscaler_ext:1;
+ unsigned int has_cam_if:1;
+ unsigned int has_isp_wb:1;
+ const struct fimc_pix_limit *pix_limit;
+ u16 min_inp_pixsize;
+ u16 min_out_pixsize;
+ u16 hor_offs_align;
+ u16 min_vsize_align;
+};
+
+/**
+ * struct fimc_drvdata - per device type driver data
+ * @variant: variant information for this device
+ * @num_entities: number of fimc instances available in a SoC
+ * @lclk_frequency: local bus clock frequency
+ * @cistatus2: 1 if the FIMC IPs have CISTATUS2 register
+ * @dma_pix_hoff: the horizontal DMA offset unit: 1 - pixels, 0 - bytes
+ * @alpha_color: 1 if alpha color component is supported
+ * @out_buf_count: maximum number of output DMA buffers supported
+ */
+struct fimc_drvdata {
+ const struct fimc_variant *variant[FIMC_MAX_DEVS];
+ int num_entities;
+ unsigned long lclk_frequency;
+ /* Fields common to all FIMC IP instances */
+ u8 cistatus2;
+ u8 dma_pix_hoff;
+ u8 alpha_color;
+ u8 out_buf_count;
+};
+
+#define fimc_get_drvdata(_pdev) \
+ ((struct fimc_drvdata *) platform_get_device_id(_pdev)->driver_data)
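+
+/*
+ * Hypothetical probe-time usage of the helper above, valid when the device
+ * was matched through the platform device ID table:
+ *
+ *	struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
+ */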
+
+struct fimc_ctx;
+
+/**
+ * struct fimc_dev - abstraction for FIMC entity
+ * @slock: the spinlock protecting this data structure
+ * @lock: the mutex protecting this data structure
+ * @pdev: pointer to the FIMC platform device
+ * @pdata: pointer to the device platform data
+ * @sysreg: pointer to the SYSREG regmap
+ * @variant: the IP variant information
+ * @drv_data: per device type driver data
+ * @id: FIMC device index (0..FIMC_MAX_DEVS)
+ * @clock: clocks required for FIMC operation
+ * @regs: the mapped hardware registers
+ * @irq_queue: interrupt handler waitqueue
+ * @v4l2_dev: root v4l2_device
+ * @m2m: memory-to-memory V4L2 device information
+ * @vid_cap: camera capture device information
+ * @state: flags used to synchronize m2m and capture mode operation
+ * @alloc_ctx: videobuf2 memory allocator context
+ */
+struct fimc_dev {
+ spinlock_t slock;
+ struct mutex lock;
+ struct platform_device *pdev;
+ struct s5p_platform_fimc *pdata;
+ struct regmap *sysreg;
+ const struct fimc_variant *variant;
+ const struct fimc_drvdata *drv_data;
+ int id;
+ struct clk *clock[MAX_FIMC_CLOCKS];
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+ struct v4l2_device *v4l2_dev;
+ struct fimc_m2m_device m2m;
+ struct fimc_vid_cap vid_cap;
+ unsigned long state;
+ struct vb2_alloc_ctx *alloc_ctx;
+};
+
+/**
+ * struct fimc_ctrls - v4l2 controls structure
+ * @handler: the control handler
+ * @colorfx: image effect control
+ * @colorfx_cbcr: Cb/Cr coefficients control
+ * @rotate: image rotation control
+ * @hflip: horizontal flip control
+ * @vflip: vertical flip control
+ * @alpha: RGB alpha control
+ * @ready: true if @handler is initialized
+ */
+struct fimc_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *colorfx_cbcr;
+ };
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *alpha;
+ bool ready;
+};
+
+/**
+ * struct fimc_ctx - the device context data
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @out_order_1p: output 1-plane YCBCR order
+ * @out_order_2p: output 2-plane YCBCR order
+ * @in_order_1p: input 1-plane YCBCR order
+ * @in_order_2p: input 2-plane YCBCR order
+ * @in_path: input mode (DMA or camera)
+ * @out_path: output mode (DMA or FIFO)
+ * @scaler: image scaler properties
+ * @effect: image effect
+ * @rotation: image clockwise rotation in degrees
+ * @hflip: indicates image horizontal flip if set
+ * @vflip: indicates image vertical flip if set
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of user configuration
+ * @fimc_dev: the FIMC device this context applies to
+ * @fh: v4l2 file handle
+ * @ctrls: v4l2 controls structure
+ */
+struct fimc_ctx {
+ struct fimc_frame s_frame;
+ struct fimc_frame d_frame;
+ u32 out_order_1p;
+ u32 out_order_2p;
+ u32 in_order_1p;
+ u32 in_order_2p;
+ enum fimc_datapath in_path;
+ enum fimc_datapath out_path;
+ struct fimc_scaler scaler;
+ struct fimc_effect effect;
+ int rotation;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ u32 flags;
+ u32 state;
+ struct fimc_dev *fimc_dev;
+ struct v4l2_fh fh;
+ struct fimc_ctrls ctrls;
+};
+
+#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
+
+static inline void set_frame_bounds(struct fimc_frame *f, u32 width, u32 height)
+{
+ f->o_width = width;
+ f->o_height = height;
+ f->f_width = width;
+ f->f_height = height;
+}
+
+static inline void set_frame_crop(struct fimc_frame *f,
+ u32 left, u32 top, u32 width, u32 height)
+{
+ f->offs_h = left;
+ f->offs_v = top;
+ f->width = width;
+ f->height = height;
+}
+
+static inline u32 fimc_get_format_depth(struct fimc_fmt *ff)
+{
+ u32 i, depth = 0;
+
+ if (ff != NULL)
+ for (i = 0; i < ff->colplanes; i++)
+ depth += ff->depth[i];
+ return depth;
+}
+
+static inline bool fimc_capture_active(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ ret = !!(fimc->state & (1 << ST_CAPT_RUN) ||
+ fimc->state & (1 << ST_CAPT_PEND));
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+static inline void fimc_ctx_state_set(u32 state, struct fimc_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+ ctx->state |= state;
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+}
+
+static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+ ret = (ctx->state & mask) == mask;
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+ return ret;
+}
+
+static inline int tiled_fmt(struct fimc_fmt *fmt)
+{
+ return fmt->fourcc == V4L2_PIX_FMT_NV12MT;
+}
+
+static inline bool fimc_jpeg_fourcc(u32 pixelformat)
+{
+ return (pixelformat == V4L2_PIX_FMT_JPEG ||
+ pixelformat == V4L2_PIX_FMT_S5C_UYVY_JPG);
+}
+
+static inline bool fimc_user_defined_mbus_fmt(u32 code)
+{
+ return (code == V4L2_MBUS_FMT_JPEG_1X8 ||
+ code == V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8);
+}
+
+/* Return the alpha component bit mask */
+static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
+{
+ switch (fmt->color) {
+ case FIMC_FMT_RGB444: return 0x0f;
+ case FIMC_FMT_RGB555: return 0x01;
+ case FIMC_FMT_RGB888: return 0xff;
+ default: return 0;
+	}
+}
+
+static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ struct fimc_frame *frame;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == type) {
+ if (fimc_ctx_state_is_set(FIMC_CTX_M2M, ctx))
+ frame = &ctx->s_frame;
+ else
+ return ERR_PTR(-EINVAL);
+ } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
+ frame = &ctx->d_frame;
+ } else {
+ v4l2_err(ctx->fimc_dev->v4l2_dev,
+ "Wrong buffer/video queue type (%d)\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return frame;
+}
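+
+/*
+ * ctx_get_frame() returns ERR_PTR(-EINVAL) for unsupported buffer types, so
+ * a (hypothetical) caller is expected to check the result with IS_ERR():
+ *
+ *	struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+ *
+ *	if (IS_ERR(frame))
+ *		return PTR_ERR(frame);
+ */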
+
+/* -----------------------------------------------------*/
+/* fimc-core.c */
+int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int fimc_ctrls_create(struct fimc_ctx *ctx);
+void fimc_ctrls_delete(struct fimc_ctx *ctx);
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f);
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+ struct v4l2_pix_format_mplane *pix);
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
+ unsigned int mask, int index);
+struct fimc_fmt *fimc_get_format(unsigned int index);
+
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+ int dw, int dh, int rotation);
+int fimc_set_scaler_info(struct fimc_ctx *ctx);
+int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
+ struct fimc_frame *frame, struct fimc_addr *paddr);
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
+void fimc_set_yuv_order(struct fimc_ctx *ctx);
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev);
+void fimc_unregister_m2m_device(struct fimc_dev *fimc);
+int fimc_register_driver(void);
+void fimc_unregister_driver(void);
+
+#ifdef CONFIG_MFD_SYSCON
+static inline struct regmap *fimc_get_sysreg_regmap(struct device_node *node)
+{
+ return syscon_regmap_lookup_by_phandle(node, "samsung,sysreg");
+}
+#else
+#define fimc_get_sysreg_regmap(node) (NULL)
+#endif
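+
+/*
+ * Hypothetical probe-time usage, assuming the FIMC device node carries a
+ * "samsung,sysreg" phandle; the result is kept in the fimc_dev sysreg field:
+ *
+ *	fimc->sysreg = fimc_get_sysreg_regmap(dev->of_node);
+ *	if (IS_ERR(fimc->sysreg))
+ *		return PTR_ERR(fimc->sysreg);
+ */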
+
+/* -----------------------------------------------------*/
+/* fimc-m2m.c */
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state);
+
+/* -----------------------------------------------------*/
+/* fimc-capture.c */
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc);
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc);
+int fimc_capture_ctrls_create(struct fimc_dev *fimc);
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg);
+int fimc_capture_suspend(struct fimc_dev *fimc);
+int fimc_capture_resume(struct fimc_dev *fimc);
+
+/*
+ * Buffer list manipulation functions. Must be called with fimc.slock held.
+ */
+
+/**
+ * fimc_active_queue_add - add buffer to the capture active buffers queue
+ * @vid_cap: camera capture device information
+ * @buf: buffer to add to the active buffers list
+ */
+static inline void fimc_active_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ list_add_tail(&buf->list, &vid_cap->active_buf_q);
+ vid_cap->active_buf_cnt++;
+}
+
+/**
+ * fimc_active_queue_pop - pop buffer from the capture active buffers queue
+ *
+ * The caller must assure the active_buf_q list is not empty.
+ */
+static inline struct fimc_vid_buffer *fimc_active_queue_pop(
+ struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->active_buf_q.next,
+ struct fimc_vid_buffer, list);
+ list_del(&buf->list);
+ vid_cap->active_buf_cnt--;
+ return buf;
+}
+
+/**
+ * fimc_pending_queue_add - add buffer to the capture pending buffers queue
+ * @vid_cap: camera capture device information
+ * @buf: buffer to add to the pending buffers list
+ */
+static inline void fimc_pending_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ list_add_tail(&buf->list, &vid_cap->pending_buf_q);
+}
+
+/**
+ * fimc_pending_queue_pop - pop buffer from the capture pending buffers queue
+ *
+ * The caller must assure the pending_buf_q list is not empty.
+ */
+static inline struct fimc_vid_buffer *fimc_pending_queue_pop(
+ struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->pending_buf_q.next,
+ struct fimc_vid_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
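+
+/*
+ * Illustrative (hypothetical) interrupt-path snippet moving one buffer from
+ * the pending to the active queue, with fimc->slock held as required above:
+ *
+ *	spin_lock_irqsave(&fimc->slock, flags);
+ *	if (!list_empty(&vid_cap->pending_buf_q)) {
+ *		buf = fimc_pending_queue_pop(vid_cap);
+ *		fimc_active_queue_add(vid_cap, buf);
+ *	}
+ *	spin_unlock_irqrestore(&fimc->slock, flags);
+ */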
+
+#endif /* FIMC_CORE_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-command.h b/drivers/media/platform/exynos4-is/fimc-is-command.h
new file mode 100644
index 00000000000..0d1f52e394b
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-command.h
@@ -0,0 +1,137 @@
+/*
+ * Samsung Exynos4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * FIMC-IS command set definitions
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_IS_CMD_H_
+#define FIMC_IS_CMD_H_
+
+#define FIMC_IS_COMMAND_VER 110 /* FIMC-IS command set version 1.10 */
+
+/* Enumeration of commands between the FIMC-IS and the host processor. */
+
+/* HOST to FIMC-IS */
+#define HIC_PREVIEW_STILL 0x0001
+#define HIC_PREVIEW_VIDEO 0x0002
+#define HIC_CAPTURE_STILL 0x0003
+#define HIC_CAPTURE_VIDEO 0x0004
+#define HIC_STREAM_ON 0x0005
+#define HIC_STREAM_OFF 0x0006
+#define HIC_SET_PARAMETER 0x0007
+#define HIC_GET_PARAMETER 0x0008
+#define HIC_SET_TUNE 0x0009
+#define HIC_GET_STATUS 0x000b
+/* Sensor part */
+#define HIC_OPEN_SENSOR 0x000c
+#define HIC_CLOSE_SENSOR 0x000d
+#define HIC_SIMMIAN_INIT 0x000e
+#define HIC_SIMMIAN_WRITE 0x000f
+#define HIC_SIMMIAN_READ 0x0010
+#define HIC_POWER_DOWN 0x0011
+#define HIC_GET_SET_FILE_ADDR 0x0012
+#define HIC_LOAD_SET_FILE 0x0013
+#define HIC_MSG_CONFIG 0x0014
+#define HIC_MSG_TEST 0x0015
+/* FIMC-IS to HOST */
+#define IHC_GET_SENSOR_NUM 0x1000
+#define IHC_SET_SHOT_MARK 0x1001
+/* parameter1: frame number */
+/* parameter2: confidence level (smile 0~100) */
+/* parameter3: confidence level (blink 0~100) */
+#define IHC_SET_FACE_MARK 0x1002
+/* parameter1: coordinate count */
+/* parameter2: coordinate buffer address */
+#define IHC_FRAME_DONE 0x1003
+/* parameter1: frame start number */
+/* parameter2: frame count */
+#define IHC_AA_DONE 0x1004
+#define IHC_NOT_READY 0x1005
+
+#define IH_REPLY_DONE 0x2000
+#define IH_REPLY_NOT_DONE 0x2001
+
+enum fimc_is_scenario {
+ IS_SC_PREVIEW_STILL,
+ IS_SC_PREVIEW_VIDEO,
+ IS_SC_CAPTURE_STILL,
+ IS_SC_CAPTURE_VIDEO,
+ IS_SC_MAX
+};
+
+enum fimc_is_sub_scenario {
+ IS_SC_SUB_DEFAULT,
+ IS_SC_SUB_PS_VTCALL,
+ IS_SC_SUB_CS_VTCALL,
+ IS_SC_SUB_PV_VTCALL,
+ IS_SC_SUB_CV_VTCALL,
+};
+
+struct is_common_regs {
+ u32 hicmd;
+ u32 hic_sensorid;
+ u32 hic_param[4];
+ u32 reserved1[4];
+
+ u32 ihcmd;
+ u32 ihc_sensorid;
+ u32 ihc_param[4];
+ u32 reserved2[4];
+
+ u32 isp_sensor_id;
+ u32 isp_param[2];
+ u32 reserved3[1];
+
+ u32 scc_sensor_id;
+ u32 scc_param[2];
+ u32 reserved4[1];
+
+ u32 dnr_sensor_id;
+ u32 dnr_param[2];
+ u32 reserved5[1];
+
+ u32 scp_sensor_id;
+ u32 scp_param[2];
+ u32 reserved6[29];
+} __packed;
+
+struct is_mcuctl_reg {
+ u32 mcuctl;
+ u32 bboar;
+
+ u32 intgr0;
+ u32 intcr0;
+ u32 intmr0;
+ u32 intsr0;
+ u32 intmsr0;
+
+ u32 intgr1;
+ u32 intcr1;
+ u32 intmr1;
+ u32 intsr1;
+ u32 intmsr1;
+
+ u32 intcr2;
+ u32 intmr2;
+ u32 intsr2;
+ u32 intmsr2;
+
+ u32 gpoctrl;
+ u32 cpoenctlr;
+ u32 gpictlr;
+
+ u32 reserved[0xd];
+
+ struct is_common_regs common;
+} __packed;
+
+#endif /* FIMC_IS_CMD_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.c b/drivers/media/platform/exynos4-is/fimc-is-errno.c
new file mode 100644
index 00000000000..e8519e151c1
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.c
@@ -0,0 +1,272 @@
+/*
+ * Samsung Exynos4 SoC series FIMC-IS slave interface driver
+ *
+ * Error log interface functions
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "fimc-is-errno.h"
+
+const char *fimc_is_param_strerr(unsigned int error)
+{
+ switch (error) {
+ case ERROR_COMMON_CMD:
+ return "ERROR_COMMON_CMD: Invalid Command";
+ case ERROR_COMMON_PARAMETER:
+ return "ERROR_COMMON_PARAMETER: Invalid Parameter";
+ case ERROR_COMMON_SETFILE_LOAD:
+ return "ERROR_COMMON_SETFILE_LOAD: Illegal Setfile Loading";
+ case ERROR_COMMON_SETFILE_ADJUST:
+ return "ERROR_COMMON_SETFILE_ADJUST: Setfile isn't adjusted";
+ case ERROR_COMMON_SETFILE_INDEX:
+ return "ERROR_COMMON_SETFILE_INDEX: Invalid setfile index";
+ case ERROR_COMMON_INPUT_PATH:
+ return "ERROR_COMMON_INPUT_PATH: Input path can be changed in ready state";
+ case ERROR_COMMON_INPUT_INIT:
+ return "ERROR_COMMON_INPUT_INIT: IP can not start if input path is not set";
+ case ERROR_COMMON_OUTPUT_PATH:
+ return "ERROR_COMMON_OUTPUT_PATH: Output path can be changed in ready state (stop)";
+ case ERROR_COMMON_OUTPUT_INIT:
+ return "ERROR_COMMON_OUTPUT_INIT: IP can not start if output path is not set";
+ case ERROR_CONTROL_BYPASS:
+ return "ERROR_CONTROL_BYPASS";
+ case ERROR_OTF_INPUT_FORMAT:
+ return "ERROR_OTF_INPUT_FORMAT: Invalid format (DRC: YUV444, FD: YUV444, 422, 420)";
+ case ERROR_OTF_INPUT_WIDTH:
+ return "ERROR_OTF_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
+ case ERROR_OTF_INPUT_HEIGHT:
+		return "ERROR_OTF_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
+ case ERROR_OTF_INPUT_BIT_WIDTH:
+ return "ERROR_OTF_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
+ case ERROR_DMA_INPUT_WIDTH:
+ return "ERROR_DMA_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
+ case ERROR_DMA_INPUT_HEIGHT:
+ return "ERROR_DMA_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
+ case ERROR_DMA_INPUT_FORMAT:
+ return "ERROR_DMA_INPUT_FORMAT: Invalid format (DRC: YUV444 or YUV422, FD: YUV444,422,420)";
+ case ERROR_DMA_INPUT_BIT_WIDTH:
+ return "ERROR_DMA_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
+ case ERROR_DMA_INPUT_ORDER:
+		return "ERROR_DMA_INPUT_ORDER: Invalid order (DRC: YYCbCr, YCbYCr, FD: NO, YYCbCr, YCbYCr, CbCr, CrCb)";
+ case ERROR_DMA_INPUT_PLANE:
+		return "ERROR_DMA_INPUT_PLANE: Invalid plane (DRC: 3, FD: 1, 2, 3)";
+ case ERROR_OTF_OUTPUT_WIDTH:
+ return "ERROR_OTF_OUTPUT_WIDTH: Invalid width (DRC: 128~8192)";
+ case ERROR_OTF_OUTPUT_HEIGHT:
+ return "ERROR_OTF_OUTPUT_HEIGHT: Invalid height (DRC: 64~8192)";
+ case ERROR_OTF_OUTPUT_FORMAT:
+ return "ERROR_OTF_OUTPUT_FORMAT: Invalid format (DRC: YUV444)";
+ case ERROR_OTF_OUTPUT_BIT_WIDTH:
+ return "ERROR_OTF_OUTPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
+ case ERROR_DMA_OUTPUT_WIDTH:
+ return "ERROR_DMA_OUTPUT_WIDTH";
+ case ERROR_DMA_OUTPUT_HEIGHT:
+ return "ERROR_DMA_OUTPUT_HEIGHT";
+ case ERROR_DMA_OUTPUT_FORMAT:
+ return "ERROR_DMA_OUTPUT_FORMAT";
+ case ERROR_DMA_OUTPUT_BIT_WIDTH:
+ return "ERROR_DMA_OUTPUT_BIT_WIDTH";
+ case ERROR_DMA_OUTPUT_PLANE:
+ return "ERROR_DMA_OUTPUT_PLANE";
+ case ERROR_DMA_OUTPUT_ORDER:
+ return "ERROR_DMA_OUTPUT_ORDER";
+
+ /* Sensor Error(100~199) */
+ case ERROR_SENSOR_I2C_FAIL:
+ return "ERROR_SENSOR_I2C_FAIL";
+ case ERROR_SENSOR_INVALID_FRAMERATE:
+ return "ERROR_SENSOR_INVALID_FRAMERATE";
+ case ERROR_SENSOR_INVALID_EXPOSURETIME:
+ return "ERROR_SENSOR_INVALID_EXPOSURETIME";
+ case ERROR_SENSOR_INVALID_SIZE:
+ return "ERROR_SENSOR_INVALID_SIZE";
+ case ERROR_SENSOR_INVALID_SETTING:
+ return "ERROR_SENSOR_INVALID_SETTING";
+ case ERROR_SENSOR_ACTURATOR_INIT_FAIL:
+ return "ERROR_SENSOR_ACTURATOR_INIT_FAIL";
+ case ERROR_SENSOR_INVALID_AF_POS:
+ return "ERROR_SENSOR_INVALID_AF_POS";
+ case ERROR_SENSOR_UNSUPPORT_FUNC:
+ return "ERROR_SENSOR_UNSUPPORT_FUNC";
+ case ERROR_SENSOR_UNSUPPORT_PERI:
+ return "ERROR_SENSOR_UNSUPPORT_PERI";
+ case ERROR_SENSOR_UNSUPPORT_AF:
+ return "ERROR_SENSOR_UNSUPPORT_AF";
+
+ /* ISP Error (200~299) */
+ case ERROR_ISP_AF_BUSY:
+ return "ERROR_ISP_AF_BUSY";
+ case ERROR_ISP_AF_INVALID_COMMAND:
+ return "ERROR_ISP_AF_INVALID_COMMAND";
+ case ERROR_ISP_AF_INVALID_MODE:
+ return "ERROR_ISP_AF_INVALID_MODE";
+
+ /* DRC Error (300~399) */
+ /* FD Error (400~499) */
+ case ERROR_FD_CONFIG_MAX_NUMBER_STATE:
+ return "ERROR_FD_CONFIG_MAX_NUMBER_STATE";
+ case ERROR_FD_CONFIG_MAX_NUMBER_INVALID:
+ return "ERROR_FD_CONFIG_MAX_NUMBER_INVALID";
+ case ERROR_FD_CONFIG_YAW_ANGLE_STATE:
+ return "ERROR_FD_CONFIG_YAW_ANGLE_STATE";
+ case ERROR_FD_CONFIG_YAW_ANGLE_INVALID:
+		return "ERROR_FD_CONFIG_YAW_ANGLE_INVALID";
+ case ERROR_FD_CONFIG_ROLL_ANGLE_STATE:
+ return "ERROR_FD_CONFIG_ROLL_ANGLE_STATE";
+ case ERROR_FD_CONFIG_ROLL_ANGLE_INVALID:
+ return "ERROR_FD_CONFIG_ROLL_ANGLE_INVALID";
+ case ERROR_FD_CONFIG_SMILE_MODE_INVALID:
+ return "ERROR_FD_CONFIG_SMILE_MODE_INVALID";
+ case ERROR_FD_CONFIG_BLINK_MODE_INVALID:
+ return "ERROR_FD_CONFIG_BLINK_MODE_INVALID";
+ case ERROR_FD_CONFIG_EYES_DETECT_INVALID:
+ return "ERROR_FD_CONFIG_EYES_DETECT_INVALID";
+ case ERROR_FD_CONFIG_MOUTH_DETECT_INVALID:
+ return "ERROR_FD_CONFIG_MOUTH_DETECT_INVALID";
+ case ERROR_FD_CONFIG_ORIENTATION_STATE:
+ return "ERROR_FD_CONFIG_ORIENTATION_STATE";
+ case ERROR_FD_CONFIG_ORIENTATION_INVALID:
+ return "ERROR_FD_CONFIG_ORIENTATION_INVALID";
+ case ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID:
+ return "ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID";
+ case ERROR_FD_RESULT:
+ return "ERROR_FD_RESULT";
+ case ERROR_FD_MODE:
+ return "ERROR_FD_MODE";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *fimc_is_strerr(unsigned int error)
+{
+ error &= ~IS_ERROR_TIME_OUT_FLAG;
+
+ switch (error) {
+ /* General */
+ case IS_ERROR_INVALID_COMMAND:
+ return "IS_ERROR_INVALID_COMMAND";
+ case IS_ERROR_REQUEST_FAIL:
+ return "IS_ERROR_REQUEST_FAIL";
+ case IS_ERROR_INVALID_SCENARIO:
+ return "IS_ERROR_INVALID_SCENARIO";
+ case IS_ERROR_INVALID_SENSORID:
+ return "IS_ERROR_INVALID_SENSORID";
+ case IS_ERROR_INVALID_MODE_CHANGE:
+ return "IS_ERROR_INVALID_MODE_CHANGE";
+ case IS_ERROR_INVALID_MAGIC_NUMBER:
+ return "IS_ERROR_INVALID_MAGIC_NUMBER";
+ case IS_ERROR_INVALID_SETFILE_HDR:
+ return "IS_ERROR_INVALID_SETFILE_HDR";
+ case IS_ERROR_BUSY:
+ return "IS_ERROR_BUSY";
+ case IS_ERROR_SET_PARAMETER:
+ return "IS_ERROR_SET_PARAMETER";
+ case IS_ERROR_INVALID_PATH:
+ return "IS_ERROR_INVALID_PATH";
+ case IS_ERROR_OPEN_SENSOR_FAIL:
+ return "IS_ERROR_OPEN_SENSOR_FAIL";
+ case IS_ERROR_ENTRY_MSG_THREAD_DOWN:
+ return "IS_ERROR_ENTRY_MSG_THREAD_DOWN";
+ case IS_ERROR_ISP_FRAME_END_NOT_DONE:
+ return "IS_ERROR_ISP_FRAME_END_NOT_DONE";
+ case IS_ERROR_DRC_FRAME_END_NOT_DONE:
+ return "IS_ERROR_DRC_FRAME_END_NOT_DONE";
+ case IS_ERROR_SCALERC_FRAME_END_NOT_DONE:
+ return "IS_ERROR_SCALERC_FRAME_END_NOT_DONE";
+ case IS_ERROR_ODC_FRAME_END_NOT_DONE:
+ return "IS_ERROR_ODC_FRAME_END_NOT_DONE";
+ case IS_ERROR_DIS_FRAME_END_NOT_DONE:
+ return "IS_ERROR_DIS_FRAME_END_NOT_DONE";
+ case IS_ERROR_TDNR_FRAME_END_NOT_DONE:
+ return "IS_ERROR_TDNR_FRAME_END_NOT_DONE";
+ case IS_ERROR_SCALERP_FRAME_END_NOT_DONE:
+ return "IS_ERROR_SCALERP_FRAME_END_NOT_DONE";
+ case IS_ERROR_WAIT_STREAM_OFF_NOT_DONE:
+ return "IS_ERROR_WAIT_STREAM_OFF_NOT_DONE";
+ case IS_ERROR_NO_MSG_IS_RECEIVED:
+ return "IS_ERROR_NO_MSG_IS_RECEIVED";
+ case IS_ERROR_SENSOR_MSG_FAIL:
+ return "IS_ERROR_SENSOR_MSG_FAIL";
+ case IS_ERROR_ISP_MSG_FAIL:
+ return "IS_ERROR_ISP_MSG_FAIL";
+ case IS_ERROR_DRC_MSG_FAIL:
+ return "IS_ERROR_DRC_MSG_FAIL";
+ case IS_ERROR_LHFD_MSG_FAIL:
+ return "IS_ERROR_LHFD_MSG_FAIL";
+ case IS_ERROR_UNKNOWN:
+ return "IS_ERROR_UNKNOWN";
+
+ /* Sensor */
+ case IS_ERROR_SENSOR_PWRDN_FAIL:
+ return "IS_ERROR_SENSOR_PWRDN_FAIL";
+
+ /* ISP */
+ case IS_ERROR_ISP_PWRDN_FAIL:
+ return "IS_ERROR_ISP_PWRDN_FAIL";
+ case IS_ERROR_ISP_MULTIPLE_INPUT:
+ return "IS_ERROR_ISP_MULTIPLE_INPUT";
+ case IS_ERROR_ISP_ABSENT_INPUT:
+ return "IS_ERROR_ISP_ABSENT_INPUT";
+ case IS_ERROR_ISP_ABSENT_OUTPUT:
+ return "IS_ERROR_ISP_ABSENT_OUTPUT";
+ case IS_ERROR_ISP_NONADJACENT_OUTPUT:
+ return "IS_ERROR_ISP_NONADJACENT_OUTPUT";
+ case IS_ERROR_ISP_FORMAT_MISMATCH:
+ return "IS_ERROR_ISP_FORMAT_MISMATCH";
+ case IS_ERROR_ISP_WIDTH_MISMATCH:
+ return "IS_ERROR_ISP_WIDTH_MISMATCH";
+ case IS_ERROR_ISP_HEIGHT_MISMATCH:
+ return "IS_ERROR_ISP_HEIGHT_MISMATCH";
+ case IS_ERROR_ISP_BITWIDTH_MISMATCH:
+ return "IS_ERROR_ISP_BITWIDTH_MISMATCH";
+ case IS_ERROR_ISP_FRAME_END_TIME_OUT:
+ return "IS_ERROR_ISP_FRAME_END_TIME_OUT";
+
+ /* DRC */
+ case IS_ERROR_DRC_PWRDN_FAIL:
+ return "IS_ERROR_DRC_PWRDN_FAIL";
+ case IS_ERROR_DRC_MULTIPLE_INPUT:
+ return "IS_ERROR_DRC_MULTIPLE_INPUT";
+ case IS_ERROR_DRC_ABSENT_INPUT:
+ return "IS_ERROR_DRC_ABSENT_INPUT";
+ case IS_ERROR_DRC_NONADJACENT_INPUT:
+ return "IS_ERROR_DRC_NONADJACENT_INPUT";
+ case IS_ERROR_DRC_ABSENT_OUTPUT:
+ return "IS_ERROR_DRC_ABSENT_OUTPUT";
+ case IS_ERROR_DRC_NONADJACENT_OUTPUT:
+ return "IS_ERROR_DRC_NONADJACENT_OUTPUT";
+ case IS_ERROR_DRC_FORMAT_MISMATCH:
+ return "IS_ERROR_DRC_FORMAT_MISMATCH";
+ case IS_ERROR_DRC_WIDTH_MISMATCH:
+ return "IS_ERROR_DRC_WIDTH_MISMATCH";
+ case IS_ERROR_DRC_HEIGHT_MISMATCH:
+ return "IS_ERROR_DRC_HEIGHT_MISMATCH";
+ case IS_ERROR_DRC_BITWIDTH_MISMATCH:
+ return "IS_ERROR_DRC_BITWIDTH_MISMATCH";
+ case IS_ERROR_DRC_FRAME_END_TIME_OUT:
+ return "IS_ERROR_DRC_FRAME_END_TIME_OUT";
+
+ /* FD */
+ case IS_ERROR_FD_PWRDN_FAIL:
+ return "IS_ERROR_FD_PWRDN_FAIL";
+ case IS_ERROR_FD_MULTIPLE_INPUT:
+ return "IS_ERROR_FD_MULTIPLE_INPUT";
+ case IS_ERROR_FD_ABSENT_INPUT:
+ return "IS_ERROR_FD_ABSENT_INPUT";
+ case IS_ERROR_FD_NONADJACENT_INPUT:
+ return "IS_ERROR_FD_NONADJACENT_INPUT";
+ case IS_ERROR_LHFD_FRAME_END_TIME_OUT:
+ return "IS_ERROR_LHFD_FRAME_END_TIME_OUT";
+ default:
+ return "Unknown";
+ }
+}
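+
+/*
+ * Hypothetical usage: an error/status word reported by the FIMC-IS firmware
+ * can be logged as
+ *
+ *	dev_err(dev, "FIMC-IS error: %s\n", fimc_is_strerr(error));
+ */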
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.h b/drivers/media/platform/exynos4-is/fimc-is-errno.h
new file mode 100644
index 00000000000..3de6f6da6f8
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.h
@@ -0,0 +1,248 @@
+/*
+ * Samsung Exynos4 SoC series FIMC-IS slave interface driver
+ *
+ * FIMC-IS error code definition
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef FIMC_IS_ERR_H_
+#define FIMC_IS_ERR_H_
+
+#define IS_ERROR_VER 011 /* IS ERROR VERSION 0.11 */
+
+enum {
+ IS_ERROR_NONE,
+
+ /* General 1 ~ 99 */
+ IS_ERROR_INVALID_COMMAND,
+ IS_ERROR_REQUEST_FAIL,
+ IS_ERROR_INVALID_SCENARIO,
+ IS_ERROR_INVALID_SENSORID,
+ IS_ERROR_INVALID_MODE_CHANGE,
+ IS_ERROR_INVALID_MAGIC_NUMBER,
+ IS_ERROR_INVALID_SETFILE_HDR,
+ IS_ERROR_BUSY,
+ IS_ERROR_SET_PARAMETER,
+ IS_ERROR_INVALID_PATH,
+ IS_ERROR_OPEN_SENSOR_FAIL,
+ IS_ERROR_ENTRY_MSG_THREAD_DOWN,
+ IS_ERROR_ISP_FRAME_END_NOT_DONE,
+ IS_ERROR_DRC_FRAME_END_NOT_DONE,
+ IS_ERROR_SCALERC_FRAME_END_NOT_DONE,
+ IS_ERROR_ODC_FRAME_END_NOT_DONE,
+ IS_ERROR_DIS_FRAME_END_NOT_DONE,
+ IS_ERROR_TDNR_FRAME_END_NOT_DONE,
+ IS_ERROR_SCALERP_FRAME_END_NOT_DONE,
+ IS_ERROR_WAIT_STREAM_OFF_NOT_DONE,
+ IS_ERROR_NO_MSG_IS_RECEIVED,
+ IS_ERROR_SENSOR_MSG_FAIL,
+ IS_ERROR_ISP_MSG_FAIL,
+ IS_ERROR_DRC_MSG_FAIL,
+ IS_ERROR_SCALERC_MSG_FAIL,
+ IS_ERROR_ODC_MSG_FAIL,
+ IS_ERROR_DIS_MSG_FAIL,
+ IS_ERROR_TDNR_MSG_FAIL,
+ IS_ERROR_SCALERP_MSG_FAIL,
+ IS_ERROR_LHFD_MSG_FAIL,
+ IS_ERROR_LHFD_INTERNAL_STOP,
+
+ /* Sensor 100 ~ 199 */
+ IS_ERROR_SENSOR_PWRDN_FAIL = 100,
+ IS_ERROR_SENSOR_STREAM_ON_FAIL,
+ IS_ERROR_SENSOR_STREAM_OFF_FAIL,
+
+ /* ISP 200 ~ 299 */
+ IS_ERROR_ISP_PWRDN_FAIL = 200,
+ IS_ERROR_ISP_MULTIPLE_INPUT,
+ IS_ERROR_ISP_ABSENT_INPUT,
+ IS_ERROR_ISP_ABSENT_OUTPUT,
+ IS_ERROR_ISP_NONADJACENT_OUTPUT,
+ IS_ERROR_ISP_FORMAT_MISMATCH,
+ IS_ERROR_ISP_WIDTH_MISMATCH,
+ IS_ERROR_ISP_HEIGHT_MISMATCH,
+ IS_ERROR_ISP_BITWIDTH_MISMATCH,
+ IS_ERROR_ISP_FRAME_END_TIME_OUT,
+
+ /* DRC 300 ~ 399 */
+ IS_ERROR_DRC_PWRDN_FAIL = 300,
+ IS_ERROR_DRC_MULTIPLE_INPUT,
+ IS_ERROR_DRC_ABSENT_INPUT,
+ IS_ERROR_DRC_NONADJACENT_INPUT,
+ IS_ERROR_DRC_ABSENT_OUTPUT,
+ IS_ERROR_DRC_NONADJACENT_OUTPUT,
+ IS_ERROR_DRC_FORMAT_MISMATCH,
+ IS_ERROR_DRC_WIDTH_MISMATCH,
+ IS_ERROR_DRC_HEIGHT_MISMATCH,
+ IS_ERROR_DRC_BITWIDTH_MISMATCH,
+ IS_ERROR_DRC_FRAME_END_TIME_OUT,
+
+ /* SCALERC 400 ~ 499 */
+ IS_ERROR_SCALERC_PWRDN_FAIL = 400,
+
+ /* ODC 500 ~ 599 */
+ IS_ERROR_ODC_PWRDN_FAIL = 500,
+
+ /* DIS 600 ~ 699 */
+ IS_ERROR_DIS_PWRDN_FAIL = 600,
+
+ /* TDNR 700 ~ 799 */
+ IS_ERROR_TDNR_PWRDN_FAIL = 700,
+
+ /* SCALERC 800 ~ 899 */
+ IS_ERROR_SCALERP_PWRDN_FAIL = 800,
+
+ /* FD 900 ~ 999 */
+ IS_ERROR_FD_PWRDN_FAIL = 900,
+ IS_ERROR_FD_MULTIPLE_INPUT,
+ IS_ERROR_FD_ABSENT_INPUT,
+ IS_ERROR_FD_NONADJACENT_INPUT,
+ IS_ERROR_LHFD_FRAME_END_TIME_OUT,
+
+ IS_ERROR_UNKNOWN = 1000,
+};
+
+#define IS_ERROR_TIME_OUT_FLAG 0x80000000
+
+/* Set parameter error enum */
+enum fimc_is_error {
+ /* Common error (0~99) */
+ ERROR_COMMON_NONE = 0,
+ ERROR_COMMON_CMD = 1, /* Invalid command */
+ ERROR_COMMON_PARAMETER = 2, /* Invalid parameter */
+ /* setfile is not loaded before adjusting */
+ ERROR_COMMON_SETFILE_LOAD = 3,
+	/* setfile is not adjusted before running */
+ ERROR_COMMON_SETFILE_ADJUST = 4,
+ /* Index of setfile is not valid (0~MAX_SETFILE_NUM-1) */
+ ERROR_COMMON_SETFILE_INDEX = 5,
+ /* Input path can be changed in ready state(stop) */
+ ERROR_COMMON_INPUT_PATH = 6,
+ /* IP can not start if input path is not set */
+ ERROR_COMMON_INPUT_INIT = 7,
+ /* Output path can be changed in ready state (stop) */
+ ERROR_COMMON_OUTPUT_PATH = 8,
+ /* IP can not start if output path is not set */
+ ERROR_COMMON_OUTPUT_INIT = 9,
+
+ ERROR_CONTROL_NONE = ERROR_COMMON_NONE,
+ ERROR_CONTROL_BYPASS = 11, /* Enable or Disable */
+
+ ERROR_OTF_INPUT_NONE = ERROR_COMMON_NONE,
+ ERROR_OTF_INPUT_CMD = 21,
+ /* invalid format (DRC: YUV444, FD: YUV444, 422, 420) */
+ ERROR_OTF_INPUT_FORMAT = 22,
+ /* invalid width (DRC: 128~8192, FD: 32~8190) */
+ ERROR_OTF_INPUT_WIDTH = 23,
+ /* invalid height (DRC: 64~8192, FD: 16~8190) */
+ ERROR_OTF_INPUT_HEIGHT = 24,
+ /* invalid bit-width (DRC: 8~12bits, FD: 8bit) */
+ ERROR_OTF_INPUT_BIT_WIDTH = 25,
+ /* invalid FrameTime for ISP */
+ ERROR_OTF_INPUT_USER_FRAMETIIME = 26,
+
+ ERROR_DMA_INPUT_NONE = ERROR_COMMON_NONE,
+ /* invalid width (DRC: 128~8192, FD: 32~8190) */
+ ERROR_DMA_INPUT_WIDTH = 31,
+ /* invalid height (DRC: 64~8192, FD: 16~8190) */
+ ERROR_DMA_INPUT_HEIGHT = 32,
+ /* invalid format (DRC: YUV444 or YUV422, FD: YUV444, 422, 420) */
+ ERROR_DMA_INPUT_FORMAT = 33,
+ /* invalid bit-width (DRC: 8~12bit, FD: 8bit) */
+ ERROR_DMA_INPUT_BIT_WIDTH = 34,
+	/* invalid order (DRC: YYCbCr or YCbYCr, FD: NO, YYCbCr, YCbYCr, CbCr, CrCb) */
+ ERROR_DMA_INPUT_ORDER = 35,
+	/* invalid plane (DRC: 3, FD: 1, 2, 3) */
+ ERROR_DMA_INPUT_PLANE = 36,
+
+ ERROR_OTF_OUTPUT_NONE = ERROR_COMMON_NONE,
+ /* invalid width (DRC: 128~8192) */
+ ERROR_OTF_OUTPUT_WIDTH = 41,
+ /* invalid height (DRC: 64~8192) */
+ ERROR_OTF_OUTPUT_HEIGHT = 42,
+ /* invalid format (DRC: YUV444) */
+ ERROR_OTF_OUTPUT_FORMAT = 43,
+ /* invalid bit-width (DRC: 8~12bits) */
+ ERROR_OTF_OUTPUT_BIT_WIDTH = 44,
+
+ ERROR_DMA_OUTPUT_NONE = ERROR_COMMON_NONE,
+ ERROR_DMA_OUTPUT_WIDTH = 51, /* invalid width */
+ ERROR_DMA_OUTPUT_HEIGHT = 52, /* invalid height */
+ ERROR_DMA_OUTPUT_FORMAT = 53, /* invalid format */
+ ERROR_DMA_OUTPUT_BIT_WIDTH = 54, /* invalid bit-width */
+ ERROR_DMA_OUTPUT_PLANE = 55, /* invalid plane */
+ ERROR_DMA_OUTPUT_ORDER = 56, /* invalid order */
+
+ ERROR_GLOBAL_SHOTMODE_NONE = ERROR_COMMON_NONE,
+
+ /* SENSOR Error(100~199) */
+ ERROR_SENSOR_NONE = ERROR_COMMON_NONE,
+ ERROR_SENSOR_I2C_FAIL = 101,
+ ERROR_SENSOR_INVALID_FRAMERATE,
+ ERROR_SENSOR_INVALID_EXPOSURETIME,
+ ERROR_SENSOR_INVALID_SIZE,
+ ERROR_SENSOR_INVALID_SETTING,
+ ERROR_SENSOR_ACTURATOR_INIT_FAIL,
+ ERROR_SENSOR_INVALID_AF_POS,
+ ERROR_SENSOR_UNSUPPORT_FUNC,
+ ERROR_SENSOR_UNSUPPORT_PERI,
+ ERROR_SENSOR_UNSUPPORT_AF,
+
+ /* ISP Error (200~299) */
+ ERROR_ISP_AF_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_AF_BUSY = 201,
+ ERROR_ISP_AF_INVALID_COMMAND = 202,
+ ERROR_ISP_AF_INVALID_MODE = 203,
+ ERROR_ISP_FLASH_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_AWB_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_IMAGE_EFFECT_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_ISO_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_ADJUST_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_METERING_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_AFC_NONE = ERROR_COMMON_NONE,
+
+ /* DRC Error (300~399) */
+
+ /* FD Error (400~499) */
+ ERROR_FD_NONE = ERROR_COMMON_NONE,
+ /* Invalid max number (1~16) */
+ ERROR_FD_CONFIG_MAX_NUMBER_STATE = 401,
+ ERROR_FD_CONFIG_MAX_NUMBER_INVALID = 402,
+ ERROR_FD_CONFIG_YAW_ANGLE_STATE = 403,
+ ERROR_FD_CONFIG_YAW_ANGLE_INVALID = 404,
+ ERROR_FD_CONFIG_ROLL_ANGLE_STATE = 405,
+ ERROR_FD_CONFIG_ROLL_ANGLE_INVALID = 406,
+ ERROR_FD_CONFIG_SMILE_MODE_INVALID = 407,
+ ERROR_FD_CONFIG_BLINK_MODE_INVALID = 408,
+ ERROR_FD_CONFIG_EYES_DETECT_INVALID = 409,
+ ERROR_FD_CONFIG_MOUTH_DETECT_INVALID = 410,
+ ERROR_FD_CONFIG_ORIENTATION_STATE = 411,
+ ERROR_FD_CONFIG_ORIENTATION_INVALID = 412,
+ ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID = 413,
+ /* PARAM_FdResultStr can be only applied in ready-state or stream off */
+ ERROR_FD_RESULT = 414,
+ /* PARAM_FdModeStr can be only applied in ready-state or stream off */
+ ERROR_FD_MODE = 415,
+ /* Scaler Error (500 ~ 599) */
+ ERROR_SCALER_NO_NONE = ERROR_COMMON_NONE,
+ ERROR_SCALER_DMA_OUTSEL = 501,
+ ERROR_SCALER_H_RATIO = 502,
+ ERROR_SCALER_V_RATIO = 503,
+
+ ERROR_SCALER_IMAGE_EFFECT = 510,
+
+ ERROR_SCALER_ROTATE = 520,
+ ERROR_SCALER_FLIP = 521,
+};
+
+const char *fimc_is_strerr(unsigned int error);
+const char *fimc_is_param_strerr(unsigned int error);
+
+#endif /* FIMC_IS_ERR_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
new file mode 100644
index 00000000000..371cad4fcce
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -0,0 +1,149 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include "fimc-is-i2c.h"
+
+struct fimc_is_i2c {
+ struct i2c_adapter adapter;
+ struct clk *clock;
+};
+
+/*
+ * An empty algorithm is used as the actual I2C bus controller driver
+ * is implemented in the FIMC-IS subsystem firmware and the host CPU
+ * doesn't access the I2C bus controller.
+ */
+static const struct i2c_algorithm fimc_is_i2c_algorithm;
+
+static int fimc_is_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct fimc_is_i2c *isp_i2c;
+ struct i2c_adapter *i2c_adap;
+ int ret;
+
+ isp_i2c = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c), GFP_KERNEL);
+ if (!isp_i2c)
+ return -ENOMEM;
+
+ isp_i2c->clock = devm_clk_get(&pdev->dev, "i2c_isp");
+ if (IS_ERR(isp_i2c->clock)) {
+ dev_err(&pdev->dev, "failed to get the clock\n");
+ return PTR_ERR(isp_i2c->clock);
+ }
+
+ i2c_adap = &isp_i2c->adapter;
+ i2c_adap->dev.of_node = node;
+ i2c_adap->dev.parent = &pdev->dev;
+ strlcpy(i2c_adap->name, "exynos4x12-isp-i2c", sizeof(i2c_adap->name));
+ i2c_adap->owner = THIS_MODULE;
+ i2c_adap->algo = &fimc_is_i2c_algorithm;
+ i2c_adap->class = I2C_CLASS_SPD;
+
+ ret = i2c_add_adapter(i2c_adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add I2C bus %s\n",
+ node->full_name);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, isp_i2c);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(&i2c_adap->dev);
+
+ return 0;
+}
+
+static int fimc_is_i2c_remove(struct platform_device *pdev)
+{
+ struct fimc_is_i2c *isp_i2c = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&isp_i2c->adapter.dev);
+ pm_runtime_disable(&pdev->dev);
+ i2c_del_adapter(&isp_i2c->adapter);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
+static int fimc_is_i2c_runtime_suspend(struct device *dev)
+{
+ struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isp_i2c->clock);
+ return 0;
+}
+
+static int fimc_is_i2c_runtime_resume(struct device *dev)
+{
+ struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(isp_i2c->clock);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_is_i2c_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_is_i2c_runtime_suspend(dev);
+}
+
+static int fimc_is_i2c_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_is_i2c_runtime_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops fimc_is_i2c_pm_ops = {
+ SET_RUNTIME_PM_OPS(fimc_is_i2c_runtime_suspend,
+ fimc_is_i2c_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_is_i2c_suspend, fimc_is_i2c_resume)
+};
+
+static const struct of_device_id fimc_is_i2c_of_match[] = {
+ { .compatible = FIMC_IS_I2C_COMPATIBLE },
+ { },
+};
+
+static struct platform_driver fimc_is_i2c_driver = {
+ .probe = fimc_is_i2c_probe,
+ .remove = fimc_is_i2c_remove,
+ .driver = {
+ .of_match_table = fimc_is_i2c_of_match,
+ .name = "fimc-isp-i2c",
+ .owner = THIS_MODULE,
+ .pm = &fimc_is_i2c_pm_ops,
+ }
+};
+
+int fimc_is_register_i2c_driver(void)
+{
+ return platform_driver_register(&fimc_is_i2c_driver);
+}
+
+void fimc_is_unregister_i2c_driver(void)
+{
+ platform_driver_unregister(&fimc_is_i2c_driver);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.h b/drivers/media/platform/exynos4-is/fimc-is-i2c.h
new file mode 100644
index 00000000000..0d38d6bb963
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.h
@@ -0,0 +1,15 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define FIMC_IS_I2C_COMPATIBLE "samsung,exynos4212-i2c-isp"
+
+int fimc_is_register_i2c_driver(void);
+void fimc_is_unregister_i2c_driver(void);
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.c b/drivers/media/platform/exynos4-is/fimc-is-param.c
new file mode 100644
index 00000000000..bf1465d1bf6
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.c
@@ -0,0 +1,898 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include "fimc-is.h"
+#include "fimc-is-command.h"
+#include "fimc-is-errno.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+#include "fimc-is-sensor.h"
+
+static void __hw_param_copy(void *dst, void *src)
+{
+ memcpy(dst, src, FIMC_IS_PARAM_MAX_SIZE);
+}
+
+static void __fimc_is_hw_update_param_global_shotmode(struct fimc_is *is)
+{
+ struct param_global_shotmode *dst, *src;
+
+ dst = &is->is_p_region->parameter.global.shotmode;
+ src = &is->config[is->config_index].global.shotmode;
+ __hw_param_copy(dst, src);
+}
+
+static void __fimc_is_hw_update_param_sensor_framerate(struct fimc_is *is)
+{
+ struct param_sensor_framerate *dst, *src;
+
+ dst = &is->is_p_region->parameter.sensor.frame_rate;
+ src = &is->config[is->config_index].sensor.frame_rate;
+ __hw_param_copy(dst, src);
+}
+
+int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset)
+{
+ struct is_param_region *par = &is->is_p_region->parameter;
+ struct chain_config *cfg = &is->config[is->config_index];
+
+ switch (offset) {
+ case PARAM_ISP_CONTROL:
+ __hw_param_copy(&par->isp.control, &cfg->isp.control);
+ break;
+
+ case PARAM_ISP_OTF_INPUT:
+ __hw_param_copy(&par->isp.otf_input, &cfg->isp.otf_input);
+ break;
+
+ case PARAM_ISP_DMA1_INPUT:
+ __hw_param_copy(&par->isp.dma1_input, &cfg->isp.dma1_input);
+ break;
+
+ case PARAM_ISP_DMA2_INPUT:
+ __hw_param_copy(&par->isp.dma2_input, &cfg->isp.dma2_input);
+ break;
+
+ case PARAM_ISP_AA:
+ __hw_param_copy(&par->isp.aa, &cfg->isp.aa);
+ break;
+
+ case PARAM_ISP_FLASH:
+ __hw_param_copy(&par->isp.flash, &cfg->isp.flash);
+ break;
+
+ case PARAM_ISP_AWB:
+ __hw_param_copy(&par->isp.awb, &cfg->isp.awb);
+ break;
+
+ case PARAM_ISP_IMAGE_EFFECT:
+ __hw_param_copy(&par->isp.effect, &cfg->isp.effect);
+ break;
+
+ case PARAM_ISP_ISO:
+ __hw_param_copy(&par->isp.iso, &cfg->isp.iso);
+ break;
+
+ case PARAM_ISP_ADJUST:
+ __hw_param_copy(&par->isp.adjust, &cfg->isp.adjust);
+ break;
+
+ case PARAM_ISP_METERING:
+ __hw_param_copy(&par->isp.metering, &cfg->isp.metering);
+ break;
+
+ case PARAM_ISP_AFC:
+ __hw_param_copy(&par->isp.afc, &cfg->isp.afc);
+ break;
+
+ case PARAM_ISP_OTF_OUTPUT:
+ __hw_param_copy(&par->isp.otf_output, &cfg->isp.otf_output);
+ break;
+
+ case PARAM_ISP_DMA1_OUTPUT:
+ __hw_param_copy(&par->isp.dma1_output, &cfg->isp.dma1_output);
+ break;
+
+ case PARAM_ISP_DMA2_OUTPUT:
+ __hw_param_copy(&par->isp.dma2_output, &cfg->isp.dma2_output);
+ break;
+
+ case PARAM_DRC_CONTROL:
+ __hw_param_copy(&par->drc.control, &cfg->drc.control);
+ break;
+
+ case PARAM_DRC_OTF_INPUT:
+ __hw_param_copy(&par->drc.otf_input, &cfg->drc.otf_input);
+ break;
+
+ case PARAM_DRC_DMA_INPUT:
+ __hw_param_copy(&par->drc.dma_input, &cfg->drc.dma_input);
+ break;
+
+ case PARAM_DRC_OTF_OUTPUT:
+ __hw_param_copy(&par->drc.otf_output, &cfg->drc.otf_output);
+ break;
+
+ case PARAM_FD_CONTROL:
+ __hw_param_copy(&par->fd.control, &cfg->fd.control);
+ break;
+
+ case PARAM_FD_OTF_INPUT:
+ __hw_param_copy(&par->fd.otf_input, &cfg->fd.otf_input);
+ break;
+
+ case PARAM_FD_DMA_INPUT:
+ __hw_param_copy(&par->fd.dma_input, &cfg->fd.dma_input);
+ break;
+
+ case PARAM_FD_CONFIG:
+ __hw_param_copy(&par->fd.config, &cfg->fd.config);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+unsigned int __get_pending_param_count(struct fimc_is *is)
+{
+ struct chain_config *config = &is->config[is->config_index];
+ unsigned long flags;
+ unsigned int count;
+
+ spin_lock_irqsave(&is->slock, flags);
+ count = hweight32(config->p_region_index[0]);
+ count += hweight32(config->p_region_index[1]);
+ spin_unlock_irqrestore(&is->slock, flags);
+
+ return count;
+}
+
+int __is_hw_update_params(struct fimc_is *is)
+{
+ unsigned long *p_index;
+ int i, id, ret = 0;
+
+ id = is->config_index;
+ p_index = &is->config[id].p_region_index[0];
+
+ if (test_bit(PARAM_GLOBAL_SHOTMODE, p_index))
+ __fimc_is_hw_update_param_global_shotmode(is);
+
+ if (test_bit(PARAM_SENSOR_FRAME_RATE, p_index))
+ __fimc_is_hw_update_param_sensor_framerate(is);
+
+ for (i = PARAM_ISP_CONTROL; i < PARAM_DRC_CONTROL; i++) {
+ if (test_bit(i, p_index))
+ ret = __fimc_is_hw_update_param(is, i);
+ }
+
+ for (i = PARAM_DRC_CONTROL; i < PARAM_SCALERC_CONTROL; i++) {
+ if (test_bit(i, p_index))
+ ret = __fimc_is_hw_update_param(is, i);
+ }
+
+ for (i = PARAM_FD_CONTROL; i <= PARAM_FD_CONFIG; i++) {
+ if (test_bit(i, p_index))
+ ret = __fimc_is_hw_update_param(is, i);
+ }
+
+ return ret;
+}
+
+void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
+{
+ struct isp_param *isp;
+
+ isp = &is->config[is->config_index].isp;
+ mf->width = isp->otf_input.width;
+ mf->height = isp->otf_input.height;
+}
+
+void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+ struct drc_param *drc;
+ struct fd_param *fd;
+
+ isp = &is->config[index].isp;
+ drc = &is->config[index].drc;
+ fd = &is->config[index].fd;
+
+ /* Update isp size info (OTF only) */
+ isp->otf_input.width = mf->width;
+ isp->otf_input.height = mf->height;
+ isp->otf_output.width = mf->width;
+ isp->otf_output.height = mf->height;
+ /* Update drc size info (OTF only) */
+ drc->otf_input.width = mf->width;
+ drc->otf_input.height = mf->height;
+ drc->otf_output.width = mf->width;
+ drc->otf_output.height = mf->height;
+ /* Update fd size info (OTF only) */
+ fd->otf_input.width = mf->width;
+ fd->otf_input.height = mf->height;
+
+ if (test_bit(PARAM_ISP_OTF_INPUT,
+ &is->config[index].p_region_index[0]))
+ return;
+
+ /* Update field */
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_INPUT);
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_OUTPUT);
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_INPUT);
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_OUTPUT);
+ fimc_is_set_param_bit(is, PARAM_FD_OTF_INPUT);
+}
+
+int fimc_is_hw_get_sensor_max_framerate(struct fimc_is *is)
+{
+ switch (is->sensor->drvdata->id) {
+ case FIMC_IS_SENSOR_ID_S5K6A3:
+ return 30;
+ default:
+ return 15;
+ }
+}
+
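+/*
+ * Set the sensor frame rate; fps == 0 selects the sensor's maximum rate.
+ * The ISP OTF input frame time limits are expressed in microseconds.
+ */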
+void __is_set_sensor(struct fimc_is *is, int fps)
+{
+ unsigned int index = is->config_index;
+ struct sensor_param *sensor;
+ struct isp_param *isp;
+
+ sensor = &is->config[index].sensor;
+ isp = &is->config[index].isp;
+
+ if (fps == 0) {
+ sensor->frame_rate.frame_rate =
+ fimc_is_hw_get_sensor_max_framerate(is);
+ isp->otf_input.frametime_min = 0;
+ isp->otf_input.frametime_max = 66666;
+ } else {
+ sensor->frame_rate.frame_rate = fps;
+ isp->otf_input.frametime_min = 0;
+ isp->otf_input.frametime_max = (u32)1000000 / fps;
+ }
+
+ fimc_is_set_param_bit(is, PARAM_SENSOR_FRAME_RATE);
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_INPUT);
+}
+
+static void __maybe_unused __is_set_init_isp_aa(struct fimc_is *is)
+{
+ struct isp_param *isp;
+
+ isp = &is->config[is->config_index].isp;
+
+ isp->aa.cmd = ISP_AA_COMMAND_START;
+ isp->aa.target = ISP_AA_TARGET_AF | ISP_AA_TARGET_AE |
+ ISP_AA_TARGET_AWB;
+ isp->aa.mode = 0;
+ isp->aa.scene = 0;
+ isp->aa.sleep = 0;
+ isp->aa.face = 0;
+ isp->aa.touch_x = 0;
+ isp->aa.touch_y = 0;
+ isp->aa.manual_af_setting = 0;
+ isp->aa.err = ISP_AF_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+}
+
+void __is_set_isp_flash(struct fimc_is *is, u32 cmd, u32 redeye)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp = &is->config[index].isp;
+
+ isp->flash.cmd = cmd;
+ isp->flash.redeye = redeye;
+ isp->flash.err = ISP_FLASH_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_FLASH);
+}
+
+void __is_set_isp_awb(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->awb.cmd = cmd;
+ isp->awb.illumination = val;
+ isp->awb.err = ISP_AWB_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_AWB);
+}
+
+void __is_set_isp_effect(struct fimc_is *is, u32 cmd)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->effect.cmd = cmd;
+ isp->effect.err = ISP_IMAGE_EFFECT_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_IMAGE_EFFECT);
+}
+
+void __is_set_isp_iso(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->iso.cmd = cmd;
+ isp->iso.value = val;
+ isp->iso.err = ISP_ISO_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_ISO);
+}
+
+void __is_set_isp_adjust(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ unsigned long *p_index;
+ struct isp_param *isp;
+
+ p_index = &is->config[index].p_region_index[0];
+ isp = &is->config[index].isp;
+
+ switch (cmd) {
+ case ISP_ADJUST_COMMAND_MANUAL_CONTRAST:
+ isp->adjust.contrast = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_SATURATION:
+ isp->adjust.saturation = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_SHARPNESS:
+ isp->adjust.sharpness = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_EXPOSURE:
+ isp->adjust.exposure = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS:
+ isp->adjust.brightness = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_HUE:
+ isp->adjust.hue = val;
+ break;
+ case ISP_ADJUST_COMMAND_AUTO:
+ isp->adjust.contrast = 0;
+ isp->adjust.saturation = 0;
+ isp->adjust.sharpness = 0;
+ isp->adjust.exposure = 0;
+ isp->adjust.brightness = 0;
+ isp->adjust.hue = 0;
+ break;
+ }
+
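+ /* If an adjust update is already pending, OR in the new command flag */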
+ if (!test_bit(PARAM_ISP_ADJUST, p_index)) {
+ isp->adjust.cmd = cmd;
+ isp->adjust.err = ISP_ADJUST_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_ADJUST);
+ } else {
+ isp->adjust.cmd |= cmd;
+ }
+}
+
+void __is_set_isp_metering(struct fimc_is *is, u32 id, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[0];
+ isp = &is->config[index].isp;
+
+ switch (id) {
+ case IS_METERING_CONFIG_CMD:
+ isp->metering.cmd = val;
+ break;
+ case IS_METERING_CONFIG_WIN_POS_X:
+ isp->metering.win_pos_x = val;
+ break;
+ case IS_METERING_CONFIG_WIN_POS_Y:
+ isp->metering.win_pos_y = val;
+ break;
+ case IS_METERING_CONFIG_WIN_WIDTH:
+ isp->metering.win_width = val;
+ break;
+ case IS_METERING_CONFIG_WIN_HEIGHT:
+ isp->metering.win_height = val;
+ break;
+ default:
+ return;
+ }
+
+ if (!test_bit(PARAM_ISP_METERING, p_index)) {
+ isp->metering.err = ISP_METERING_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_METERING);
+ }
+}
+
+void __is_set_isp_afc(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->afc.cmd = cmd;
+ isp->afc.manual = val;
+ isp->afc.err = ISP_AFC_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_AFC);
+}
+
+void __is_set_drc_control(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct drc_param *drc;
+
+ drc = &is->config[index].drc;
+
+ drc->control.bypass = val;
+
+ fimc_is_set_param_bit(is, PARAM_DRC_CONTROL);
+}
+
+void __is_set_fd_control(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->control.cmd = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index))
+ fimc_is_set_param_bit(is, PARAM_FD_CONTROL);
+}
+
+void __is_set_fd_config_maxface(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.max_number = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_MAXIMUM_NUMBER;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_MAXIMUM_NUMBER;
+ }
+}
+
+void __is_set_fd_config_rollangle(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.roll_angle = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_ROLL_ANGLE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_ROLL_ANGLE;
+ }
+}
+
+void __is_set_fd_config_yawangle(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.yaw_angle = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_YAW_ANGLE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_YAW_ANGLE;
+ }
+}
+
+void __is_set_fd_config_smilemode(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.smile_mode = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_SMILE_MODE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_SMILE_MODE;
+ }
+}
+
+void __is_set_fd_config_blinkmode(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.blink_mode = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_BLINK_MODE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_BLINK_MODE;
+ }
+}
+
+void __is_set_fd_config_eyedetect(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.eye_detect = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_EYES_DETECT;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_EYES_DETECT;
+ }
+}
+
+void __is_set_fd_config_mouthdetect(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.mouth_detect = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_MOUTH_DETECT;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_MOUTH_DETECT;
+ }
+}
+
+void __is_set_fd_config_orientation(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.orientation = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_ORIENTATION;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_ORIENTATION;
+ }
+}
+
+void __is_set_fd_config_orientation_val(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.orientation_value = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_ORIENTATION_VALUE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_ORIENTATION_VALUE;
+ }
+}
+
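+/*
+ * Program the initial default parameters for the Global, ISP, Sensor, DRC
+ * and FD sub-blocks and mark them for update; settings that are already
+ * pending (e.g. frame size, flash, AWB) are left as configured.
+ */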
+void fimc_is_set_initial_params(struct fimc_is *is)
+{
+ struct global_param *global;
+ struct sensor_param *sensor;
+ struct isp_param *isp;
+ struct drc_param *drc;
+ struct fd_param *fd;
+ unsigned long *p_index;
+ unsigned int index;
+
+ index = is->config_index;
+ global = &is->config[index].global;
+ sensor = &is->config[index].sensor;
+ isp = &is->config[index].isp;
+ drc = &is->config[index].drc;
+ fd = &is->config[index].fd;
+ p_index = &is->config[index].p_region_index[0];
+
+ /* Global */
+ global->shotmode.cmd = 1;
+ fimc_is_set_param_bit(is, PARAM_GLOBAL_SHOTMODE);
+
+ /* ISP */
+ isp->control.cmd = CONTROL_COMMAND_START;
+ isp->control.bypass = CONTROL_BYPASS_DISABLE;
+ isp->control.err = CONTROL_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_CONTROL);
+
+ isp->otf_input.cmd = OTF_INPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_ISP_OTF_INPUT, p_index)) {
+ isp->otf_input.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ isp->otf_input.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_INPUT);
+ }
+ if (is->sensor->test_pattern)
+ isp->otf_input.format = OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER;
+ else
+ isp->otf_input.format = OTF_INPUT_FORMAT_BAYER;
+ isp->otf_input.bitwidth = 10;
+ isp->otf_input.order = OTF_INPUT_ORDER_BAYER_GR_BG;
+ isp->otf_input.crop_offset_x = 0;
+ isp->otf_input.crop_offset_y = 0;
+ isp->otf_input.err = OTF_INPUT_ERROR_NONE;
+
+ isp->dma1_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ isp->dma1_input.width = 0;
+ isp->dma1_input.height = 0;
+ isp->dma1_input.format = 0;
+ isp->dma1_input.bitwidth = 0;
+ isp->dma1_input.plane = 0;
+ isp->dma1_input.order = 0;
+ isp->dma1_input.buffer_number = 0;
+ isp->dma1_input.buffer_address = 0;
+ isp->dma1_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA1_INPUT);
+
+ isp->dma2_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ isp->dma2_input.width = 0;
+ isp->dma2_input.height = 0;
+ isp->dma2_input.format = 0;
+ isp->dma2_input.bitwidth = 0;
+ isp->dma2_input.plane = 0;
+ isp->dma2_input.order = 0;
+ isp->dma2_input.buffer_number = 0;
+ isp->dma2_input.buffer_address = 0;
+ isp->dma2_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_INPUT);
+
+ isp->aa.cmd = ISP_AA_COMMAND_START;
+ isp->aa.target = ISP_AA_TARGET_AE | ISP_AA_TARGET_AWB;
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+
+ if (!test_bit(PARAM_ISP_FLASH, p_index))
+ __is_set_isp_flash(is, ISP_FLASH_COMMAND_DISABLE,
+ ISP_FLASH_REDEYE_DISABLE);
+
+ if (!test_bit(PARAM_ISP_AWB, p_index))
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_AUTO, 0);
+
+ if (!test_bit(PARAM_ISP_IMAGE_EFFECT, p_index))
+ __is_set_isp_effect(is, ISP_IMAGE_EFFECT_DISABLE);
+
+ if (!test_bit(PARAM_ISP_ISO, p_index))
+ __is_set_isp_iso(is, ISP_ISO_COMMAND_AUTO, 0);
+
+ if (!test_bit(PARAM_ISP_ADJUST, p_index)) {
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_CONTRAST, 0);
+ __is_set_isp_adjust(is,
+ ISP_ADJUST_COMMAND_MANUAL_SATURATION, 0);
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_SHARPNESS, 0);
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_EXPOSURE, 0);
+ __is_set_isp_adjust(is,
+ ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS, 0);
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_HUE, 0);
+ }
+
+ if (!test_bit(PARAM_ISP_METERING, p_index)) {
+ __is_set_isp_metering(is, 0, ISP_METERING_COMMAND_CENTER);
+ __is_set_isp_metering(is, 1, 0);
+ __is_set_isp_metering(is, 2, 0);
+ __is_set_isp_metering(is, 3, 0);
+ __is_set_isp_metering(is, 4, 0);
+ }
+
+ if (!test_bit(PARAM_ISP_AFC, p_index))
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_AUTO, 0);
+
+ isp->otf_output.cmd = OTF_OUTPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_ISP_OTF_OUTPUT, p_index)) {
+ isp->otf_output.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ isp->otf_output.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_OUTPUT);
+ }
+ isp->otf_output.format = OTF_OUTPUT_FORMAT_YUV444;
+ isp->otf_output.bitwidth = 12;
+ isp->otf_output.order = 0;
+ isp->otf_output.err = OTF_OUTPUT_ERROR_NONE;
+
+ if (!test_bit(PARAM_ISP_DMA1_OUTPUT, p_index)) {
+ isp->dma1_output.cmd = DMA_OUTPUT_COMMAND_DISABLE;
+ isp->dma1_output.width = 0;
+ isp->dma1_output.height = 0;
+ isp->dma1_output.format = 0;
+ isp->dma1_output.bitwidth = 0;
+ isp->dma1_output.plane = 0;
+ isp->dma1_output.order = 0;
+ isp->dma1_output.buffer_number = 0;
+ isp->dma1_output.buffer_address = 0;
+ isp->dma1_output.notify_dma_done = 0;
+ isp->dma1_output.dma_out_mask = 0;
+ isp->dma1_output.err = DMA_OUTPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA1_OUTPUT);
+ }
+
+ if (!test_bit(PARAM_ISP_DMA2_OUTPUT, p_index)) {
+ isp->dma2_output.cmd = DMA_OUTPUT_COMMAND_DISABLE;
+ isp->dma2_output.width = 0;
+ isp->dma2_output.height = 0;
+ isp->dma2_output.format = 0;
+ isp->dma2_output.bitwidth = 0;
+ isp->dma2_output.plane = 0;
+ isp->dma2_output.order = 0;
+ isp->dma2_output.buffer_number = 0;
+ isp->dma2_output.buffer_address = 0;
+ isp->dma2_output.notify_dma_done = 0;
+ isp->dma2_output.dma_out_mask = 0;
+ isp->dma2_output.err = DMA_OUTPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ }
+
+ /* Sensor */
+ if (!test_bit(PARAM_SENSOR_FRAME_RATE, p_index)) {
+ if (is->config_index == 0)
+ __is_set_sensor(is, 0);
+ }
+
+ /* DRC */
+ drc->control.cmd = CONTROL_COMMAND_START;
+ __is_set_drc_control(is, CONTROL_BYPASS_ENABLE);
+
+ drc->otf_input.cmd = OTF_INPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_DRC_OTF_INPUT, p_index)) {
+ drc->otf_input.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ drc->otf_input.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_INPUT);
+ }
+ drc->otf_input.format = OTF_INPUT_FORMAT_YUV444;
+ drc->otf_input.bitwidth = 12;
+ drc->otf_input.order = 0;
+ drc->otf_input.err = OTF_INPUT_ERROR_NONE;
+
+ drc->dma_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ drc->dma_input.width = 0;
+ drc->dma_input.height = 0;
+ drc->dma_input.format = 0;
+ drc->dma_input.bitwidth = 0;
+ drc->dma_input.plane = 0;
+ drc->dma_input.order = 0;
+ drc->dma_input.buffer_number = 0;
+ drc->dma_input.buffer_address = 0;
+ drc->dma_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_DRC_DMA_INPUT);
+
+ drc->otf_output.cmd = OTF_OUTPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_DRC_OTF_OUTPUT, p_index)) {
+ drc->otf_output.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ drc->otf_output.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_OUTPUT);
+ }
+ drc->otf_output.format = OTF_OUTPUT_FORMAT_YUV444;
+ drc->otf_output.bitwidth = 8;
+ drc->otf_output.order = 0;
+ drc->otf_output.err = OTF_OUTPUT_ERROR_NONE;
+
+ /* FD */
+ __is_set_fd_control(is, CONTROL_COMMAND_STOP);
+ fd->control.bypass = CONTROL_BYPASS_DISABLE;
+
+ fd->otf_input.cmd = OTF_INPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_FD_OTF_INPUT, p_index)) {
+ fd->otf_input.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ fd->otf_input.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_FD_OTF_INPUT);
+ }
+
+ fd->otf_input.format = OTF_INPUT_FORMAT_YUV444;
+ fd->otf_input.bitwidth = 8;
+ fd->otf_input.order = 0;
+ fd->otf_input.err = OTF_INPUT_ERROR_NONE;
+
+ fd->dma_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ fd->dma_input.width = 0;
+ fd->dma_input.height = 0;
+ fd->dma_input.format = 0;
+ fd->dma_input.bitwidth = 0;
+ fd->dma_input.plane = 0;
+ fd->dma_input.order = 0;
+ fd->dma_input.buffer_number = 0;
+ fd->dma_input.buffer_address = 0;
+ fd->dma_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_DMA_INPUT);
+
+ __is_set_fd_config_maxface(is, 5);
+ __is_set_fd_config_rollangle(is, FD_CONFIG_ROLL_ANGLE_FULL);
+ __is_set_fd_config_yawangle(is, FD_CONFIG_YAW_ANGLE_45_90);
+ __is_set_fd_config_smilemode(is, FD_CONFIG_SMILE_MODE_DISABLE);
+ __is_set_fd_config_blinkmode(is, FD_CONFIG_BLINK_MODE_DISABLE);
+ __is_set_fd_config_eyedetect(is, FD_CONFIG_EYES_DETECT_ENABLE);
+ __is_set_fd_config_mouthdetect(is, FD_CONFIG_MOUTH_DETECT_DISABLE);
+ __is_set_fd_config_orientation(is, FD_CONFIG_ORIENTATION_DISABLE);
+ __is_set_fd_config_orientation_val(is, 0);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.h b/drivers/media/platform/exynos4-is/fimc-is-param.h
new file mode 100644
index 00000000000..8e31f764277
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.h
@@ -0,0 +1,1025 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_PARAM_H_
+#define FIMC_IS_PARAM_H_
+
+#include <linux/compiler.h>
+
+#define FIMC_IS_CONFIG_TIMEOUT 3000 /* ms */
+#define IS_DEFAULT_WIDTH 1280
+#define IS_DEFAULT_HEIGHT 720
+
+#define DEFAULT_PREVIEW_STILL_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_PREVIEW_STILL_HEIGHT IS_DEFAULT_HEIGHT
+#define DEFAULT_CAPTURE_STILL_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_CAPTURE_STILL_HEIGHT IS_DEFAULT_HEIGHT
+#define DEFAULT_PREVIEW_VIDEO_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_PREVIEW_VIDEO_HEIGHT IS_DEFAULT_HEIGHT
+#define DEFAULT_CAPTURE_VIDEO_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_CAPTURE_VIDEO_HEIGHT IS_DEFAULT_HEIGHT
+
+#define DEFAULT_PREVIEW_STILL_FRAMERATE 30
+#define DEFAULT_CAPTURE_STILL_FRAMERATE 15
+#define DEFAULT_PREVIEW_VIDEO_FRAMERATE 30
+#define DEFAULT_CAPTURE_VIDEO_FRAMERATE 30
+
+#define FIMC_IS_REGION_VER 124 /* IS REGION VERSION 1.24 */
+#define FIMC_IS_PARAM_SIZE (FIMC_IS_REGION_SIZE + 1)
+#define FIMC_IS_MAGIC_NUMBER 0x01020304
+#define FIMC_IS_PARAM_MAX_SIZE 64 /* in bytes */
+#define FIMC_IS_PARAM_MAX_ENTRIES (FIMC_IS_PARAM_MAX_SIZE / 4)
+
+/* The parameter bitmask bit definitions. */
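+/*
+ * The bitmask spans two 32-bit words: bits 0..31 are tracked in
+ * p_region_index[0] and the remaining bits in p_region_index[1], where
+ * they are tested with an offset of 32 (e.g. PARAM_FD_CONFIG - 32).
+ */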
+enum is_param_bit {
+ PARAM_GLOBAL_SHOTMODE,
+ PARAM_SENSOR_CONTROL,
+ PARAM_SENSOR_OTF_OUTPUT,
+ PARAM_SENSOR_FRAME_RATE,
+ PARAM_BUFFER_CONTROL,
+ PARAM_BUFFER_OTF_INPUT,
+ PARAM_BUFFER_OTF_OUTPUT,
+ PARAM_ISP_CONTROL,
+ PARAM_ISP_OTF_INPUT,
+ PARAM_ISP_DMA1_INPUT,
+ /* 10 */
+ PARAM_ISP_DMA2_INPUT,
+ PARAM_ISP_AA,
+ PARAM_ISP_FLASH,
+ PARAM_ISP_AWB,
+ PARAM_ISP_IMAGE_EFFECT,
+ PARAM_ISP_ISO,
+ PARAM_ISP_ADJUST,
+ PARAM_ISP_METERING,
+ PARAM_ISP_AFC,
+ PARAM_ISP_OTF_OUTPUT,
+ /* 20 */
+ PARAM_ISP_DMA1_OUTPUT,
+ PARAM_ISP_DMA2_OUTPUT,
+ PARAM_DRC_CONTROL,
+ PARAM_DRC_OTF_INPUT,
+ PARAM_DRC_DMA_INPUT,
+ PARAM_DRC_OTF_OUTPUT,
+ PARAM_SCALERC_CONTROL,
+ PARAM_SCALERC_OTF_INPUT,
+ PARAM_SCALERC_IMAGE_EFFECT,
+ PARAM_SCALERC_INPUT_CROP,
+ /* 30 */
+ PARAM_SCALERC_OUTPUT_CROP,
+ PARAM_SCALERC_OTF_OUTPUT,
+ PARAM_SCALERC_DMA_OUTPUT,
+ PARAM_ODC_CONTROL,
+ PARAM_ODC_OTF_INPUT,
+ PARAM_ODC_OTF_OUTPUT,
+ PARAM_DIS_CONTROL,
+ PARAM_DIS_OTF_INPUT,
+ PARAM_DIS_OTF_OUTPUT,
+ PARAM_TDNR_CONTROL,
+ /* 40 */
+ PARAM_TDNR_OTF_INPUT,
+ PARAM_TDNR_1ST_FRAME,
+ PARAM_TDNR_OTF_OUTPUT,
+ PARAM_TDNR_DMA_OUTPUT,
+ PARAM_SCALERP_CONTROL,
+ PARAM_SCALERP_OTF_INPUT,
+ PARAM_SCALERP_IMAGE_EFFECT,
+ PARAM_SCALERP_INPUT_CROP,
+ PARAM_SCALERP_OUTPUT_CROP,
+ PARAM_SCALERP_ROTATION,
+ /* 50 */
+ PARAM_SCALERP_FLIP,
+ PARAM_SCALERP_OTF_OUTPUT,
+ PARAM_SCALERP_DMA_OUTPUT,
+ PARAM_FD_CONTROL,
+ PARAM_FD_OTF_INPUT,
+ PARAM_FD_DMA_INPUT,
+ PARAM_FD_CONFIG,
+};
+
+/* Interrupt map */
+#define FIMC_IS_INT_GENERAL 0
+#define FIMC_IS_INT_FRAME_DONE_ISP 1
+
+/* Input */
+
+#define CONTROL_COMMAND_STOP 0
+#define CONTROL_COMMAND_START 1
+
+#define CONTROL_BYPASS_DISABLE 0
+#define CONTROL_BYPASS_ENABLE 1
+
+#define CONTROL_ERROR_NONE 0
+
+/* OTF (On-The-Fly) input interface commands */
+#define OTF_INPUT_COMMAND_DISABLE 0
+#define OTF_INPUT_COMMAND_ENABLE 1
+
+/* OTF input interface color formats */
+enum oft_input_fmt {
+ OTF_INPUT_FORMAT_BAYER = 0, /* 1 channel */
+ OTF_INPUT_FORMAT_YUV444 = 1, /* 3 channels */
+ OTF_INPUT_FORMAT_YUV422 = 2, /* 3 channels */
+ OTF_INPUT_FORMAT_YUV420 = 3, /* 3 channels */
+ OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER = 10,
+ OTF_INPUT_FORMAT_BAYER_DMA = 11,
+};
+
+#define OTF_INPUT_ORDER_BAYER_GR_BG 0
+
+/* OTF input error codes */
+#define OTF_INPUT_ERROR_NONE 0 /* Input setting is done */
+
+/* DMA input commands */
+#define DMA_INPUT_COMMAND_DISABLE 0
+#define DMA_INPUT_COMMAND_ENABLE 1
+
+/* DMA input color formats */
+enum dma_input_fmt {
+ DMA_INPUT_FORMAT_BAYER = 0,
+ DMA_INPUT_FORMAT_YUV444 = 1,
+ DMA_INPUT_FORMAT_YUV422 = 2,
+ DMA_INPUT_FORMAT_YUV420 = 3,
+};
+
+enum dma_input_order {
+ /* (for DMA_INPUT_PLANE_3) */
+ DMA_INPUT_ORDER_NO = 0,
+ /* (only valid at DMA_INPUT_PLANE_2) */
+ DMA_INPUT_ORDER_CBCR = 1,
+ /* (only valid at DMA_INPUT_PLANE_2) */
+ DMA_INPUT_ORDER_CRCB = 2,
+ /* (only valid at DMA_INPUT_PLANE_1 & DMA_INPUT_FORMAT_YUV444) */
+ DMA_INPUT_ORDER_YCBCR = 3,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_YYCBCR = 4,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_YCBYCR = 5,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_YCRYCB = 6,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_CBYCRY = 7,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_CRYCBY = 8,
+ /* (only valid at DMA_INPUT_FORMAT_BAYER) */
+ DMA_INPUT_ORDER_GR_BG = 9
+};
+
+#define DMA_INPUT_ERROR_NONE 0 /* DMA input setting is done */
+/*
+ * Data output parameter definitions
+ */
+#define OTF_OUTPUT_CROP_DISABLE 0
+#define OTF_OUTPUT_CROP_ENABLE 1
+
+#define OTF_OUTPUT_COMMAND_DISABLE 0
+#define OTF_OUTPUT_COMMAND_ENABLE 1
+
+enum otf_output_fmt {
+ OTF_OUTPUT_FORMAT_YUV444 = 1,
+ OTF_OUTPUT_FORMAT_YUV422 = 2,
+ OTF_OUTPUT_FORMAT_YUV420 = 3,
+ OTF_OUTPUT_FORMAT_RGB = 4,
+};
+
+#define OTF_OUTPUT_ORDER_BAYER_GR_BG 0
+
+#define OTF_OUTPUT_ERROR_NONE 0 /* Output Setting is done */
+
+#define DMA_OUTPUT_COMMAND_DISABLE 0
+#define DMA_OUTPUT_COMMAND_ENABLE 1
+
+enum dma_output_fmt {
+ DMA_OUTPUT_FORMAT_BAYER = 0,
+ DMA_OUTPUT_FORMAT_YUV444 = 1,
+ DMA_OUTPUT_FORMAT_YUV422 = 2,
+ DMA_OUTPUT_FORMAT_YUV420 = 3,
+ DMA_OUTPUT_FORMAT_RGB = 4,
+};
+
+enum dma_output_order {
+ DMA_OUTPUT_ORDER_NO = 0,
+ /* for DMA_OUTPUT_PLANE_3 */
+ DMA_OUTPUT_ORDER_CBCR = 1,
+ /* only valid at DMA_OUTPUT_PLANE_2 */
+ DMA_OUTPUT_ORDER_CRCB = 2,
+ /* only valid at DMA_OUTPUT_PLANE_2 */
+ DMA_OUTPUT_ORDER_YYCBCR = 3,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCBYCR = 4,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCRYCB = 5,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CBYCRY = 6,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CRYCBY = 7,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCBCR = 8,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CRYCB = 9,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CRCBY = 10,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CBYCR = 11,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCRCB = 12,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CBCRY = 13,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_BGR = 14,
+ /* only valid at DMA_OUTPUT_FORMAT_RGB */
+ DMA_OUTPUT_ORDER_GB_BG = 15
+ /* only valid at DMA_OUTPUT_FORMAT_BAYER */
+};
+
+/* enum dma_output_notify_dma_done */
+#define DMA_OUTPUT_NOTIFY_DMA_DONE_DISABLE 0
+#define DMA_OUTPUT_NOTIFY_DMA_DONE_ENABLE 1
+
+/* DMA output error codes */
+#define DMA_OUTPUT_ERROR_NONE 0 /* DMA output setting is done */
+
+/* ---------------------- Global ----------------------------------- */
+#define GLOBAL_SHOTMODE_ERROR_NONE 0 /* shot-mode setting is done */
+/* 3A lock commands */
+#define ISP_AA_COMMAND_START 0
+#define ISP_AA_COMMAND_STOP 1
+
+/* 3A lock target */
+#define ISP_AA_TARGET_AF 1
+#define ISP_AA_TARGET_AE 2
+#define ISP_AA_TARGET_AWB 4
+
+enum isp_af_mode {
+ ISP_AF_MODE_MANUAL = 0,
+ ISP_AF_MODE_SINGLE = 1,
+ ISP_AF_MODE_CONTINUOUS = 2,
+ ISP_AF_MODE_TOUCH = 3,
+ ISP_AF_MODE_SLEEP = 4,
+ ISP_AF_MODE_INIT = 5,
+ ISP_AF_MODE_SET_CENTER_WINDOW = 6,
+ ISP_AF_MODE_SET_TOUCH_WINDOW = 7
+};
+
+/* Face AF commands */
+#define ISP_AF_FACE_DISABLE 0
+#define ISP_AF_FACE_ENABLE 1
+
+/* AF range */
+#define ISP_AF_RANGE_NORMAL 0
+#define ISP_AF_RANGE_MACRO 1
+
+/* AF sleep */
+#define ISP_AF_SLEEP_OFF 0
+#define ISP_AF_SLEEP_ON 1
+
+/* Continuous AF commands */
+#define ISP_AF_CONTINUOUS_DISABLE 0
+#define ISP_AF_CONTINUOUS_ENABLE 1
+
+/* ISP AF error codes */
+#define ISP_AF_ERROR_NONE 0 /* AF mode change is done */
+#define ISP_AF_ERROR_NONE_LOCK_DONE 1 /* AF lock is done */
+
+/* Flash commands */
+#define ISP_FLASH_COMMAND_DISABLE 0
+#define ISP_FLASH_COMMAND_MANUAL_ON 1 /* (forced flash) */
+#define ISP_FLASH_COMMAND_AUTO 2
+#define ISP_FLASH_COMMAND_TORCH 3 /* 3 sec */
+
+/* Flash red-eye commands */
+#define ISP_FLASH_REDEYE_DISABLE 0
+#define ISP_FLASH_REDEYE_ENABLE 1
+
+/* Flash error codes */
+#define ISP_FLASH_ERROR_NONE 0 /* Flash setting is done */
+
+/* -------------------------- AWB ------------------------------------ */
+enum isp_awb_command {
+ ISP_AWB_COMMAND_AUTO = 0,
+ ISP_AWB_COMMAND_ILLUMINATION = 1,
+ ISP_AWB_COMMAND_MANUAL = 2
+};
+
+enum isp_awb_illumination {
+ ISP_AWB_ILLUMINATION_DAYLIGHT = 0,
+ ISP_AWB_ILLUMINATION_CLOUDY = 1,
+ ISP_AWB_ILLUMINATION_TUNGSTEN = 2,
+ ISP_AWB_ILLUMINATION_FLUORESCENT = 3
+};
+
+/* ISP AWB error codes */
+#define ISP_AWB_ERROR_NONE 0 /* AWB setting is done */
+
+/* -------------------------- Effect ----------------------------------- */
+enum isp_imageeffect_command {
+ ISP_IMAGE_EFFECT_DISABLE = 0,
+ ISP_IMAGE_EFFECT_MONOCHROME = 1,
+ ISP_IMAGE_EFFECT_NEGATIVE_MONO = 2,
+ ISP_IMAGE_EFFECT_NEGATIVE_COLOR = 3,
+ ISP_IMAGE_EFFECT_SEPIA = 4
+};
+
+/* Image effect error codes */
+#define ISP_IMAGE_EFFECT_ERROR_NONE 0 /* Image effect setting is done */
+/* ISO commands */
+#define ISP_ISO_COMMAND_AUTO 0
+#define ISP_ISO_COMMAND_MANUAL 1
+
+/* ISO error codes */
+#define ISP_ISO_ERROR_NONE 0 /* ISO setting is done */
+
+/* ISP adjust commands */
+#define ISP_ADJUST_COMMAND_AUTO (0 << 0)
+#define ISP_ADJUST_COMMAND_MANUAL_CONTRAST (1 << 0)
+#define ISP_ADJUST_COMMAND_MANUAL_SATURATION (1 << 1)
+#define ISP_ADJUST_COMMAND_MANUAL_SHARPNESS (1 << 2)
+#define ISP_ADJUST_COMMAND_MANUAL_EXPOSURE (1 << 3)
+#define ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS (1 << 4)
+#define ISP_ADJUST_COMMAND_MANUAL_HUE (1 << 5)
+#define ISP_ADJUST_COMMAND_MANUAL_ALL 0x7f
+
+/* ISP adjustment error codes */
+#define ISP_ADJUST_ERROR_NONE 0 /* Adjust setting is done */
+
+/*
+ * Exposure metering
+ */
+enum isp_metering_command {
+ ISP_METERING_COMMAND_AVERAGE = 0,
+ ISP_METERING_COMMAND_SPOT = 1,
+ ISP_METERING_COMMAND_MATRIX = 2,
+ ISP_METERING_COMMAND_CENTER = 3
+};
+
+/* ISP metering error codes */
+#define ISP_METERING_ERROR_NONE 0 /* Metering setting is done */
+
+/*
+ * AFC
+ */
+enum isp_afc_command {
+ ISP_AFC_COMMAND_DISABLE = 0,
+ ISP_AFC_COMMAND_AUTO = 1,
+ ISP_AFC_COMMAND_MANUAL = 2,
+};
+
+#define ISP_AFC_MANUAL_50HZ 50
+#define ISP_AFC_MANUAL_60HZ 60
+
+/* ------------------------ SCENE MODE--------------------------------- */
+enum isp_scene_mode {
+ ISP_SCENE_NONE = 0,
+ ISP_SCENE_PORTRAIT = 1,
+ ISP_SCENE_LANDSCAPE = 2,
+ ISP_SCENE_SPORTS = 3,
+ ISP_SCENE_PARTYINDOOR = 4,
+ ISP_SCENE_BEACHSNOW = 5,
+ ISP_SCENE_SUNSET = 6,
+ ISP_SCENE_DAWN = 7,
+ ISP_SCENE_FALL = 8,
+ ISP_SCENE_NIGHT = 9,
+ ISP_SCENE_AGAINSTLIGHTWLIGHT = 10,
+ ISP_SCENE_AGAINSTLIGHTWOLIGHT = 11,
+ ISP_SCENE_FIRE = 12,
+ ISP_SCENE_TEXT = 13,
+ ISP_SCENE_CANDLE = 14
+};
+
+/* AFC error codes */
+#define ISP_AFC_ERROR_NONE 0 /* AFC setting is done */
+
+/* ---------------------------- FD ------------------------------------- */
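+/*
+ * The FD configuration commands below are bit flags; when several face
+ * detection settings are changed before an update they are OR-ed together
+ * in struct param_fd_config::cmd (see the __is_set_fd_config_*() helpers).
+ */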
+enum fd_config_command {
+ FD_CONFIG_COMMAND_MAXIMUM_NUMBER = 0x1,
+ FD_CONFIG_COMMAND_ROLL_ANGLE = 0x2,
+ FD_CONFIG_COMMAND_YAW_ANGLE = 0x4,
+ FD_CONFIG_COMMAND_SMILE_MODE = 0x8,
+ FD_CONFIG_COMMAND_BLINK_MODE = 0x10,
+ FD_CONFIG_COMMAND_EYES_DETECT = 0x20,
+ FD_CONFIG_COMMAND_MOUTH_DETECT = 0x40,
+ FD_CONFIG_COMMAND_ORIENTATION = 0x80,
+ FD_CONFIG_COMMAND_ORIENTATION_VALUE = 0x100
+};
+
+enum fd_config_roll_angle {
+ FD_CONFIG_ROLL_ANGLE_BASIC = 0,
+ FD_CONFIG_ROLL_ANGLE_PRECISE_BASIC = 1,
+ FD_CONFIG_ROLL_ANGLE_SIDES = 2,
+ FD_CONFIG_ROLL_ANGLE_PRECISE_SIDES = 3,
+ FD_CONFIG_ROLL_ANGLE_FULL = 4,
+ FD_CONFIG_ROLL_ANGLE_PRECISE_FULL = 5,
+};
+
+enum fd_config_yaw_angle {
+ FD_CONFIG_YAW_ANGLE_0 = 0,
+ FD_CONFIG_YAW_ANGLE_45 = 1,
+ FD_CONFIG_YAW_ANGLE_90 = 2,
+ FD_CONFIG_YAW_ANGLE_45_90 = 3,
+};
+
+/* Smile mode configuration */
+#define FD_CONFIG_SMILE_MODE_DISABLE 0
+#define FD_CONFIG_SMILE_MODE_ENABLE 1
+
+/* Blink mode configuration */
+#define FD_CONFIG_BLINK_MODE_DISABLE 0
+#define FD_CONFIG_BLINK_MODE_ENABLE 1
+
+/* Eyes detection configuration */
+#define FD_CONFIG_EYES_DETECT_DISABLE 0
+#define FD_CONFIG_EYES_DETECT_ENABLE 1
+
+/* Mouth detection configuration */
+#define FD_CONFIG_MOUTH_DETECT_DISABLE 0
+#define FD_CONFIG_MOUTH_DETECT_ENABLE 1
+
+#define FD_CONFIG_ORIENTATION_DISABLE 0
+#define FD_CONFIG_ORIENTATION_ENABLE 1
+
+struct param_control {
+ u32 cmd;
+ u32 bypass;
+ u32 buffer_address;
+ u32 buffer_size;
+ u32 skip_frames; /* only valid at ISP */
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 6];
+ u32 err;
+};
+
+struct param_otf_input {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 order;
+ u32 crop_offset_x;
+ u32 crop_offset_y;
+ u32 crop_width;
+ u32 crop_height;
+ u32 frametime_min;
+ u32 frametime_max;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 13];
+ u32 err;
+};
+
+struct param_dma_input {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 plane;
+ u32 order;
+ u32 buffer_number;
+ u32 buffer_address;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 10];
+ u32 err;
+};
+
+struct param_otf_output {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 order;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 7];
+ u32 err;
+};
+
+struct param_dma_output {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 plane;
+ u32 order;
+ u32 buffer_number;
+ u32 buffer_address;
+ u32 notify_dma_done;
+ u32 dma_out_mask;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 12];
+ u32 err;
+};
+
+struct param_global_shotmode {
+ u32 cmd;
+ u32 skip_frames;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_sensor_framerate {
+ u32 frame_rate;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_isp_aa {
+ u32 cmd;
+ u32 target;
+ u32 mode;
+ u32 scene;
+ u32 sleep;
+ u32 face;
+ u32 touch_x;
+ u32 touch_y;
+ u32 manual_af_setting;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 10];
+ u32 err;
+};
+
+struct param_isp_flash {
+ u32 cmd;
+ u32 redeye;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_isp_awb {
+ u32 cmd;
+ u32 illumination;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_isp_imageeffect {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_isp_iso {
+ u32 cmd;
+ u32 value;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_isp_adjust {
+ u32 cmd;
+ s32 contrast;
+ s32 saturation;
+ s32 sharpness;
+ s32 exposure;
+ s32 brightness;
+ s32 hue;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 8];
+ u32 err;
+};
+
+struct param_isp_metering {
+ u32 cmd;
+ u32 win_pos_x;
+ u32 win_pos_y;
+ u32 win_width;
+ u32 win_height;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 6];
+ u32 err;
+};
+
+struct param_isp_afc {
+ u32 cmd;
+ u32 manual;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_scaler_imageeffect {
+ u32 cmd;
+ u32 arbitrary_cb;
+ u32 arbitrary_cr;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 4];
+ u32 err;
+};
+
+struct param_scaler_input_crop {
+ u32 cmd;
+ u32 crop_offset_x;
+ u32 crop_offset_y;
+ u32 crop_width;
+ u32 crop_height;
+ u32 in_width;
+ u32 in_height;
+ u32 out_width;
+ u32 out_height;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 10];
+ u32 err;
+};
+
+struct param_scaler_output_crop {
+ u32 cmd;
+ u32 crop_offset_x;
+ u32 crop_offset_y;
+ u32 crop_width;
+ u32 crop_height;
+ u32 out_format;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 7];
+ u32 err;
+};
+
+struct param_scaler_rotation {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_scaler_flip {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_3dnr_1stframe {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_fd_config {
+ u32 cmd;
+ u32 max_number;
+ u32 roll_angle;
+ u32 yaw_angle;
+ u32 smile_mode;
+ u32 blink_mode;
+ u32 eye_detect;
+ u32 mouth_detect;
+ u32 orientation;
+ u32 orientation_value;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 11];
+ u32 err;
+};
+
+struct global_param {
+ struct param_global_shotmode shotmode;
+};
+
+struct sensor_param {
+ struct param_control control;
+ struct param_otf_output otf_output;
+ struct param_sensor_framerate frame_rate;
+} __packed;
+
+struct buffer_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct isp_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_dma_input dma1_input;
+ struct param_dma_input dma2_input;
+ struct param_isp_aa aa;
+ struct param_isp_flash flash;
+ struct param_isp_awb awb;
+ struct param_isp_imageeffect effect;
+ struct param_isp_iso iso;
+ struct param_isp_adjust adjust;
+ struct param_isp_metering metering;
+ struct param_isp_afc afc;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma1_output;
+ struct param_dma_output dma2_output;
+} __packed;
+
+struct drc_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_dma_input dma_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct scalerc_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_scaler_imageeffect effect;
+ struct param_scaler_input_crop input_crop;
+ struct param_scaler_output_crop output_crop;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma_output;
+} __packed;
+
+struct odc_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct dis_param {
+ struct param_control control;
+ struct param_otf_output otf_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct tdnr_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_3dnr_1stframe frame;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma_output;
+} __packed;
+
+struct scalerp_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_scaler_imageeffect effect;
+ struct param_scaler_input_crop input_crop;
+ struct param_scaler_output_crop output_crop;
+ struct param_scaler_rotation rotation;
+ struct param_scaler_flip flip;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma_output;
+} __packed;
+
+struct fd_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_dma_input dma_input;
+ struct param_fd_config config;
+} __packed;
+
+struct is_param_region {
+ struct global_param global;
+ struct sensor_param sensor;
+ struct buffer_param buf;
+ struct isp_param isp;
+ struct drc_param drc;
+ struct scalerc_param scalerc;
+ struct odc_param odc;
+ struct dis_param dis;
+ struct tdnr_param tdnr;
+ struct scalerp_param scalerp;
+ struct fd_param fd;
+} __packed;
+
+#define NUMBER_OF_GAMMA_CURVE_POINTS 32
+
+struct is_tune_sensor {
+ u32 exposure;
+ u32 analog_gain;
+ u32 frame_rate;
+ u32 actuator_position;
+};
+
+struct is_tune_gammacurve {
+ u32 num_pts_x[NUMBER_OF_GAMMA_CURVE_POINTS];
+ u32 num_pts_y_r[NUMBER_OF_GAMMA_CURVE_POINTS];
+ u32 num_pts_y_g[NUMBER_OF_GAMMA_CURVE_POINTS];
+ u32 num_pts_y_b[NUMBER_OF_GAMMA_CURVE_POINTS];
+};
+
+struct is_tune_isp {
+ /* Brightness level: range 0...100, default 7. */
+ u32 brightness_level;
+ /* Contrast level: range -127...127, default 0. */
+ s32 contrast_level;
+ /* Saturation level: range -127...127, default 0. */
+ s32 saturation_level;
+ s32 gamma_level;
+ struct is_tune_gammacurve gamma_curve[4];
+ /* Hue: range -127...127, default 0. */
+ s32 hue;
+ /* Sharpness blur: range -127...127, default 0. */
+ s32 sharpness_blur;
+ /* Despeckle: range -127...127, default 0. */
+ s32 despeckle;
+ /* Edge color suppression: range -127...127, default 0. */
+ s32 edge_color_supression;
+ /* Noise reduction: range -127...127, default 0. */
+ s32 noise_reduction;
+ /* (32 * 4 * 4 + 9) * 4 = 2084 bytes */
+} __packed;
+
+struct is_tune_region {
+ struct is_tune_sensor sensor;
+ struct is_tune_isp isp;
+} __packed;
+
+struct rational {
+ u32 num;
+ u32 den;
+};
+
+struct srational {
+ s32 num;
+ s32 den;
+};
+
+#define FLASH_FIRED_SHIFT 0
+#define FLASH_NOT_FIRED 0
+#define FLASH_FIRED 1
+
+#define FLASH_STROBE_SHIFT 1
+#define FLASH_STROBE_NO_DETECTION 0
+#define FLASH_STROBE_RESERVED 1
+#define FLASH_STROBE_RETURN_LIGHT_NOT_DETECTED 2
+#define FLASH_STROBE_RETURN_LIGHT_DETECTED 3
+
+#define FLASH_MODE_SHIFT 3
+#define FLASH_MODE_UNKNOWN 0
+#define FLASH_MODE_COMPULSORY_FLASH_FIRING 1
+#define FLASH_MODE_COMPULSORY_FLASH_SUPPRESSION 2
+#define FLASH_MODE_AUTO_MODE 3
+
+#define FLASH_FUNCTION_SHIFT 5
+#define FLASH_FUNCTION_PRESENT 0
+#define FLASH_FUNCTION_NONE 1
+
+#define FLASH_RED_EYE_SHIFT 6
+#define FLASH_RED_EYE_DISABLED 0
+#define FLASH_RED_EYE_SUPPORTED 1
+
+enum apex_aperture_value {
+ F1_0 = 0,
+ F1_4 = 1,
+ F2_0 = 2,
+ F2_8 = 3,
+ F4_0 = 4,
+ F5_6 = 5,
+ F8_9 = 6,
+ F11_0 = 7,
+ F16_0 = 8,
+ F22_0 = 9,
+ F32_0 = 10,
+};
+
+struct exif_attribute {
+ struct rational exposure_time;
+ struct srational shutter_speed;
+ u32 iso_speed_rating;
+ u32 flash;
+ struct srational brightness;
+} __packed;
+
+struct is_frame_header {
+ u32 valid;
+ u32 bad_mark;
+ u32 captured;
+ u32 frame_number;
+ struct exif_attribute exif;
+} __packed;
+
+struct is_fd_rect {
+ u32 offset_x;
+ u32 offset_y;
+ u32 width;
+ u32 height;
+};
+
+struct is_face_marker {
+ u32 frame_number;
+ struct is_fd_rect face;
+ struct is_fd_rect left_eye;
+ struct is_fd_rect right_eye;
+ struct is_fd_rect mouth;
+ u32 roll_angle;
+ u32 yaw_angle;
+ u32 confidence;
+ s32 smile_level;
+ s32 blink_level;
+} __packed;
+
+#define MAX_FRAME_COUNT 8
+#define MAX_FRAME_COUNT_PREVIEW 4
+#define MAX_FRAME_COUNT_CAPTURE 1
+#define MAX_FACE_COUNT 16
+#define MAX_SHARED_COUNT 500
+
+struct is_region {
+ struct is_param_region parameter;
+ struct is_tune_region tune;
+ struct is_frame_header header[MAX_FRAME_COUNT];
+ struct is_face_marker face[MAX_FACE_COUNT];
+ u32 shared[MAX_SHARED_COUNT];
+} __packed;
+
+/* Offset to the ISP DMA2 output buffer address array. */
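+/* The address array starts at the is_region.shared[32] word. */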
+#define DMA2_OUTPUT_ADDR_ARRAY_OFFS \
+ (offsetof(struct is_region, shared) + 32 * sizeof(u32))
+
+struct is_debug_frame_descriptor {
+ u32 sensor_frame_time;
+ u32 sensor_exposure_time;
+ s32 sensor_analog_gain;
+ /* monitor for AA */
+ u32 req_lei;
+
+ u32 next_next_lei_exp;
+ u32 next_next_lei_a_gain;
+ u32 next_next_lei_d_gain;
+ u32 next_next_lei_statlei;
+ u32 next_next_lei_lei;
+
+ u32 dummy0;
+};
+
+#define MAX_FRAMEDESCRIPTOR_CONTEXT_NUM (30*20) /* 600 frames */
+#define MAX_VERSION_DISPLAY_BUF 32
+
+struct is_share_region {
+ u32 frame_time;
+ u32 exposure_time;
+ s32 analog_gain;
+
+ u32 r_gain;
+ u32 g_gain;
+ u32 b_gain;
+
+ u32 af_position;
+ u32 af_status;
+ /* 0 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_NOMESSAGE */
+ /* 1 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_REACHED */
+ /* 2 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_UNABLETOREACH */
+ /* 3 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_LOST */
+ /* default : unknown */
+ u32 af_scene_type;
+
+ u32 frame_descp_onoff_control;
+ u32 frame_descp_update_done;
+ u32 frame_descp_idx;
+ u32 frame_descp_max_idx;
+ struct is_debug_frame_descriptor
+ dbg_frame_descp_ctx[MAX_FRAMEDESCRIPTOR_CONTEXT_NUM];
+
+ u32 chip_id;
+ u32 chip_rev_no;
+ u8 isp_fw_ver_no[MAX_VERSION_DISPLAY_BUF];
+ u8 isp_fw_ver_date[MAX_VERSION_DISPLAY_BUF];
+ u8 sirc_sdk_ver_no[MAX_VERSION_DISPLAY_BUF];
+ u8 sirc_sdk_rev_no[MAX_VERSION_DISPLAY_BUF];
+ u8 sirc_sdk_rev_date[MAX_VERSION_DISPLAY_BUF];
+} __packed;
+
+struct is_debug_control {
+ u32 write_point; /* 0~ 500KB boundary */
+ u32 assert_flag; /* 0: Not invoked, 1: Invoked */
+ u32 pabort_flag; /* 0: Not invoked, 1: Invoked */
+ u32 dabort_flag; /* 0: Not invoked, 1: Invoked */
+};
+
+struct sensor_open_extended {
+ u32 actuator_type;
+ u32 mclk;
+ u32 mipi_lane_num;
+ u32 mipi_speed;
+ /* Skip setfile loading when fast_open_sensor is not 0 */
+ u32 fast_open_sensor;
+ /* Activating sensor self calibration mode (6A3) */
+ u32 self_calibration_mode;
+ /* This field is to adjust I2c clock based on ACLK200 */
+ /* This value is varied in case of rev 0.2 */
+ u32 i2c_sclk;
+};
+
+struct fimc_is;
+
+int fimc_is_hw_get_sensor_max_framerate(struct fimc_is *is);
+int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset);
+void fimc_is_set_initial_params(struct fimc_is *is);
+unsigned int __get_pending_param_count(struct fimc_is *is);
+
+int __is_hw_update_params(struct fimc_is *is);
+void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
+void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
+void __is_set_sensor(struct fimc_is *is, int fps);
+void __is_set_isp_aa_ae(struct fimc_is *is);
+void __is_set_isp_flash(struct fimc_is *is, u32 cmd, u32 redeye);
+void __is_set_isp_awb(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_isp_effect(struct fimc_is *is, u32 cmd);
+void __is_set_isp_iso(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_isp_adjust(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_isp_metering(struct fimc_is *is, u32 id, u32 val);
+void __is_set_isp_afc(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_drc_control(struct fimc_is *is, u32 val);
+void __is_set_fd_control(struct fimc_is *is, u32 val);
+void __is_set_fd_config_maxface(struct fimc_is *is, u32 val);
+void __is_set_fd_config_rollangle(struct fimc_is *is, u32 val);
+void __is_set_fd_config_yawangle(struct fimc_is *is, u32 val);
+void __is_set_fd_config_smilemode(struct fimc_is *is, u32 val);
+void __is_set_fd_config_blinkmode(struct fimc_is *is, u32 val);
+void __is_set_fd_config_eyedetect(struct fimc_is *is, u32 val);
+void __is_set_fd_config_mouthdetect(struct fimc_is *is, u32 val);
+void __is_set_fd_config_orientation(struct fimc_is *is, u32 val);
+void __is_set_fd_config_orientation_val(struct fimc_is *is, u32 val);
+void __is_set_isp_aa_af_mode(struct fimc_is *is, int cmd);
+void __is_set_isp_aa_af_start_stop(struct fimc_is *is, int cmd);
+
+#endif
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
new file mode 100644
index 00000000000..cfe4406a83f
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -0,0 +1,233 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+
+#include "fimc-is.h"
+#include "fimc-is-command.h"
+#include "fimc-is-regs.h"
+#include "fimc-is-sensor.h"
+
+void fimc_is_fw_clear_irq1(struct fimc_is *is, unsigned int nr)
+{
+ mcuctl_write(1UL << nr, is, MCUCTL_REG_INTCR1);
+}
+
+void fimc_is_fw_clear_irq2(struct fimc_is *is)
+{
+ u32 cfg = mcuctl_read(is, MCUCTL_REG_INTSR2);
+ mcuctl_write(cfg, is, MCUCTL_REG_INTCR2);
+}
+
+void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is)
+{
+ mcuctl_write(INTGR0_INTGD(0), is, MCUCTL_REG_INTGR0);
+}
+
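+/*
+ * Busy-wait (2000 x udelay(1) at most) until the IS firmware has accepted
+ * the previous command, i.e. until the INTMSD0 bit of INTMSR0 reads as 0.
+ */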
+int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is)
+{
+ unsigned int timeout = 2000;
+ u32 cfg, status;
+
+ do {
+ cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
+ status = INTMSR0_GET_INTMSD(0, cfg);
+
+ if (--timeout == 0) {
+ dev_warn(&is->pdev->dev, "%s timeout\n",
+ __func__);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ } while (status != 0);
+
+ return 0;
+}
+
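+/*
+ * Issue the HIC_SET_PARAMETER command through the ISSR mailbox registers:
+ * ISSR(0) command, ISSR(1) sensor index, ISSR(2) configuration index,
+ * ISSR(3) pending parameter count, ISSR(4)/ISSR(5) parameter bitmask words.
+ */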
+int fimc_is_hw_set_param(struct fimc_is *is)
+{
+ struct chain_config *config = &is->config[is->config_index];
+ unsigned int param_count = __get_pending_param_count(is);
+
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+
+ mcuctl_write(HIC_SET_PARAMETER, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->config_index, is, MCUCTL_REG_ISSR(2));
+
+ mcuctl_write(param_count, is, MCUCTL_REG_ISSR(3));
+ mcuctl_write(config->p_region_index[0], is, MCUCTL_REG_ISSR(4));
+ mcuctl_write(config->p_region_index[1], is, MCUCTL_REG_ISSR(5));
+
+ fimc_is_hw_set_intgr0_gd0(is);
+ return 0;
+}
+
+static int __maybe_unused fimc_is_hw_set_tune(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+
+ mcuctl_write(HIC_SET_TUNE, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->h2i_cmd.entry_id, is, MCUCTL_REG_ISSR(2));
+
+ fimc_is_hw_set_intgr0_gd0(is);
+ return 0;
+}
+
+#define FIMC_IS_MAX_PARAMS 4
+
+int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num_args)
+{
+ int i;
+
+ if (num_args > FIMC_IS_MAX_PARAMS)
+ return -EINVAL;
+
+ is->i2h_cmd.num_args = num_args;
+
+ for (i = 0; i < FIMC_IS_MAX_PARAMS; i++) {
+ if (i < num_args)
+ is->i2h_cmd.args[i] = mcuctl_read(is,
+ MCUCTL_REG_ISSR(12 + i));
+ else
+ is->i2h_cmd.args[i] = 0;
+ }
+ return 0;
+}
+
+void fimc_is_hw_set_isp_buf_mask(struct fimc_is *is, unsigned int mask)
+{
+ if (hweight32(mask) == 1) {
+ dev_err(&is->pdev->dev, "%s(): not enough buffers (mask %#x)\n",
+ __func__, mask);
+ return;
+ }
+
+ if (mcuctl_read(is, MCUCTL_REG_ISSR(23)) != 0)
+ dev_dbg(&is->pdev->dev, "non-zero DMA buffer mask\n");
+
+ mcuctl_write(mask, is, MCUCTL_REG_ISSR(23));
+}
+
+void fimc_is_hw_set_sensor_num(struct fimc_is *is)
+{
+ pr_debug("setting sensor index to: %d\n", is->sensor_index);
+
+ mcuctl_write(IH_REPLY_DONE, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(IHC_GET_SENSOR_NUM, is, MCUCTL_REG_ISSR(2));
+ mcuctl_write(FIMC_IS_SENSORS_NUM, is, MCUCTL_REG_ISSR(3));
+}
+
+void fimc_is_hw_close_sensor(struct fimc_is *is, unsigned int index)
+{
+ if (is->sensor_index != index)
+ return;
+
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_CLOSE_SENSOR, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(2));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_get_setfile_addr(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_GET_SET_FILE_ADDR, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_load_setfile(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_LOAD_SET_FILE, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
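+/*
+ * Request the IS scenario selected by config_index: preview still, preview
+ * video, capture still or capture video.
+ */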
+int fimc_is_hw_change_mode(struct fimc_is *is)
+{
+ const u8 cmd[] = {
+ HIC_PREVIEW_STILL, HIC_PREVIEW_VIDEO,
+ HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
+ };
+
+ if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
+ return -EINVAL;
+
+ mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->setfile.sub_index, is, MCUCTL_REG_ISSR(2));
+ fimc_is_hw_set_intgr0_gd0(is);
+ return 0;
+}
+
+void fimc_is_hw_stream_on(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_STREAM_ON, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(0, is, MCUCTL_REG_ISSR(2));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_stream_off(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_STREAM_OFF, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_subip_power_off(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_POWER_DOWN, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
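+/*
+ * Apply the parameter region: optionally flush pending parameter updates,
+ * issue HIC_SET_PARAMETER and wait up to FIMC_IS_CONFIG_TIMEOUT for the
+ * firmware to signal that the command has been processed.
+ */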
+int fimc_is_itf_s_param(struct fimc_is *is, bool update)
+{
+ int ret;
+
+ if (update)
+ __is_hw_update_params(is);
+
+ fimc_is_mem_barrier();
+
+ clear_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
+ fimc_is_hw_set_param(is);
+ ret = fimc_is_wait_event(is, IS_ST_BLOCK_CMD_CLEARED, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0)
+ dev_err(&is->pdev->dev, "%s() timeout\n", __func__);
+
+ return ret;
+}
+
+int fimc_is_itf_mode_change(struct fimc_is *is)
+{
+ int ret;
+
+ clear_bit(IS_ST_CHANGE_MODE, &is->state);
+ fimc_is_hw_change_mode(is);
+ ret = fimc_is_wait_event(is, IS_ST_CHANGE_MODE, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0)
+ dev_err(&is->pdev->dev, "%s(): mode change (%d) timeout\n",
+ __func__, is->config_index);
+ return ret;
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.h b/drivers/media/platform/exynos4-is/fimc-is-regs.h
new file mode 100644
index 00000000000..141e5ddadbe
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.h
@@ -0,0 +1,164 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_REG_H_
+#define FIMC_IS_REG_H_
+
+/* WDT_ISP register */
+#define REG_WDT_ISP 0x00170000
+
+/* MCUCTL registers base offset */
+#define MCUCTL_BASE 0x00180000
+
+/* MCU Controller Register */
+#define MCUCTL_REG_MCUCTRL (MCUCTL_BASE + 0x00)
+#define MCUCTRL_MSWRST (1 << 0)
+
+/* Boot Base Offset Address Register */
+#define MCUCTL_REG_BBOAR (MCUCTL_BASE + 0x04)
+
+/* Interrupt Generation Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTGR0 (MCUCTL_BASE + 0x08)
+/* __n = 0...9 */
+#define INTGR0_INTGC(__n) (1 << ((__n) + 16))
+/* __n = 0...5 */
+#define INTGR0_INTGD(__n) (1 << (__n))
+
+/* Interrupt Clear Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTCR0 (MCUCTL_BASE + 0x0c)
+/* __n = 0...9 */
+#define INTCR0_INTGC(__n) (1 << ((__n) + 16))
+/* __n = 0...5 */
+#define INTCR0_INTCD(__n) (1 << (__n))
+
+/* Interrupt Mask Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTMR0 (MCUCTL_BASE + 0x10)
+/* __n = 0...9 */
+#define INTMR0_INTMC(__n) (1 << ((__n) + 16))
+/* __n = 0...5 */
+#define INTMR0_INTMD(__n) (1 << (__n))
+
+/* Interrupt Status Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTSR0 (MCUCTL_BASE + 0x14)
+/* __n (bit number) = 0...4 */
+#define INTSR0_GET_INTSD(x, __n) (((x) >> (__n)) & 0x1)
+/* __n (bit number) = 0...9 */
+#define INTSR0_GET_INTSC(x, __n) (((x) >> ((__n) + 16)) & 0x1)
+
+/* Interrupt Mask Status Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTMSR0 (MCUCTL_BASE + 0x18)
+/* __n (bit number) = 0...4 */
+#define INTMSR0_GET_INTMSD(x, __n) (((x) >> (__n)) & 0x1)
+/* __n (bit number) = 0...9 */
+#define INTMSR0_GET_INTMSC(x, __n) (((x) >> ((__n) + 16)) & 0x1)
+
+/* Interrupt Generation Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTGR1 (MCUCTL_BASE + 0x1c)
+/* __n = 0...9 */
+#define INTGR1_INTGC(__n) (1 << (__n))
+
+/* Interrupt Clear Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTCR1 (MCUCTL_BASE + 0x20)
+/* __n = 0...9 */
+#define INTCR1_INTCC(__n) (1 << (__n))
+
+/* Interrupt Mask Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTMR1 (MCUCTL_BASE + 0x24)
+/* __n = 0...9 */
+#define INTMR1_INTMC(__n) (1 << (__n))
+
+/* Interrupt Status Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTSR1 (MCUCTL_BASE + 0x28)
+/* Interrupt Mask Status Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTMSR1 (MCUCTL_BASE + 0x2c)
+
+/* Interrupt Clear Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTCR2 (MCUCTL_BASE + 0x30)
+/* __n = 0...5 */
+#define INTCR2_INTCC(__n) (1 << ((__n) + 16))
+
+/* Interrupt Mask Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTMR2 (MCUCTL_BASE + 0x34)
+/* __n = 0...25 */
+#define INTMR2_INTMCIS(__n) (1 << (__n))
+
+/* Interrupt Status Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTSR2 (MCUCTL_BASE + 0x38)
+/* Interrupt Mask Status Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTMSR2 (MCUCTL_BASE + 0x3c)
+
+/* General Purpose Output Control Register (0~17) */
+#define MCUCTL_REG_GPOCTLR (MCUCTL_BASE + 0x40)
+/* __n = 0...17 */
+#define GPOCTLR_GPOG(__n) (1 << (__n))
+
+/* General Purpose Pad Output Enable Register (0~17) */
+#define MCUCTL_REG_GPOENCTLR (MCUCTL_BASE + 0x44)
+/* __n = 0...17 */
+#define GPOENCTLR_GPOEN(__n) (1 << (__n))
+
+/* General Purpose Input Control Register (0~17) */
+#define MCUCTL_REG_GPICTLR (MCUCTL_BASE + 0x48)
+
+/* Shared registers between ISP CPU and the host CPU - ISSRxx */
+
+/* ISSR(1): Command Host -> IS */
+/* ISSR(1): Sensor ID for Command, ISSR2...5 = Parameter 1...4 */
+
+/* ISSR(10): Reply IS -> Host */
+/* ISSR(11): Sensor ID for Reply, ISSR12...15 = Parameter 1...4 */
+
+/* ISSR(20): ISP_FRAME_DONE : SENSOR ID */
+/* ISSR(21): ISP_FRAME_DONE : PARAMETER 1 */
+
+/* ISSR(24): SCALERC_FRAME_DONE : SENSOR ID */
+/* ISSR(25): SCALERC_FRAME_DONE : PARAMETER 1 */
+
+/* ISSR(28): 3DNR_FRAME_DONE : SENSOR ID */
+/* ISSR(29): 3DNR_FRAME_DONE : PARAMETER 1 */
+
+/* ISSR(32): SCALERP_FRAME_DONE : SENSOR ID */
+/* ISSR(33): SCALERP_FRAME_DONE : PARAMETER 1 */
+
+/* __n = 0...63 */
+#define MCUCTL_REG_ISSR(__n) (MCUCTL_BASE + 0x80 + ((__n) * 4))
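+
+/*
+ * Editorial sketch, not part of the original commit: a host -> IS command
+ * goes through the mailbox above roughly as follows, using the accessors
+ * added elsewhere in this patch (see fimc_is_hw_open_sensor() in fimc-is.c):
+ *
+ *	mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
+ *	mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ *	mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
+ *	fimc_is_hw_set_intgr0_gd0(is);
+ *
+ * The firmware's reply is then read back from ISSR(10)...ISSR(15) in the
+ * general interrupt handler in fimc-is.c.
+ */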
+
+/* PMU ISP register offsets */
+#define REG_CMU_RESET_ISP_SYS_PWR_REG 0x1174
+#define REG_CMU_SYSCLK_ISP_SYS_PWR_REG 0x13b8
+#define REG_PMU_ISP_ARM_SYS 0x1050
+#define REG_PMU_ISP_ARM_CONFIGURATION 0x2280
+#define REG_PMU_ISP_ARM_STATUS 0x2284
+#define REG_PMU_ISP_ARM_OPTION 0x2288
+
+void fimc_is_fw_clear_irq1(struct fimc_is *is, unsigned int bit);
+void fimc_is_fw_clear_irq2(struct fimc_is *is);
+int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num);
+
+void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is);
+int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is);
+void fimc_is_hw_set_sensor_num(struct fimc_is *is);
+void fimc_is_hw_set_isp_buf_mask(struct fimc_is *is, unsigned int mask);
+void fimc_is_hw_stream_on(struct fimc_is *is);
+void fimc_is_hw_stream_off(struct fimc_is *is);
+int fimc_is_hw_set_param(struct fimc_is *is);
+int fimc_is_hw_change_mode(struct fimc_is *is);
+
+void fimc_is_hw_close_sensor(struct fimc_is *is, unsigned int index);
+void fimc_is_hw_get_setfile_addr(struct fimc_is *is);
+void fimc_is_hw_load_setfile(struct fimc_is *is);
+void fimc_is_hw_subip_power_off(struct fimc_is *is);
+
+int fimc_is_itf_s_param(struct fimc_is *is, bool update);
+int fimc_is_itf_mode_change(struct fimc_is *is);
+
+#endif /* FIMC_IS_REG_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.c b/drivers/media/platform/exynos4-is/fimc-is-sensor.c
new file mode 100644
index 00000000000..10e82e21b5d
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-sensor.c
@@ -0,0 +1,34 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "fimc-is-sensor.h"
+
+static const struct sensor_drv_data s5k6a3_drvdata = {
+ .id = FIMC_IS_SENSOR_ID_S5K6A3,
+ .open_timeout = S5K6A3_OPEN_TIMEOUT,
+};
+
+static const struct of_device_id fimc_is_sensor_of_ids[] = {
+ {
+ .compatible = "samsung,s5k6a3",
+ .data = &s5k6a3_drvdata,
+ },
+ { }
+};
+
+const struct sensor_drv_data *fimc_is_sensor_get_drvdata(
+ struct device_node *node)
+{
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(fimc_is_sensor_of_ids, node);
+ return of_id ? of_id->data : NULL;
+}
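+
+/*
+ * Editorial example, not part of the original commit: a hypothetical caller
+ * of the lookup above. Within this patch the real user is
+ * fimc_is_parse_sensor_config() in fimc-is.c.
+ */
+static int __maybe_unused example_get_sensor_info(struct device_node *node)
+{
+	const struct sensor_drv_data *dd = fimc_is_sensor_get_drvdata(node);
+
+	if (!dd)
+		return -EINVAL;
+
+	pr_info("sensor id: %d, open timeout: %d ms\n",
+		dd->id, dd->open_timeout);
+	return 0;
+}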
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.h b/drivers/media/platform/exynos4-is/fimc-is-sensor.h
new file mode 100644
index 00000000000..173ccffa4bc
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-sensor.h
@@ -0,0 +1,56 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_SENSOR_H_
+#define FIMC_IS_SENSOR_H_
+
+#include <linux/of.h>
+#include <linux/types.h>
+
+#define S5K6A3_OPEN_TIMEOUT 2000 /* ms */
+#define S5K6A3_SENSOR_WIDTH 1392
+#define S5K6A3_SENSOR_HEIGHT 1392
+
+enum fimc_is_sensor_id {
+ FIMC_IS_SENSOR_ID_S5K3H2 = 1,
+ FIMC_IS_SENSOR_ID_S5K6A3,
+ FIMC_IS_SENSOR_ID_S5K4E5,
+ FIMC_IS_SENSOR_ID_S5K3H7,
+ FIMC_IS_SENSOR_ID_CUSTOM,
+ FIMC_IS_SENSOR_ID_END
+};
+
+#define IS_SENSOR_CTRL_BUS_I2C0 0
+#define IS_SENSOR_CTRL_BUS_I2C1 1
+
+struct sensor_drv_data {
+ enum fimc_is_sensor_id id;
+ /* sensor open timeout in ms */
+ unsigned short open_timeout;
+};
+
+/**
+ * struct fimc_is_sensor - fimc-is sensor data structure
+ * @drvdata: a pointer to the sensor's parameters data structure
+ * @i2c_bus: ISP I2C bus index (0...1)
+ * @test_pattern: true to enable video test pattern
+ */
+struct fimc_is_sensor {
+ const struct sensor_drv_data *drvdata;
+ unsigned int i2c_bus;
+ u8 test_pattern;
+};
+
+const struct sensor_drv_data *fimc_is_sensor_get_drvdata(
+ struct device_node *node);
+
+#endif /* FIMC_IS_SENSOR_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
new file mode 100644
index 00000000000..5476dce3ad2
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -0,0 +1,998 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-contiguous.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "media-dev.h"
+#include "fimc-is.h"
+#include "fimc-is-command.h"
+#include "fimc-is-errno.h"
+#include "fimc-is-i2c.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+
+
+static char *fimc_is_clocks[ISS_CLKS_MAX] = {
+ [ISS_CLK_PPMUISPX] = "ppmuispx",
+ [ISS_CLK_PPMUISPMX] = "ppmuispmx",
+ [ISS_CLK_LITE0] = "lite0",
+ [ISS_CLK_LITE1] = "lite1",
+ [ISS_CLK_MPLL] = "mpll",
+ [ISS_CLK_ISP] = "isp",
+ [ISS_CLK_DRC] = "drc",
+ [ISS_CLK_FD] = "fd",
+ [ISS_CLK_MCUISP] = "mcuisp",
+ [ISS_CLK_UART] = "uart",
+ [ISS_CLK_ISP_DIV0] = "ispdiv0",
+ [ISS_CLK_ISP_DIV1] = "ispdiv1",
+ [ISS_CLK_MCUISP_DIV0] = "mcuispdiv0",
+ [ISS_CLK_MCUISP_DIV1] = "mcuispdiv1",
+ [ISS_CLK_ACLK200] = "aclk200",
+ [ISS_CLK_ACLK200_DIV] = "div_aclk200",
+ [ISS_CLK_ACLK400MCUISP] = "aclk400mcuisp",
+ [ISS_CLK_ACLK400MCUISP_DIV] = "div_aclk400mcuisp",
+};
+
+static void fimc_is_put_clocks(struct fimc_is *is)
+{
+ int i;
+
+ for (i = 0; i < ISS_CLKS_MAX; i++) {
+ if (IS_ERR(is->clocks[i]))
+ continue;
+ clk_put(is->clocks[i]);
+ is->clocks[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_is_get_clocks(struct fimc_is *is)
+{
+ int i, ret;
+
+ for (i = 0; i < ISS_CLKS_MAX; i++)
+ is->clocks[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ISS_CLKS_MAX; i++) {
+ is->clocks[i] = clk_get(&is->pdev->dev, fimc_is_clocks[i]);
+ if (IS_ERR(is->clocks[i])) {
+ ret = PTR_ERR(is->clocks[i]);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ fimc_is_put_clocks(is);
+ dev_err(&is->pdev->dev, "failed to get clock: %s\n",
+ fimc_is_clocks[i]);
+ return ret;
+}
+
+static int fimc_is_setup_clocks(struct fimc_is *is)
+{
+ int ret;
+
+ ret = clk_set_parent(is->clocks[ISS_CLK_ACLK200],
+ is->clocks[ISS_CLK_ACLK200_DIV]);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_parent(is->clocks[ISS_CLK_ACLK400MCUISP],
+ is->clocks[ISS_CLK_ACLK400MCUISP_DIV]);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV0], ACLK_AXI_FREQUENCY);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV1], ACLK_AXI_FREQUENCY);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV0],
+ ATCLK_MCUISP_FREQUENCY);
+ if (ret < 0)
+ return ret;
+
+ return clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV1],
+ ATCLK_MCUISP_FREQUENCY);
+}
+
+static int fimc_is_enable_clocks(struct fimc_is *is)
+{
+ int i, ret;
+
+ for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
+ if (IS_ERR(is->clocks[i]))
+ continue;
+ ret = clk_prepare_enable(is->clocks[i]);
+ if (ret < 0) {
+ dev_err(&is->pdev->dev, "clock %s enable failed\n",
+ fimc_is_clocks[i]);
+ for (--i; i >= 0; i--)
+ clk_disable(is->clocks[i]);
+ return ret;
+ }
+ pr_debug("enabled clock: %s\n", fimc_is_clocks[i]);
+ }
+ return 0;
+}
+
+static void fimc_is_disable_clocks(struct fimc_is *is)
+{
+ int i;
+
+ for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
+ if (!IS_ERR(is->clocks[i])) {
+ clk_disable_unprepare(is->clocks[i]);
+ pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
+ }
+ }
+}
+
+static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
+ struct device_node *node)
+{
+ struct fimc_is_sensor *sensor = &is->sensor[index];
+ u32 tmp = 0;
+ int ret;
+
+ sensor->drvdata = fimc_is_sensor_get_drvdata(node);
+ if (!sensor->drvdata) {
+ dev_err(&is->pdev->dev, "no driver data found for: %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ node = of_graph_get_next_endpoint(node, NULL);
+ if (!node)
+ return -ENXIO;
+
+ node = of_graph_get_remote_port(node);
+ if (!node)
+ return -ENXIO;
+
+ /* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */
+ ret = of_property_read_u32(node, "reg", &tmp);
+ if (ret < 0) {
+ dev_err(&is->pdev->dev, "reg property not found at: %s\n",
+ node->full_name);
+ return ret;
+ }
+
+ sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0;
+ return 0;
+}
+
+static int fimc_is_register_subdevs(struct fimc_is *is)
+{
+ struct device_node *i2c_bus, *child;
+ int ret, index = 0;
+
+ ret = fimc_isp_subdev_create(&is->isp);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize memory allocator context for the ISP DMA. */
+ is->isp.alloc_ctx = is->alloc_ctx;
+
+ for_each_compatible_node(i2c_bus, NULL, FIMC_IS_I2C_COMPATIBLE) {
+ for_each_available_child_of_node(i2c_bus, child) {
+ ret = fimc_is_parse_sensor_config(is, index, child);
+
+ if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) {
+ of_node_put(child);
+ return ret;
+ }
+ index++;
+ }
+ }
+ return 0;
+}
+
+static int fimc_is_unregister_subdevs(struct fimc_is *is)
+{
+ fimc_isp_subdev_destroy(&is->isp);
+ return 0;
+}
+
+static int fimc_is_load_setfile(struct fimc_is *is, char *file_name)
+{
+ const struct firmware *fw;
+ void *buf;
+ int ret;
+
+ ret = request_firmware(&fw, file_name, &is->pdev->dev);
+ if (ret < 0) {
+ dev_err(&is->pdev->dev, "firmware request failed (%d)\n", ret);
+ return ret;
+ }
+ buf = is->memory.vaddr + is->setfile.base;
+ memcpy(buf, fw->data, fw->size);
+ fimc_is_mem_barrier();
+ is->setfile.size = fw->size;
+
+ pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf);
+
+ memcpy(is->fw.setfile_info,
+ fw->data + fw->size - FIMC_IS_SETFILE_INFO_LEN,
+ FIMC_IS_SETFILE_INFO_LEN - 1);
+
+ is->fw.setfile_info[FIMC_IS_SETFILE_INFO_LEN - 1] = '\0';
+ is->setfile.state = 1;
+
+ pr_debug("FIMC-IS setfile loaded: base: %#x, size: %zu B\n",
+ is->setfile.base, fw->size);
+
+ release_firmware(fw);
+ return ret;
+}
+
+int fimc_is_cpu_set_power(struct fimc_is *is, int on)
+{
+ unsigned int timeout = FIMC_IS_POWER_ON_TIMEOUT;
+
+ if (on) {
+ /* Disable watchdog */
+ mcuctl_write(0, is, REG_WDT_ISP);
+
+ /* Cortex-A5 start address setting */
+ mcuctl_write(is->memory.paddr, is, MCUCTL_REG_BBOAR);
+
+ /* Enable and start Cortex-A5 */
+ pmuisp_write(0x18000, is, REG_PMU_ISP_ARM_OPTION);
+ pmuisp_write(0x1, is, REG_PMU_ISP_ARM_CONFIGURATION);
+ } else {
+ /* A5 power off */
+ pmuisp_write(0x10000, is, REG_PMU_ISP_ARM_OPTION);
+ pmuisp_write(0x0, is, REG_PMU_ISP_ARM_CONFIGURATION);
+
+ while (pmuisp_read(is, REG_PMU_ISP_ARM_STATUS) & 1) {
+ if (timeout == 0)
+ return -ETIME;
+ timeout--;
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+/* Wait until @bit of @is->state is set to @state in the interrupt handler. */
+int fimc_is_wait_event(struct fimc_is *is, unsigned long bit,
+ unsigned int state, unsigned int timeout)
+{
+
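+	/*
+	 * The condition is true when the bit value matches @state:
+	 * !state ^ test_bit() waits for the bit to become set when @state
+	 * is non-zero and for it to be cleared when @state is zero.
+	 */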
+ int ret = wait_event_timeout(is->irq_queue,
+ !state ^ test_bit(bit, &is->state),
+ timeout);
+ if (ret == 0) {
+ dev_WARN(&is->pdev->dev, "%s() timed out\n", __func__);
+ return -ETIME;
+ }
+ return 0;
+}
+
+int fimc_is_start_firmware(struct fimc_is *is)
+{
+ struct device *dev = &is->pdev->dev;
+ int ret;
+
+ if (is->fw.f_w == NULL) {
+ dev_err(dev, "firmware is not loaded\n");
+ return -EINVAL;
+ }
+
+ memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
+ wmb();
+
+ ret = fimc_is_cpu_set_power(is, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_is_wait_event(is, IS_ST_A5_PWR_ON, 1,
+ msecs_to_jiffies(FIMC_IS_FW_LOAD_TIMEOUT));
+ if (ret < 0)
+ dev_err(dev, "FIMC-IS CPU power on failed\n");
+
+ return ret;
+}
+
+/* Allocate working memory for the FIMC-IS CPU. */
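+/*
+ * Layout of this buffer (offsets defined in fimc-is.h): the firmware image
+ * is loaded at the base, the parameter region occupies the last
+ * FIMC_IS_REGION_SIZE bytes, the region shared with the IS firmware sits at
+ * FIMC_IS_SHARED_REGION_OFFSET and the firmware debug log at
+ * FIMC_IS_DEBUG_REGION_OFFSET.
+ */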
+static int fimc_is_alloc_cpu_memory(struct fimc_is *is)
+{
+ struct device *dev = &is->pdev->dev;
+
+ is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE,
+ &is->memory.paddr, GFP_KERNEL);
+ if (is->memory.vaddr == NULL)
+ return -ENOMEM;
+
+ is->memory.size = FIMC_IS_CPU_MEM_SIZE;
+ memset(is->memory.vaddr, 0, is->memory.size);
+
+ dev_info(dev, "FIMC-IS CPU memory base: %#x\n", (u32)is->memory.paddr);
+
+ if (((u32)is->memory.paddr) & FIMC_IS_FW_ADDR_MASK) {
+ dev_err(dev, "invalid firmware memory alignment: %#x\n",
+ (u32)is->memory.paddr);
+ dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
+ is->memory.paddr);
+ return -EIO;
+ }
+
+ is->is_p_region = (struct is_region *)(is->memory.vaddr +
+ FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE);
+
+ is->is_dma_p_region = is->memory.paddr +
+ FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE;
+
+ is->is_shared_region = (struct is_share_region *)(is->memory.vaddr +
+ FIMC_IS_SHARED_REGION_OFFSET);
+ return 0;
+}
+
+static void fimc_is_free_cpu_memory(struct fimc_is *is)
+{
+ struct device *dev = &is->pdev->dev;
+
+ if (is->memory.vaddr == NULL)
+ return;
+
+ dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
+ is->memory.paddr);
+}
+
+static void fimc_is_load_firmware(const struct firmware *fw, void *context)
+{
+ struct fimc_is *is = context;
+ struct device *dev = &is->pdev->dev;
+ void *buf;
+ int ret;
+
+ if (fw == NULL) {
+ dev_err(dev, "firmware request failed\n");
+ return;
+ }
+ mutex_lock(&is->lock);
+
+ if (fw->size < FIMC_IS_FW_SIZE_MIN || fw->size > FIMC_IS_FW_SIZE_MAX) {
+		dev_err(dev, "wrong firmware size: %zu\n", fw->size);
+ goto done;
+ }
+
+ is->fw.size = fw->size;
+
+ ret = fimc_is_alloc_cpu_memory(is);
+ if (ret < 0) {
+ dev_err(dev, "failed to allocate FIMC-IS CPU memory\n");
+ goto done;
+ }
+
+ memcpy(is->memory.vaddr, fw->data, fw->size);
+ wmb();
+
+ /* Read firmware description. */
+ buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_DESC_LEN);
+ memcpy(&is->fw.info, buf, FIMC_IS_FW_INFO_LEN);
+ is->fw.info[FIMC_IS_FW_INFO_LEN] = 0;
+
+ buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_VER_LEN);
+ memcpy(&is->fw.version, buf, FIMC_IS_FW_VER_LEN);
+ is->fw.version[FIMC_IS_FW_VER_LEN - 1] = 0;
+
+ is->fw.state = 1;
+
+ dev_info(dev, "loaded firmware: %s, rev. %s\n",
+ is->fw.info, is->fw.version);
+	dev_dbg(dev, "FW size: %zu, paddr: %#x\n",
+		fw->size, (u32)is->memory.paddr);
+
+ is->is_shared_region->chip_id = 0xe4412;
+ is->is_shared_region->chip_rev_no = 1;
+
+ fimc_is_mem_barrier();
+
+ /*
+	 * FIXME: The firmware is not released for now, as it needs to be
+	 * kept around for copying to the IS working memory each time
+	 * the Cortex-A5 is restarted.
+ */
+ if (is->fw.f_w)
+ release_firmware(is->fw.f_w);
+ is->fw.f_w = fw;
+done:
+ mutex_unlock(&is->lock);
+}
+
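+/*
+ * The firmware is requested asynchronously so probe() does not block waiting
+ * for user space; fimc_is_load_firmware() above runs once the image becomes
+ * available (or with a NULL pointer if the request fails).
+ */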
+static int fimc_is_request_firmware(struct fimc_is *is, const char *fw_name)
+{
+ return request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_HOTPLUG, fw_name, &is->pdev->dev,
+ GFP_KERNEL, is, fimc_is_load_firmware);
+}
+
+/* General IS interrupt handler */
+static void fimc_is_general_irq_handler(struct fimc_is *is)
+{
+ is->i2h_cmd.cmd = mcuctl_read(is, MCUCTL_REG_ISSR(10));
+
+ switch (is->i2h_cmd.cmd) {
+ case IHC_GET_SENSOR_NUM:
+ fimc_is_hw_get_params(is, 1);
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ fimc_is_hw_set_sensor_num(is);
+ pr_debug("ISP FW version: %#x\n", is->i2h_cmd.args[0]);
+ break;
+ case IHC_SET_FACE_MARK:
+ case IHC_FRAME_DONE:
+ fimc_is_hw_get_params(is, 2);
+ break;
+ case IHC_SET_SHOT_MARK:
+ case IHC_AA_DONE:
+ case IH_REPLY_DONE:
+ fimc_is_hw_get_params(is, 3);
+ break;
+ case IH_REPLY_NOT_DONE:
+ fimc_is_hw_get_params(is, 4);
+ break;
+ case IHC_NOT_READY:
+ break;
+ default:
+ pr_info("unknown command: %#x\n", is->i2h_cmd.cmd);
+ }
+
+ fimc_is_fw_clear_irq1(is, FIMC_IS_INT_GENERAL);
+
+ switch (is->i2h_cmd.cmd) {
+ case IHC_GET_SENSOR_NUM:
+ fimc_is_hw_set_intgr0_gd0(is);
+ set_bit(IS_ST_A5_PWR_ON, &is->state);
+ break;
+
+ case IHC_SET_SHOT_MARK:
+ break;
+
+ case IHC_SET_FACE_MARK:
+ is->fd_header.count = is->i2h_cmd.args[0];
+ is->fd_header.index = is->i2h_cmd.args[1];
+ is->fd_header.offset = 0;
+ break;
+
+ case IHC_FRAME_DONE:
+ break;
+
+ case IHC_AA_DONE:
+ pr_debug("AA_DONE - %d, %d, %d\n", is->i2h_cmd.args[0],
+ is->i2h_cmd.args[1], is->i2h_cmd.args[2]);
+ break;
+
+ case IH_REPLY_DONE:
+ pr_debug("ISR_DONE: args[0]: %#x\n", is->i2h_cmd.args[0]);
+
+ switch (is->i2h_cmd.args[0]) {
+ case HIC_PREVIEW_STILL...HIC_CAPTURE_VIDEO:
+ /* Get CAC margin */
+ set_bit(IS_ST_CHANGE_MODE, &is->state);
+ is->isp.cac_margin_x = is->i2h_cmd.args[1];
+ is->isp.cac_margin_y = is->i2h_cmd.args[2];
+ pr_debug("CAC margin (x,y): (%d,%d)\n",
+ is->isp.cac_margin_x, is->isp.cac_margin_y);
+ break;
+
+ case HIC_STREAM_ON:
+ clear_bit(IS_ST_STREAM_OFF, &is->state);
+ set_bit(IS_ST_STREAM_ON, &is->state);
+ break;
+
+ case HIC_STREAM_OFF:
+ clear_bit(IS_ST_STREAM_ON, &is->state);
+ set_bit(IS_ST_STREAM_OFF, &is->state);
+ break;
+
+ case HIC_SET_PARAMETER:
+ is->config[is->config_index].p_region_index[0] = 0;
+ is->config[is->config_index].p_region_index[1] = 0;
+ set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
+ pr_debug("HIC_SET_PARAMETER\n");
+ break;
+
+ case HIC_GET_PARAMETER:
+ break;
+
+ case HIC_SET_TUNE:
+ break;
+
+ case HIC_GET_STATUS:
+ break;
+
+ case HIC_OPEN_SENSOR:
+ set_bit(IS_ST_OPEN_SENSOR, &is->state);
+ pr_debug("data lanes: %d, settle line: %d\n",
+ is->i2h_cmd.args[2], is->i2h_cmd.args[1]);
+ break;
+
+ case HIC_CLOSE_SENSOR:
+ clear_bit(IS_ST_OPEN_SENSOR, &is->state);
+ is->sensor_index = 0;
+ break;
+
+ case HIC_MSG_TEST:
+ pr_debug("config MSG level completed\n");
+ break;
+
+ case HIC_POWER_DOWN:
+ clear_bit(IS_ST_PWR_SUBIP_ON, &is->state);
+ break;
+
+ case HIC_GET_SET_FILE_ADDR:
+ is->setfile.base = is->i2h_cmd.args[1];
+ set_bit(IS_ST_SETFILE_LOADED, &is->state);
+ break;
+
+ case HIC_LOAD_SET_FILE:
+ set_bit(IS_ST_SETFILE_LOADED, &is->state);
+ break;
+ }
+ break;
+
+ case IH_REPLY_NOT_DONE:
+ pr_err("ISR_NDONE: %d: %#x, %s\n", is->i2h_cmd.args[0],
+ is->i2h_cmd.args[1],
+ fimc_is_strerr(is->i2h_cmd.args[1]));
+
+ if (is->i2h_cmd.args[1] & IS_ERROR_TIME_OUT_FLAG)
+ pr_err("IS_ERROR_TIME_OUT\n");
+
+ switch (is->i2h_cmd.args[1]) {
+ case IS_ERROR_SET_PARAMETER:
+ fimc_is_mem_barrier();
+ }
+
+ switch (is->i2h_cmd.args[0]) {
+ case HIC_SET_PARAMETER:
+ is->config[is->config_index].p_region_index[0] = 0;
+ is->config[is->config_index].p_region_index[1] = 0;
+ set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
+ break;
+ }
+ break;
+
+ case IHC_NOT_READY:
+ pr_err("IS control sequence error: Not Ready\n");
+ break;
+ }
+
+ wake_up(&is->irq_queue);
+}
+
+static irqreturn_t fimc_is_irq_handler(int irq, void *priv)
+{
+ struct fimc_is *is = priv;
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&is->slock, flags);
+ status = mcuctl_read(is, MCUCTL_REG_INTSR1);
+
+ if (status & (1UL << FIMC_IS_INT_GENERAL))
+ fimc_is_general_irq_handler(is);
+
+ if (status & (1UL << FIMC_IS_INT_FRAME_DONE_ISP))
+ fimc_isp_irq_handler(is);
+
+ spin_unlock_irqrestore(&is->slock, flags);
+ return IRQ_HANDLED;
+}
+
+static int fimc_is_hw_open_sensor(struct fimc_is *is,
+ struct fimc_is_sensor *sensor)
+{
+ struct sensor_open_extended *soe = (void *)&is->is_p_region->shared;
+
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+
+ soe->self_calibration_mode = 1;
+ soe->actuator_type = 0;
+ soe->mipi_lane_num = 0;
+ soe->mclk = 0;
+ soe->mipi_speed = 0;
+ soe->fast_open_sensor = 0;
+ soe->i2c_sclk = 88000000;
+
+ fimc_is_mem_barrier();
+
+ mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
+ mcuctl_write(sensor->i2c_bus, is, MCUCTL_REG_ISSR(3));
+ mcuctl_write(is->is_dma_p_region, is, MCUCTL_REG_ISSR(4));
+
+ fimc_is_hw_set_intgr0_gd0(is);
+
+ return fimc_is_wait_event(is, IS_ST_OPEN_SENSOR, 1,
+ sensor->drvdata->open_timeout);
+}
+
+
+int fimc_is_hw_initialize(struct fimc_is *is)
+{
+ const int config_ids[] = {
+ IS_SC_PREVIEW_STILL, IS_SC_PREVIEW_VIDEO,
+ IS_SC_CAPTURE_STILL, IS_SC_CAPTURE_VIDEO
+ };
+ struct device *dev = &is->pdev->dev;
+ u32 prev_id;
+ int i, ret;
+
+ /* Sensor initialization. Only one sensor is currently supported. */
+ ret = fimc_is_hw_open_sensor(is, &is->sensor[0]);
+ if (ret < 0)
+ return ret;
+
+ /* Get the setfile address. */
+ fimc_is_hw_get_setfile_addr(is);
+
+ ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev, "get setfile address timed out\n");
+ return ret;
+ }
+ pr_debug("setfile.base: %#x\n", is->setfile.base);
+
+ /* Load the setfile. */
+ fimc_is_load_setfile(is, FIMC_IS_SETFILE_6A3);
+ clear_bit(IS_ST_SETFILE_LOADED, &is->state);
+ fimc_is_hw_load_setfile(is);
+ ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev, "loading setfile timed out\n");
+ return ret;
+ }
+
+ pr_debug("setfile: base: %#x, size: %d\n",
+ is->setfile.base, is->setfile.size);
+ pr_info("FIMC-IS Setfile info: %s\n", is->fw.setfile_info);
+
+ /* Check magic number. */
+ if (is->is_p_region->shared[MAX_SHARED_COUNT - 1] !=
+ FIMC_IS_MAGIC_NUMBER) {
+ dev_err(dev, "magic number error!\n");
+ return -EIO;
+ }
+
+ pr_debug("shared region: %#x, parameter region: %#x\n",
+ is->memory.paddr + FIMC_IS_SHARED_REGION_OFFSET,
+ is->is_dma_p_region);
+
+ is->setfile.sub_index = 0;
+
+ /* Stream off. */
+ fimc_is_hw_stream_off(is);
+ ret = fimc_is_wait_event(is, IS_ST_STREAM_OFF, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev, "stream off timeout\n");
+ return ret;
+ }
+
+ /* Preserve previous mode. */
+ prev_id = is->config_index;
+
+ /* Set initial parameter values. */
+ for (i = 0; i < ARRAY_SIZE(config_ids); i++) {
+ is->config_index = config_ids[i];
+ fimc_is_set_initial_params(is);
+ ret = fimc_is_itf_s_param(is, true);
+ if (ret < 0) {
+ is->config_index = prev_id;
+ return ret;
+ }
+ }
+ is->config_index = prev_id;
+
+ set_bit(IS_ST_INIT_DONE, &is->state);
+ dev_info(dev, "initialization sequence completed (%d)\n",
+ is->config_index);
+ return 0;
+}
+
+static int fimc_is_log_show(struct seq_file *s, void *data)
+{
+ struct fimc_is *is = s->private;
+ const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET;
+
+ if (is->memory.vaddr == NULL) {
+ dev_err(&is->pdev->dev, "firmware memory is not initialized\n");
+ return -EIO;
+ }
+
+ seq_printf(s, "%s\n", buf);
+ return 0;
+}
+
+static int fimc_is_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fimc_is_log_show, inode->i_private);
+}
+
+static const struct file_operations fimc_is_debugfs_fops = {
+ .open = fimc_is_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void fimc_is_debugfs_remove(struct fimc_is *is)
+{
+ debugfs_remove_recursive(is->debugfs_entry);
+ is->debugfs_entry = NULL;
+}
+
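+/*
+ * Expose the firmware log in debugfs; with debugfs mounted at the usual
+ * location this becomes /sys/kernel/debug/fimc_is/fw_log.
+ */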
+static int fimc_is_debugfs_create(struct fimc_is *is)
+{
+ struct dentry *dentry;
+
+ is->debugfs_entry = debugfs_create_dir("fimc_is", NULL);
+
+ dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry,
+ is, &fimc_is_debugfs_fops);
+ if (!dentry)
+ fimc_is_debugfs_remove(is);
+
+ return is->debugfs_entry == NULL ? -EIO : 0;
+}
+
+static int fimc_is_runtime_resume(struct device *dev);
+static int fimc_is_runtime_suspend(struct device *dev);
+
+static int fimc_is_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_is *is;
+ struct resource res;
+ struct device_node *node;
+ int ret;
+
+ is = devm_kzalloc(&pdev->dev, sizeof(*is), GFP_KERNEL);
+ if (!is)
+ return -ENOMEM;
+
+ is->pdev = pdev;
+ is->isp.pdev = pdev;
+
+ init_waitqueue_head(&is->irq_queue);
+ spin_lock_init(&is->slock);
+ mutex_init(&is->lock);
+
+ ret = of_address_to_resource(dev->of_node, 0, &res);
+ if (ret < 0)
+ return ret;
+
+ is->regs = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(is->regs))
+ return PTR_ERR(is->regs);
+
+ node = of_get_child_by_name(dev->of_node, "pmu");
+ if (!node)
+ return -ENODEV;
+
+ is->pmu_regs = of_iomap(node, 0);
+ if (!is->pmu_regs)
+ return -ENOMEM;
+
+ is->irq = irq_of_parse_and_map(dev->of_node, 0);
+	if (!is->irq) {
+		dev_err(dev, "no irq found\n");
+		return -EINVAL;
+ }
+
+ ret = fimc_is_get_clocks(is);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, is);
+
+ ret = request_irq(is->irq, fimc_is_irq_handler, 0, dev_name(dev), is);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_clk;
+ }
+ pm_runtime_enable(dev);
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = fimc_is_runtime_resume(dev);
+ if (ret < 0)
+ goto err_irq;
+ }
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm;
+
+ is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
+ if (IS_ERR(is->alloc_ctx)) {
+ ret = PTR_ERR(is->alloc_ctx);
+ goto err_pm;
+ }
+ /*
+ * Register FIMC-IS V4L2 subdevs to this driver. The video nodes
+ * will be created within the subdev's registered() callback.
+ */
+ ret = fimc_is_register_subdevs(is);
+ if (ret < 0)
+ goto err_vb;
+
+ ret = fimc_is_debugfs_create(is);
+ if (ret < 0)
+ goto err_sd;
+
+ ret = fimc_is_request_firmware(is, FIMC_IS_FW_FILENAME);
+ if (ret < 0)
+ goto err_dfs;
+
+ pm_runtime_put_sync(dev);
+
+ dev_dbg(dev, "FIMC-IS registered successfully\n");
+ return 0;
+
+err_dfs:
+ fimc_is_debugfs_remove(is);
+err_sd:
+ fimc_is_unregister_subdevs(is);
+err_vb:
+ vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
+err_pm:
+ if (!pm_runtime_enabled(dev))
+ fimc_is_runtime_suspend(dev);
+err_irq:
+ free_irq(is->irq, is);
+err_clk:
+ fimc_is_put_clocks(is);
+ return ret;
+}
+
+static int fimc_is_runtime_resume(struct device *dev)
+{
+ struct fimc_is *is = dev_get_drvdata(dev);
+ int ret;
+
+ ret = fimc_is_setup_clocks(is);
+ if (ret)
+ return ret;
+
+ return fimc_is_enable_clocks(is);
+}
+
+static int fimc_is_runtime_suspend(struct device *dev)
+{
+ struct fimc_is *is = dev_get_drvdata(dev);
+
+ fimc_is_disable_clocks(is);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_is_resume(struct device *dev)
+{
+ /* TODO: */
+ return 0;
+}
+
+static int fimc_is_suspend(struct device *dev)
+{
+ struct fimc_is *is = dev_get_drvdata(dev);
+
+ /* TODO: */
+ if (test_bit(IS_ST_A5_PWR_ON, &is->state))
+ return -EBUSY;
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int fimc_is_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_is *is = dev_get_drvdata(dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ if (!pm_runtime_status_suspended(dev))
+ fimc_is_runtime_suspend(dev);
+ free_irq(is->irq, is);
+ fimc_is_unregister_subdevs(is);
+ vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
+ fimc_is_put_clocks(is);
+ fimc_is_debugfs_remove(is);
+ if (is->fw.f_w)
+ release_firmware(is->fw.f_w);
+ fimc_is_free_cpu_memory(is);
+
+ return 0;
+}
+
+static const struct of_device_id fimc_is_of_match[] = {
+ { .compatible = "samsung,exynos4212-fimc-is" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, fimc_is_of_match);
+
+static const struct dev_pm_ops fimc_is_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_is_suspend, fimc_is_resume)
+ SET_RUNTIME_PM_OPS(fimc_is_runtime_suspend, fimc_is_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver fimc_is_driver = {
+ .probe = fimc_is_probe,
+ .remove = fimc_is_remove,
+ .driver = {
+ .of_match_table = fimc_is_of_match,
+ .name = FIMC_IS_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &fimc_is_pm_ops,
+ }
+};
+
+static int fimc_is_module_init(void)
+{
+ int ret;
+
+ ret = fimc_is_register_i2c_driver();
+ if (ret < 0)
+ return ret;
+
+ ret = platform_driver_register(&fimc_is_driver);
+
+ if (ret < 0)
+ fimc_is_unregister_i2c_driver();
+
+ return ret;
+}
+
+static void fimc_is_module_exit(void)
+{
+ fimc_is_unregister_i2c_driver();
+ platform_driver_unregister(&fimc_is_driver);
+}
+
+module_init(fimc_is_module_init);
+module_exit(fimc_is_module_exit);
+
+MODULE_ALIAS("platform:" FIMC_IS_DRV_NAME);
+MODULE_AUTHOR("Younghwan Joo <yhwan.joo@samsung.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
new file mode 100644
index 00000000000..e0be691af2d
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -0,0 +1,344 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_H_
+#define FIMC_IS_H_
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-ctrls.h>
+
+#include "fimc-isp.h"
+#include "fimc-is-command.h"
+#include "fimc-is-sensor.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+
+#define FIMC_IS_DRV_NAME "exynos4-fimc-is"
+
+#define FIMC_IS_FW_FILENAME "exynos4_fimc_is_fw.bin"
+#define FIMC_IS_SETFILE_6A3 "exynos4_s5k6a3_setfile.bin"
+
+#define FIMC_IS_FW_LOAD_TIMEOUT 1000 /* ms */
+#define FIMC_IS_POWER_ON_TIMEOUT 1000 /* us */
+
+#define FIMC_IS_SENSORS_NUM 2
+
+/* Memory definitions */
+#define FIMC_IS_CPU_MEM_SIZE (0xa00000)
+#define FIMC_IS_CPU_BASE_MASK ((1 << 26) - 1)
+#define FIMC_IS_REGION_SIZE 0x5000
+
+#define FIMC_IS_DEBUG_REGION_OFFSET 0x0084b000
+#define FIMC_IS_SHARED_REGION_OFFSET 0x008c0000
+#define FIMC_IS_FW_INFO_LEN 31
+#define FIMC_IS_FW_VER_LEN 7
+#define FIMC_IS_FW_DESC_LEN (FIMC_IS_FW_INFO_LEN + \
+ FIMC_IS_FW_VER_LEN)
+#define FIMC_IS_SETFILE_INFO_LEN 39
+
+#define FIMC_IS_EXTRA_MEM_SIZE (FIMC_IS_EXTRA_FW_SIZE + \
+ FIMC_IS_EXTRA_SETFILE_SIZE + 0x1000)
+#define FIMC_IS_EXTRA_FW_SIZE 0x180000
+#define FIMC_IS_EXTRA_SETFILE_SIZE 0x4b000
+
+/* TODO: revisit */
+#define FIMC_IS_FW_ADDR_MASK ((1 << 26) - 1)
+#define FIMC_IS_FW_SIZE_MAX (SZ_4M)
+#define FIMC_IS_FW_SIZE_MIN (SZ_32K)
+
+#define ATCLK_MCUISP_FREQUENCY 100000000UL
+#define ACLK_AXI_FREQUENCY 100000000UL
+
+enum {
+ ISS_CLK_PPMUISPX,
+ ISS_CLK_PPMUISPMX,
+ ISS_CLK_LITE0,
+ ISS_CLK_LITE1,
+ ISS_CLK_MPLL,
+ ISS_CLK_ISP,
+ ISS_CLK_DRC,
+ ISS_CLK_FD,
+ ISS_CLK_MCUISP,
+ ISS_CLK_UART,
+ ISS_GATE_CLKS_MAX,
+ ISS_CLK_ISP_DIV0 = ISS_GATE_CLKS_MAX,
+ ISS_CLK_ISP_DIV1,
+ ISS_CLK_MCUISP_DIV0,
+ ISS_CLK_MCUISP_DIV1,
+ ISS_CLK_ACLK200,
+ ISS_CLK_ACLK200_DIV,
+ ISS_CLK_ACLK400MCUISP,
+ ISS_CLK_ACLK400MCUISP_DIV,
+ ISS_CLKS_MAX
+};
+
+/* The driver's internal state flags */
+enum {
+ IS_ST_IDLE,
+ IS_ST_PWR_ON,
+ IS_ST_A5_PWR_ON,
+ IS_ST_FW_LOADED,
+ IS_ST_OPEN_SENSOR,
+ IS_ST_SETFILE_LOADED,
+ IS_ST_INIT_DONE,
+ IS_ST_STREAM_ON,
+ IS_ST_STREAM_OFF,
+ IS_ST_CHANGE_MODE,
+ IS_ST_BLOCK_CMD_CLEARED,
+ IS_ST_SET_ZOOM,
+ IS_ST_PWR_SUBIP_ON,
+ IS_ST_END,
+};
+
+enum af_state {
+ FIMC_IS_AF_IDLE = 0,
+ FIMC_IS_AF_SETCONFIG = 1,
+ FIMC_IS_AF_RUNNING = 2,
+ FIMC_IS_AF_LOCK = 3,
+ FIMC_IS_AF_ABORT = 4,
+ FIMC_IS_AF_FAILED = 5,
+};
+
+enum af_lock_state {
+ FIMC_IS_AF_UNLOCKED = 0,
+ FIMC_IS_AF_LOCKED = 2
+};
+
+enum ae_lock_state {
+ FIMC_IS_AE_UNLOCKED = 0,
+ FIMC_IS_AE_LOCKED = 1
+};
+
+enum awb_lock_state {
+ FIMC_IS_AWB_UNLOCKED = 0,
+ FIMC_IS_AWB_LOCKED = 1
+};
+
+enum {
+ IS_METERING_CONFIG_CMD,
+ IS_METERING_CONFIG_WIN_POS_X,
+ IS_METERING_CONFIG_WIN_POS_Y,
+ IS_METERING_CONFIG_WIN_WIDTH,
+ IS_METERING_CONFIG_WIN_HEIGHT,
+ IS_METERING_CONFIG_MAX
+};
+
+struct is_setfile {
+ const struct firmware *info;
+ int state;
+ u32 sub_index;
+ u32 base;
+ size_t size;
+};
+
+struct is_fd_result_header {
+ u32 offset;
+ u32 count;
+ u32 index;
+ u32 curr_index;
+ u32 width;
+ u32 height;
+};
+
+struct is_af_info {
+ u16 mode;
+ u32 af_state;
+ u32 af_lock_state;
+ u32 ae_lock_state;
+ u32 awb_lock_state;
+ u16 pos_x;
+ u16 pos_y;
+ u16 prev_pos_x;
+ u16 prev_pos_y;
+ u16 use_af;
+};
+
+struct fimc_is_firmware {
+ const struct firmware *f_w;
+
+ dma_addr_t paddr;
+ void *vaddr;
+ unsigned int size;
+
+ char info[FIMC_IS_FW_INFO_LEN + 1];
+ char version[FIMC_IS_FW_VER_LEN + 1];
+ char setfile_info[FIMC_IS_SETFILE_INFO_LEN + 1];
+ u8 state;
+};
+
+struct fimc_is_memory {
+ /* physical base address */
+ dma_addr_t paddr;
+ /* virtual base address */
+ void *vaddr;
+ /* total length */
+ unsigned int size;
+};
+
+#define FIMC_IS_I2H_MAX_ARGS 12
+
+struct i2h_cmd {
+ u32 cmd;
+ u32 sensor_id;
+ u16 num_args;
+ u32 args[FIMC_IS_I2H_MAX_ARGS];
+};
+
+struct h2i_cmd {
+ u16 cmd_type;
+ u32 entry_id;
+};
+
+#define FIMC_IS_DEBUG_MSG 0x3f
+#define FIMC_IS_DEBUG_LEVEL 3
+
+struct fimc_is_setfile {
+ const struct firmware *info;
+ unsigned int state;
+ unsigned int size;
+ u32 sub_index;
+ u32 base;
+};
+
+struct chain_config {
+ struct global_param global;
+ struct sensor_param sensor;
+ struct isp_param isp;
+ struct drc_param drc;
+ struct fd_param fd;
+
+ unsigned long p_region_index[2];
+};
+
+/**
+ * struct fimc_is - fimc-is data structure
+ * @pdev: pointer to FIMC-IS platform device
+ * @pctrl: pointer to pinctrl structure for this device
+ * @v4l2_dev: pointer to top the level v4l2_device
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @lock: mutex serializing video device and the subdev operations
+ * @slock: spinlock protecting this data structure and the hw registers
+ * @clocks: FIMC-LITE gate clock
+ * @regs: MCUCTL mmapped registers region
+ * @pmu_regs: PMU ISP mmapped registers region
+ * @irq_queue: interrupt handling waitqueue
+ * @lpm: low power mode flag
+ * @state: internal driver's state flags
+ */
+struct fimc_is {
+ struct platform_device *pdev;
+ struct pinctrl *pctrl;
+ struct v4l2_device *v4l2_dev;
+
+ struct fimc_is_firmware fw;
+ struct fimc_is_memory memory;
+ struct firmware *f_w;
+
+ struct fimc_isp isp;
+ struct fimc_is_sensor sensor[FIMC_IS_SENSORS_NUM];
+ struct fimc_is_setfile setfile;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ struct clk *clocks[ISS_CLKS_MAX];
+ void __iomem *regs;
+ void __iomem *pmu_regs;
+ int irq;
+ wait_queue_head_t irq_queue;
+ u8 lpm;
+
+ unsigned long state;
+ unsigned int sensor_index;
+
+ struct i2h_cmd i2h_cmd;
+ struct h2i_cmd h2i_cmd;
+ struct is_fd_result_header fd_header;
+
+ struct chain_config config[IS_SC_MAX];
+ unsigned config_index;
+
+ struct is_region *is_p_region;
+ dma_addr_t is_dma_p_region;
+ struct is_share_region *is_shared_region;
+ struct is_af_info af;
+
+ struct dentry *debugfs_entry;
+};
+
+static inline struct fimc_is *fimc_isp_to_is(struct fimc_isp *isp)
+{
+ return container_of(isp, struct fimc_is, isp);
+}
+
+static inline struct chain_config *__get_curr_is_config(struct fimc_is *is)
+{
+ return &is->config[is->config_index];
+}
+
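+/*
+ * Full barrier used after the CPU updates the shared/parameter region and
+ * before the IS firmware is signalled, so the Cortex-A5 observes the data.
+ */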
+static inline void fimc_is_mem_barrier(void)
+{
+ mb();
+}
+
+static inline void fimc_is_set_param_bit(struct fimc_is *is, int num)
+{
+ struct chain_config *cfg = &is->config[is->config_index];
+
+ set_bit(num, &cfg->p_region_index[0]);
+}
+
+static inline void fimc_is_set_param_ctrl_cmd(struct fimc_is *is, int cmd)
+{
+ is->is_p_region->parameter.isp.control.cmd = cmd;
+}
+
+static inline void mcuctl_write(u32 v, struct fimc_is *is, unsigned int offset)
+{
+ writel(v, is->regs + offset);
+}
+
+static inline u32 mcuctl_read(struct fimc_is *is, unsigned int offset)
+{
+ return readl(is->regs + offset);
+}
+
+static inline void pmuisp_write(u32 v, struct fimc_is *is, unsigned int offset)
+{
+ writel(v, is->pmu_regs + offset);
+}
+
+static inline u32 pmuisp_read(struct fimc_is *is, unsigned int offset)
+{
+ return readl(is->pmu_regs + offset);
+}
+
+int fimc_is_wait_event(struct fimc_is *is, unsigned long bit,
+ unsigned int state, unsigned int timeout);
+int fimc_is_cpu_set_power(struct fimc_is *is, int on);
+int fimc_is_start_firmware(struct fimc_is *is);
+int fimc_is_hw_initialize(struct fimc_is *is);
+void fimc_is_log_dump(const char *level, const void *buf, size_t len);
+
+#endif /* FIMC_IS_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
new file mode 100644
index 00000000000..93f9cf2ebcd
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -0,0 +1,659 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * FIMC-IS ISP video input and video output DMA interface driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * The hardware handling code was derived from a driver written by
+ * Younghwan Joo <yhwan.joo@samsung.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/exynos-fimc.h>
+
+#include "common.h"
+#include "media-dev.h"
+#include "fimc-is.h"
+#include "fimc-isp-video.h"
+#include "fimc-is-param.h"
+
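+/*
+ * Plane sizes are derived from the frame dimensions and the per-plane bit
+ * depth, e.g. a 1392x1392 RAW12 frame needs 1392 * 1392 * 12 / 8 = 2906496
+ * bytes (roughly 2.8 MiB) in its single plane.
+ */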
+static int isp_video_capture_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ const struct fimc_fmt *fmt;
+ unsigned int wh, i;
+
+ if (pfmt) {
+ pixm = &pfmt->fmt.pix_mp;
+ fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, -1);
+ wh = pixm->width * pixm->height;
+ } else {
+ fmt = isp->video_capture.format;
+ wh = vid_fmt->width * vid_fmt->height;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ *num_buffers = clamp_t(u32, *num_buffers, FIMC_ISP_REQ_BUFS_MIN,
+ FIMC_ISP_REQ_BUFS_MAX);
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+ if (pixm)
+ sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+ else
+ sizes[i] = size;
+ allocators[i] = isp->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static inline struct param_dma_output *__get_isp_dma2(struct fimc_is *is)
+{
+ return &__get_curr_is_config(is)->isp.dma2_output;
+}
+
+static int isp_video_capture_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(q);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct param_dma_output *dma = __get_isp_dma2(is);
+ struct fimc_is_video *video = &isp->video_capture;
+ int ret;
+
+ if (!test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state) ||
+ test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
+ return 0;
+
+
+ dma->cmd = DMA_OUTPUT_COMMAND_ENABLE;
+ dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_ENABLE;
+ dma->buffer_address = is->is_dma_p_region +
+ DMA2_OUTPUT_ADDR_ARRAY_OFFS;
+ dma->buffer_number = video->reqbufs_count;
+ dma->dma_out_mask = video->buf_mask;
+
+ isp_dbg(2, &video->ve.vdev,
+ "buf_count: %d, planes: %d, dma addr table: %#x\n",
+ video->buf_count, video->format->memplanes,
+ dma->buffer_address);
+
+ fimc_is_mem_barrier();
+
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_pipeline_call(&video->ve, set_stream, 1);
+ if (ret < 0)
+ return ret;
+
+ set_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
+ return ret;
+}
+
+static void isp_video_capture_stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(q);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct param_dma_output *dma = __get_isp_dma2(is);
+ int ret;
+
+ ret = fimc_pipeline_call(&isp->video_capture.ve, set_stream, 0);
+ if (ret < 0)
+ return;
+
+ dma->cmd = DMA_OUTPUT_COMMAND_DISABLE;
+ dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_DISABLE;
+ dma->buffer_number = 0;
+ dma->buffer_address = 0;
+ dma->dma_out_mask = 0;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ dev_warn(&is->pdev->dev, "%s: DMA stop failed\n", __func__);
+
+ fimc_is_hw_set_isp_buf_mask(is, 0);
+
+ clear_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
+ clear_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
+
+ isp->video_capture.buf_count = 0;
+}
+
+static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_is_video *video = &isp->video_capture;
+ int i;
+
+ if (video->format == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < video->format->memplanes; i++) {
+ unsigned long size = video->pixfmt.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(&video->ve.vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ /* Check if we get one of the already known buffers. */
+ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
+ dma_addr_t dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ int i;
+
+ for (i = 0; i < video->buf_count; i++)
+ if (video->buffers[i]->dma_addr[0] == dma_addr)
+ return 0;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_is_video *video = &isp->video_capture;
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct isp_video_buf *ivb = to_isp_video_buf(vb);
+ unsigned long flags;
+ unsigned int i;
+
+ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
+ spin_lock_irqsave(&is->slock, flags);
+ video->buf_mask |= BIT(ivb->index);
+ spin_unlock_irqrestore(&is->slock, flags);
+ } else {
+ unsigned int num_planes = video->format->memplanes;
+
+ ivb->index = video->buf_count;
+ video->buffers[ivb->index] = ivb;
+
+ for (i = 0; i < num_planes; i++) {
+ int buf_index = ivb->index * num_planes + i;
+
+ ivb->dma_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ is->is_p_region->shared[32 + buf_index] =
+ ivb->dma_addr[i];
+
+ isp_dbg(2, &video->ve.vdev,
+ "dma_buf %d (%d/%d/%d) addr: %#x\n",
+ buf_index, ivb->index, i, vb->v4l2_buf.index,
+ ivb->dma_addr[i]);
+ }
+
+ if (++video->buf_count < video->reqbufs_count)
+ return;
+
+ video->buf_mask = (1UL << video->buf_count) - 1;
+ set_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
+ }
+
+ if (!test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
+ isp_video_capture_start_streaming(vb->vb2_queue, 0);
+}
+
+/*
+ * FIMC-IS ISP input and output DMA interface interrupt handler.
+ * Locking: called with is->slock spinlock held.
+ */
+void fimc_isp_video_irq_handler(struct fimc_is *is)
+{
+ struct fimc_is_video *video = &is->isp.video_capture;
+ struct vb2_buffer *vb;
+ int buf_index;
+
+ /* TODO: Ensure the DMA is really stopped in stop_streaming callback */
+ if (!test_bit(ST_ISP_VID_CAP_STREAMING, &is->isp.state))
+ return;
+
+ buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
+ vb = &video->buffers[buf_index]->vb;
+
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+
+ video->buf_mask &= ~BIT(buf_index);
+ fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
+}
+
+static const struct vb2_ops isp_video_capture_qops = {
+ .queue_setup = isp_video_capture_queue_setup,
+ .buf_prepare = isp_video_capture_buffer_prepare,
+ .buf_queue = isp_video_capture_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = isp_video_capture_start_streaming,
+ .stop_streaming = isp_video_capture_stop_streaming,
+};
+
+static int isp_video_open(struct file *file)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct exynos_video_entity *ve = &isp->video_capture.ve;
+ struct media_entity *me = &ve->vdev.entity;
+ int ret;
+
+ if (mutex_lock_interruptible(&isp->video_lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = pm_runtime_get_sync(&isp->pdev->dev);
+ if (ret < 0)
+ goto rel_fh;
+
+ if (v4l2_fh_is_singular_file(file)) {
+ mutex_lock(&me->parent->graph_mutex);
+
+ ret = fimc_pipeline_call(ve, open, me, true);
+
+ /* Mark the video pipeline as in use. */
+ if (ret == 0)
+ me->use_count++;
+
+ mutex_unlock(&me->parent->graph_mutex);
+ }
+ if (!ret)
+ goto unlock;
+rel_fh:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&isp->video_lock);
+ return ret;
+}
+
+static int isp_video_release(struct file *file)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is_video *ivc = &isp->video_capture;
+ struct media_entity *entity = &ivc->ve.vdev.entity;
+ struct media_device *mdev = entity->parent;
+ int ret = 0;
+
+ mutex_lock(&isp->video_lock);
+
+ if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
+ media_entity_pipeline_stop(entity);
+ ivc->streaming = 0;
+ }
+
+ vb2_fop_release(file);
+
+ if (v4l2_fh_is_singular_file(file)) {
+ fimc_pipeline_call(&ivc->ve, close);
+
+ mutex_lock(&mdev->graph_mutex);
+ entity->use_count--;
+ mutex_unlock(&mdev->graph_mutex);
+ }
+
+ pm_runtime_put(&isp->pdev->dev);
+ mutex_unlock(&isp->video_lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations isp_video_fops = {
+ .owner = THIS_MODULE,
+ .open = isp_video_open,
+ .release = isp_video_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * Video node ioctl operations
+ */
+static int isp_video_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ __fimc_vidioc_querycap(&isp->pdev->dev, cap, V4L2_CAP_STREAMING);
+ return 0;
+}
+
+static int isp_video_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct fimc_fmt *fmt;
+
+ if (f->index >= FIMC_ISP_NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = fimc_isp_find_format(NULL, NULL, f->index);
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int isp_video_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ f->fmt.pix_mp = isp->video_capture.pixfmt;
+ return 0;
+}
+
+static void __isp_video_try_fmt(struct fimc_isp *isp,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct fimc_fmt **fmt)
+{
+	const struct fimc_fmt *__fmt;
+
+	__fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+	if (fmt)
+		*fmt = __fmt;
+
+	pixm->colorspace = V4L2_COLORSPACE_SRGB;
+	pixm->field = V4L2_FIELD_NONE;
+	pixm->num_planes = __fmt->memplanes;
+	pixm->pixelformat = __fmt->fourcc;
+ /*
+	 * TODO: double check with the documentation that these width/height
+ * constraints are correct.
+ */
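+	/*
+	 * v4l_bound_align_image() clamps the resolution to the
+	 * FIMC_ISP_SOURCE_* limits and aligns the width to a multiple of
+	 * 8 pixels (walign = 3, i.e. 2^3); the height is not aligned.
+	 */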
+ v4l_bound_align_image(&pixm->width, FIMC_ISP_SOURCE_WIDTH_MIN,
+ FIMC_ISP_SOURCE_WIDTH_MAX, 3,
+ &pixm->height, FIMC_ISP_SOURCE_HEIGHT_MIN,
+ FIMC_ISP_SOURCE_HEIGHT_MAX, 0, 0);
+}
+
+static int isp_video_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ __isp_video_try_fmt(isp, &f->fmt.pix_mp, NULL);
+ return 0;
+}
+
+static int isp_video_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct fimc_fmt *ifmt = NULL;
+ struct param_dma_output *dma = __get_isp_dma2(is);
+
+ __isp_video_try_fmt(isp, pixm, &ifmt);
+
+ if (WARN_ON(ifmt == NULL))
+ return -EINVAL;
+
+ dma->format = DMA_OUTPUT_FORMAT_BAYER;
+ dma->order = DMA_OUTPUT_ORDER_GB_BG;
+ dma->plane = ifmt->memplanes;
+ dma->bitwidth = ifmt->depth[0];
+ dma->width = pixm->width;
+ dma->height = pixm->height;
+
+ fimc_is_mem_barrier();
+
+ isp->video_capture.format = ifmt;
+ isp->video_capture.pixfmt = *pixm;
+
+ return 0;
+}
+
+/*
+ * Check for source/sink format differences at each link.
+ * Return 0 if the formats match or -EPIPE otherwise.
+ */
+static int isp_video_pipeline_validate(struct fimc_isp *isp)
+{
+ struct v4l2_subdev *sd = &isp->subdev;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static int isp_video_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct exynos_video_entity *ve = &isp->video_capture.ve;
+ struct media_entity *me = &ve->vdev.entity;
+ int ret;
+
+ ret = media_entity_pipeline_start(me, &ve->pipe->mp);
+ if (ret < 0)
+ return ret;
+
+ ret = isp_video_pipeline_validate(isp);
+ if (ret < 0)
+ goto p_stop;
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ if (ret < 0)
+ goto p_stop;
+
+ isp->video_capture.streaming = 1;
+ return 0;
+p_stop:
+ media_entity_pipeline_stop(me);
+ return ret;
+}
+
+static int isp_video_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is_video *video = &isp->video_capture;
+ int ret;
+
+ ret = vb2_ioctl_streamoff(file, priv, type);
+ if (ret < 0)
+ return ret;
+
+ media_entity_pipeline_stop(&video->ve.vdev.entity);
+ video->streaming = 0;
+ return 0;
+}
+
+static int isp_video_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ int ret;
+
+ ret = vb2_ioctl_reqbufs(file, priv, rb);
+ if (ret < 0)
+ return ret;
+
+ if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) {
+ rb->count = 0;
+ vb2_ioctl_reqbufs(file, priv, rb);
+ ret = -ENOMEM;
+ }
+
+ isp->video_capture.reqbufs_count = rb->count;
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
+ .vidioc_querycap = isp_video_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = isp_video_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = isp_video_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = isp_video_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = isp_video_g_fmt_mplane,
+ .vidioc_reqbufs = isp_video_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = isp_video_streamon,
+ .vidioc_streamoff = isp_video_streamoff,
+};
+
+int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type)
+{
+ struct vb2_queue *q = &isp->video_capture.vb_queue;
+ struct fimc_is_video *iv;
+ struct video_device *vdev;
+ int ret;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ iv = &isp->video_capture;
+ else
+ return -ENOSYS;
+
+ mutex_init(&isp->video_lock);
+ INIT_LIST_HEAD(&iv->pending_buf_q);
+ INIT_LIST_HEAD(&iv->active_buf_q);
+ iv->format = fimc_isp_find_format(NULL, NULL, 0);
+ iv->pixfmt.width = IS_DEFAULT_WIDTH;
+ iv->pixfmt.height = IS_DEFAULT_HEIGHT;
+ iv->pixfmt.pixelformat = iv->format->fourcc;
+ iv->pixfmt.colorspace = V4L2_COLORSPACE_SRGB;
+ iv->reqbufs_count = 0;
+
+ memset(q, 0, sizeof(*q));
+ q->type = type;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &isp_video_capture_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct isp_video_buf);
+ q->drv_priv = isp;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &isp->video_lock;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ return ret;
+
+ vdev = &iv->ve.vdev;
+ memset(vdev, 0, sizeof(*vdev));
+ snprintf(vdev->name, sizeof(vdev->name), "fimc-is-isp.%s",
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ?
+ "capture" : "output");
+ vdev->queue = q;
+ vdev->fops = &isp_video_fops;
+ vdev->ioctl_ops = &isp_video_ioctl_ops;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->minor = -1;
+ vdev->release = video_device_release_empty;
+ vdev->lock = &isp->video_lock;
+
+ iv->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vdev->entity, 1, &iv->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ video_set_drvdata(vdev, isp);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ media_entity_cleanup(&vdev->entity);
+ return ret;
+ }
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+
+ return 0;
+}
+
+void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type)
+{
+ struct exynos_video_entity *ve;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ve = &isp->video_capture.ve;
+ else
+ return;
+
+ mutex_lock(&isp->video_lock);
+
+ if (video_is_registered(&ve->vdev)) {
+ video_unregister_device(&ve->vdev);
+ media_entity_cleanup(&ve->vdev.entity);
+ ve->pipe = NULL;
+ }
+
+ mutex_unlock(&isp->video_lock);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h
new file mode 100644
index 00000000000..98c662654bb
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h
@@ -0,0 +1,44 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_ISP_VIDEO__
+#define FIMC_ISP_VIDEO__
+
+#include <media/videobuf2-core.h>
+#include "fimc-isp.h"
+
+#ifdef CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE
+int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type);
+
+void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type);
+
+void fimc_isp_video_irq_handler(struct fimc_is *is);
+#else
+static inline void fimc_isp_video_irq_handler(struct fimc_is *is)
+{
+}
+
+static inline int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type)
+{
+ return 0;
+}
+
+static inline void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type)
+{
+}
+#endif /* !CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE */
+
+#endif /* FIMC_ISP_VIDEO__ */
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
new file mode 100644
index 00000000000..be62d6b9ac4
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -0,0 +1,786 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+
+#include "media-dev.h"
+#include "fimc-isp-video.h"
+#include "fimc-is-command.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+#include "fimc-is.h"
+
+int fimc_isp_debug;
+module_param_named(debug_isp, fimc_isp_debug, int, S_IRUGO | S_IWUSR);
+
+static const struct fimc_fmt fimc_isp_formats[FIMC_ISP_NUM_FORMATS] = {
+ {
+ .name = "RAW8 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .depth = { 8 },
+ .color = FIMC_FMT_RAW8,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ }, {
+ .name = "RAW10 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .depth = { 10 },
+ .color = FIMC_FMT_RAW10,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ }, {
+ .name = "RAW12 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .depth = { 12 },
+ .color = FIMC_FMT_RAW12,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ },
+};
+
+/**
+ * fimc_isp_find_format - lookup color format by fourcc or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @index: index to the fimc_isp_formats array, ignored if negative
+ */
+const struct fimc_fmt *fimc_isp_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, int index)
+{
+ const struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_isp_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_isp_formats); ++i) {
+ fmt = &fimc_isp_formats[i];
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
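+/*
+ * Illustrative lookups (not part of the driver API): with both format
+ * pointers NULL the function acts as an enumerator, e.g.
+ * fimc_isp_find_format(NULL, NULL, 1) returns the RAW10 entry; passing
+ * only @pixelformat or @mbus_code returns the first match or NULL.
+ */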
+
+void fimc_isp_irq_handler(struct fimc_is *is)
+{
+ is->i2h_cmd.args[0] = mcuctl_read(is, MCUCTL_REG_ISSR(20));
+ is->i2h_cmd.args[1] = mcuctl_read(is, MCUCTL_REG_ISSR(21));
+
+ fimc_is_fw_clear_irq1(is, FIMC_IS_INT_FRAME_DONE_ISP);
+ fimc_isp_video_irq_handler(is);
+
+ wake_up(&is->irq_queue);
+}
+
+/* Capture subdev media entity operations */
+static int fimc_is_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ return 0;
+}
+
+static const struct media_entity_operations fimc_is_subdev_media_ops = {
+ .link_setup = fimc_is_link_setup,
+};
+
+static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_isp_find_format(NULL, NULL, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *mf = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ return 0;
+ }
+
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+
+ mutex_lock(&isp->subdev_lock);
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ /* ISP OTF input image format */
+ *mf = isp->sink_fmt;
+ } else {
+ /* ISP OTF output image format */
+ *mf = isp->src_fmt;
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->code = V4L2_MBUS_FMT_YUV10_1X30;
+ }
+ }
+
+ mutex_unlock(&isp->subdev_lock);
+
+ isp_dbg(1, sd, "%s: pad%d: fmt: 0x%x, %dx%d\n", __func__,
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ return 0;
+}
+
+static void __isp_subdev_try_format(struct fimc_isp *isp,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct v4l2_mbus_framefmt *format;
+
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, FIMC_ISP_SINK_WIDTH_MIN,
+ FIMC_ISP_SINK_WIDTH_MAX, 0,
+ &mf->height, FIMC_ISP_SINK_HEIGHT_MIN,
+ FIMC_ISP_SINK_HEIGHT_MAX, 0, 0);
+ mf->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ } else {
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ format = v4l2_subdev_get_try_format(fh,
+ FIMC_ISP_SD_PAD_SINK);
+ else
+ format = &isp->sink_fmt;
+
+ /* Allow changing format only on sink pad */
+ mf->width = format->width - FIMC_ISP_CAC_MARGIN_WIDTH;
+ mf->height = format->height - FIMC_ISP_CAC_MARGIN_HEIGHT;
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
+ mf->code = V4L2_MBUS_FMT_YUV10_1X30;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ } else {
+ mf->code = format->code;
+ }
+ }
+}
+
+static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ int ret = 0;
+
+ isp_dbg(1, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
+ __func__, fmt->pad, mf->code, mf->width, mf->height);
+
+ mutex_lock(&isp->subdev_lock);
+ __isp_subdev_try_format(isp, fh, fmt);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+
+ /* Propagate format to the source pads */
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ struct v4l2_subdev_format format = *fmt;
+ unsigned int pad;
+
+ for (pad = FIMC_ISP_SD_PAD_SRC_FIFO;
+ pad < FIMC_ISP_SD_PADS_NUM; pad++) {
+ format.pad = pad;
+ __isp_subdev_try_format(isp, fh, &format);
+ mf = v4l2_subdev_get_try_format(fh, pad);
+ *mf = format.format;
+ }
+ }
+ } else {
+ if (sd->entity.stream_count == 0) {
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ struct v4l2_subdev_format format = *fmt;
+
+ isp->sink_fmt = *mf;
+
+ format.pad = FIMC_ISP_SD_PAD_SRC_DMA;
+ __isp_subdev_try_format(isp, fh, &format);
+
+ isp->src_fmt = format.format;
+ __is_set_frame_size(is, &isp->src_fmt);
+ } else {
+ isp->src_fmt = *mf;
+ }
+ } else {
+ ret = -EBUSY;
+ }
+ }
+
+ mutex_unlock(&isp->subdev_lock);
+ return ret;
+}
+
+static int fimc_isp_subdev_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ int ret;
+
+ isp_dbg(1, sd, "%s: on: %d\n", __func__, on);
+
+ if (!test_bit(IS_ST_INIT_DONE, &is->state))
+ return -EBUSY;
+
+ fimc_is_mem_barrier();
+
+ if (on) {
+ if (__get_pending_param_count(is)) {
+ ret = fimc_is_itf_s_param(is, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ isp_dbg(1, sd, "changing mode to %d\n", is->config_index);
+
+ ret = fimc_is_itf_mode_change(is);
+ if (ret)
+ return -EINVAL;
+
+ clear_bit(IS_ST_STREAM_ON, &is->state);
+ fimc_is_hw_stream_on(is);
+ ret = fimc_is_wait_event(is, IS_ST_STREAM_ON, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "stream on timeout\n");
+ return ret;
+ }
+ } else {
+ clear_bit(IS_ST_STREAM_OFF, &is->state);
+ fimc_is_hw_stream_off(is);
+ ret = fimc_is_wait_event(is, IS_ST_STREAM_OFF, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "stream off timeout\n");
+ return ret;
+ }
+ is->setfile.sub_index = 0;
+ }
+
+ return 0;
+}
+
+static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ int ret = 0;
+
+ pr_debug("on: %d\n", on);
+
+ if (on) {
+ ret = pm_runtime_get_sync(&is->pdev->dev);
+ if (ret < 0)
+ return ret;
+ set_bit(IS_ST_PWR_ON, &is->state);
+
+ ret = fimc_is_start_firmware(is);
+ if (ret < 0) {
+ v4l2_err(sd, "firmware booting failed\n");
+ pm_runtime_put(&is->pdev->dev);
+ return ret;
+ }
+ set_bit(IS_ST_PWR_SUBIP_ON, &is->state);
+
+ ret = fimc_is_hw_initialize(is);
+ } else {
+ /* Close sensor */
+ if (!test_bit(IS_ST_PWR_ON, &is->state)) {
+ fimc_is_hw_close_sensor(is, 0);
+
+ ret = fimc_is_wait_event(is, IS_ST_OPEN_SENSOR, 0,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "sensor close timeout\n");
+ return ret;
+ }
+ }
+
+ /* SUB IP power off */
+ if (test_bit(IS_ST_PWR_SUBIP_ON, &is->state)) {
+ fimc_is_hw_subip_power_off(is);
+ ret = fimc_is_wait_event(is, IS_ST_PWR_SUBIP_ON, 0,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "sub-IP power off timeout\n");
+ return ret;
+ }
+ }
+
+ fimc_is_cpu_set_power(is, 0);
+ pm_runtime_put_sync(&is->pdev->dev);
+
+ clear_bit(IS_ST_PWR_ON, &is->state);
+ clear_bit(IS_ST_INIT_DONE, &is->state);
+ is->state = 0;
+ is->config[is->config_index].p_region_index[0] = 0;
+ is->config[is->config_index].p_region_index[1] = 0;
+ set_bit(IS_ST_IDLE, &is->state);
+ wmb();
+ }
+
+ return ret;
+}
+
+static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SINK);
+
+ fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt.code = fimc_isp_formats[0].mbus_code;
+ fmt.width = DEFAULT_PREVIEW_STILL_WIDTH + FIMC_ISP_CAC_MARGIN_WIDTH;
+ fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT + FIMC_ISP_CAC_MARGIN_HEIGHT;
+ fmt.field = V4L2_FIELD_NONE;
+ *format = fmt;
+
+ format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SRC_FIFO);
+ fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ *format = fmt;
+
+ format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SRC_DMA);
+ *format = fmt;
+
+ return 0;
+}
+
+static int fimc_isp_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ int ret;
+
+ /* Use pipeline object allocated by the media device. */
+ isp->video_capture.ve.pipe = v4l2_get_subdev_hostdata(sd);
+
+ ret = fimc_isp_video_device_register(isp, sd->v4l2_dev,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret < 0)
+ isp->video_capture.ve.pipe = NULL;
+
+ return ret;
+}
+
+static void fimc_isp_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+
+ fimc_isp_video_device_unregister(isp,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+}
+
+static const struct v4l2_subdev_internal_ops fimc_is_subdev_internal_ops = {
+ .registered = fimc_isp_subdev_registered,
+ .unregistered = fimc_isp_subdev_unregistered,
+ .open = fimc_isp_subdev_open,
+};
+
+static const struct v4l2_subdev_pad_ops fimc_is_subdev_pad_ops = {
+ .enum_mbus_code = fimc_is_subdev_enum_mbus_code,
+ .get_fmt = fimc_isp_subdev_get_fmt,
+ .set_fmt = fimc_isp_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops fimc_is_subdev_video_ops = {
+ .s_stream = fimc_isp_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops fimc_is_core_ops = {
+ .s_power = fimc_isp_subdev_s_power,
+};
+
+static struct v4l2_subdev_ops fimc_is_subdev_ops = {
+ .core = &fimc_is_core_ops,
+ .video = &fimc_is_subdev_video_ops,
+ .pad = &fimc_is_subdev_pad_ops,
+};
+
+static int __ctrl_set_white_balance(struct fimc_is *is, int value)
+{
+ switch (value) {
+ case V4L2_WHITE_BALANCE_AUTO:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_AUTO, 0);
+ break;
+ case V4L2_WHITE_BALANCE_DAYLIGHT:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_DAYLIGHT);
+ break;
+ case V4L2_WHITE_BALANCE_CLOUDY:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_CLOUDY);
+ break;
+ case V4L2_WHITE_BALANCE_INCANDESCENT:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_TUNGSTEN);
+ break;
+ case V4L2_WHITE_BALANCE_FLUORESCENT:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_FLUORESCENT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
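+/*
+ * V4L2_CID_3A_LOCK is a bitmask control: V4L2_LOCK_EXPOSURE freezes the
+ * auto-exposure loop and V4L2_LOCK_WHITE_BALANCE freezes AWB. The two
+ * states are applied in separate parameter transactions below.
+ */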
+static int __ctrl_set_aewb_lock(struct fimc_is *is,
+ struct v4l2_ctrl *ctrl)
+{
+ bool awb_lock = ctrl->val & V4L2_LOCK_WHITE_BALANCE;
+ bool ae_lock = ctrl->val & V4L2_LOCK_EXPOSURE;
+ struct isp_param *isp = &is->is_p_region->parameter.isp;
+ int cmd, ret;
+
+ cmd = ae_lock ? ISP_AA_COMMAND_STOP : ISP_AA_COMMAND_START;
+ isp->aa.cmd = cmd;
+ isp->aa.target = ISP_AA_TARGET_AE;
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+ is->af.ae_lock_state = ae_lock;
+ wmb();
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ return ret;
+
+ cmd = awb_lock ? ISP_AA_COMMAND_STOP : ISP_AA_COMMAND_START;
+ isp->aa.cmd = cmd;
+ isp->aa.target = ISP_AA_TARGET_AE;
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+ is->af.awb_lock_state = awb_lock;
+ wmb();
+
+ return fimc_is_itf_s_param(is, false);
+}
+
+/* Supported manual ISO values */
+static const s64 iso_qmenu[] = {
+ 50, 100, 200, 400, 800,
+};
+
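+/*
+ * The manual ISO control is an integer menu whose value is an index into
+ * iso_qmenu[], e.g. index 2 selects ISO 200; the index is ignored while
+ * the cluster is in V4L2_ISO_SENSITIVITY_AUTO mode.
+ */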
+static int __ctrl_set_iso(struct fimc_is *is, int value)
+{
+ unsigned int idx, iso;
+
+ if (value == V4L2_ISO_SENSITIVITY_AUTO) {
+ __is_set_isp_iso(is, ISP_ISO_COMMAND_AUTO, 0);
+ return 0;
+ }
+ idx = is->isp.ctrls.iso->val;
+ if (idx >= ARRAY_SIZE(iso_qmenu))
+ return -EINVAL;
+
+ iso = iso_qmenu[idx];
+ __is_set_isp_iso(is, ISP_ISO_COMMAND_MANUAL, iso);
+ return 0;
+}
+
+static int __ctrl_set_metering(struct fimc_is *is, unsigned int value)
+{
+ unsigned int val;
+
+ switch (value) {
+ case V4L2_EXPOSURE_METERING_AVERAGE:
+ val = ISP_METERING_COMMAND_AVERAGE;
+ break;
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ val = ISP_METERING_COMMAND_CENTER;
+ break;
+ case V4L2_EXPOSURE_METERING_SPOT:
+ val = ISP_METERING_COMMAND_SPOT;
+ break;
+ case V4L2_EXPOSURE_METERING_MATRIX:
+ val = ISP_METERING_COMMAND_MATRIX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val);
+ return 0;
+}
+
+static int __ctrl_set_afc(struct fimc_is *is, int value)
+{
+ switch (value) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_DISABLE, 0);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_MANUAL, 50);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_MANUAL, 60);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_AUTO:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_AUTO, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __ctrl_set_image_effect(struct fimc_is *is, int value)
+{
+ static const u8 effects[][2] = {
+ { V4L2_COLORFX_NONE, ISP_IMAGE_EFFECT_DISABLE },
+ { V4L2_COLORFX_BW, ISP_IMAGE_EFFECT_MONOCHROME },
+ { V4L2_COLORFX_SEPIA, ISP_IMAGE_EFFECT_SEPIA },
+ { V4L2_COLORFX_NEGATIVE, ISP_IMAGE_EFFECT_NEGATIVE_MONO },
+ { 16 /* TODO */, ISP_IMAGE_EFFECT_NEGATIVE_COLOR },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(effects); i++) {
+ if (effects[i][0] != value)
+ continue;
+
+ __is_set_isp_effect(is, effects[i][1]);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int fimc_is_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_isp *isp = ctrl_to_fimc_isp(ctrl);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ bool set_param = true;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_CONTRAST,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_SATURATION,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_SHARPNESS,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_EXPOSURE,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_HUE:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_HUE,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_METERING:
+ ret = __ctrl_set_metering(is, ctrl->val);
+ break;
+
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ ret = __ctrl_set_white_balance(is, ctrl->val);
+ break;
+
+ case V4L2_CID_3A_LOCK:
+ ret = __ctrl_set_aewb_lock(is, ctrl);
+ set_param = false;
+ break;
+
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ ret = __ctrl_set_iso(is, ctrl->val);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ ret = __ctrl_set_afc(is, ctrl->val);
+ break;
+
+ case V4L2_CID_COLORFX:
+ __ctrl_set_image_effect(is, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0) {
+ v4l2_err(&isp->subdev, "Failed to set control: %s (%d)\n",
+ ctrl->name, ctrl->val);
+ return ret;
+ }
+
+ if (set_param && test_bit(IS_ST_STREAM_ON, &is->state))
+ return fimc_is_itf_s_param(is, true);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_isp_ctrl_ops = {
+ .s_ctrl = fimc_is_s_ctrl,
+};
+
+static void __isp_subdev_set_default_format(struct fimc_isp *isp)
+{
+ struct fimc_is *is = fimc_isp_to_is(isp);
+
+ isp->sink_fmt.width = DEFAULT_PREVIEW_STILL_WIDTH +
+ FIMC_ISP_CAC_MARGIN_WIDTH;
+ isp->sink_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT +
+ FIMC_ISP_CAC_MARGIN_HEIGHT;
+ isp->sink_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ isp->src_fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ isp->src_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ isp->src_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ __is_set_frame_size(is, &isp->src_fmt);
+}
+
+int fimc_isp_subdev_create(struct fimc_isp *isp)
+{
+ const struct v4l2_ctrl_ops *ops = &fimc_isp_ctrl_ops;
+ struct v4l2_ctrl_handler *handler = &isp->ctrls.handler;
+ struct v4l2_subdev *sd = &isp->subdev;
+ struct fimc_isp_ctrls *ctrls = &isp->ctrls;
+ int ret;
+
+ mutex_init(&isp->subdev_lock);
+
+ v4l2_subdev_init(sd, &fimc_is_subdev_ops);
+
+ sd->owner = THIS_MODULE;
+ sd->grp_id = GRP_ID_FIMC_IS;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC-IS-ISP");
+
+ isp->subdev_pads[FIMC_ISP_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_FIFO].flags = MEDIA_PAD_FL_SOURCE;
+ isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_DMA].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FIMC_ISP_SD_PADS_NUM,
+ isp->subdev_pads, 0);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 20);
+
+ ctrls->saturation = v4l2_ctrl_new_std(handler, ops, V4L2_CID_SATURATION,
+ -2, 2, 1, 0);
+ ctrls->brightness = v4l2_ctrl_new_std(handler, ops, V4L2_CID_BRIGHTNESS,
+ -4, 4, 1, 0);
+ ctrls->contrast = v4l2_ctrl_new_std(handler, ops, V4L2_CID_CONTRAST,
+ -2, 2, 1, 0);
+ ctrls->sharpness = v4l2_ctrl_new_std(handler, ops, V4L2_CID_SHARPNESS,
+ -2, 2, 1, 0);
+ ctrls->hue = v4l2_ctrl_new_std(handler, ops, V4L2_CID_HUE,
+ -2, 2, 1, 0);
+
+ ctrls->auto_wb = v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ 8, ~0x14e, V4L2_WHITE_BALANCE_AUTO);
+
+ ctrls->exposure = v4l2_ctrl_new_std(handler, ops,
+ V4L2_CID_EXPOSURE_ABSOLUTE,
+ -4, 4, 1, 0);
+
+ ctrls->exp_metering = v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_EXPOSURE_METERING, 3,
+ ~0xf, V4L2_EXPOSURE_METERING_AVERAGE);
+
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+ /* ISO sensitivity */
+ ctrls->auto_iso = v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_ISO_SENSITIVITY_AUTO, 1, 0,
+ V4L2_ISO_SENSITIVITY_AUTO);
+
+ ctrls->iso = v4l2_ctrl_new_int_menu(handler, ops,
+ V4L2_CID_ISO_SENSITIVITY, ARRAY_SIZE(iso_qmenu) - 1,
+ ARRAY_SIZE(iso_qmenu)/2 - 1, iso_qmenu);
+
+ ctrls->aewb_lock = v4l2_ctrl_new_std(handler, ops,
+ V4L2_CID_3A_LOCK, 0, 0x3, 0, 0);
+
+ /* TODO: Add support for NEGATIVE_COLOR option */
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_SET_CBCR + 1, ~0x1000f, V4L2_COLORFX_NONE);
+
+ if (handler->error) {
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
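+	/*
+	 * Cluster the ISO controls: the manual iso control is active only
+	 * while auto_iso is set to V4L2_ISO_SENSITIVITY_MANUAL, otherwise
+	 * the control framework keeps it inactive.
+	 */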
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_iso,
+ V4L2_ISO_SENSITIVITY_MANUAL, false);
+
+ sd->ctrl_handler = handler;
+ sd->internal_ops = &fimc_is_subdev_internal_ops;
+ sd->entity.ops = &fimc_is_subdev_media_ops;
+ v4l2_set_subdevdata(sd, isp);
+
+ __isp_subdev_set_default_format(isp);
+
+ return 0;
+}
+
+void fimc_isp_subdev_destroy(struct fimc_isp *isp)
+{
+ struct v4l2_subdev *sd = &isp->subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&isp->ctrls.handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h
new file mode 100644
index 00000000000..b99be09b49f
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp.h
@@ -0,0 +1,195 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_ISP_H_
+#define FIMC_ISP_H_
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/exynos-fimc.h>
+
+extern int fimc_isp_debug;
+
+#define isp_dbg(level, dev, fmt, arg...) \
+ v4l2_dbg(level, fimc_isp_debug, dev, fmt, ## arg)
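+/*
+ * isp_dbg() wraps v4l2_dbg(); messages are emitted only when the debug_isp
+ * module parameter (fimc_isp_debug) is set to at least the given level.
+ */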
+
+/* FIXME: revisit these constraints */
+#define FIMC_ISP_SINK_WIDTH_MIN (16 + 8)
+#define FIMC_ISP_SINK_HEIGHT_MIN (12 + 8)
+#define FIMC_ISP_SOURCE_WIDTH_MIN 8
+#define FIMC_ISP_SOURCE_HEIGHT_MIN 8
+#define FIMC_ISP_CAC_MARGIN_WIDTH 16
+#define FIMC_ISP_CAC_MARGIN_HEIGHT 12
+
+#define FIMC_ISP_SINK_WIDTH_MAX (4000 - 16)
+#define FIMC_ISP_SINK_HEIGHT_MAX (4000 + 12)
+#define FIMC_ISP_SOURCE_WIDTH_MAX 4000
+#define FIMC_ISP_SOURCE_HEIGHT_MAX 4000
+
+#define FIMC_ISP_NUM_FORMATS 3
+#define FIMC_ISP_REQ_BUFS_MIN 2
+#define FIMC_ISP_REQ_BUFS_MAX 32
+
+#define FIMC_ISP_SD_PAD_SINK 0
+#define FIMC_ISP_SD_PAD_SRC_FIFO 1
+#define FIMC_ISP_SD_PAD_SRC_DMA 2
+#define FIMC_ISP_SD_PADS_NUM 3
+#define FIMC_ISP_MAX_PLANES 1
+
+/**
+ * struct fimc_isp_frame - source/target frame properties
+ * @width: full image width
+ * @height: full image height
+ * @rect: crop/composition rectangle
+ */
+struct fimc_isp_frame {
+ u16 width;
+ u16 height;
+ struct v4l2_rect rect;
+};
+
+struct fimc_isp_ctrls {
+ struct v4l2_ctrl_handler handler;
+
+ /* Auto white balance */
+ struct v4l2_ctrl *auto_wb;
+ /* Auto ISO control cluster */
+ struct {
+ struct v4l2_ctrl *auto_iso;
+ struct v4l2_ctrl *iso;
+ };
+ /* Adjust - contrast */
+ struct v4l2_ctrl *contrast;
+ /* Adjust - saturation */
+ struct v4l2_ctrl *saturation;
+ /* Adjust - sharpness */
+ struct v4l2_ctrl *sharpness;
+ /* Adjust - brightness */
+ struct v4l2_ctrl *brightness;
+ /* Adjust - hue */
+ struct v4l2_ctrl *hue;
+
+ /* Auto/manual exposure */
+ struct v4l2_ctrl *auto_exp;
+ /* Manual exposure value */
+ struct v4l2_ctrl *exposure;
+ /* AE/AWB lock/unlock */
+ struct v4l2_ctrl *aewb_lock;
+ /* Exposure metering mode */
+ struct v4l2_ctrl *exp_metering;
+ /* AFC */
+ struct v4l2_ctrl *afc;
+ /* ISP image effect */
+ struct v4l2_ctrl *colorfx;
+};
+
+struct isp_video_buf {
+ struct vb2_buffer vb;
+ dma_addr_t dma_addr[FIMC_ISP_MAX_PLANES];
+ unsigned int index;
+};
+
+#define to_isp_video_buf(_b) container_of(_b, struct isp_video_buf, vb)
+
+#define FIMC_ISP_MAX_BUFS 4
+
+/**
+ * struct fimc_is_video - fimc-is video device structure
+ * @ve: video entity structure (embeds the video_device node)
+ * @type: video device type (CAPTURE/OUTPUT)
+ * @pad: video device media (sink) pad
+ * @pending_buf_q: pending buffers queue head
+ * @active_buf_q: a queue head of buffers scheduled in hardware
+ * @vb_queue: vb2 buffer queue
+ * @reqbufs_count: number of buffers requested with REQBUFS ioctl
+ * @buf_count: number of video buffers scheduled in hardware
+ * @buf_mask: bitmask of the buffer slots currently in use by hardware
+ * @frame_count: counter of frames dequeued to user space
+ * @streaming: non-zero when streaming is active
+ * @buffers: video buffer slot table
+ * @format: current pixel format
+ * @pixfmt: current pixel format parameters
+ */
+struct fimc_is_video {
+ struct exynos_video_entity ve;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vb_queue;
+ unsigned int reqbufs_count;
+ unsigned int buf_count;
+ unsigned int buf_mask;
+ unsigned int frame_count;
+ int streaming;
+ struct isp_video_buf *buffers[FIMC_ISP_MAX_BUFS];
+ const struct fimc_fmt *format;
+ struct v4l2_pix_format_mplane pixfmt;
+};
+
+/* struct fimc_isp:state bit definitions */
+#define ST_ISP_VID_CAP_BUF_PREP 0
+#define ST_ISP_VID_CAP_STREAMING 1
+
+/**
+ * struct fimc_isp - FIMC-IS ISP data structure
+ * @pdev: pointer to FIMC-IS platform device
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @subdev: ISP v4l2_subdev
+ * @subdev_pads: the ISP subdev media pads
+ * @src_fmt: the ISP OTF output (source pad) media bus format
+ * @sink_fmt: the ISP OTF input (sink pad) media bus format
+ * @test_pattern: test pattern controls
+ * @ctrls: v4l2 controls structure
+ * @video_lock: mutex serializing video device and the subdev operations
+ * @subdev_lock: mutex protecting the subdev format state
+ * @cac_margin_x: horizontal CAC margin in pixels
+ * @cac_margin_y: vertical CAC margin in pixels
+ * @state: driver state flags
+ * @video_capture: the ISP block video capture device
+ */
+struct fimc_isp {
+ struct platform_device *pdev;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct v4l2_subdev subdev;
+ struct media_pad subdev_pads[FIMC_ISP_SD_PADS_NUM];
+ struct v4l2_mbus_framefmt src_fmt;
+ struct v4l2_mbus_framefmt sink_fmt;
+ struct v4l2_ctrl *test_pattern;
+ struct fimc_isp_ctrls ctrls;
+
+ struct mutex video_lock;
+ struct mutex subdev_lock;
+
+ unsigned int cac_margin_x;
+ unsigned int cac_margin_y;
+
+ unsigned long state;
+
+ struct fimc_is_video video_capture;
+};
+
+#define ctrl_to_fimc_isp(_ctrl) \
+	container_of((_ctrl)->handler, struct fimc_isp, ctrls.handler)
+
+struct fimc_is;
+
+int fimc_isp_subdev_create(struct fimc_isp *isp);
+void fimc_isp_subdev_destroy(struct fimc_isp *isp);
+void fimc_isp_irq_handler(struct fimc_is *is);
+int fimc_is_create_controls(struct fimc_isp *isp);
+int fimc_is_delete_controls(struct fimc_isp *isp);
+const struct fimc_fmt *fimc_isp_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, int index);
+#endif /* FIMC_ISP_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
new file mode 100644
index 00000000000..bc3ec7d25a3
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
@@ -0,0 +1,349 @@
+/*
+ * Register interface file for EXYNOS FIMC-LITE (camera interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <media/exynos-fimc.h>
+
+#include "fimc-lite-reg.h"
+#include "fimc-lite.h"
+#include "fimc-core.h"
+
+#define FLITE_RESET_TIMEOUT 50 /* in ms */
+
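+/*
+ * Soft reset handshake: request a software reset, poll CIGCTRL for the
+ * SWRST_RDY status for up to FLITE_RESET_TIMEOUT ms and only then assert
+ * the actual SWRST bit.
+ */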
+void flite_hw_reset(struct fimc_lite *dev)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(FLITE_RESET_TIMEOUT);
+ u32 cfg;
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_SWRST_REQ;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ while (time_is_after_jiffies(end)) {
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (cfg & FLITE_REG_CIGCTRL_SWRST_RDY)
+ break;
+ usleep_range(1000, 5000);
+ }
+
+ cfg |= FLITE_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_clear_pending_irq(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS);
+ cfg &= ~FLITE_REG_CISTATUS_IRQ_CAM;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS);
+}
+
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev)
+{
+ u32 intsrc = readl(dev->regs + FLITE_REG_CISTATUS);
+ return intsrc & FLITE_REG_CISTATUS_IRQ_MASK;
+}
+
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS2);
+ cfg &= ~FLITE_REG_CISTATUS2_LASTCAPEND;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS2);
+}
+
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev)
+{
+ u32 cfg, intsrc;
+
+ /* Select interrupts to be enabled for each output mode */
+ if (atomic_read(&dev->out_path) == FIMC_IO_DMA) {
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN |
+ FLITE_REG_CIGCTRL_IRQ_STARTEN |
+ FLITE_REG_CIGCTRL_IRQ_ENDEN;
+ } else {
+ /* An output to the FIMC-IS */
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN;
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK;
+ cfg &= ~intsrc;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_capture_start(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg |= FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+void flite_hw_capture_stop(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg &= ~FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+/*
+ * Test pattern (color bars) enable/disable. External sensor
+ * pixel clock must be active for the test pattern to work.
+ */
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (on)
+ cfg |= FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ else
+ cfg &= ~FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
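+/*
+ * Each row maps a media bus pixel code to its CISRCSIZE input order bits
+ * (YUV formats only) and its CIGCTRL source format bits. Entry 0 (YUYV)
+ * also serves as the fallback when an unsupported code is requested.
+ */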
+static const u32 src_pixfmt_map[8][3] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
+ { V4L2_MBUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
+ { V4L2_MBUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
+ { V4L2_MBUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
+};
+
+/* Set camera input pixel format and resolution */
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
+{
+ enum v4l2_mbus_pixelcode pixelcode = f->fmt->mbus_code;
+ int i = ARRAY_SIZE(src_pixfmt_map);
+ u32 cfg;
+
+ while (--i) {
+ if (src_pixfmt_map[i][0] == pixelcode)
+ break;
+ }
+
+ if (i == 0 && src_pixfmt_map[i][0] != pixelcode) {
+ v4l2_err(&dev->ve.vdev,
+ "Unsupported pixel code, falling back to %#08x\n",
+ src_pixfmt_map[i][0]);
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg &= ~FLITE_REG_CIGCTRL_FMT_MASK;
+ cfg |= src_pixfmt_map[i][2];
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ cfg = readl(dev->regs + FLITE_REG_CISRCSIZE);
+ cfg &= ~(FLITE_REG_CISRCSIZE_ORDER422_MASK |
+ FLITE_REG_CISRCSIZE_SIZE_CAM_MASK);
+ cfg |= (f->f_width << 16) | f->f_height;
+ cfg |= src_pixfmt_map[i][1];
+ writel(cfg, dev->regs + FLITE_REG_CISRCSIZE);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 hoff2, voff2;
+ u32 cfg;
+
+ cfg = readl(dev->regs + FLITE_REG_CIWDOFST);
+ cfg &= ~FLITE_REG_CIWDOFST_OFST_MASK;
+ cfg |= (f->rect.left << 16) | f->rect.top;
+ cfg |= FLITE_REG_CIWDOFST_WINOFSEN;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST);
+
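+	/*
+	 * CIWDOFST2 takes the right/bottom margins, i.e. the distance from
+	 * the crop rectangle to the full frame boundary.
+	 */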
+ hoff2 = f->f_width - f->rect.width - f->rect.left;
+ voff2 = f->f_height - f->rect.height - f->rect.top;
+
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST2);
+}
+
+/* Select camera port (A, B) */
+static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGENERAL);
+ if (id == 0)
+ cfg &= ~FLITE_REG_CIGENERAL_CAM_B;
+ else
+ cfg |= FLITE_REG_CIGENERAL_CAM_B;
+ writel(cfg, dev->regs + FLITE_REG_CIGENERAL);
+}
+
+/* Select serial or parallel bus, camera port (A,B) and set signals polarity */
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct fimc_source_info *si)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ unsigned int flags = si->flags;
+
+ if (si->sensor_bus_type != FIMC_BUS_TYPE_MIPI_CSI2) {
+ cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI |
+ FLITE_REG_CIGCTRL_INVPOLPCLK |
+ FLITE_REG_CIGCTRL_INVPOLVSYNC |
+ FLITE_REG_CIGCTRL_INVPOLHREF);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLVSYNC;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLHREF;
+ } else {
+ cfg |= FLITE_REG_CIGCTRL_SELCAM_MIPI;
+ }
+
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_camera_port(dev, si->mux_id);
+}
+
+static void flite_hw_set_pack12(struct fimc_lite *dev, int on)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
+
+ cfg &= ~FLITE_REG_CIODMAFMT_PACK12;
+
+ if (on)
+ cfg |= FLITE_REG_CIODMAFMT_PACK12;
+
+ writel(cfg, dev->regs + FLITE_REG_CIODMAFMT);
+}
+
+static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
+{
+ static const u32 pixcode[4][2] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
+ };
+ u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
+ int i = ARRAY_SIZE(pixcode);
+
+ while (--i)
+ if (pixcode[i][0] == f->fmt->mbus_code)
+ break;
+ cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
+ writel(cfg | pixcode[i][1], dev->regs + FLITE_REG_CIODMAFMT);
+}
+
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 cfg;
+
+ /* Maximum output pixel size */
+ cfg = readl(dev->regs + FLITE_REG_CIOCAN);
+ cfg &= ~FLITE_REG_CIOCAN_MASK;
+ cfg = (f->f_height << 16) | f->f_width;
+ writel(cfg, dev->regs + FLITE_REG_CIOCAN);
+
+ /* DMA offsets */
+ cfg = readl(dev->regs + FLITE_REG_CIOOFF);
+ cfg &= ~FLITE_REG_CIOOFF_MASK;
+ cfg |= (f->rect.top << 16) | f->rect.left;
+ writel(cfg, dev->regs + FLITE_REG_CIOOFF);
+}
+
+void flite_hw_set_dma_buffer(struct fimc_lite *dev, struct flite_buffer *buf)
+{
+ unsigned int index;
+ u32 cfg;
+
+ if (dev->dd->max_dma_bufs == 1)
+ index = 0;
+ else
+ index = buf->index;
+
+ if (index == 0)
+ writel(buf->paddr, dev->regs + FLITE_REG_CIOSA);
+ else
+ writel(buf->paddr, dev->regs + FLITE_REG_CIOSAN(index - 1));
+
+ cfg = readl(dev->regs + FLITE_REG_CIFCNTSEQ);
+ cfg |= BIT(index);
+ writel(cfg, dev->regs + FLITE_REG_CIFCNTSEQ);
+}
+
+void flite_hw_mask_dma_buffer(struct fimc_lite *dev, u32 index)
+{
+ u32 cfg;
+
+ if (dev->dd->max_dma_bufs == 1)
+ index = 0;
+
+ cfg = readl(dev->regs + FLITE_REG_CIFCNTSEQ);
+ cfg &= ~BIT(index);
+ writel(cfg, dev->regs + FLITE_REG_CIFCNTSEQ);
+}
+
+/* Enable/disable output DMA, set output pixel size and offsets (composition) */
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+
+ if (!enable) {
+ cfg |= FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+ return;
+ }
+
+ cfg &= ~FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_out_order(dev, f);
+ flite_hw_set_dma_window(dev, f);
+ flite_hw_set_pack12(dev, 0);
+}
+
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CISRCSIZE" },
+ { 0x04, "CIGCTRL" },
+ { 0x08, "CIIMGCPT" },
+ { 0x0c, "CICPTSEQ" },
+ { 0x10, "CIWDOFST" },
+ { 0x14, "CIWDOFST2" },
+ { 0x18, "CIODMAFMT" },
+ { 0x20, "CIOCAN" },
+ { 0x24, "CIOOFF" },
+ { 0x30, "CIOSA" },
+ { 0x40, "CISTATUS" },
+ { 0x44, "CISTATUS2" },
+ { 0xf0, "CITHOLD" },
+ { 0xfc, "CIGENERAL" },
+ };
+ u32 i;
+
+ v4l2_info(&dev->subdev, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(dev->regs + registers[i].offset);
+ v4l2_info(&dev->subdev, "%9s: 0x%08x\n",
+ registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.h b/drivers/media/platform/exynos4-is/fimc-lite-reg.h
new file mode 100644
index 00000000000..10a7d7bbcc2
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_REG_H_
+#define FIMC_LITE_REG_H_
+
+#include "fimc-lite.h"
+
+/* Camera Source size */
+#define FLITE_REG_CISRCSIZE 0x00
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR (0 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB (1 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY (2 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY (3 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_MASK (0x3 << 14)
+#define FLITE_REG_CISRCSIZE_SIZE_CAM_MASK (0x3fff << 16 | 0x3fff)
+
+/* Global control */
+#define FLITE_REG_CIGCTRL 0x04
+#define FLITE_REG_CIGCTRL_YUV422_1P (0x1e << 24)
+#define FLITE_REG_CIGCTRL_RAW8 (0x2a << 24)
+#define FLITE_REG_CIGCTRL_RAW10 (0x2b << 24)
+#define FLITE_REG_CIGCTRL_RAW12 (0x2c << 24)
+#define FLITE_REG_CIGCTRL_RAW14 (0x2d << 24)
+/* User defined formats. x = 0...15 */
+#define FLITE_REG_CIGCTRL_USER(x) ((0x30 + x - 1) << 24)
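+/* e.g. FLITE_REG_CIGCTRL_USER(1) = 0x30 << 24, used for the JPEG source format */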
+#define FLITE_REG_CIGCTRL_FMT_MASK (0x3f << 24)
+#define FLITE_REG_CIGCTRL_SHADOWMASK_DISABLE (1 << 21)
+#define FLITE_REG_CIGCTRL_ODMA_DISABLE (1 << 20)
+#define FLITE_REG_CIGCTRL_SWRST_REQ (1 << 19)
+#define FLITE_REG_CIGCTRL_SWRST_RDY (1 << 18)
+#define FLITE_REG_CIGCTRL_SWRST (1 << 17)
+#define FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR (1 << 15)
+#define FLITE_REG_CIGCTRL_INVPOLPCLK (1 << 14)
+#define FLITE_REG_CIGCTRL_INVPOLVSYNC (1 << 13)
+#define FLITE_REG_CIGCTRL_INVPOLHREF (1 << 12)
+/* Interrupts mask bits (1 disables an interrupt) */
+#define FLITE_REG_CIGCTRL_IRQ_LASTEN (1 << 8)
+#define FLITE_REG_CIGCTRL_IRQ_ENDEN (1 << 7)
+#define FLITE_REG_CIGCTRL_IRQ_STARTEN (1 << 6)
+#define FLITE_REG_CIGCTRL_IRQ_OVFEN (1 << 5)
+#define FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK (0xf << 5)
+#define FLITE_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+
+/* Image Capture Enable */
+#define FLITE_REG_CIIMGCPT 0x08
+#define FLITE_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FLITE_REG_CIIMGCPT_CPT_FREN (1 << 25)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FRCNT (1 << 18)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FREN (0 << 18)
+
+/* Capture Sequence */
+#define FLITE_REG_CICPTSEQ 0x0c
+
+/* Camera Window Offset */
+#define FLITE_REG_CIWDOFST 0x10
+#define FLITE_REG_CIWDOFST_WINOFSEN (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVIY (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FLITE_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FLITE_REG_CIWDOFST_OFST_MASK ((0x1fff << 16) | 0x1fff)
+
+/* Camera Window Offset2 */
+#define FLITE_REG_CIWDOFST2 0x14
+
+/* Camera Output DMA Format */
+#define FLITE_REG_CIODMAFMT 0x18
+#define FLITE_REG_CIODMAFMT_RAW_CON (1 << 15)
+#define FLITE_REG_CIODMAFMT_PACK12 (1 << 14)
+#define FLITE_REG_CIODMAFMT_YCBYCR (0 << 4)
+#define FLITE_REG_CIODMAFMT_YCRYCB (1 << 4)
+#define FLITE_REG_CIODMAFMT_CBYCRY (2 << 4)
+#define FLITE_REG_CIODMAFMT_CRYCBY (3 << 4)
+#define FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK (0x3 << 4)
+
+/* Camera Output Canvas */
+#define FLITE_REG_CIOCAN 0x20
+#define FLITE_REG_CIOCAN_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Offset */
+#define FLITE_REG_CIOOFF 0x24
+#define FLITE_REG_CIOOFF_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Start Address */
+#define FLITE_REG_CIOSA 0x30
+
+/* Camera Status */
+#define FLITE_REG_CISTATUS 0x40
+#define FLITE_REG_CISTATUS_MIPI_VVALID (1 << 22)
+#define FLITE_REG_CISTATUS_MIPI_HVALID (1 << 21)
+#define FLITE_REG_CISTATUS_MIPI_DVALID (1 << 20)
+#define FLITE_REG_CISTATUS_ITU_VSYNC (1 << 14)
+#define FLITE_REG_CISTATUS_ITU_HREFF (1 << 13)
+#define FLITE_REG_CISTATUS_OVFIY (1 << 10)
+#define FLITE_REG_CISTATUS_OVFICB (1 << 9)
+#define FLITE_REG_CISTATUS_OVFICR (1 << 8)
+#define FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW (1 << 7)
+#define FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND (1 << 6)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART (1 << 5)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMEND (1 << 4)
+#define FLITE_REG_CISTATUS_IRQ_CAM (1 << 0)
+#define FLITE_REG_CISTATUS_IRQ_MASK (0xf << 4)
+
+/* Camera Status2 */
+#define FLITE_REG_CISTATUS2 0x44
+#define FLITE_REG_CISTATUS2_LASTCAPEND (1 << 1)
+#define FLITE_REG_CISTATUS2_FRMEND (1 << 0)
+
+/* Qos Threshold */
+#define FLITE_REG_CITHOLD 0xf0
+#define FLITE_REG_CITHOLD_W_QOS_EN (1 << 30)
+
+/* Camera General Purpose */
+#define FLITE_REG_CIGENERAL 0xfc
+/* b0: 1 - camera B, 0 - camera A */
+#define FLITE_REG_CIGENERAL_CAM_B (1 << 0)
+
+#define FLITE_REG_CIFCNTSEQ 0x100
+#define FLITE_REG_CIOSAN(x) (0x200 + (4 * (x)))
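+/*
+ * DMA buffer slot registers: slot 0 uses FLITE_REG_CIOSA, slots 1..n use
+ * FLITE_REG_CIOSAN(slot - 1), and FLITE_REG_CIFCNTSEQ holds a bitmask of
+ * the buffer slots currently enabled for capture.
+ */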
+
+/* ----------------------------------------------------------------------------
+ * Function declarations
+ */
+void flite_hw_reset(struct fimc_lite *dev);
+void flite_hw_clear_pending_irq(struct fimc_lite *dev);
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev);
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev);
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
+void flite_hw_capture_start(struct fimc_lite *dev);
+void flite_hw_capture_stop(struct fimc_lite *dev);
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct fimc_source_info *s_info);
+void flite_hw_set_camera_polarity(struct fimc_lite *dev,
+ struct fimc_source_info *cam);
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
+
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable);
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on);
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label);
+void flite_hw_set_dma_buffer(struct fimc_lite *dev, struct flite_buffer *buf);
+void flite_hw_mask_dma_buffer(struct fimc_lite *dev, u32 index);
+
+static inline void flite_hw_set_dma_buf_mask(struct fimc_lite *dev, u32 mask)
+{
+ writel(mask, dev->regs + FLITE_REG_CIFCNTSEQ);
+}
+
+#endif /* FIMC_LITE_REG_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
new file mode 100644
index 00000000000..a97d2352f1d
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -0,0 +1,1729 @@
+/*
+ * Samsung EXYNOS FIMC-LITE (camera host interface) driver
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/exynos-fimc.h>
+
+#include "common.h"
+#include "fimc-core.h"
+#include "fimc-lite.h"
+#include "fimc-lite-reg.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static const struct fimc_fmt fimc_lite_formats[] = {
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_CBYCRY422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_CRYCBY422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "RAW8 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = { 8 },
+ .color = FIMC_FMT_RAW8,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .flags = FMT_FLAGS_RAW_BAYER,
+ }, {
+ .name = "RAW10 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = { 16 },
+ .color = FIMC_FMT_RAW10,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .flags = FMT_FLAGS_RAW_BAYER,
+ }, {
+ .name = "RAW12 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = { 16 },
+ .color = FIMC_FMT_RAW12,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .flags = FMT_FLAGS_RAW_BAYER,
+ },
+};
+
+/**
+ * fimc_lite_find_format - lookup fimc color format by fourcc or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @mask: the color format flags to match
+ * @index: index to the fimc_lite_formats array, ignored if negative
+ */
+static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, unsigned int mask, int index)
+{
+ const struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_lite_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_lite_formats); ++i) {
+ fmt = &fimc_lite_formats[i];
+ if (mask && !(fmt->flags & mask))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+static int fimc_lite_hw_init(struct fimc_lite *fimc, bool isp_output)
+{
+ struct fimc_source_info *si;
+ unsigned long flags;
+
+ if (fimc->sensor == NULL)
+ return -ENXIO;
+
+ if (fimc->inp_frame.fmt == NULL || fimc->out_frame.fmt == NULL)
+ return -EINVAL;
+
+ /* Get sensor configuration data from the sensor subdev */
+ si = v4l2_get_subdev_hostdata(fimc->sensor);
+ if (!si)
+ return -EINVAL;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ flite_hw_set_camera_bus(fimc, si);
+ flite_hw_set_source_format(fimc, &fimc->inp_frame);
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_dma_buf_mask(fimc, 0);
+ flite_hw_set_output_dma(fimc, &fimc->out_frame, !isp_output);
+ flite_hw_set_interrupt_mask(fimc);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start the streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
+{
+ struct flite_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ streaming = fimc->state & (1 << ST_SENSOR_STREAM);
+
+ fimc->state &= ~(1 << ST_FLITE_RUN | 1 << ST_FLITE_OFF |
+ 1 << ST_FLITE_STREAM | 1 << ST_SENSOR_STREAM);
+ if (suspend)
+ fimc->state |= (1 << ST_FLITE_SUSPENDED);
+ else
+ fimc->state &= ~(1 << ST_FLITE_PENDING |
+ 1 << ST_FLITE_SUSPENDED);
+
+ /* Release unused buffers */
+ while (!suspend && !list_empty(&fimc->pending_buf_q)) {
+ buf = fimc_lite_pending_queue_pop(fimc);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ /* If suspending put unused buffers onto pending queue */
+ while (!list_empty(&fimc->active_buf_q)) {
+ buf = fimc_lite_active_queue_pop(fimc);
+ if (suspend)
+ fimc_lite_pending_queue_add(fimc, buf);
+ else
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ flite_hw_reset(fimc);
+
+ if (!streaming)
+ return 0;
+
+ return fimc_pipeline_call(&fimc->ve, set_stream, 0);
+}
+
+static int fimc_lite_stop_capture(struct fimc_lite *fimc, bool suspend)
+{
+ unsigned long flags;
+
+ if (!fimc_lite_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_FLITE_OFF, &fimc->state);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ (2*HZ/10)); /* 200 ms */
+
+ return fimc_lite_reinit(fimc, suspend);
+}
+
+/* Must be called with fimc.slock spinlock held. */
+static void fimc_lite_config_update(struct fimc_lite *fimc)
+{
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_dma_window(fimc, &fimc->out_frame);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+ clear_bit(ST_FLITE_CONFIG, &fimc->state);
+}
+
+static irqreturn_t flite_irq_handler(int irq, void *priv)
+{
+ struct fimc_lite *fimc = priv;
+ struct flite_buffer *vbuf;
+ unsigned long flags;
+ struct timeval *tv;
+ struct timespec ts;
+ u32 intsrc;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ intsrc = flite_hw_get_interrupt_source(fimc);
+ flite_hw_clear_pending_irq(fimc);
+
+ if (test_and_clear_bit(ST_FLITE_OFF, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW) {
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ fimc->events.data_overflow++;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND) {
+ flite_hw_clear_last_capture_end(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ wake_up(&fimc->irq_queue);
+ }
+
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA)
+ goto done;
+
+ if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) &&
+ test_bit(ST_FLITE_RUN, &fimc->state) &&
+ !list_empty(&fimc->pending_buf_q)) {
+ vbuf = fimc_lite_pending_queue_pop(fimc);
+ flite_hw_set_dma_buffer(fimc, vbuf);
+ fimc_lite_active_queue_add(fimc, vbuf);
+ }
+
+ if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMEND) &&
+ test_bit(ST_FLITE_RUN, &fimc->state) &&
+ !list_empty(&fimc->active_buf_q)) {
+ vbuf = fimc_lite_active_queue_pop(fimc);
+ ktime_get_ts(&ts);
+ tv = &vbuf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ vbuf->vb.v4l2_buf.sequence = fimc->frame_count++;
+ flite_hw_mask_dma_buffer(fimc, vbuf->index);
+ vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (test_bit(ST_FLITE_CONFIG, &fimc->state))
+ fimc_lite_config_update(fimc);
+
+ if (list_empty(&fimc->pending_buf_q)) {
+ flite_hw_capture_stop(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ }
+done:
+ set_bit(ST_FLITE_RUN, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ fimc->buf_index = 0;
+ fimc->frame_count = 0;
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ ret = fimc_lite_hw_init(fimc, false);
+ if (ret) {
+ fimc_lite_reinit(fimc, false);
+ return ret;
+ }
+
+ set_bit(ST_FLITE_PENDING, &fimc->state);
+
+ if (!list_empty(&fimc->active_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_call(&fimc->ve, set_stream, 1);
+ }
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ return 0;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+
+ if (!fimc_lite_active(fimc))
+ return;
+
+ fimc_lite_stop_capture(fimc, false);
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ struct fimc_lite *fimc = vq->drv_priv;
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = frame->fmt;
+ unsigned long wh;
+ int i;
+
+ if (pfmt) {
+ pixm = &pfmt->fmt.pix_mp;
+ fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, 0, -1);
+ wh = pixm->width * pixm->height;
+ } else {
+ wh = frame->f_width * frame->f_height;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+ if (pixm)
+ sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+ else
+ sizes[i] = size;
+ allocators[i] = fimc->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct fimc_lite *fimc = vq->drv_priv;
+ int i;
+
+ if (fimc->out_frame.fmt == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < fimc->out_frame.fmt->memplanes; i++) {
+ unsigned long size = fimc->payload[i];
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(&fimc->ve.vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct flite_buffer *buf
+ = container_of(vb, struct flite_buffer, vb);
+ struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ buf->index = fimc->buf_index++;
+ if (fimc->buf_index >= fimc->reqbufs_count)
+ fimc->buf_index = 0;
+
+ if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) &&
+ !test_bit(ST_FLITE_STREAM, &fimc->state) &&
+ list_empty(&fimc->active_buf_q)) {
+ flite_hw_set_dma_buffer(fimc, buf);
+ fimc_lite_active_queue_add(fimc, buf);
+ } else {
+ fimc_lite_pending_queue_add(fimc, buf);
+ }
+
+ if (vb2_is_streaming(&fimc->vb_queue) &&
+ !list_empty(&fimc->pending_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_call(&fimc->ve, set_stream, 1);
+ return;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static const struct vb2_ops fimc_lite_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
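+/*
+ * Note on the vb2 callbacks above: queue_setup and buf_prepare validate
+ * plane sizes, buf_queue hands buffers to the hardware or to the pending
+ * queue, and start_streaming/stop_streaming are invoked by videobuf2
+ * around VIDIOC_STREAMON/STREAMOFF.
+ */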
+
+static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ memset(&fimc->events, 0, sizeof(fimc->events));
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static int fimc_lite_open(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *me = &fimc->ve.vdev.entity;
+ int ret;
+
+ mutex_lock(&fimc->lock);
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ set_bit(ST_FLITE_IN_USE, &fimc->state);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto err_pm;
+
+ if (!v4l2_fh_is_singular_file(file) ||
+ atomic_read(&fimc->out_path) != FIMC_IO_DMA)
+ goto unlock;
+
+ mutex_lock(&me->parent->graph_mutex);
+
+ ret = fimc_pipeline_call(&fimc->ve, open, me, true);
+
+ /* Mark video pipeline ending at this video node as in use. */
+ if (ret == 0)
+ me->use_count++;
+
+ mutex_unlock(&me->parent->graph_mutex);
+
+ if (!ret) {
+ fimc_lite_clear_event_counters(fimc);
+ goto unlock;
+ }
+
+ v4l2_fh_release(file);
+err_pm:
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
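+/*
+ * release() handler: on the last close stop capture, close the pipeline
+ * and drop the media entity use count, then release the vb2 queue and
+ * the runtime PM reference.
+ */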
+static int fimc_lite_release(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *entity = &fimc->ve.vdev.entity;
+
+ mutex_lock(&fimc->lock);
+
+ if (v4l2_fh_is_singular_file(file) &&
+ atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
+ if (fimc->streaming) {
+ media_entity_pipeline_stop(entity);
+ fimc->streaming = false;
+ }
+ fimc_lite_stop_capture(fimc, false);
+ fimc_pipeline_call(&fimc->ve, close);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+
+ mutex_lock(&entity->parent->graph_mutex);
+ entity->use_count--;
+ mutex_unlock(&entity->parent->graph_mutex);
+ }
+
+ _vb2_fop_release(file, NULL);
+ pm_runtime_put(&fimc->pdev->dev);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static const struct v4l2_file_operations fimc_lite_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_lite_open,
+ .release = fimc_lite_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct flite_drvdata *dd = fimc->dd;
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ const struct fimc_fmt *fmt = NULL;
+
+ if (format->pad == FLITE_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, 8, dd->max_width,
+ ffs(dd->out_width_align) - 1,
+ &mf->height, 0, dd->max_height, 0, 0);
+
+ fmt = fimc_lite_find_format(NULL, &mf->code, 0, 0);
+ if (WARN_ON(!fmt))
+ return NULL;
+
+ mf->colorspace = fmt->colorspace;
+ mf->code = fmt->mbus_code;
+ } else {
+ struct flite_frame *sink = &fimc->inp_frame;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *rect;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sink_fmt = v4l2_subdev_get_try_format(fh,
+ FLITE_SD_PAD_SINK);
+
+ mf->code = sink_fmt->code;
+ mf->colorspace = sink_fmt->colorspace;
+
+ rect = v4l2_subdev_get_try_crop(fh,
+ FLITE_SD_PAD_SINK);
+ } else {
+ mf->code = sink->fmt->mbus_code;
+ mf->colorspace = sink->fmt->colorspace;
+ rect = &sink->rect;
+ }
+
+		/*
+		 * Only the sink pad format is freely settable; the source
+		 * pad size always mirrors the sink crop rectangle.
+		 */
+ mf->width = rect->width;
+ mf->height = rect->height;
+ }
+
+ mf->field = V4L2_FIELD_NONE;
+
+ v4l2_dbg(1, debug, &fimc->subdev, "code: %#x (%d), %dx%d\n",
+ mf->code, mf->colorspace, mf->width, mf->height);
+
+ return fmt;
+}
+
+static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->inp_frame;
+
+ v4l_bound_align_image(&r->width, 0, frame->f_width, 0,
+ &r->height, 0, frame->f_height, 0, 0);
+
+ /* Adjust left/top if cropping rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->dd->win_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d\n",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->out_frame;
+ struct v4l2_rect *crop_rect = &fimc->inp_frame.rect;
+
+	/*
+	 * Scaling is not supported, so the compose rectangle size is
+	 * constrained to the size of the sink crop rectangle.
+	 */
+ r->width = crop_rect->width;
+ r->height = crop_rect->height;
+
+ /* Adjust left/top if the composing rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->dd->out_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d\n",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+/*
+ * Video node ioctl operations
+ */
+static int fimc_lite_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ strlcpy(cap->driver, FIMC_LITE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, FIMC_LITE_DRV_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&fimc->pdev->dev));
+
+ cap->device_caps = V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int fimc_lite_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct fimc_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(fimc_lite_formats))
+ return -EINVAL;
+
+ fmt = &fimc_lite_formats[f->index];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int fimc_lite_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt = &pixm->plane_fmt[0];
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = frame->fmt;
+
+ plane_fmt->bytesperline = (frame->f_width * fmt->depth[0]) / 8;
+ plane_fmt->sizeimage = plane_fmt->bytesperline * frame->f_height;
+
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->width = frame->f_width;
+ pixm->height = frame->f_height;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->colorspace = fmt->colorspace;
+ return 0;
+}
+
+static int fimc_lite_try_fmt(struct fimc_lite *fimc,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct fimc_fmt **ffmt)
+{
+ u32 bpl = pixm->plane_fmt[0].bytesperline;
+ struct flite_drvdata *dd = fimc->dd;
+ const struct fimc_fmt *inp_fmt = fimc->inp_frame.fmt;
+ const struct fimc_fmt *fmt;
+
+ if (WARN_ON(inp_fmt == NULL))
+ return -EINVAL;
+ /*
+	 * We allow some flexibility only for YUV formats. In case of
+	 * raw Bayer the FIMC-LITE's output format must match its camera
+ * interface input format.
+ */
+ if (inp_fmt->flags & FMT_FLAGS_YUV)
+ fmt = fimc_lite_find_format(&pixm->pixelformat, NULL,
+ inp_fmt->flags, 0);
+ else
+ fmt = inp_fmt;
+
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+ if (ffmt)
+ *ffmt = fmt;
+ v4l_bound_align_image(&pixm->width, 8, dd->max_width,
+ ffs(dd->out_width_align) - 1,
+ &pixm->height, 0, dd->max_height, 0, 0);
+
+ if ((bpl == 0 || ((bpl * 8) / fmt->depth[0]) < pixm->width))
+ pixm->plane_fmt[0].bytesperline = (pixm->width *
+ fmt->depth[0]) / 8;
+
+ if (pixm->plane_fmt[0].sizeimage == 0)
+ pixm->plane_fmt[0].sizeimage = (pixm->width * pixm->height *
+ fmt->depth[0]) / 8;
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->colorspace = fmt->colorspace;
+ pixm->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static int fimc_lite_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ return fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, NULL);
+}
+
+static int fimc_lite_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = NULL;
+ int ret;
+
+ if (vb2_is_busy(&fimc->vb_queue))
+ return -EBUSY;
+
+ ret = fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, &fmt);
+ if (ret < 0)
+ return ret;
+
+ frame->fmt = fmt;
+ fimc->payload[0] = max((pixm->width * pixm->height * fmt->depth[0]) / 8,
+ pixm->plane_fmt[0].sizeimage);
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+
+ return 0;
+}
+
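+/*
+ * Walk the pipeline upstream starting at the FIMC-LITE sink pad and
+ * verify that the image format on each link's source and sink pads
+ * matches, returning -EPIPE on any mismatch.
+ */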
+static int fimc_pipeline_validate(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ /* Don't call FIMC subdev operation to avoid nested locking */
+ if (sd == &fimc->subdev) {
+ struct flite_frame *ff = &fimc->out_frame;
+ sink_fmt.format.width = ff->f_width;
+ sink_fmt.format.height = ff->f_height;
+ sink_fmt.format.code = fimc->inp_frame.fmt->mbus_code;
+ } else {
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
+ &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+ }
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
+ return 0;
+}
+
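+/*
+ * Start the media pipeline, validate the formats along it, resolve the
+ * remote sensor subdev and enable vb2 streaming.
+ */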
+static int fimc_lite_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *entity = &fimc->ve.vdev.entity;
+ int ret;
+
+ if (fimc_lite_active(fimc))
+ return -EBUSY;
+
+ ret = media_entity_pipeline_start(entity, &fimc->ve.pipe->mp);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_pipeline_validate(fimc);
+ if (ret < 0)
+ goto err_p_stop;
+
+ fimc->sensor = fimc_find_remote_sensor(&fimc->subdev.entity);
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ if (!ret) {
+ fimc->streaming = true;
+ return ret;
+ }
+
+err_p_stop:
+ media_entity_pipeline_stop(entity);
+	return ret;
+}
+
+static int fimc_lite_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ ret = vb2_ioctl_streamoff(file, priv, type);
+ if (ret < 0)
+ return ret;
+
+ media_entity_pipeline_stop(&fimc->ve.vdev.entity);
+ fimc->streaming = false;
+ return 0;
+}
+
+static int fimc_lite_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ reqbufs->count = max_t(u32, FLITE_REQ_BUFS_MIN, reqbufs->count);
+ ret = vb2_ioctl_reqbufs(file, priv, reqbufs);
+ if (!ret)
+ fimc->reqbufs_count = reqbufs->count;
+
+ return ret;
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int fimc_lite_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = f->rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int fimc_lite_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ fimc_lite_try_compose(fimc, &rect);
+
+ if ((sel->flags & V4L2_SEL_FLAG_LE) &&
+ !enclosed_rectangle(&rect, &sel->r))
+ return -ERANGE;
+
+ if ((sel->flags & V4L2_SEL_FLAG_GE) &&
+ !enclosed_rectangle(&sel->r, &rect))
+ return -ERANGE;
+
+ sel->r = rect;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = rect;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
+ .vidioc_querycap = fimc_lite_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_lite_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_lite_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_lite_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_lite_g_fmt_mplane,
+ .vidioc_g_selection = fimc_lite_g_selection,
+ .vidioc_s_selection = fimc_lite_s_selection,
+ .vidioc_reqbufs = fimc_lite_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = fimc_lite_streamon,
+ .vidioc_streamoff = fimc_lite_streamoff,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_lite_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ unsigned int remote_ent_type = media_entity_type(remote->entity);
+ int ret = 0;
+
+ if (WARN_ON(fimc == NULL))
+ return 0;
+
+ v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x\n",
+ __func__, remote->entity->name, local->entity->name,
+ flags, fimc->source_subdev_grp_id);
+
+ switch (local->index) {
+ case FLITE_SD_PAD_SINK:
+ if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV) {
+ ret = -EINVAL;
+ break;
+ }
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (fimc->source_subdev_grp_id == 0)
+ fimc->source_subdev_grp_id = sd->grp_id;
+ else
+ ret = -EBUSY;
+ } else {
+ fimc->source_subdev_grp_id = 0;
+ fimc->sensor = NULL;
+ }
+ break;
+
+ case FLITE_SD_PAD_SOURCE_DMA:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else if (remote_ent_type == MEDIA_ENT_T_DEVNODE)
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
+ else
+ ret = -EINVAL;
+ break;
+
+ case FLITE_SD_PAD_SOURCE_ISP:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
+ atomic_set(&fimc->out_path, FIMC_IO_ISP);
+ else
+ ret = -EINVAL;
+ break;
+
+ default:
+ v4l2_err(sd, "Invalid pad index\n");
+ ret = -EINVAL;
+ }
+ mb();
+
+ return ret;
+}
+
+static const struct media_entity_operations fimc_lite_subdev_media_ops = {
+ .link_setup = fimc_lite_link_setup,
+};
+
+static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(NULL, NULL, 0, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *__fimc_lite_subdev_get_try_fmt(
+ struct v4l2_subdev_fh *fh, unsigned int pad)
+{
+ if (pad != FLITE_SD_PAD_SINK)
+ pad = FLITE_SD_PAD_SOURCE_DMA;
+
+ return v4l2_subdev_get_try_format(fh, pad);
+}
+
+static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *f = &fimc->inp_frame;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = __fimc_lite_subdev_get_try_fmt(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&fimc->lock);
+ mf->colorspace = f->fmt->colorspace;
+ mf->code = f->fmt->mbus_code;
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ /* full camera input frame size */
+ mf->width = f->f_width;
+ mf->height = f->f_height;
+ } else {
+ /* crop size */
+ mf->width = f->rect.width;
+ mf->height = f->rect.height;
+ }
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *sink = &fimc->inp_frame;
+ struct flite_frame *source = &fimc->out_frame;
+ const struct fimc_fmt *ffmt;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d\n",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mutex_lock(&fimc->lock);
+
+ if ((atomic_read(&fimc->out_path) == FIMC_IO_ISP &&
+ sd->entity.stream_count > 0) ||
+ (atomic_read(&fimc->out_path) == FIMC_IO_DMA &&
+ vb2_is_busy(&fimc->vb_queue))) {
+ mutex_unlock(&fimc->lock);
+ return -EBUSY;
+ }
+
+ ffmt = fimc_lite_subdev_try_fmt(fimc, fh, fmt);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ mf = __fimc_lite_subdev_get_try_fmt(fh, fmt->pad);
+ *mf = fmt->format;
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ unsigned int pad = FLITE_SD_PAD_SOURCE_DMA;
+ src_fmt = __fimc_lite_subdev_get_try_fmt(fh, pad);
+ *src_fmt = *mf;
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+ }
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ sink->f_width = mf->width;
+ sink->f_height = mf->height;
+ sink->fmt = ffmt;
+ /* Set sink crop rectangle */
+ sink->rect.width = mf->width;
+ sink->rect.height = mf->height;
+ sink->rect.left = 0;
+ sink->rect.top = 0;
+ /* Reset source format and crop rectangle */
+ source->rect = sink->rect;
+ source->f_width = mf->width;
+ source->f_height = mf->height;
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&fimc->lock);
+ if (sel->target == V4L2_SEL_TGT_CROP) {
+ sel->r = f->rect;
+ } else {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+ int ret = 0;
+
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+ fimc_lite_try_crop(fimc, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = sel->r;
+ /* Same crop rectangle on the source pad */
+ fimc->out_frame.rect = sel->r;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return ret;
+}
+
+static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Find sensor subdev linked to FIMC-LITE directly or through
+ * MIPI-CSIS. This is required for configuration where FIMC-LITE
+ * is used as a subdev only and feeds data internally to FIMC-IS.
+ * The pipeline links are protected through entity.stream_count
+ * so there is no need to take the media graph mutex here.
+ */
+ fimc->sensor = fimc_find_remote_sensor(&sd->entity);
+
+ if (atomic_read(&fimc->out_path) != FIMC_IO_ISP)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&fimc->lock);
+ if (on) {
+ flite_hw_reset(fimc);
+ ret = fimc_lite_hw_init(fimc, true);
+ if (!ret) {
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ } else {
+ set_bit(ST_FLITE_OFF, &fimc->state);
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ ret = wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ msecs_to_jiffies(200));
+ if (ret == 0)
+ v4l2_err(sd, "s_stream(0) timeout\n");
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ }
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_lite_log_status(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ flite_hw_dump_regs(fimc, __func__);
+ return 0;
+}
+
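+/*
+ * Called when the FIMC-LITE subdev is registered with its parent media
+ * device: initialize the vb2 queue and register the capture video
+ * device node.
+ */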
+static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct vb2_queue *q = &fimc->vb_queue;
+ struct video_device *vfd = &fimc->ve.vdev;
+ int ret;
+
+ memset(vfd, 0, sizeof(*vfd));
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture",
+ fimc->index);
+
+ vfd->fops = &fimc_lite_fops;
+ vfd->ioctl_ops = &fimc_lite_ioctl_ops;
+ vfd->v4l2_dev = sd->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->queue = q;
+ fimc->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&fimc->pending_buf_q);
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &fimc_lite_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct flite_buffer);
+ q->drv_priv = fimc;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &fimc->lock;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ return ret;
+
+ fimc->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &fimc->vd_pad, 0);
+ if (ret < 0)
+ return ret;
+
+ video_set_drvdata(vfd, fimc);
+ fimc->ve.pipe = v4l2_get_subdev_hostdata(sd);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ media_entity_cleanup(&vfd->entity);
+ fimc->ve.pipe = NULL;
+ return ret;
+ }
+
+ v4l2_info(sd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+}
+
+static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc == NULL)
+ return;
+
+ mutex_lock(&fimc->lock);
+
+ if (video_is_registered(&fimc->ve.vdev)) {
+ video_unregister_device(&fimc->ve.vdev);
+ media_entity_cleanup(&fimc->ve.vdev.entity);
+ fimc->ve.pipe = NULL;
+ }
+
+ mutex_unlock(&fimc->lock);
+}
+
+static const struct v4l2_subdev_internal_ops fimc_lite_subdev_internal_ops = {
+ .registered = fimc_lite_subdev_registered,
+ .unregistered = fimc_lite_subdev_unregistered,
+};
+
+static const struct v4l2_subdev_pad_ops fimc_lite_subdev_pad_ops = {
+ .enum_mbus_code = fimc_lite_subdev_enum_mbus_code,
+ .get_selection = fimc_lite_subdev_get_selection,
+ .set_selection = fimc_lite_subdev_set_selection,
+ .get_fmt = fimc_lite_subdev_get_fmt,
+ .set_fmt = fimc_lite_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = {
+ .s_stream = fimc_lite_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
+ .log_status = fimc_lite_log_status,
+};
+
+static struct v4l2_subdev_ops fimc_lite_subdev_ops = {
+ .core = &fimc_lite_core_ops,
+ .video = &fimc_lite_subdev_video_ops,
+ .pad = &fimc_lite_subdev_pad_ops,
+};
+
+static int fimc_lite_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_lite *fimc = container_of(ctrl->handler, struct fimc_lite,
+ ctrl_handler);
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_lite_ctrl_ops = {
+ .s_ctrl = fimc_lite_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fimc_lite_ctrl = {
+ .ops = &fimc_lite_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER | 0x1001,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Test Pattern 640x480",
+ .step = 1,
+};
+
+static void fimc_lite_set_default_config(struct fimc_lite *fimc)
+{
+ struct flite_frame *sink = &fimc->inp_frame;
+ struct flite_frame *source = &fimc->out_frame;
+
+ sink->fmt = &fimc_lite_formats[0];
+ sink->f_width = FLITE_DEFAULT_WIDTH;
+ sink->f_height = FLITE_DEFAULT_HEIGHT;
+
+ sink->rect.width = FLITE_DEFAULT_WIDTH;
+ sink->rect.height = FLITE_DEFAULT_HEIGHT;
+ sink->rect.left = 0;
+ sink->rect.top = 0;
+
+ *source = *sink;
+}
+
+static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_ctrl_handler *handler = &fimc->ctrl_handler;
+ struct v4l2_subdev *sd = &fimc->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_lite_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index);
+
+ fimc->subdev_pads[FLITE_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_DMA].flags = MEDIA_PAD_FL_SOURCE;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_ISP].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FLITE_SD_PADS_NUM,
+ fimc->subdev_pads, 0);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 1);
+ fimc->test_pattern = v4l2_ctrl_new_custom(handler, &fimc_lite_ctrl,
+ NULL);
+ if (handler->error) {
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ sd->ctrl_handler = handler;
+ sd->internal_ops = &fimc_lite_subdev_internal_ops;
+ sd->entity.ops = &fimc_lite_subdev_media_ops;
+ sd->owner = THIS_MODULE;
+ v4l2_set_subdevdata(sd, fimc);
+
+ return 0;
+}
+
+static void fimc_lite_unregister_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&fimc->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+static void fimc_lite_clk_put(struct fimc_lite *fimc)
+{
+ if (IS_ERR(fimc->clock))
+ return;
+
+ clk_unprepare(fimc->clock);
+ clk_put(fimc->clock);
+ fimc->clock = ERR_PTR(-EINVAL);
+}
+
+static int fimc_lite_clk_get(struct fimc_lite *fimc)
+{
+ int ret;
+
+ fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME);
+ if (IS_ERR(fimc->clock))
+ return PTR_ERR(fimc->clock);
+
+ ret = clk_prepare(fimc->clock);
+ if (ret < 0) {
+ clk_put(fimc->clock);
+ fimc->clock = ERR_PTR(-EINVAL);
+ }
+ return ret;
+}
+
+static const struct of_device_id flite_of_match[];
+
+static int fimc_lite_probe(struct platform_device *pdev)
+{
+ struct flite_drvdata *drv_data = NULL;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct fimc_lite *fimc;
+ struct resource *res;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL);
+ if (!fimc)
+ return -ENOMEM;
+
+ of_id = of_match_node(flite_of_match, dev->of_node);
+ if (of_id)
+ drv_data = (struct flite_drvdata *)of_id->data;
+ fimc->index = of_alias_get_id(dev->of_node, "fimc-lite");
+
+ if (!drv_data || fimc->index >= drv_data->num_instances ||
+ fimc->index < 0) {
+ dev_err(dev, "Wrong %s node alias\n",
+ dev->of_node->full_name);
+ return -EINVAL;
+ }
+
+ fimc->dd = drv_data;
+ fimc->pdev = pdev;
+
+ init_waitqueue_head(&fimc->irq_queue);
+ spin_lock_init(&fimc->slock);
+ mutex_init(&fimc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fimc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fimc->regs))
+ return PTR_ERR(fimc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "Failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ ret = fimc_lite_clk_get(fimc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, res->start, flite_irq_handler,
+ 0, dev_name(dev), fimc);
+ if (ret) {
+ dev_err(dev, "Failed to install irq (%d)\n", ret);
+ goto err_clk_put;
+ }
+
+ /* The video node will be created within the subdev's registered() op */
+ ret = fimc_lite_create_capture_subdev(fimc);
+ if (ret)
+ goto err_clk_put;
+
+ platform_set_drvdata(pdev, fimc);
+ pm_runtime_enable(dev);
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_enable(fimc->clock);
+ if (ret < 0)
+ goto err_sd;
+ }
+
+ fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
+ if (IS_ERR(fimc->alloc_ctx)) {
+ ret = PTR_ERR(fimc->alloc_ctx);
+ goto err_clk_dis;
+ }
+
+ fimc_lite_set_default_config(fimc);
+
+ dev_dbg(dev, "FIMC-LITE.%d registered successfully\n",
+ fimc->index);
+ return 0;
+
+err_clk_dis:
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock);
+err_sd:
+ fimc_lite_unregister_capture_subdev(fimc);
+err_clk_put:
+ fimc_lite_clk_put(fimc);
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_lite_runtime_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_enable(fimc->clock);
+ return 0;
+}
+
+static int fimc_lite_runtime_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_disable(fimc->clock);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_lite_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ struct flite_buffer *buf;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+ !test_bit(ST_FLITE_IN_USE, &fimc->state)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ flite_hw_reset(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_clear_bit(ST_FLITE_SUSPENDED, &fimc->state))
+ return 0;
+
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+ fimc_pipeline_call(&fimc->ve, open,
+ &fimc->ve.vdev.entity, false);
+ fimc_lite_hw_init(fimc, atomic_read(&fimc->out_path) == FIMC_IO_ISP);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+
+ for (i = 0; i < fimc->reqbufs_count; i++) {
+ if (list_empty(&fimc->pending_buf_q))
+ break;
+ buf = fimc_lite_pending_queue_pop(fimc);
+ buffer_queue(&buf->vb);
+ }
+ return 0;
+}
+
+static int fimc_lite_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ bool suspend = test_bit(ST_FLITE_IN_USE, &fimc->state);
+ int ret;
+
+ if (test_and_set_bit(ST_LPM, &fimc->state))
+ return 0;
+
+ ret = fimc_lite_stop_capture(fimc, suspend);
+ if (ret < 0 || !fimc_lite_active(fimc))
+ return ret;
+
+ return fimc_pipeline_call(&fimc->ve, close);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int fimc_lite_remove(struct platform_device *pdev)
+{
+ struct fimc_lite *fimc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ fimc_lite_unregister_capture_subdev(fimc);
+ vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+ fimc_lite_clk_put(fimc);
+
+ dev_info(dev, "Driver unloaded\n");
+ return 0;
+}
+
+static const struct dev_pm_ops fimc_lite_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_lite_suspend, fimc_lite_resume)
+ SET_RUNTIME_PM_OPS(fimc_lite_runtime_suspend, fimc_lite_runtime_resume,
+ NULL)
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static struct flite_drvdata fimc_lite_drvdata_exynos4 = {
+ .max_width = 8192,
+ .max_height = 8192,
+ .out_width_align = 8,
+ .win_hor_offs_align = 2,
+ .out_hor_offs_align = 8,
+ .max_dma_bufs = 1,
+ .num_instances = 2,
+};
+
+/* EXYNOS5250 */
+static struct flite_drvdata fimc_lite_drvdata_exynos5 = {
+ .max_width = 8192,
+ .max_height = 8192,
+ .out_width_align = 8,
+ .win_hor_offs_align = 2,
+ .out_hor_offs_align = 8,
+ .max_dma_bufs = 32,
+ .num_instances = 3,
+};
+
+static const struct of_device_id flite_of_match[] = {
+ {
+ .compatible = "samsung,exynos4212-fimc-lite",
+ .data = &fimc_lite_drvdata_exynos4,
+ },
+ {
+ .compatible = "samsung,exynos5250-fimc-lite",
+ .data = &fimc_lite_drvdata_exynos5,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, flite_of_match);
+
+static struct platform_driver fimc_lite_driver = {
+ .probe = fimc_lite_probe,
+ .remove = fimc_lite_remove,
+ .driver = {
+ .of_match_table = flite_of_match,
+ .name = FIMC_LITE_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &fimc_lite_pm_ops,
+ }
+};
+module_platform_driver(fimc_lite_driver);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" FIMC_LITE_DRV_NAME);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/exynos4-is/fimc-lite.h
new file mode 100644
index 00000000000..ea19dc7be63
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_H_
+#define FIMC_LITE_H_
+
+#include <linux/sizes.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/exynos-fimc.h>
+
+#define FIMC_LITE_DRV_NAME "exynos-fimc-lite"
+#define FLITE_CLK_NAME "flite"
+#define FIMC_LITE_MAX_DEVS 3
+#define FLITE_REQ_BUFS_MIN 2
+#define FLITE_DEFAULT_WIDTH 640
+#define FLITE_DEFAULT_HEIGHT 480
+
+/* Bit index definitions for struct fimc_lite::state */
+enum {
+ ST_FLITE_LPM,
+ ST_FLITE_PENDING,
+ ST_FLITE_RUN,
+ ST_FLITE_STREAM,
+ ST_FLITE_SUSPENDED,
+ ST_FLITE_OFF,
+ ST_FLITE_IN_USE,
+ ST_FLITE_CONFIG,
+ ST_SENSOR_STREAM,
+};
+
+#define FLITE_SD_PAD_SINK 0
+#define FLITE_SD_PAD_SOURCE_DMA 1
+#define FLITE_SD_PAD_SOURCE_ISP 2
+#define FLITE_SD_PADS_NUM 3
+
+/**
+ * struct flite_drvdata - FIMC-LITE IP variant data structure
+ * @max_width: maximum camera interface input width in pixels
+ * @max_height: maximum camera interface input height in pixels
+ * @out_width_align: minimum output width alignment in pixels
+ * @win_hor_offs_align: minimum camera interface crop window horizontal
+ * offset alignment in pixels
+ * @out_hor_offs_align: minimum output DMA compose rectangle horizontal
+ * offset alignment in pixels
+ * @max_dma_bufs: number of output DMA buffer start address registers
+ * @num_instances: total number of FIMC-LITE IP instances available
+ */
+struct flite_drvdata {
+ unsigned short max_width;
+ unsigned short max_height;
+ unsigned short out_width_align;
+ unsigned short win_hor_offs_align;
+ unsigned short out_hor_offs_align;
+ unsigned short max_dma_bufs;
+ unsigned short num_instances;
+};
+
+struct fimc_lite_events {
+ unsigned int data_overflow;
+};
+
+#define FLITE_MAX_PLANES 1
+
+/**
+ * struct flite_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ * @fmt: pointer to pixel format description data structure
+ */
+struct flite_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+ const struct fimc_fmt *fmt;
+};
+
+/**
+ * struct flite_buffer - video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: DMA buffer start address
+ * @index: DMA start address register's index
+ */
+struct flite_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ dma_addr_t paddr;
+ unsigned short index;
+};
+
+/**
+ * struct fimc_lite - FIMC-LITE device private data structure
+ * @pdev: pointer to FIMC-LITE platform device
+ * @dd: SoC specific driver data structure
+ * @ve: exynos video device entity structure
+ * @v4l2_dev: pointer to the top level v4l2_device
+ * @fh: v4l2 file handle
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @subdev: FIMC-LITE subdev
+ * @vd_pad: media (sink) pad for the capture video node
+ * @subdev_pads: the subdev media pads
+ * @sensor: sensor subdev attached to FIMC-LITE directly or through MIPI-CSIS
+ * @ctrl_handler: v4l2 control handler
+ * @test_pattern: test pattern controls
+ * @index: FIMC-LITE platform device index
+ * @slock: spinlock protecting this data structure and the hw registers
+ * @lock: mutex serializing video device and the subdev operations
+ * @clock: FIMC-LITE gate clock
+ * @regs: memory mapped io registers
+ * @irq_queue: interrupt handler waitqueue
+ * @payload: image size in bytes (w x h x bpp)
+ * @inp_frame: camera input frame structure
+ * @out_frame: DMA output frame structure
+ * @out_path: output data path (DMA or FIFO)
+ * @source_subdev_grp_id: source subdev group id
+ * @state: driver state flags
+ * @pending_buf_q: pending buffers queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vb_queue: vb2 buffers queue
+ * @buf_index: helps to keep track of the DMA start address register index
+ * @frame_count: the captured frames counter
+ * @reqbufs_count: the number of buffers requested with REQBUFS ioctl
+ * @events: event counters (currently the data overflow count)
+ * @streaming: true when streaming is enabled on the video node
+ */
+struct fimc_lite {
+ struct platform_device *pdev;
+ struct flite_drvdata *dd;
+ struct exynos_video_entity ve;
+ struct v4l2_device *v4l2_dev;
+ struct v4l2_fh fh;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct v4l2_subdev subdev;
+ struct media_pad vd_pad;
+ struct media_pad subdev_pads[FLITE_SD_PADS_NUM];
+ struct v4l2_subdev *sensor;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *test_pattern;
+ int index;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ struct clk *clock;
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+
+ unsigned long payload[FLITE_MAX_PLANES];
+ struct flite_frame inp_frame;
+ struct flite_frame out_frame;
+ atomic_t out_path;
+ unsigned int source_subdev_grp_id;
+
+ unsigned long state;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vb_queue;
+ unsigned short buf_index;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+
+ struct fimc_lite_events events;
+ bool streaming;
+};
+
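+/* Return true when capture is running or pending (checked under the slock) */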
+static inline bool fimc_lite_active(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ ret = fimc->state & (1 << ST_FLITE_RUN) ||
+ fimc->state & (1 << ST_FLITE_PENDING);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+static inline void fimc_lite_active_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->active_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_active_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->active_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+static inline void fimc_lite_pending_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->pending_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_pending_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->pending_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* FIMC_LITE_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
new file mode 100644
index 00000000000..0ad1b6f84a2
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -0,0 +1,772 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "common.h"
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "media-dev.h"
+
+static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+{
+ if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return FMT_FLAGS_M2M_IN;
+ else
+ return FMT_FLAGS_M2M_OUT;
+}
+
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
+{
+ struct vb2_buffer *src_vb, *dst_vb;
+
+ if (!ctx || !ctx->fh.m2m_ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+ v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
+ ctx->fh.m2m_ctx);
+ }
+}
+
+/* Complete the transaction which has been scheduled for execution. */
+static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ if (!fimc_m2m_pending(fimc))
+ return 0;
+
+ fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
+
+ ret = wait_event_timeout(fimc->irq_queue,
+ !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ return ret == 0 ? -ETIMEDOUT : ret;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
+ return ret > 0 ? 0 : ret;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = fimc_m2m_shutdown(ctx);
+ if (ret == -ETIMEDOUT)
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ pm_runtime_put(&ctx->fimc_dev->pdev->dev);
+}
+
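+/*
+ * m2m device_run callback: program the source and destination DMA
+ * addresses and, if the context has changed, reprogram the input/output
+ * paths, scaler, rotation and effect settings, then start processing.
+ */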
+static void fimc_device_run(void *priv)
+{
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct fimc_ctx *ctx = priv;
+ struct fimc_frame *sf, *df;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+ int ret;
+
+ if (WARN(!ctx, "Null context\n"))
+ return;
+
+ fimc = ctx->fimc_dev;
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ set_bit(ST_M2M_PEND, &fimc->state);
+ sf = &ctx->s_frame;
+ df = &ctx->d_frame;
+
+ if (ctx->state & FIMC_PARAMS) {
+ /* Prepare the DMA offsets for scaler */
+ fimc_prepare_dma_offset(ctx, sf);
+ fimc_prepare_dma_offset(ctx, df);
+ }
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ /* Reconfigure hardware if the context has changed. */
+ if (fimc->m2m.ctx != ctx) {
+ ctx->state |= FIMC_PARAMS;
+ fimc->m2m.ctx = ctx;
+ }
+
+ if (ctx->state & FIMC_PARAMS) {
+ fimc_set_yuv_order(ctx);
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_in_dma(ctx);
+ ret = fimc_set_scaler_info(ctx);
+ if (ret)
+ goto dma_unlock;
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->drv_data->alpha_color)
+ fimc_hw_set_rgb_alpha(ctx);
+ fimc_hw_set_output_path(ctx);
+ }
+ fimc_hw_set_input_addr(fimc, &sf->paddr);
+ fimc_hw_set_output_addr(fimc, &df->paddr, -1);
+
+ fimc_activate_capture(ctx);
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
+ fimc_hw_activate_input_dma(fimc, true);
+
+dma_unlock:
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_job_abort(void *priv)
+{
+ fimc_m2m_shutdown(priv);
+}
+
+static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fimc_frame *f;
+ int i;
+
+ f = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ /*
+	 * Return the number of non-contiguous planes (plane buffers)
+ * depending on the configured color format.
+ */
+ if (!f->fmt)
+ return -EINVAL;
+
+ *num_planes = f->fmt->memplanes;
+ for (i = 0; i < f->fmt->memplanes; i++) {
+ sizes[i] = f->payload[i];
+ allocators[i] = ctx->fimc_dev->alloc_ctx;
+ }
+ return 0;
+}
+
+static int fimc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ for (i = 0; i < frame->fmt->memplanes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+
+ return 0;
+}
+
+static void fimc_buf_queue(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+}
+
+static struct vb2_ops fimc_qops = {
+ .queue_setup = fimc_queue_setup,
+ .buf_prepare = fimc_buf_prepare,
+ .buf_queue = fimc_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = stop_streaming,
+ .start_streaming = start_streaming,
+};
+
+/*
+ * V4L2 ioctl handlers
+ */
+static int fimc_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ unsigned int caps;
+
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ __fimc_vidioc_querycap(&fimc->pdev->dev, cap, caps);
+ return 0;
+}
+
+static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
+ f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ __fimc_get_format(frame, f);
+ return 0;
+}
+
+static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *variant = fimc->variant;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_fmt *fmt;
+ u32 max_w, mod_x, mod_y;
+
+ if (!IS_M2M(f->type))
+ return -EINVAL;
+
+ fmt = fimc_find_format(&pix->pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (WARN(fmt == NULL, "Pixel format lookup failed"))
+ return -EINVAL;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+ else if (pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ max_w = variant->pix_limit->scaler_dis_w;
+ mod_x = ffs(variant->min_inp_pixsize) - 1;
+ } else {
+ max_w = variant->pix_limit->out_rot_dis_w;
+ mod_x = ffs(variant->min_out_pixsize) - 1;
+ }
+
+ if (tiled_fmt(fmt)) {
+ mod_x = 6; /* 64 x 32 pixels tile */
+ mod_y = 5;
+ } else {
+ if (variant->min_vsize_align == 1)
+ mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
+ else
+ mod_y = ffs(variant->min_vsize_align) - 1;
+ }
+
+ v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
+ &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
+
+ fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
+ return 0;
+}
+
+static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ return fimc_try_fmt_mplane(ctx, f);
+}
+
+static void __set_frame_format(struct fimc_frame *frame, struct fimc_fmt *fmt,
+ struct v4l2_pix_format_mplane *pixm)
+{
+ int i;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ frame->bytesperline[i] = pixm->plane_fmt[i].bytesperline;
+ frame->payload[i] = pixm->plane_fmt[i].sizeimage;
+ }
+
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+ frame->o_width = pixm->width;
+ frame->o_height = pixm->height;
+ frame->width = pixm->width;
+ frame->height = pixm->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+ frame->fmt = fmt;
+}
+
+static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_fmt *fmt;
+ struct vb2_queue *vq;
+ struct fimc_frame *frame;
+ int ret;
+
+ ret = fimc_try_fmt_mplane(ctx, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&fimc->m2m.vfd, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ fmt = fimc_find_format(&f->fmt.pix_mp.pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (!fmt)
+ return -EINVAL;
+
+ __set_frame_format(frame, fmt, &f->fmt.pix_mp);
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ return 0;
+}
+
+static int fimc_m2m_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->o_width;
+ cr->bounds.height = frame->o_height;
+ cr->defrect = cr->bounds;
+
+ return 0;
+}
+
+static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c.left = frame->offs_h;
+ cr->c.top = frame->offs_v;
+ cr->c.width = frame->width;
+ cr->c.height = frame->height;
+
+ return 0;
+}
+
+static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *f;
+ u32 min_size, halign, depth = 0;
+ int i;
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ v4l2_err(&fimc->m2m.vfd,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ f = &ctx->d_frame;
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ min_size = (f == &ctx->s_frame) ?
+ fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
+
+ /* Get pixel alignment constraints. */
+ if (fimc->variant->min_vsize_align == 1)
+ halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+ else
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
+
+ for (i = 0; i < f->fmt->memplanes; i++)
+ depth += f->fmt->depth[i];
+
+ v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+ ffs(min_size) - 1,
+ &cr->c.height, min_size, f->o_height,
+ halign, 64/(ALIGN(depth, 8)));
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (cr->c.left + cr->c.width > f->o_width)
+ cr->c.left = f->o_width - cr->c.width;
+ if (cr->c.top + cr->c.height > f->o_height)
+ cr->c.top = f->o_height - cr->c.height;
+
+ cr->c.left = round_down(cr->c.left, min_size);
+ cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
+
+ dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+ f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_crop cr = *crop;
+ struct fimc_frame *f;
+ int ret;
+
+ ret = fimc_m2m_try_crop(ctx, &cr);
+ if (ret)
+ return ret;
+
+ f = (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+ &ctx->s_frame : &ctx->d_frame;
+
+ /* Check to see if scaling ratio is within supported range */
+ if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = fimc_check_scaler_ratio(ctx, cr.c.width,
+ cr.c.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctx->rotation);
+ } else {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, cr.c.width,
+ cr.c.height, ctx->rotation);
+ }
+ if (ret) {
+ v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
+ return -EINVAL;
+ }
+
+ f->offs_h = cr.c.left;
+ f->offs_v = cr.c.top;
+ f->width = cr.c.width;
+ f->height = cr.c.height;
+
+ fimc_ctx_state_set(FIMC_PARAMS, ctx);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
+ .vidioc_querycap = fimc_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_crop = fimc_m2m_g_crop,
+ .vidioc_s_crop = fimc_m2m_s_crop,
+	.vidioc_cropcap			= fimc_m2m_cropcap,
+};
+
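+/* Initialize the source (output) and destination (capture) vb2 queues. */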
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &fimc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fimc_dev->lock;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &fimc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fimc_dev->lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int fimc_m2m_set_default_format(struct fimc_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane pixm = {
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .width = 800,
+ .height = 600,
+ .plane_fmt[0] = {
+ .bytesperline = 800 * 4,
+ .sizeimage = 800 * 4 * 600,
+ },
+ };
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(&pixm.pixelformat, NULL, FMT_FLAGS_M2M, 0);
+ if (!fmt)
+ return -EINVAL;
+
+ __set_frame_format(&ctx->s_frame, fmt, &pixm);
+ __set_frame_format(&ctx->d_frame, fmt, &pixm);
+
+ return 0;
+}
+
+static int fimc_m2m_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx;
+ int ret = -EBUSY;
+
+ pr_debug("pid: %d, state: %#lx\n", task_pid_nr(current), fimc->state);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+ /*
+ * Don't allow simultaneous open() of the mem-to-mem and the
+	 * capture video nodes that belong to the same FIMC IP instance.
+ */
+ if (test_bit(ST_CAPT_BUSY, &fimc->state))
+ goto unlock;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ v4l2_fh_init(&ctx->fh, &fimc->m2m.vfd);
+ ctx->fimc_dev = fimc;
+
+ /* Default color format */
+ ctx->s_frame.fmt = fimc_get_format(0);
+ ctx->d_frame.fmt = fimc_get_format(0);
+
+ ret = fimc_ctrls_create(ctx);
+ if (ret)
+ goto error_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrls.handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ /* Setup the device context for memory-to-memory mode */
+ ctx->state = FIMC_CTX_M2M;
+ ctx->flags = 0;
+ ctx->in_path = FIMC_IO_DMA;
+ ctx->out_path = FIMC_IO_DMA;
+ ctx->scaler.enabled = 1;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error_c;
+ }
+
+ if (fimc->m2m.refcnt++ == 0)
+ set_bit(ST_M2M_RUN, &fimc->state);
+
+ ret = fimc_m2m_set_default_format(ctx);
+ if (ret < 0)
+ goto error_m2m_ctx;
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+
+error_m2m_ctx:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+error_c:
+ fimc_ctrls_delete(ctx);
+error_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_m2m_release(struct file *file)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ dbg("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+
+ mutex_lock(&fimc->lock);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ fimc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--fimc->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_RUN, &fimc->state);
+ kfree(ctx);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static const struct v4l2_file_operations fimc_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_m2m_open,
+ .release = fimc_m2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fimc_device_run,
+ .job_abort = fimc_job_abort,
+};
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vfd = &fimc->m2m.vfd;
+ int ret;
+
+ fimc->v4l2_dev = v4l2_dev;
+
+ memset(vfd, 0, sizeof(*vfd));
+ vfd->fops = &fimc_m2m_fops;
+ vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &fimc->lock;
+ vfd->vfl_dir = VFL_DIR_M2M;
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
+ video_set_drvdata(vfd, fimc);
+
+ fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fimc->m2m.m2m_dev)) {
+ v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
+ return PTR_ERR(fimc->m2m.m2m_dev);
+ }
+
+ ret = media_entity_init(&vfd->entity, 0, NULL, 0);
+ if (ret)
+ goto err_me;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vd;
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_vd:
+ media_entity_cleanup(&vfd->entity);
+err_me:
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+ return ret;
+}
+
+void fimc_unregister_m2m_device(struct fimc_dev *fimc)
+{
+ if (!fimc)
+ return;
+
+ if (fimc->m2m.m2m_dev)
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+
+ if (video_is_registered(&fimc->m2m.vfd)) {
+ video_unregister_device(&fimc->m2m.vfd);
+ media_entity_cleanup(&fimc->m2m.vfd.entity);
+ }
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/exynos4-is/fimc-reg.c
new file mode 100644
index 00000000000..2d77fd8f440
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-reg.c
@@ -0,0 +1,842 @@
+/*
+ * Register interface file for Samsung Camera Interface (FIMC) driver
+ *
+ * Copyright (C) 2010 - 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+#include <media/exynos-fimc.h>
+#include "media-dev.h"
+
+#include "fimc-reg.h"
+#include "fimc-core.h"
+
+void fimc_hw_reset(struct fimc_dev *dev)
+{
+ u32 cfg;
+
+ cfg = readl(dev->regs + FIMC_REG_CISRCFMT);
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ writel(cfg, dev->regs + FIMC_REG_CISRCFMT);
+
+ /* Software reset. */
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= (FIMC_REG_CIGCTRL_SWRST | FIMC_REG_CIGCTRL_IRQ_LEVEL);
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+ udelay(10);
+
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg &= ~FIMC_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+
+ if (dev->drv_data->out_buf_count > 4)
+ fimc_hw_set_dma_seq(dev, 0xF);
+}
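+
+/*
+ * Note: nearly every helper in this file follows the same read-modify-write
+ * pattern on a single FIMC register. A hypothetical helper capturing that
+ * pattern could look like the sketch below (illustration only, not wired
+ * into the driver):
+ */
+#if 0
+static void fimc_hw_update_bits(struct fimc_dev *dev, u32 off, u32 mask,
+				u32 val)
+{
+	u32 cfg = readl(dev->regs + off);
+
+	cfg &= ~mask;
+	cfg |= val & mask;
+	writel(cfg, dev->regs + off);
+}
+#endif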
+
+static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
+{
+ u32 flip = FIMC_REG_MSCTRL_FLIP_NORMAL;
+
+ if (ctx->hflip)
+ flip = FIMC_REG_MSCTRL_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
+
+ if (ctx->rotation <= 90)
+ return flip;
+
+ return (flip ^ FIMC_REG_MSCTRL_FLIP_180) & FIMC_REG_MSCTRL_FLIP_180;
+}
+
+static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
+{
+ u32 flip = FIMC_REG_CITRGFMT_FLIP_NORMAL;
+
+ if (ctx->hflip)
+ flip |= FIMC_REG_CITRGFMT_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
+
+ if (ctx->rotation <= 90)
+ return flip;
+
+ return (flip ^ FIMC_REG_CITRGFMT_FLIP_180) & FIMC_REG_CITRGFMT_FLIP_180;
+}
+
+void fimc_hw_set_rotation(struct fimc_ctx *ctx)
+{
+ u32 cfg, flip;
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_INROT90 | FIMC_REG_CITRGFMT_OUTROT90 |
+ FIMC_REG_CITRGFMT_FLIP_180);
+
+ /*
+ * The input and output rotator cannot work simultaneously.
+ * Use the output rotator in output DMA mode or the input rotator
+ * in direct fifo output mode.
+ */
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CITRGFMT_INROT90;
+ else
+ cfg |= FIMC_REG_CITRGFMT_OUTROT90;
+ }
+
+ if (ctx->out_path == FIMC_IO_DMA) {
+ cfg |= fimc_hw_get_target_flip(ctx);
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
+ } else {
+ /* LCD FIFO path */
+ flip = readl(dev->regs + FIMC_REG_MSCTRL);
+ flip &= ~FIMC_REG_MSCTRL_FLIP_MASK;
+ flip |= fimc_hw_get_in_flip(ctx);
+ writel(flip, dev->regs + FIMC_REG_MSCTRL);
+ }
+}
+
+void fimc_hw_set_target_format(struct fimc_ctx *ctx)
+{
+ u32 cfg;
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+
+ dbg("w= %d, h= %d color: %d", frame->width,
+ frame->height, frame->fmt->color);
+
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_FMT_MASK | FIMC_REG_CITRGFMT_HSIZE_MASK |
+ FIMC_REG_CITRGFMT_VSIZE_MASK);
+
+ switch (frame->fmt->color) {
+ case FIMC_FMT_RGB444...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CITRGFMT_RGB;
+ break;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_CITRGFMT_YCBCR420;
+ break;
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
+ if (frame->fmt->colplanes == 1)
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422_1P;
+ else
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422;
+ break;
+ default:
+ break;
+ }
+
+ if (ctx->rotation == 90 || ctx->rotation == 270)
+ cfg |= (frame->height << 16) | frame->width;
+ else
+ cfg |= (frame->width << 16) | frame->height;
+
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
+
+ cfg = readl(dev->regs + FIMC_REG_CITAREA);
+ cfg &= ~FIMC_REG_CITAREA_MASK;
+ cfg |= (frame->width * frame->height);
+ writel(cfg, dev->regs + FIMC_REG_CITAREA);
+}
+
+static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ cfg = (frame->f_height << 16) | frame->f_width;
+ writel(cfg, dev->regs + FIMC_REG_ORGOSIZE);
+
+	/* Select color space conversion equation (HD/SD size). */
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ if (frame->f_width >= 1280) /* HD */
+ cfg |= FIMC_REG_CIGCTRL_CSC_ITU601_709;
+ else /* SD */
+ cfg &= ~FIMC_REG_CIGCTRL_CSC_ITU601_709;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+}
+
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ struct fimc_dma_offset *offset = &frame->dma_offset;
+ struct fimc_fmt *fmt = frame->fmt;
+ u32 cfg;
+
+	/* Set the output DMA offsets. */
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOYOFF);
+
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCBOFF);
+
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCROFF);
+
+ fimc_hw_set_out_dma_size(ctx);
+
+ /* Configure chroma components order. */
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+
+ cfg &= ~(FIMC_REG_CIOCTRL_ORDER2P_MASK |
+ FIMC_REG_CIOCTRL_ORDER422_MASK |
+ FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK |
+ FIMC_REG_CIOCTRL_RGB16FMT_MASK);
+
+ if (fmt->colplanes == 1)
+ cfg |= ctx->out_order_1p;
+ else if (fmt->colplanes == 2)
+ cfg |= ctx->out_order_2p | FIMC_REG_CIOCTRL_YCBCR_2PLANE;
+ else if (fmt->colplanes == 3)
+ cfg |= FIMC_REG_CIOCTRL_YCBCR_3PLANE;
+
+ if (fmt->color == FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CIOCTRL_RGB565;
+ else if (fmt->color == FIMC_FMT_RGB555)
+ cfg |= FIMC_REG_CIOCTRL_ARGB1555;
+ else if (fmt->color == FIMC_FMT_RGB444)
+ cfg |= FIMC_REG_CIOCTRL_ARGB4444;
+
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_ORGISIZE);
+ if (enable)
+ cfg |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+ else
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+ writel(cfg, dev->regs + FIMC_REG_ORGISIZE);
+}
+
+void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+ if (enable)
+ cfg |= FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+ else
+ cfg &= ~FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ u32 cfg, shfactor;
+
+ shfactor = 10 - (sc->hfactor + sc->vfactor);
+ cfg = shfactor << 28;
+
+ cfg |= (sc->pre_hratio << 16) | sc->pre_vratio;
+ writel(cfg, dev->regs + FIMC_REG_CISCPRERATIO);
+
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ writel(cfg, dev->regs + FIMC_REG_CISCPREDST);
+}
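+
+/*
+ * Worked example (assuming hfactor/vfactor hold the log2 of the horizontal
+ * and vertical pre-scaler ratios, as computed by the core code): for
+ * pre_hratio = 4 and pre_vratio = 2 we get hfactor = 2, vfactor = 1 and
+ * shfactor = 10 - (2 + 1) = 7, which ends up in CISCPRERATIO[31:28].
+ */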
+
+static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ struct fimc_frame *src_frame = &ctx->s_frame;
+ struct fimc_frame *dst_frame = &ctx->d_frame;
+
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+
+ cfg &= ~(FIMC_REG_CISCCTRL_CSCR2Y_WIDE | FIMC_REG_CISCCTRL_CSCY2R_WIDE |
+ FIMC_REG_CISCCTRL_SCALEUP_H | FIMC_REG_CISCCTRL_SCALEUP_V |
+ FIMC_REG_CISCCTRL_SCALERBYPASS | FIMC_REG_CISCCTRL_ONE2ONE |
+ FIMC_REG_CISCCTRL_INRGB_FMT_MASK | FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK |
+ FIMC_REG_CISCCTRL_INTERLACE | FIMC_REG_CISCCTRL_RGB_EXT);
+
+ if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
+ cfg |= (FIMC_REG_CISCCTRL_CSCR2Y_WIDE |
+ FIMC_REG_CISCCTRL_CSCY2R_WIDE);
+
+ if (!sc->enabled)
+ cfg |= FIMC_REG_CISCCTRL_SCALERBYPASS;
+
+ if (sc->scaleup_h)
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_H;
+
+ if (sc->scaleup_v)
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_V;
+
+ if (sc->copy_mode)
+ cfg |= FIMC_REG_CISCCTRL_ONE2ONE;
+
+ if (ctx->in_path == FIMC_IO_DMA) {
+ switch (src_frame->fmt->color) {
+ case FIMC_FMT_RGB565:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB565;
+ break;
+ case FIMC_FMT_RGB666:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB666;
+ break;
+ case FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB888;
+ break;
+ }
+ }
+
+ if (ctx->out_path == FIMC_IO_DMA) {
+ u32 color = dst_frame->fmt->color;
+
+ if (color >= FIMC_FMT_RGB444 && color <= FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565;
+ else if (color == FIMC_FMT_RGB666)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666;
+ else if (color == FIMC_FMT_RGB888)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
+ } else {
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
+
+ if (ctx->flags & FIMC_SCAN_MODE_INTERLACED)
+ cfg |= FIMC_REG_CISCCTRL_INTERLACE;
+ }
+
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ const struct fimc_variant *variant = dev->variant;
+ struct fimc_scaler *sc = &ctx->scaler;
+ u32 cfg;
+
+ dbg("main_hratio= 0x%X main_vratio= 0x%X",
+ sc->main_hratio, sc->main_vratio);
+
+ fimc_hw_set_scaler(ctx);
+
+ cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~(FIMC_REG_CISCCTRL_MHRATIO_MASK |
+ FIMC_REG_CISCCTRL_MVRATIO_MASK);
+
+ if (variant->has_mainscaler_ext) {
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+
+ cfg = readl(dev->regs + FIMC_REG_CIEXTEN);
+
+ cfg &= ~(FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK |
+ FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK);
+ cfg |= FIMC_REG_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CIEXTEN);
+ } else {
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+ }
+}
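+
+/*
+ * Example of the extended main scaler ratio split used above (based on the
+ * FIMC_REG_CISCCTRL_M*RATIO_EXT() and FIMC_REG_CIEXTEN_M*RATIO_EXT()
+ * macros): for main_hratio = 0x1a5 the upper bits, 0x1a5 >> 6 = 0x6, go into
+ * CISCCTRL and the low six bits, 0x1a5 & 0x3f = 0x25, go into CIEXTEN.
+ */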
+
+void fimc_hw_enable_capture(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ u32 cfg;
+
+ cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE;
+
+ if (ctx->scaler.enabled)
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
+ else
+		cfg &= ~FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
+
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+void fimc_hw_disable_capture(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN |
+ FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+void fimc_hw_set_effect(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_effect *effect = &ctx->effect;
+ u32 cfg = 0;
+
+ if (effect->type != FIMC_REG_CIIMGEFF_FIN_BYPASS) {
+ cfg |= FIMC_REG_CIIMGEFF_IE_SC_AFTER |
+ FIMC_REG_CIIMGEFF_IE_ENABLE;
+ cfg |= effect->type;
+ if (effect->type == FIMC_REG_CIIMGEFF_FIN_ARBITRARY)
+ cfg |= (effect->pat_cb << 13) | effect->pat_cr;
+ }
+
+ writel(cfg, dev->regs + FIMC_REG_CIIMGEFF);
+}
+
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ if (!(frame->fmt->flags & FMT_HAS_ALPHA))
+ return;
+
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+ cfg &= ~FIMC_REG_CIOCTRL_ALPHA_OUT_MASK;
+ cfg |= (frame->alpha << 4);
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->s_frame;
+ u32 cfg_o = 0;
+ u32 cfg_r = 0;
+
+ if (FIMC_IO_LCDFIFO == ctx->out_path)
+ cfg_r |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+
+ cfg_o |= (frame->f_height << 16) | frame->f_width;
+ cfg_r |= (frame->height << 16) | frame->width;
+
+ writel(cfg_o, dev->regs + FIMC_REG_ORGISIZE);
+ writel(cfg_r, dev->regs + FIMC_REG_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->s_frame;
+ struct fimc_dma_offset *offset = &frame->dma_offset;
+ u32 cfg;
+
+ /* Set the pixel offsets. */
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIIYOFF);
+
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICBOFF);
+
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICROFF);
+
+ /* Input original and real size. */
+ fimc_hw_set_in_dma_size(ctx);
+
+ /* Use DMA autoload only in FIFO mode. */
+ fimc_hw_en_autoload(dev, ctx->out_path == FIMC_IO_LCDFIFO);
+
+	/* Set the input DMA to process a single frame only. */
+ cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~(FIMC_REG_MSCTRL_INFORMAT_MASK
+ | FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK
+ | FIMC_REG_MSCTRL_INPUT_MASK
+ | FIMC_REG_MSCTRL_C_INT_IN_MASK
+ | FIMC_REG_MSCTRL_2P_IN_ORDER_MASK
+ | FIMC_REG_MSCTRL_ORDER422_MASK);
+
+ cfg |= (FIMC_REG_MSCTRL_IN_BURST_COUNT(4)
+ | FIMC_REG_MSCTRL_INPUT_MEMORY
+ | FIMC_REG_MSCTRL_FIFO_CTRL_FULL);
+
+ switch (frame->fmt->color) {
+ case FIMC_FMT_RGB565...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_RGB;
+ break;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR420;
+
+ if (frame->fmt->colplanes == 2)
+ cfg |= ctx->in_order_2p | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
+ else
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
+
+ break;
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
+ if (frame->fmt->colplanes == 1) {
+ cfg |= ctx->in_order_1p
+ | FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P;
+ } else {
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR422;
+
+ if (frame->fmt->colplanes == 2)
+ cfg |= ctx->in_order_2p
+ | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
+ else
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+
+ /* Input/output DMA linear/tiled mode. */
+ cfg = readl(dev->regs + FIMC_REG_CIDMAPARAM);
+ cfg &= ~FIMC_REG_CIDMAPARAM_TILE_MASK;
+
+ if (tiled_fmt(ctx->s_frame.fmt))
+ cfg |= FIMC_REG_CIDMAPARAM_R_64X32;
+
+ if (tiled_fmt(ctx->d_frame.fmt))
+ cfg |= FIMC_REG_CIDMAPARAM_W_64X32;
+
+ writel(cfg, dev->regs + FIMC_REG_CIDMAPARAM);
+}
+
+void fimc_hw_set_input_path(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~FIMC_REG_MSCTRL_INPUT_MASK;
+
+ if (ctx->in_path == FIMC_IO_DMA)
+ cfg |= FIMC_REG_MSCTRL_INPUT_MEMORY;
+ else
+ cfg |= FIMC_REG_MSCTRL_INPUT_EXTCAM;
+
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+void fimc_hw_set_output_path(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIREAL_ISIZE);
+ cfg |= FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
+
+ writel(paddr->y, dev->regs + FIMC_REG_CIIYSA(0));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIICBSA(0));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIICRSA(0));
+
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_output_addr(struct fimc_dev *dev,
+ struct fimc_addr *paddr, int index)
+{
+ int i = (index == -1) ? 0 : index;
+ do {
+ writel(paddr->y, dev->regs + FIMC_REG_CIOYSA(i));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIOCBSA(i));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIOCRSA(i));
+ dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
+ i, paddr->y, paddr->cb, paddr->cr);
+ } while (index == -1 && ++i < FIMC_MAX_OUT_BUFS);
+}
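+
+/*
+ * Usage note: passing index == -1 programs the same address triple into all
+ * FIMC_MAX_OUT_BUFS output slots, e.g.:
+ *
+ *	fimc_hw_set_output_addr(fimc, &paddr, -1);
+ *
+ * (the paddr name is illustrative; callers pass whatever fimc_addr they hold)
+ */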
+
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct fimc_source_info *cam)
+{
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
+
+ cfg &= ~(FIMC_REG_CIGCTRL_INVPOLPCLK | FIMC_REG_CIGCTRL_INVPOLVSYNC |
+ FIMC_REG_CIGCTRL_INVPOLHREF | FIMC_REG_CIGCTRL_INVPOLHSYNC |
+ FIMC_REG_CIGCTRL_INVPOLFIELD);
+
+ if (cam->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLPCLK;
+
+ if (cam->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLVSYNC;
+
+ if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHREF;
+
+ if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHSYNC;
+
+ if (cam->flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLFIELD;
+
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
+
+ return 0;
+}
+
+struct mbus_pixfmt_desc {
+ u32 pixelcode;
+ u32 cisrcfmt;
+ u16 bus_width;
+};
+
+static const struct mbus_pixfmt_desc pix_desc[] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
+};
+
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct fimc_source_info *source)
+{
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct fimc_frame *f = &vc->ctx->s_frame;
+ u32 bus_width, cfg = 0;
+ int i;
+
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_ITU_601:
+ case FIMC_BUS_TYPE_ITU_656:
+ for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
+ if (vc->ci_fmt.code == pix_desc[i].pixelcode) {
+ cfg = pix_desc[i].cisrcfmt;
+ bus_width = pix_desc[i].bus_width;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(pix_desc)) {
+ v4l2_err(&vc->ve.vdev,
+ "Camera color format not supported: %d\n",
+ vc->ci_fmt.code);
+ return -EINVAL;
+ }
+
+ if (source->fimc_bus_type == FIMC_BUS_TYPE_ITU_601) {
+ if (bus_width == 8)
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ else if (bus_width == 16)
+ cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
+ } /* else defaults to ITU-R BT.656 8-bit */
+ break;
+ case FIMC_BUS_TYPE_MIPI_CSI2:
+ if (fimc_fmt_is_user_defined(f->fmt->color))
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ break;
+ default:
+ case FIMC_BUS_TYPE_ISP_WRITEBACK:
+		/* Anything to do here? */
+ break;
+ }
+
+ cfg |= (f->o_width << 16) | f->o_height;
+ writel(cfg, fimc->regs + FIMC_REG_CISRCFMT);
+ return 0;
+}
+
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+{
+ u32 hoff2, voff2;
+
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIWDOFST);
+
+ cfg &= ~(FIMC_REG_CIWDOFST_HOROFF_MASK | FIMC_REG_CIWDOFST_VEROFF_MASK);
+ cfg |= FIMC_REG_CIWDOFST_OFF_EN |
+ (f->offs_h << 16) | f->offs_v;
+
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST);
+
+ /* See CIWDOFSTn register description in the datasheet for details. */
+ hoff2 = f->o_width - f->width - f->offs_h;
+ voff2 = f->o_height - f->height - f->offs_v;
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST2);
+}
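+
+/*
+ * Worked example for the second offset pair computed above: with an
+ * o_width/o_height of 1296x732, a cropped width/height of 1280x720 and
+ * offs_h/offs_v of 8/6, hoff2 = 1296 - 1280 - 8 = 8 and
+ * voff2 = 732 - 720 - 6 = 6, i.e. the margin remaining on the right/bottom
+ * side of the crop window.
+ */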
+
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct fimc_source_info *source)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ u32 csis_data_alignment = 32;
+ u32 cfg, tmp;
+
+ cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
+
+ /* Select ITU B interface, disable Writeback path and test pattern. */
+ cfg &= ~(FIMC_REG_CIGCTRL_TESTPAT_MASK | FIMC_REG_CIGCTRL_SELCAM_ITU_A |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG |
+ FIMC_REG_CIGCTRL_SELWB_A);
+
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
+
+ if (source->mux_id == 0)
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI_A;
+
+ /* TODO: add remaining supported formats. */
+ switch (vid_cap->ci_fmt.code) {
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
+ break;
+ case V4L2_MBUS_FMT_JPEG_1X8:
+ case V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8:
+ tmp = FIMC_REG_CSIIMGFMT_USER(1);
+ cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
+ break;
+ default:
+ v4l2_err(&vid_cap->ve.vdev,
+ "Not supported camera pixel format: %#x\n",
+ vid_cap->ci_fmt.code);
+ return -EINVAL;
+ }
+ tmp |= (csis_data_alignment == 32) << 8;
+
+ writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
+ break;
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
+ if (source->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
+ break;
+ case FIMC_BUS_TYPE_LCD_WRITEBACK_A:
+ cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
+ /* fall through */
+ case FIMC_BUS_TYPE_ISP_WRITEBACK:
+ if (fimc->variant->has_isp_wb)
+ cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
+ else
+ WARN_ONCE(1, "ISP Writeback input is not supported\n");
+ break;
+ default:
+ v4l2_err(&vid_cap->ve.vdev,
+ "Invalid FIMC bus type selected: %d\n",
+ source->fimc_bus_type);
+ return -EINVAL;
+ }
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
+
+ return 0;
+}
+
+void fimc_hw_clear_irq(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= FIMC_REG_CIGCTRL_IRQ_CLR;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+}
+
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ if (on)
+ cfg |= FIMC_REG_CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~FIMC_REG_CISCCTRL_SCALERSTART;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ if (on)
+ cfg |= FIMC_REG_MSCTRL_ENVID;
+ else
+ cfg &= ~FIMC_REG_MSCTRL_ENVID;
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+/* Return an index to the buffer actually being written. */
+s32 fimc_hw_get_frame_index(struct fimc_dev *dev)
+{
+ s32 reg;
+
+ if (dev->drv_data->cistatus2) {
+ reg = readl(dev->regs + FIMC_REG_CISTATUS2) & 0x3f;
+ return reg - 1;
+ }
+
+ reg = readl(dev->regs + FIMC_REG_CISTATUS);
+
+ return (reg & FIMC_REG_CISTATUS_FRAMECNT_MASK) >>
+ FIMC_REG_CISTATUS_FRAMECNT_SHIFT;
+}
+
+/* Return an index to the buffer being written previously. */
+s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev)
+{
+ s32 reg;
+
+ if (!dev->drv_data->cistatus2)
+ return -1;
+
+ reg = readl(dev->regs + FIMC_REG_CISTATUS2);
+ return ((reg >> 7) & 0x3f) - 1;
+}
+
+/* Locking: the caller holds fimc->slock */
+void fimc_activate_capture(struct fimc_ctx *ctx)
+{
+ fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
+ fimc_hw_enable_capture(ctx);
+}
+
+void fimc_deactivate_capture(struct fimc_dev *fimc)
+{
+ fimc_hw_en_lastirq(fimc, true);
+ fimc_hw_disable_capture(fimc);
+ fimc_hw_enable_scaler(fimc, false);
+ fimc_hw_en_lastirq(fimc, false);
+}
+
+int fimc_hw_camblk_cfg_writeback(struct fimc_dev *fimc)
+{
+ struct regmap *map = fimc->sysreg;
+ unsigned int mask, val, camblk_cfg;
+ int ret;
+
+ if (map == NULL)
+ return 0;
+
+ ret = regmap_read(map, SYSREG_CAMBLK, &camblk_cfg);
+ if (ret < 0 || ((camblk_cfg & 0x00700000) >> 20 != 0x3))
+ return ret;
+
+ if (!WARN(fimc->id >= 3, "not supported id: %d\n", fimc->id))
+ val = 0x1 << (fimc->id + 20);
+ else
+ val = 0;
+
+ mask = SYSREG_CAMBLK_FIFORST_ISP | SYSREG_CAMBLK_ISPWB_FULL_EN;
+ ret = regmap_update_bits(map, SYSREG_CAMBLK, mask, val);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ val |= SYSREG_CAMBLK_FIFORST_ISP;
+ ret = regmap_update_bits(map, SYSREG_CAMBLK, mask, val);
+ if (ret < 0)
+ return ret;
+
+ mask = SYSREG_ISPBLK_FIFORST_CAM_BLK;
+ ret = regmap_update_bits(map, SYSREG_ISPBLK, mask, ~mask);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ return regmap_update_bits(map, SYSREG_ISPBLK, mask, mask);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.h b/drivers/media/platform/exynos4-is/fimc-reg.h
new file mode 100644
index 00000000000..6c97798c75a
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-reg.h
@@ -0,0 +1,338 @@
+/*
+ * Samsung camera host interface (FIMC) registers definition
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_REG_H_
+#define FIMC_REG_H_
+
+#include "fimc-core.h"
+
+/* Input source format */
+#define FIMC_REG_CISRCFMT 0x00
+#define FIMC_REG_CISRCFMT_ITU601_8BIT (1 << 31)
+#define FIMC_REG_CISRCFMT_ITU601_16BIT (1 << 29)
+#define FIMC_REG_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+
+/* Window offset */
+#define FIMC_REG_CIWDOFST 0x04
+#define FIMC_REG_CIWDOFST_OFF_EN (1 << 31)
+#define FIMC_REG_CIWDOFST_CLROVFIY (1 << 30)
+#define FIMC_REG_CIWDOFST_CLROVRLB (1 << 29)
+#define FIMC_REG_CIWDOFST_HOROFF_MASK (0x7ff << 16)
+#define FIMC_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FIMC_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FIMC_REG_CIWDOFST_VEROFF_MASK (0xfff << 0)
+
+/* Global control */
+#define FIMC_REG_CIGCTRL 0x08
+#define FIMC_REG_CIGCTRL_SWRST (1 << 31)
+#define FIMC_REG_CIGCTRL_CAMRST_A (1 << 30)
+#define FIMC_REG_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define FIMC_REG_CIGCTRL_TESTPAT_NORMAL (0 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_VER_INC (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_MASK (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_SHIFT 27
+#define FIMC_REG_CIGCTRL_INVPOLPCLK (1 << 26)
+#define FIMC_REG_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define FIMC_REG_CIGCTRL_INVPOLHREF (1 << 24)
+#define FIMC_REG_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define FIMC_REG_CIGCTRL_HREF_MASK (1 << 21)
+#define FIMC_REG_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define FIMC_REG_CIGCTRL_IRQ_CLR (1 << 19)
+#define FIMC_REG_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define FIMC_REG_CIGCTRL_SHDW_DISABLE (1 << 12)
+/* 0 - selects Writeback A (LCD), 1 - selects Writeback B (LCD/ISP) */
+#define FIMC_REG_CIGCTRL_SELWB_A (1 << 10)
+#define FIMC_REG_CIGCTRL_CAM_JPEG (1 << 8)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define FIMC_REG_CIGCTRL_CAMIF_SELWB (1 << 6)
+/* 0 - ITU601; 1 - ITU709 */
+#define FIMC_REG_CIGCTRL_CSC_ITU601_709 (1 << 5)
+#define FIMC_REG_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+#define FIMC_REG_CIGCTRL_INVPOLFIELD (1 << 1)
+#define FIMC_REG_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset 2 */
+#define FIMC_REG_CIWDOFST2 0x14
+#define FIMC_REG_CIWDOFST2_HOROFF_MASK (0xfff << 16)
+#define FIMC_REG_CIWDOFST2_VEROFF_MASK (0xfff << 0)
+
+/* Output DMA Y/Cb/Cr plane start addresses */
+#define FIMC_REG_CIOYSA(n) (0x18 + (n) * 4)
+#define FIMC_REG_CIOCBSA(n) (0x28 + (n) * 4)
+#define FIMC_REG_CIOCRSA(n) (0x38 + (n) * 4)
+
+/* Target image format */
+#define FIMC_REG_CITRGFMT 0x48
+#define FIMC_REG_CITRGFMT_INROT90 (1 << 31)
+#define FIMC_REG_CITRGFMT_YCBCR420 (0 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422 (1 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422_1P (2 << 29)
+#define FIMC_REG_CITRGFMT_RGB (3 << 29)
+#define FIMC_REG_CITRGFMT_FMT_MASK (3 << 29)
+#define FIMC_REG_CITRGFMT_HSIZE_MASK (0xfff << 16)
+#define FIMC_REG_CITRGFMT_FLIP_SHIFT 14
+#define FIMC_REG_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_180 (3 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_MASK (3 << 14)
+#define FIMC_REG_CITRGFMT_OUTROT90 (1 << 13)
+#define FIMC_REG_CITRGFMT_VSIZE_MASK (0xfff << 0)
+
+/* Output DMA control */
+#define FIMC_REG_CIOCTRL 0x4c
+#define FIMC_REG_CIOCTRL_ORDER422_MASK (3 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define FIMC_REG_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define FIMC_REG_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define FIMC_REG_CIOCTRL_ALPHA_OUT_MASK (0xff << 4)
+#define FIMC_REG_CIOCTRL_RGB16FMT_MASK (3 << 16)
+#define FIMC_REG_CIOCTRL_RGB565 (0 << 16)
+#define FIMC_REG_CIOCTRL_ARGB1555 (1 << 16)
+#define FIMC_REG_CIOCTRL_ARGB4444 (2 << 16)
+#define FIMC_REG_CIOCTRL_ORDER2P_SHIFT 24
+#define FIMC_REG_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
+
+/* Pre-scaler control 1 */
+#define FIMC_REG_CISCPRERATIO 0x50
+
+#define FIMC_REG_CISCPREDST 0x54
+
+/* Main scaler control */
+#define FIMC_REG_CISCCTRL 0x58
+#define FIMC_REG_CISCCTRL_SCALERBYPASS (1 << 31)
+#define FIMC_REG_CISCCTRL_SCALEUP_H (1 << 30)
+#define FIMC_REG_CISCCTRL_SCALEUP_V (1 << 29)
+#define FIMC_REG_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define FIMC_REG_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define FIMC_REG_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define FIMC_REG_CISCCTRL_INTERLACE (1 << 25)
+#define FIMC_REG_CISCCTRL_SCALERSTART (1 << 15)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define FIMC_REG_CISCCTRL_RGB_EXT (1 << 10)
+#define FIMC_REG_CISCCTRL_ONE2ONE (1 << 9)
+#define FIMC_REG_CISCCTRL_MHRATIO(x) ((x) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO(x) ((x) << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_MASK (0x1ff << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_EXT(x) (((x) >> 6) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_EXT(x) (((x) >> 6) << 0)
+
+/* Target area */
+#define FIMC_REG_CITAREA 0x5c
+#define FIMC_REG_CITAREA_MASK 0x0fffffff
+
+/* General status */
+#define FIMC_REG_CISTATUS 0x64
+#define FIMC_REG_CISTATUS_OVFIY (1 << 31)
+#define FIMC_REG_CISTATUS_OVFICB (1 << 30)
+#define FIMC_REG_CISTATUS_OVFICR (1 << 29)
+#define FIMC_REG_CISTATUS_VSYNC (1 << 28)
+#define FIMC_REG_CISTATUS_FRAMECNT_MASK (3 << 26)
+#define FIMC_REG_CISTATUS_FRAMECNT_SHIFT 26
+#define FIMC_REG_CISTATUS_WINOFF_EN (1 << 25)
+#define FIMC_REG_CISTATUS_IMGCPT_EN (1 << 22)
+#define FIMC_REG_CISTATUS_IMGCPT_SCEN (1 << 21)
+#define FIMC_REG_CISTATUS_VSYNC_A (1 << 20)
+#define FIMC_REG_CISTATUS_VSYNC_B (1 << 19)
+#define FIMC_REG_CISTATUS_OVRLB (1 << 18)
+#define FIMC_REG_CISTATUS_FRAME_END (1 << 17)
+#define FIMC_REG_CISTATUS_LASTCAPT_END (1 << 16)
+#define FIMC_REG_CISTATUS_VVALID_A (1 << 15)
+#define FIMC_REG_CISTATUS_VVALID_B (1 << 14)
+
+/* Indexes to the last and the currently processed buffer. */
+#define FIMC_REG_CISTATUS2 0x68
+
+/* Image capture control */
+#define FIMC_REG_CIIMGCPT 0xc0
+#define FIMC_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FIMC_REG_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Frame capture sequence */
+#define FIMC_REG_CICPTSEQ 0xc4
+
+/* Image effect */
+#define FIMC_REG_CIIMGEFF 0xd0
+#define FIMC_REG_CIIMGEFF_IE_ENABLE (1 << 30)
+#define FIMC_REG_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define FIMC_REG_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define FIMC_REG_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_MASK (7 << 26)
+#define FIMC_REG_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+
+/* Input DMA Y/Cb/Cr plane start address 0/1 */
+#define FIMC_REG_CIIYSA(n) (0xd4 + (n) * 0x70)
+#define FIMC_REG_CIICBSA(n) (0xd8 + (n) * 0x70)
+#define FIMC_REG_CIICRSA(n) (0xdc + (n) * 0x70)
+
+/* Real input DMA image size */
+#define FIMC_REG_CIREAL_ISIZE 0xf8
+#define FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
+#define FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
+
+/* Input DMA control */
+#define FIMC_REG_MSCTRL 0xfc
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK (0xf << 24)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_SHIFT 16
+#define FIMC_REG_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_MASK (1 << 15)
+#define FIMC_REG_MSCTRL_FLIP_SHIFT 13
+#define FIMC_REG_MSCTRL_FLIP_MASK (3 << 13)
+#define FIMC_REG_MSCTRL_FLIP_NORMAL (0 << 13)
+#define FIMC_REG_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define FIMC_REG_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define FIMC_REG_MSCTRL_FLIP_180 (3 << 13)
+#define FIMC_REG_MSCTRL_FIFO_CTRL_FULL (1 << 12)
+#define FIMC_REG_MSCTRL_ORDER422_SHIFT 4
+#define FIMC_REG_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_MASK (3 << 4)
+#define FIMC_REG_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MEMORY (1 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MASK (1 << 3)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_RGB (3 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_MASK (3 << 1)
+#define FIMC_REG_MSCTRL_ENVID (1 << 0)
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
+
+/* Output DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIOYOFF 0x168
+#define FIMC_REG_CIOCBOFF 0x16c
+#define FIMC_REG_CIOCROFF 0x170
+
+/* Input DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIIYOFF 0x174
+#define FIMC_REG_CIICBOFF 0x178
+#define FIMC_REG_CIICROFF 0x17c
+
+/* Input DMA original image size */
+#define FIMC_REG_ORGISIZE 0x180
+
+/* Output DMA original image size */
+#define FIMC_REG_ORGOSIZE 0x184
+
+/* Real output DMA image size (extension register) */
+#define FIMC_REG_CIEXTEN 0x188
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT(x) (((x) & 0x3f) << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT(x) ((x) & 0x3f)
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK (0x3f << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK 0x3f
+
+#define FIMC_REG_CIDMAPARAM 0x18c
+#define FIMC_REG_CIDMAPARAM_R_LINEAR (0 << 29)
+#define FIMC_REG_CIDMAPARAM_R_64X32 (3 << 29)
+#define FIMC_REG_CIDMAPARAM_W_LINEAR (0 << 13)
+#define FIMC_REG_CIDMAPARAM_W_64X32 (3 << 13)
+#define FIMC_REG_CIDMAPARAM_TILE_MASK ((3 << 29) | (3 << 13))
+
+/* MIPI CSI image format */
+#define FIMC_REG_CSIIMGFMT 0x194
+#define FIMC_REG_CSIIMGFMT_YCBCR422_8BIT 0x1e
+#define FIMC_REG_CSIIMGFMT_RAW8 0x2a
+#define FIMC_REG_CSIIMGFMT_RAW10 0x2b
+#define FIMC_REG_CSIIMGFMT_RAW12 0x2c
+/* User defined formats. x = 0...16. */
+#define FIMC_REG_CSIIMGFMT_USER(x) (0x30 + x - 1)
+
+/* Output frame buffer sequence mask */
+#define FIMC_REG_CIFCNTSEQ 0x1fc
+
+/* SYSREG ISP Writeback register address offsets */
+#define SYSREG_ISPBLK 0x020c
+#define SYSREG_ISPBLK_FIFORST_CAM_BLK (1 << 7)
+
+#define SYSREG_CAMBLK 0x0218
+#define SYSREG_CAMBLK_FIFORST_ISP (1 << 15)
+#define SYSREG_CAMBLK_ISPWB_FULL_EN (7 << 20)
+
+/*
+ * Function declarations
+ */
+void fimc_hw_reset(struct fimc_dev *fimc);
+void fimc_hw_set_rotation(struct fimc_ctx *ctx);
+void fimc_hw_set_target_format(struct fimc_ctx *ctx);
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
+void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
+void fimc_hw_enable_capture(struct fimc_ctx *ctx);
+void fimc_hw_set_effect(struct fimc_ctx *ctx);
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
+void fimc_hw_set_input_path(struct fimc_ctx *ctx);
+void fimc_hw_set_output_path(struct fimc_ctx *ctx);
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
+ int index);
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct fimc_source_info *cam);
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct fimc_source_info *cam);
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct fimc_source_info *cam);
+void fimc_hw_clear_irq(struct fimc_dev *dev);
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
+void fimc_hw_disable_capture(struct fimc_dev *dev);
+s32 fimc_hw_get_frame_index(struct fimc_dev *dev);
+s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev);
+int fimc_hw_camblk_cfg_writeback(struct fimc_dev *fimc);
+void fimc_activate_capture(struct fimc_ctx *ctx);
+void fimc_deactivate_capture(struct fimc_dev *fimc);
+
+/**
+ * fimc_hw_set_dma_seq - configure output DMA buffer sequence
+ * @dev: fimc device to configure
+ * @mask: bitmask for the DMA output buffer registers; a cleared bit skips the
+ * corresponding buffer
+ *
+ * This function masks the output DMA ring buffers: it selects which of the
+ * 32 available output buffer address registers will be used by the DMA
+ * engine.
+ */
+static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
+{
+ writel(mask, dev->regs + FIMC_REG_CIFCNTSEQ);
+}
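+
+/*
+ * Example: enable only the first four output buffer slots, as done in
+ * fimc_hw_reset() for IP instances with more than four output buffers:
+ *
+ *	fimc_hw_set_dma_seq(dev, 0xF);
+ */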
+
+#endif /* FIMC_REG_H_ */
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
new file mode 100644
index 00000000000..344718df5c6
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -0,0 +1,1498 @@
+/*
+ * S5P/EXYNOS4 SoC series camera host interface media device driver
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
+#include <media/media-device.h>
+#include <media/exynos-fimc.h>
+
+#include "media-dev.h"
+#include "fimc-core.h"
+#include "fimc-is.h"
+#include "fimc-lite.h"
+#include "mipi-csis.h"
+
+/* Set up image sensor subdev -> FIMC capture node notifications. */
+static void __setup_sensor_notification(struct fimc_md *fmd,
+ struct v4l2_subdev *sensor,
+ struct v4l2_subdev *fimc_sd)
+{
+ struct fimc_source_info *src_inf;
+ struct fimc_sensor_info *md_si;
+ unsigned long flags;
+
+ src_inf = v4l2_get_subdev_hostdata(sensor);
+ if (!src_inf || WARN_ON(fmd == NULL))
+ return;
+
+ md_si = source_to_sensor_info(src_inf);
+ spin_lock_irqsave(&fmd->slock, flags);
+ md_si->host = v4l2_get_subdevdata(fimc_sd);
+ spin_unlock_irqrestore(&fmd->slock, flags);
+}
+
+/**
+ * fimc_pipeline_prepare - update pipeline information with subdevice pointers
+ * @p: fimc pipeline to update
+ * @me: media entity terminating the pipeline
+ *
+ * Caller holds the graph mutex.
+ */
+static void fimc_pipeline_prepare(struct fimc_pipeline *p,
+ struct media_entity *me)
+{
+ struct fimc_md *fmd = entity_to_fimc_mdev(me);
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev *sensor = NULL;
+ int i;
+
+ for (i = 0; i < IDX_MAX; i++)
+ p->subdevs[i] = NULL;
+
+ while (1) {
+ struct media_pad *pad = NULL;
+
+ /* Find remote source pad */
+ for (i = 0; i < me->num_pads; i++) {
+ struct media_pad *spad = &me->pads[i];
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+ pad = media_entity_remote_pad(spad);
+ if (pad)
+ break;
+ }
+
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ switch (sd->grp_id) {
+ case GRP_ID_SENSOR:
+ sensor = sd;
+ /* fall through */
+ case GRP_ID_FIMC_IS_SENSOR:
+ p->subdevs[IDX_SENSOR] = sd;
+ break;
+ case GRP_ID_CSIS:
+ p->subdevs[IDX_CSIS] = sd;
+ break;
+ case GRP_ID_FLITE:
+ p->subdevs[IDX_FLITE] = sd;
+ break;
+ case GRP_ID_FIMC:
+ p->subdevs[IDX_FIMC] = sd;
+ break;
+ case GRP_ID_FIMC_IS:
+ p->subdevs[IDX_IS_ISP] = sd;
+ break;
+ default:
+ break;
+ }
+ me = &sd->entity;
+ if (me->num_pads == 1)
+ break;
+ }
+
+ if (sensor && p->subdevs[IDX_FIMC])
+ __setup_sensor_notification(fmd, sensor, p->subdevs[IDX_FIMC]);
+}
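+
+/*
+ * For a typical capture pipeline the walk above resolves, for instance,
+ * (sensor) -> (MIPI-CSIS) -> (FIMC) into:
+ *
+ *	p->subdevs[IDX_SENSOR] = <sensor subdev>;
+ *	p->subdevs[IDX_CSIS]   = <MIPI-CSIS subdev>;
+ *	p->subdevs[IDX_FIMC]   = <FIMC capture subdev>;
+ *
+ * with the remaining entries left NULL (illustrative example only, the
+ * actual chain depends on the board's media graph).
+ */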
+
+/**
+ * __subdev_set_power - change power state of a single subdev
+ * @sd: subdevice to change power state for
+ * @on: 1 to enable power or 0 to disable
+ *
+ * Return result of s_power subdev operation or -ENXIO if sd argument
+ * is NULL. Return 0 if the subdevice does not implement s_power.
+ */
+static int __subdev_set_power(struct v4l2_subdev *sd, int on)
+{
+ int *use_count;
+ int ret;
+
+ if (sd == NULL)
+ return -ENXIO;
+
+ use_count = &sd->entity.use_count;
+ if (on && (*use_count)++ > 0)
+ return 0;
+ else if (!on && (*use_count == 0 || --(*use_count) > 0))
+ return 0;
+ ret = v4l2_subdev_call(sd, core, s_power, on);
+
+ return ret != -ENOIOCTLCMD ? ret : 0;
+}
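+
+/*
+ * Example of the use_count handling above: with two users of the same
+ * subdev, the first __subdev_set_power(sd, 1) call issues s_power(1) and the
+ * second only increments the count; symmetrically, only the last
+ * __subdev_set_power(sd, 0) call issues s_power(0).
+ */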
+
+/**
+ * fimc_pipeline_s_power - change power state of all pipeline subdevs
+ * @p: fimc pipeline whose subdevs' power state is to be changed
+ * @on: true to power on, false to power off
+ *
+ * Needs to be called with the graph mutex held.
+ */
+static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool on)
+{
+ static const u8 seq[2][IDX_MAX - 1] = {
+ { IDX_IS_ISP, IDX_SENSOR, IDX_CSIS, IDX_FLITE },
+ { IDX_CSIS, IDX_FLITE, IDX_SENSOR, IDX_IS_ISP },
+ };
+ int i, ret = 0;
+
+ if (p->subdevs[IDX_SENSOR] == NULL)
+ return -ENXIO;
+
+ for (i = 0; i < IDX_MAX - 1; i++) {
+ unsigned int idx = seq[on][i];
+
+ ret = __subdev_set_power(p->subdevs[idx], on);
+
+ if (ret < 0 && ret != -ENXIO)
+ goto error;
+ }
+ return 0;
+error:
+ for (; i >= 0; i--) {
+ unsigned int idx = seq[on][i];
+ __subdev_set_power(p->subdevs[idx], !on);
+ }
+ return ret;
+}
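+
+/*
+ * With the seq[][] table above the resulting call order is:
+ * power up (on == 1):   CSIS -> FIMC-LITE -> sensor -> FIMC-IS ISP
+ * power down (on == 0): FIMC-IS ISP -> sensor -> CSIS -> FIMC-LITE
+ * Subdevs missing from the pipeline are simply skipped (-ENXIO is ignored).
+ */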
+
+/**
+ * __fimc_pipeline_open - update the pipeline information, enable power
+ * of all pipeline subdevs and the sensor clock
+ * @ep: fimc media pipeline to open
+ * @me: media entity to start graph walk with
+ * @prepare: true to walk the current pipeline and acquire all subdevs
+ *
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
+ struct media_entity *me, bool prepare)
+{
+ struct fimc_md *fmd = entity_to_fimc_mdev(me);
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (WARN_ON(p == NULL || me == NULL))
+ return -EINVAL;
+
+ if (prepare)
+ fimc_pipeline_prepare(p, me);
+
+ sd = p->subdevs[IDX_SENSOR];
+ if (sd == NULL)
+ return -EINVAL;
+
+	/* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
+ ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = fimc_pipeline_s_power(p, 1);
+ if (!ret)
+ return 0;
+
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
+ clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
+
+ return ret;
+}
+
+/**
+ * __fimc_pipeline_close - disable the sensor clock and pipeline power
+ * @ep: fimc media pipeline to close
+ *
+ * Disable power of all subdevs and turn the external sensor clock off.
+ */
+static int __fimc_pipeline_close(struct exynos_media_pipeline *ep)
+{
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ struct v4l2_subdev *sd = p ? p->subdevs[IDX_SENSOR] : NULL;
+ struct fimc_md *fmd;
+ int ret;
+
+ if (sd == NULL) {
+ pr_warn("%s(): No sensor subdev\n", __func__);
+ return 0;
+ }
+
+ ret = fimc_pipeline_s_power(p, 0);
+
+ fmd = entity_to_fimc_mdev(&sd->entity);
+
+ /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
+ clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
+
+ return ret == -ENXIO ? 0 : ret;
+}
+
+/**
+ * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
+ * @ep: video pipeline structure
+ * @on: passed as the s_stream() callback argument
+ */
+static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
+{
+ static const u8 seq[2][IDX_MAX] = {
+ { IDX_FIMC, IDX_SENSOR, IDX_IS_ISP, IDX_CSIS, IDX_FLITE },
+ { IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
+ };
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ int i, ret = 0;
+
+ if (p->subdevs[IDX_SENSOR] == NULL)
+ return -ENODEV;
+
+ for (i = 0; i < IDX_MAX; i++) {
+ unsigned int idx = seq[on][i];
+
+ ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ goto error;
+ }
+ return 0;
+error:
+ for (; i >= 0; i--) {
+ unsigned int idx = seq[on][i];
+ v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
+ }
+ return ret;
+}
+
+/* Media pipeline operations for the FIMC/FIMC-LITE video device driver */
+static const struct exynos_media_pipeline_ops fimc_pipeline_ops = {
+ .open = __fimc_pipeline_open,
+ .close = __fimc_pipeline_close,
+ .set_stream = __fimc_pipeline_s_stream,
+};
+
+static struct exynos_media_pipeline *fimc_md_pipeline_create(
+ struct fimc_md *fmd)
+{
+ struct fimc_pipeline *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ list_add_tail(&p->list, &fmd->pipelines);
+
+ p->ep.ops = &fimc_pipeline_ops;
+ return &p->ep;
+}
+
+static void fimc_md_pipelines_free(struct fimc_md *fmd)
+{
+ while (!list_empty(&fmd->pipelines)) {
+ struct fimc_pipeline *p;
+
+ p = list_entry(fmd->pipelines.next, typeof(*p), list);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+/* Parse port node and register as a sub-device any sensor specified there. */
+static int fimc_md_parse_port_node(struct fimc_md *fmd,
+ struct device_node *port,
+ unsigned int index)
+{
+ struct fimc_source_info *pd = &fmd->sensor[index].pdata;
+ struct device_node *rem, *ep, *np;
+ struct v4l2_of_endpoint endpoint;
+
+ /* Assume here a port node can have only one endpoint node. */
+ ep = of_get_next_child(port, NULL);
+ if (!ep)
+ return 0;
+
+ v4l2_of_parse_endpoint(ep, &endpoint);
+ if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS)
+ return -EINVAL;
+
+ pd->mux_id = (endpoint.base.port - 1) & 0x1;
+
+	rem = of_graph_get_remote_port_parent(ep);
+	if (rem == NULL) {
+		v4l2_info(&fmd->v4l2_dev, "Remote device at %s not found\n",
+							ep->full_name);
+		of_node_put(ep);
+		return 0;
+	}
+	of_node_put(ep);
+
+ if (fimc_input_is_parallel(endpoint.base.port)) {
+ if (endpoint.bus_type == V4L2_MBUS_PARALLEL)
+ pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_601;
+ else
+ pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_656;
+ pd->flags = endpoint.bus.parallel.flags;
+ } else if (fimc_input_is_mipi_csi(endpoint.base.port)) {
+ /*
+ * MIPI CSI-2: only input mux selection and
+		 * the sensor's clock frequency are needed.
+ */
+ pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2;
+ } else {
+ v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %s\n",
+ endpoint.base.port, rem->full_name);
+ }
+ /*
+	 * For FIMC-IS handled sensors, which are placed under the i2c-isp device
+ * node, FIMC is connected to the FIMC-IS through its ISP Writeback
+ * input. Sensors are attached to the FIMC-LITE hostdata interface
+ * directly or through MIPI-CSIS, depending on the external media bus
+ * used. This needs to be handled in a more reliable way, not by just
+ * checking parent's node name.
+ */
+ np = of_get_parent(rem);
+
+ if (np && !of_node_cmp(np->name, "i2c-isp"))
+ pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
+ else
+ pd->fimc_bus_type = pd->sensor_bus_type;
+
+ if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor)))
+ return -EINVAL;
+
+ fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_OF;
+ fmd->sensor[index].asd.match.of.node = rem;
+ fmd->async_subdevs[index] = &fmd->sensor[index].asd;
+
+ fmd->num_sensors++;
+
+ of_node_put(rem);
+ return 0;
+}
+
+/* Register all SoC external sub-devices */
+static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+{
+ struct device_node *parent = fmd->pdev->dev.of_node;
+ struct device_node *node, *ports;
+ int index = 0;
+ int ret;
+
+ /*
+ * Runtime resume one of the FIMC entities to make sure
+ * the sclk_cam clocks are not globally disabled.
+ */
+ if (!fmd->pmf)
+ return -ENXIO;
+
+ ret = pm_runtime_get_sync(fmd->pmf);
+ if (ret < 0)
+ return ret;
+
+ fmd->num_sensors = 0;
+
+ /* Attach sensors linked to MIPI CSI-2 receivers */
+ for_each_available_child_of_node(parent, node) {
+ struct device_node *port;
+
+ if (of_node_cmp(node->name, "csis"))
+ continue;
+		/* The csis node can have only one port subnode. */
+ port = of_get_next_child(node, NULL);
+ if (!port)
+ continue;
+
+ ret = fimc_md_parse_port_node(fmd, port, index);
+ if (ret < 0)
+ goto rpm_put;
+ index++;
+ }
+
+ /* Attach sensors listed in the parallel-ports node */
+ ports = of_get_child_by_name(parent, "parallel-ports");
+ if (!ports)
+ goto rpm_put;
+
+ for_each_child_of_node(ports, node) {
+ ret = fimc_md_parse_port_node(fmd, node, index);
+ if (ret < 0)
+ break;
+ index++;
+ }
+rpm_put:
+ pm_runtime_put(fmd->pmf);
+ return ret;
+}
+
+static int __of_get_csis_id(struct device_node *np)
+{
+ u32 reg = 0;
+
+ np = of_get_child_by_name(np, "port");
+ if (!np)
+ return -EINVAL;
+ of_property_read_u32(np, "reg", &reg);
+ return reg - FIMC_INPUT_MIPI_CSI2_0;
+}
+
+/*
+ * MIPI-CSIS, FIMC and FIMC-LITE platform devices registration.
+ */
+static int register_fimc_lite_entity(struct fimc_md *fmd,
+ struct fimc_lite *fimc_lite)
+{
+ struct v4l2_subdev *sd;
+ struct exynos_media_pipeline *ep;
+ int ret;
+
+ if (WARN_ON(fimc_lite->index >= FIMC_LITE_MAX_DEVS ||
+ fmd->fimc_lite[fimc_lite->index]))
+ return -EBUSY;
+
+ sd = &fimc_lite->subdev;
+ sd->grp_id = GRP_ID_FLITE;
+
+ ep = fimc_md_pipeline_create(fmd);
+ if (!ep)
+ return -ENOMEM;
+
+ v4l2_set_subdev_hostdata(sd, ep);
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret)
+ fmd->fimc_lite[fimc_lite->index] = fimc_lite;
+ else
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.LITE%d\n",
+ fimc_lite->index);
+ return ret;
+}
+
+static int register_fimc_entity(struct fimc_md *fmd, struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd;
+ struct exynos_media_pipeline *ep;
+ int ret;
+
+ if (WARN_ON(fimc->id >= FIMC_MAX_DEVS || fmd->fimc[fimc->id]))
+ return -EBUSY;
+
+ sd = &fimc->vid_cap.subdev;
+ sd->grp_id = GRP_ID_FIMC;
+
+ ep = fimc_md_pipeline_create(fmd);
+ if (!ep)
+ return -ENOMEM;
+
+ v4l2_set_subdev_hostdata(sd, ep);
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret) {
+ if (!fmd->pmf && fimc->pdev)
+ fmd->pmf = &fimc->pdev->dev;
+ fmd->fimc[fimc->id] = fimc;
+ fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
+ } else {
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
+ fimc->id, ret);
+ }
+ return ret;
+}
+
+static int register_csis_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ struct v4l2_subdev *sd)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int id, ret;
+
+ id = node ? __of_get_csis_id(node) : max(0, pdev->id);
+
+ if (WARN_ON(id < 0 || id >= CSIS_MAX_ENTITIES))
+ return -ENOENT;
+
+ if (WARN_ON(fmd->csis[id].sd))
+ return -EBUSY;
+
+ sd->grp_id = GRP_ID_CSIS;
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret)
+ fmd->csis[id].sd = sd;
+ else
+ v4l2_err(&fmd->v4l2_dev,
+ "Failed to register MIPI-CSIS.%d (%d)\n", id, ret);
+ return ret;
+}
+
+static int register_fimc_is_entity(struct fimc_md *fmd, struct fimc_is *is)
+{
+ struct v4l2_subdev *sd = &is->isp.subdev;
+ struct exynos_media_pipeline *ep;
+ int ret;
+
+ /* Allocate pipeline object for the ISP capture video node. */
+ ep = fimc_md_pipeline_create(fmd);
+ if (!ep)
+ return -ENOMEM;
+
+ v4l2_set_subdev_hostdata(sd, ep);
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (ret) {
+ v4l2_err(&fmd->v4l2_dev,
+ "Failed to register FIMC-ISP (%d)\n", ret);
+ return ret;
+ }
+
+ fmd->fimc_is = is;
+ return 0;
+}
+
+static int fimc_md_register_platform_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ int plat_entity)
+{
+ struct device *dev = &pdev->dev;
+ int ret = -EPROBE_DEFER;
+ void *drvdata;
+
+ /* Lock to ensure dev->driver won't change. */
+ device_lock(dev);
+
+ if (!dev->driver || !try_module_get(dev->driver->owner))
+ goto dev_unlock;
+
+ drvdata = dev_get_drvdata(dev);
+	/* Some subdev didn't probe successfully; drvdata is then NULL */
+ if (drvdata) {
+ switch (plat_entity) {
+ case IDX_FIMC:
+ ret = register_fimc_entity(fmd, drvdata);
+ break;
+ case IDX_FLITE:
+ ret = register_fimc_lite_entity(fmd, drvdata);
+ break;
+ case IDX_CSIS:
+ ret = register_csis_entity(fmd, pdev, drvdata);
+ break;
+ case IDX_IS_ISP:
+ ret = register_fimc_is_entity(fmd, drvdata);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ }
+
+ module_put(dev->driver->owner);
+dev_unlock:
+ device_unlock(dev);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&fmd->pdev->dev, "deferring %s device registration\n",
+ dev_name(dev));
+ else if (ret < 0)
+ dev_err(&fmd->pdev->dev, "%s device registration failed (%d)\n",
+ dev_name(dev), ret);
+ return ret;
+}
+
+/* Register FIMC, FIMC-LITE and CSIS media entities */
+static int fimc_md_register_platform_entities(struct fimc_md *fmd,
+ struct device_node *parent)
+{
+ struct device_node *node;
+ int ret = 0;
+
+ for_each_available_child_of_node(parent, node) {
+ struct platform_device *pdev;
+ int plat_entity = -1;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ continue;
+
+		/* If the driver of any entity isn't ready, try all again later. */
+ if (!strcmp(node->name, CSIS_OF_NODE_NAME))
+ plat_entity = IDX_CSIS;
+ else if (!strcmp(node->name, FIMC_IS_OF_NODE_NAME))
+ plat_entity = IDX_IS_ISP;
+ else if (!strcmp(node->name, FIMC_LITE_OF_NODE_NAME))
+ plat_entity = IDX_FLITE;
+ else if (!strcmp(node->name, FIMC_OF_NODE_NAME) &&
+ !of_property_read_bool(node, "samsung,lcd-wb"))
+ plat_entity = IDX_FIMC;
+
+ if (plat_entity >= 0)
+ ret = fimc_md_register_platform_entity(fmd, pdev,
+ plat_entity);
+ put_device(&pdev->dev);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static void fimc_md_unregister_entities(struct fimc_md *fmd)
+{
+ int i;
+
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ struct fimc_dev *dev = fmd->fimc[i];
+ if (dev == NULL)
+ continue;
+ v4l2_device_unregister_subdev(&dev->vid_cap.subdev);
+ dev->vid_cap.ve.pipe = NULL;
+ fmd->fimc[i] = NULL;
+ }
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ struct fimc_lite *dev = fmd->fimc_lite[i];
+ if (dev == NULL)
+ continue;
+ v4l2_device_unregister_subdev(&dev->subdev);
+ dev->ve.pipe = NULL;
+ fmd->fimc_lite[i] = NULL;
+ }
+ for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
+ if (fmd->csis[i].sd == NULL)
+ continue;
+ v4l2_device_unregister_subdev(fmd->csis[i].sd);
+ fmd->csis[i].sd = NULL;
+ }
+
+ if (fmd->fimc_is)
+ v4l2_device_unregister_subdev(&fmd->fimc_is->isp.subdev);
+
+ v4l2_info(&fmd->v4l2_dev, "Unregistered all entities\n");
+}
+
+/**
+ * __fimc_md_create_fimc_sink_links - create links to all FIMC entities
+ * @fmd: fimc media device
+ * @source: the source entity to create links to all fimc entities from
+ * @sensor: sensor subdev linked to the FIMC[fimc_id] entity, may be NULL
+ * @pad: the source entity pad index
+ * @link_mask: bitmask of the fimc devices for which links should be enabled
+ */
+static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
+ struct media_entity *source,
+ struct v4l2_subdev *sensor,
+ int pad, int link_mask)
+{
+ struct fimc_source_info *si = NULL;
+ struct media_entity *sink;
+ unsigned int flags = 0;
+ int i, ret = 0;
+
+ if (sensor) {
+ si = v4l2_get_subdev_hostdata(sensor);
+ /* Skip direct FIMC links in the logical FIMC-IS sensor path */
+ if (si && si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
+ ret = 1;
+ }
+
+ for (i = 0; !ret && i < FIMC_MAX_DEVS; i++) {
+ if (!fmd->fimc[i])
+ continue;
+ /*
+ * Some FIMC variants are not fitted with a camera capture
+ * interface. Skip creating a link from the sensor for those.
+ */
+ if (!fmd->fimc[i]->variant->has_cam_if)
+ continue;
+
+ flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
+
+ sink = &fmd->fimc[i]->vid_cap.subdev.entity;
+ ret = media_entity_create_link(source, pad, sink,
+ FIMC_SD_PAD_SINK_CAM, flags);
+ if (ret)
+ return ret;
+
+ /* Notify FIMC capture subdev entity */
+ ret = media_entity_call(sink, link_setup, &sink->pads[0],
+ &source->pads[pad], flags);
+ if (ret)
+ break;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
+ source->name, flags ? '=' : '-', sink->name);
+ }
+
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ if (!fmd->fimc_lite[i])
+ continue;
+
+ sink = &fmd->fimc_lite[i]->subdev.entity;
+ ret = media_entity_create_link(source, pad, sink,
+ FLITE_SD_PAD_SINK, 0);
+ if (ret)
+ return ret;
+
+ /* Notify FIMC-LITE subdev entity */
+ ret = media_entity_call(sink, link_setup, &sink->pads[0],
+ &source->pads[pad], 0);
+ if (ret)
+ break;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] -> [%s]\n",
+ source->name, sink->name);
+ }
+ return 0;
+}
+
+/* Create links from FIMC-LITE source pads to other entities */
+static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
+{
+ struct media_entity *source, *sink;
+ int i, ret = 0;
+
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ struct fimc_lite *fimc = fmd->fimc_lite[i];
+
+ if (fimc == NULL)
+ continue;
+
+ source = &fimc->subdev.entity;
+ sink = &fimc->ve.vdev.entity;
+ /* FIMC-LITE's subdev and video node */
+ ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_DMA,
+ sink, 0, 0);
+ if (ret)
+ break;
+ /* Link from FIMC-LITE to IS-ISP subdev */
+ sink = &fmd->fimc_is->isp.subdev.entity;
+ ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_ISP,
+ sink, 0, 0);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Create FIMC-IS links */
+static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
+{
+ struct fimc_isp *isp = &fmd->fimc_is->isp;
+ struct media_entity *source, *sink;
+ int i, ret;
+
+ source = &isp->subdev.entity;
+
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ if (fmd->fimc[i] == NULL)
+ continue;
+
+ /* Link from FIMC-IS-ISP subdev to FIMC */
+ sink = &fmd->fimc[i]->vid_cap.subdev.entity;
+ ret = media_entity_create_link(source, FIMC_ISP_SD_PAD_SRC_FIFO,
+ sink, FIMC_SD_PAD_SINK_FIFO, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Link from FIMC-IS-ISP subdev to fimc-is-isp.capture video node */
+ sink = &isp->video_capture.ve.vdev.entity;
+
+ /* Skip this link if the fimc-is-isp video node driver isn't built-in */
+ if (sink->num_pads == 0)
+ return 0;
+
+ return media_entity_create_link(source, FIMC_ISP_SD_PAD_SRC_DMA,
+ sink, 0, 0);
+}
+
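+/*
+ * Illustrative sketch of the default topology created by
+ * fimc_md_create_links() below, assuming one MIPI CSI-2 sensor and one
+ * parallel bus sensor (entity names are indicative only):
+ *
+ *   MIPI CSI-2 sensor ==> MIPI-CSIS.0 ==> FIMC.0 subdev ==> FIMC.0 video node
+ *   parallel bus sensor ================> FIMC.1 subdev ==> FIMC.1 video node
+ */
+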
+/**
+ * fimc_md_create_links - create default links between registered entities
+ *
+ * Parallel interface sensor entities are connected directly to FIMC capture
+ * entities. Sensors using the MIPI CSI-2 bus are connected through an
+ * immutable link with the CSI receiver entity specified by mux_id. Each
+ * registered CSIS entity has a link to every registered FIMC capture entity.
+ * Enabled links are created by default between each subsequent registered
+ * sensor and the subsequent FIMC capture entity. The number of default
+ * active links is determined by the number of available sensors or FIMC
+ * entities, whichever is less.
+ */
+static int fimc_md_create_links(struct fimc_md *fmd)
+{
+ struct v4l2_subdev *csi_sensors[CSIS_MAX_ENTITIES] = { NULL };
+ struct v4l2_subdev *sensor, *csis;
+ struct fimc_source_info *pdata;
+ struct media_entity *source, *sink;
+ int i, pad, fimc_id = 0, ret = 0;
+ u32 flags, link_mask = 0;
+
+ for (i = 0; i < fmd->num_sensors; i++) {
+ if (fmd->sensor[i].subdev == NULL)
+ continue;
+
+ sensor = fmd->sensor[i].subdev;
+ pdata = v4l2_get_subdev_hostdata(sensor);
+ if (!pdata)
+ continue;
+
+ source = NULL;
+
+ switch (pdata->sensor_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
+ if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES,
+ "Wrong CSI channel id: %d\n", pdata->mux_id))
+ return -EINVAL;
+
+ csis = fmd->csis[pdata->mux_id].sd;
+ if (WARN(csis == NULL,
+ "MIPI-CSI interface specified "
+ "but s5p-csis module is not loaded!\n"))
+ return -EINVAL;
+
+ pad = sensor->entity.num_pads - 1;
+ ret = media_entity_create_link(&sensor->entity, pad,
+ &csis->entity, CSIS_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]\n",
+ sensor->entity.name, csis->entity.name);
+
+ source = NULL;
+ csi_sensors[pdata->mux_id] = sensor;
+ break;
+
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
+ source = &sensor->entity;
+ pad = 0;
+ break;
+
+ default:
+ v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n",
+ pdata->sensor_bus_type);
+ return -EINVAL;
+ }
+ if (source == NULL)
+ continue;
+
+ link_mask = 1 << fimc_id++;
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
+ pad, link_mask);
+ }
+
+ for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
+ if (fmd->csis[i].sd == NULL)
+ continue;
+
+ source = &fmd->csis[i].sd->entity;
+ pad = CSIS_PAD_SOURCE;
+ sensor = csi_sensors[i];
+
+ link_mask = 1 << fimc_id++;
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
+ pad, link_mask);
+ }
+
+ /* Create immutable links between each FIMC's subdev and video node */
+ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ if (!fmd->fimc[i])
+ continue;
+
+ source = &fmd->fimc[i]->vid_cap.subdev.entity;
+ sink = &fmd->fimc[i]->vid_cap.ve.vdev.entity;
+
+ ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+ sink, 0, flags);
+ if (ret)
+ break;
+ }
+
+ ret = __fimc_md_create_flite_source_links(fmd);
+ if (ret < 0)
+ return ret;
+
+ if (fmd->use_isp)
+ ret = __fimc_md_create_fimc_is_links(fmd);
+
+ return ret;
+}
+
+/*
+ * Management of the peripheral sensor and CAM_BLK (PIXELASYNCMx) clocks.
+ */
+static void fimc_md_put_clocks(struct fimc_md *fmd)
+{
+ int i = FIMC_MAX_CAMCLKS;
+
+ while (--i >= 0) {
+ if (IS_ERR(fmd->camclk[i].clock))
+ continue;
+ clk_put(fmd->camclk[i].clock);
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+ }
+
+ /* Writeback (PIXELASYNCMx) clocks */
+ for (i = 0; i < FIMC_MAX_WBCLKS; i++) {
+ if (IS_ERR(fmd->wbclk[i]))
+ continue;
+ clk_put(fmd->wbclk[i]);
+ fmd->wbclk[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_md_get_clocks(struct fimc_md *fmd)
+{
+ struct device *dev = &fmd->pdev->dev;
+ char clk_name[32];
+ struct clk *clock;
+ int i, ret = 0;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++)
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
+ snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i);
+ clock = clk_get(dev, clk_name);
+
+ if (IS_ERR(clock)) {
+ dev_err(dev, "Failed to get clock: %s\n", clk_name);
+ ret = PTR_ERR(clock);
+ break;
+ }
+ fmd->camclk[i].clock = clock;
+ }
+ if (ret)
+ fimc_md_put_clocks(fmd);
+
+ if (!fmd->use_isp)
+ return 0;
+ /*
+ * For now get only PIXELASYNCM1 clock (Writeback B/ISP),
+ * leave PIXELASYNCM0 out for the LCD Writeback driver.
+ */
+ fmd->wbclk[CLK_IDX_WB_A] = ERR_PTR(-EINVAL);
+
+ for (i = CLK_IDX_WB_B; i < FIMC_MAX_WBCLKS; i++) {
+ snprintf(clk_name, sizeof(clk_name), "pxl_async%u", i);
+ clock = clk_get(dev, clk_name);
+ if (IS_ERR(clock)) {
+ v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s\n",
+ clk_name);
+ ret = PTR_ERR(clock);
+ break;
+ }
+ fmd->wbclk[i] = clock;
+ }
+ if (ret)
+ fimc_md_put_clocks(fmd);
+
+ return ret;
+}
+
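+/*
+ * Open or close the pipeline which the given video device node entity
+ * belongs to, depending on @enable. Video nodes not opened by user space
+ * (use_count == 0) are skipped.
+ */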
+static int __fimc_md_modify_pipeline(struct media_entity *entity, bool enable)
+{
+ struct exynos_video_entity *ve;
+ struct fimc_pipeline *p;
+ struct video_device *vdev;
+ int ret;
+
+ vdev = media_entity_to_video_device(entity);
+ if (vdev->entity.use_count == 0)
+ return 0;
+
+ ve = vdev_to_exynos_video_entity(vdev);
+ p = to_fimc_pipeline(ve->pipe);
+ /*
+ * Nothing to do if we are disabling the pipeline and some link has
+ * already been disconnected, in which case the p->subdevs array has
+ * already been cleared.
+ */
+ if (!enable && p->subdevs[IDX_SENSOR] == NULL)
+ return 0;
+
+ if (enable)
+ ret = __fimc_pipeline_open(ve->pipe, entity, true);
+ else
+ ret = __fimc_pipeline_close(ve->pipe);
+
+ if (ret == 0 && !enable)
+ memset(p->subdevs, 0, sizeof(p->subdevs));
+
+ return ret;
+}
+
+/* Locking: called with entity->parent->graph_mutex mutex held. */
+static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable)
+{
+ struct media_entity *entity_err = entity;
+ struct media_entity_graph graph;
+ int ret;
+
+ /*
+ * Walk current graph and call the pipeline open/close routine for each
+ * opened video node that belongs to the graph of entities connected
+ * through active links. This is needed as we cannot power on/off the
+ * subdevs in random order.
+ */
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ continue;
+
+ ret = __fimc_md_modify_pipeline(entity, enable);
+
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+ err:
+ media_entity_graph_walk_start(&graph, entity_err);
+
+ while ((entity_err = media_entity_graph_walk_next(&graph))) {
+ if (media_entity_type(entity_err) != MEDIA_ENT_T_DEVNODE)
+ continue;
+
+ __fimc_md_modify_pipeline(entity_err, !enable);
+
+ if (entity_err == entity)
+ break;
+ }
+
+ return ret;
+}
+
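+/*
+ * Media device link_notify callback: close the pipelines affected by a link
+ * that is about to be disabled and reopen them after a link has been enabled.
+ */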
+static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
+ unsigned int notification)
+{
+ struct media_entity *sink = link->sink->entity;
+ int ret = 0;
+
+ /* Before link disconnection */
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ ret = __fimc_md_modify_pipelines(sink, false);
+ else
+ ; /* TODO: Link state change validation */
+ /* After link activation */
+ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ ret = __fimc_md_modify_pipelines(sink, true);
+ }
+
+ return ret ? -EPIPE : 0;
+}
+
+static ssize_t fimc_md_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fimc_md *fmd = platform_get_drvdata(pdev);
+
+ if (fmd->user_subdev_api)
+ return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
+
+ return strlcpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE);
+}
+
+static ssize_t fimc_md_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fimc_md *fmd = platform_get_drvdata(pdev);
+ bool subdev_api;
+ int i;
+
+ if (!strcmp(buf, "vid-dev\n"))
+ subdev_api = false;
+ else if (!strcmp(buf, "sub-dev\n"))
+ subdev_api = true;
+ else
+ return count;
+
+ fmd->user_subdev_api = subdev_api;
+ for (i = 0; i < FIMC_MAX_DEVS; i++)
+ if (fmd->fimc[i])
+ fmd->fimc[i]->vid_cap.user_subdev_api = subdev_api;
+ return count;
+}
+/*
+ * This device attribute is to select video pipeline configuration method.
+ * There are following valid values:
+ * vid-dev - for V4L2 video node API only, subdevice will be configured
+ * by the host driver.
+ * sub-dev - for media controller API, subdevs must be configured in user
+ * space before starting streaming.
+ */
+static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
+ fimc_md_sysfs_show, fimc_md_sysfs_store);
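+
+/*
+ * Example interaction with this attribute from user space (the sysfs path is
+ * platform dependent and shown here for illustration only):
+ *
+ *   # cat /sys/devices/platform/.../subdev_conf_mode
+ *   Sub-device API (sub-dev)
+ *   # echo vid-dev > /sys/devices/platform/.../subdev_conf_mode
+ */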
+
+static int fimc_md_get_pinctrl(struct fimc_md *fmd)
+{
+ struct device *dev = &fmd->pdev->dev;
+ struct fimc_pinctrl *pctl = &fmd->pinctl;
+
+ pctl->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(pctl->pinctrl))
+ return PTR_ERR(pctl->pinctrl);
+
+ pctl->state_default = pinctrl_lookup_state(pctl->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(pctl->state_default))
+ return PTR_ERR(pctl->state_default);
+
+ pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
+ PINCTRL_STATE_IDLE);
+ return 0;
+}
+
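+/*
+ * The sclk_cam output clocks are exposed to image sensor drivers through the
+ * common clock framework (see fimc_md_register_clk_provider() below). The
+ * prepare/unprepare handlers map to runtime PM get/put calls on the first
+ * registered FIMC device (fmd->pmf), so that device stays active while any
+ * of these clocks is prepared.
+ */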
+static int cam_clk_prepare(struct clk_hw *hw)
+{
+ struct cam_clk *camclk = to_cam_clk(hw);
+ int ret;
+
+ if (camclk->fmd->pmf == NULL)
+ return -ENODEV;
+
+ ret = pm_runtime_get_sync(camclk->fmd->pmf);
+ return ret < 0 ? ret : 0;
+}
+
+static void cam_clk_unprepare(struct clk_hw *hw)
+{
+ struct cam_clk *camclk = to_cam_clk(hw);
+
+ if (camclk->fmd->pmf == NULL)
+ return;
+
+ pm_runtime_put_sync(camclk->fmd->pmf);
+}
+
+static const struct clk_ops cam_clk_ops = {
+ .prepare = cam_clk_prepare,
+ .unprepare = cam_clk_unprepare,
+};
+
+static void fimc_md_unregister_clk_provider(struct fimc_md *fmd)
+{
+ struct cam_clk_provider *cp = &fmd->clk_provider;
+ unsigned int i;
+
+ if (cp->of_node)
+ of_clk_del_provider(cp->of_node);
+
+ for (i = 0; i < cp->num_clocks; i++)
+ clk_unregister(cp->clks[i]);
+}
+
+static int fimc_md_register_clk_provider(struct fimc_md *fmd)
+{
+ struct cam_clk_provider *cp = &fmd->clk_provider;
+ struct device *dev = &fmd->pdev->dev;
+ int i, ret;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
+ struct cam_clk *camclk = &cp->camclk[i];
+ struct clk_init_data init;
+ const char *p_name;
+
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", i, &init.name);
+ if (ret < 0)
+ break;
+
+ p_name = __clk_get_name(fmd->camclk[i].clock);
+
+ /* It's safe since clk_register() will duplicate the string. */
+ init.parent_names = &p_name;
+ init.num_parents = 1;
+ init.ops = &cam_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ camclk->hw.init = &init;
+ camclk->fmd = fmd;
+
+ cp->clks[i] = clk_register(NULL, &camclk->hw);
+ if (IS_ERR(cp->clks[i])) {
+ dev_err(dev, "failed to register clock: %s (%ld)\n",
+ init.name, PTR_ERR(cp->clks[i]));
+ ret = PTR_ERR(cp->clks[i]);
+ goto err;
+ }
+ cp->num_clocks++;
+ }
+
+ if (cp->num_clocks == 0) {
+ dev_warn(dev, "clk provider not registered\n");
+ return 0;
+ }
+
+ cp->clk_data.clks = cp->clks;
+ cp->clk_data.clk_num = cp->num_clocks;
+ cp->of_node = dev->of_node;
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
+ &cp->clk_data);
+ if (ret == 0)
+ return 0;
+err:
+ fimc_md_unregister_clk_provider(fmd);
+ return ret;
+}
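+
+/*
+ * With the provider above registered, a sensor node in the device tree can
+ * reference one of the sclk_cam outputs, e.g. (illustrative only; the clock
+ * index, the provider phandle and the clock-names value depend on the board
+ * and the sensor binding):
+ *
+ *   clocks = <&camera 1>;
+ *   clock-names = "extclk";
+ */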
+
+static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct fimc_md *fmd = notifier_to_fimc_md(notifier);
+ struct fimc_sensor_info *si = NULL;
+ int i;
+
+ /* Find platform data for this sensor subdev */
+ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
+ if (fmd->sensor[i].asd.match.of.node == subdev->dev->of_node)
+ si = &fmd->sensor[i];
+
+ if (si == NULL)
+ return -EINVAL;
+
+ v4l2_set_subdev_hostdata(subdev, &si->pdata);
+
+ if (si->pdata.fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
+ subdev->grp_id = GRP_ID_FIMC_IS_SENSOR;
+ else
+ subdev->grp_id = GRP_ID_SENSOR;
+
+ si->subdev = subdev;
+
+ v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice: %s (%d)\n",
+ subdev->name, fmd->num_sensors);
+
+ fmd->num_sensors++;
+
+ return 0;
+}
+
+static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct fimc_md *fmd = notifier_to_fimc_md(notifier);
+ int ret;
+
+ mutex_lock(&fmd->media_dev.graph_mutex);
+
+ ret = fimc_md_create_links(fmd);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
+unlock:
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ return ret;
+}
+
+static int fimc_md_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct v4l2_device *v4l2_dev;
+ struct fimc_md *fmd;
+ int ret;
+
+ fmd = devm_kzalloc(dev, sizeof(*fmd), GFP_KERNEL);
+ if (!fmd)
+ return -ENOMEM;
+
+ spin_lock_init(&fmd->slock);
+ INIT_LIST_HEAD(&fmd->pipelines);
+ fmd->pdev = pdev;
+
+ strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC",
+ sizeof(fmd->media_dev.model));
+ fmd->media_dev.link_notify = fimc_md_link_notify;
+ fmd->media_dev.dev = dev;
+
+ v4l2_dev = &fmd->v4l2_dev;
+ v4l2_dev->mdev = &fmd->media_dev;
+ v4l2_dev->notify = fimc_sensor_notify;
+ strlcpy(v4l2_dev->name, "s5p-fimc-md", sizeof(v4l2_dev->name));
+
+ fmd->use_isp = fimc_md_is_isp_available(dev->of_node);
+ fmd->user_subdev_api = true;
+
+ ret = v4l2_device_register(dev, &fmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
+ return ret;
+ }
+
+ ret = media_device_register(&fmd->media_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret);
+ goto err_v4l2_dev;
+ }
+
+ ret = fimc_md_get_clocks(fmd);
+ if (ret)
+ goto err_md;
+
+ ret = fimc_md_get_pinctrl(fmd);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pinctrl: %d\n", ret);
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, fmd);
+
+ /* Protect the media graph while we're registering entities */
+ mutex_lock(&fmd->media_dev.graph_mutex);
+
+ ret = fimc_md_register_platform_entities(fmd, dev->of_node);
+ if (ret) {
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ goto err_clk;
+ }
+
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret) {
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ goto err_m_ent;
+ }
+
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+
+ ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+ if (ret)
+ goto err_m_ent;
+ /*
+ * FIMC platform devices need to be registered before the sclk_cam
+ * clocks provider, as one of these devices needs to be activated
+ * to enable the clock.
+ */
+ ret = fimc_md_register_clk_provider(fmd);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "clock provider registration failed\n");
+ goto err_attr;
+ }
+
+ if (fmd->num_sensors > 0) {
+ fmd->subdev_notifier.subdevs = fmd->async_subdevs;
+ fmd->subdev_notifier.num_subdevs = fmd->num_sensors;
+ fmd->subdev_notifier.bound = subdev_notifier_bound;
+ fmd->subdev_notifier.complete = subdev_notifier_complete;
+ fmd->num_sensors = 0;
+
+ ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
+ &fmd->subdev_notifier);
+ if (ret)
+ goto err_clk_p;
+ }
+
+ return 0;
+
+err_clk_p:
+ fimc_md_unregister_clk_provider(fmd);
+err_attr:
+ device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+err_clk:
+ fimc_md_put_clocks(fmd);
+err_m_ent:
+ fimc_md_unregister_entities(fmd);
+err_md:
+ media_device_unregister(&fmd->media_dev);
+err_v4l2_dev:
+ v4l2_device_unregister(&fmd->v4l2_dev);
+ return ret;
+}
+
+static int fimc_md_remove(struct platform_device *pdev)
+{
+ struct fimc_md *fmd = platform_get_drvdata(pdev);
+
+ if (!fmd)
+ return 0;
+
+ fimc_md_unregister_clk_provider(fmd);
+ v4l2_async_notifier_unregister(&fmd->subdev_notifier);
+
+ v4l2_device_unregister(&fmd->v4l2_dev);
+ device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+ fimc_md_unregister_entities(fmd);
+ fimc_md_pipelines_free(fmd);
+ media_device_unregister(&fmd->media_dev);
+ fimc_md_put_clocks(fmd);
+
+ return 0;
+}
+
+static struct platform_device_id fimc_driver_ids[] __always_unused = {
+ { .name = "s5p-fimc-md" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct of_device_id fimc_md_of_match[] = {
+ { .compatible = "samsung,fimc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fimc_md_of_match);
+
+static struct platform_driver fimc_md_driver = {
+ .probe = fimc_md_probe,
+ .remove = fimc_md_remove,
+ .driver = {
+ .of_match_table = of_match_ptr(fimc_md_of_match),
+ .name = "s5p-fimc-md",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init fimc_md_init(void)
+{
+ int ret;
+
+ request_module("s5p-csis");
+ ret = fimc_register_driver();
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&fimc_md_driver);
+}
+
+static void __exit fimc_md_exit(void)
+{
+ platform_driver_unregister(&fimc_md_driver);
+ fimc_unregister_driver();
+}
+
+module_init(fimc_md_init);
+module_exit(fimc_md_exit);
+
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0.1");
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
new file mode 100644
index 00000000000..03214541f14
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_MDEVICE_H_
+#define FIMC_MDEVICE_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/exynos-fimc.h>
+
+#include "fimc-core.h"
+#include "fimc-lite.h"
+#include "mipi-csis.h"
+
+#define FIMC_OF_NODE_NAME "fimc"
+#define FIMC_LITE_OF_NODE_NAME "fimc-lite"
+#define FIMC_IS_OF_NODE_NAME "fimc-is"
+#define CSIS_OF_NODE_NAME "csis"
+
+#define PINCTRL_STATE_IDLE "idle"
+
+#define FIMC_MAX_SENSORS 4
+#define FIMC_MAX_CAMCLKS 2
+#define DEFAULT_SENSOR_CLK_FREQ 24000000U
+
+/* LCD/ISP Writeback clocks (PIXELASYNCMx) */
+enum {
+ CLK_IDX_WB_A,
+ CLK_IDX_WB_B,
+ FIMC_MAX_WBCLKS
+};
+
+enum fimc_subdev_index {
+ IDX_SENSOR,
+ IDX_CSIS,
+ IDX_FLITE,
+ IDX_IS_ISP,
+ IDX_FIMC,
+ IDX_MAX,
+};
+
+/*
+ * This structure represents a chain of media entities, including a data
+ * source entity (e.g. an image sensor subdevice), a data capture entity
+ * (a video capture device node) and the remaining entities in the chain.
+ */
+struct fimc_pipeline {
+ struct exynos_media_pipeline ep;
+ struct list_head list;
+ struct media_entity *vdev_entity;
+ struct v4l2_subdev *subdevs[IDX_MAX];
+};
+
+#define to_fimc_pipeline(_ep) container_of(_ep, struct fimc_pipeline, ep)
+
+struct fimc_csis_info {
+ struct v4l2_subdev *sd;
+ int id;
+};
+
+struct fimc_camclk_info {
+ struct clk *clock;
+ int use_count;
+ unsigned long frequency;
+};
+
+/**
+ * struct fimc_sensor_info - image data source subdev information
+ * @pdata: sensor's attributes passed as the media device's platform data
+ * @asd: asynchronous subdev registration data structure
+ * @subdev: image sensor v4l2 subdev
+ * @host: fimc device the sensor is currently linked to
+ *
+ * This data structure applies to the image sensor and writeback subdevs.
+ */
+struct fimc_sensor_info {
+ struct fimc_source_info pdata;
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+ struct fimc_dev *host;
+};
+
+struct cam_clk {
+ struct clk_hw hw;
+ struct fimc_md *fmd;
+};
+#define to_cam_clk(_hw) container_of(_hw, struct cam_clk, hw)
+
+/**
+ * struct fimc_md - fimc media device information
+ * @csis: MIPI CSIS subdevs data
+ * @sensor: array of registered sensor subdevs
+ * @num_sensors: actual number of registered sensors
+ * @camclk: external sensor clock information
+ * @fimc: array of registered fimc devices
+ * @fimc_is: fimc-is data structure
+ * @use_isp: set to true when FIMC-IS subsystem is used
+ * @pmf: handle to the CAMCLK clock control FIMC helper device
+ * @media_dev: top level media device
+ * @v4l2_dev: top level v4l2_device holding up the subdevs
+ * @pdev: platform device this media device is hooked up into
+ * @pinctrl: camera port pinctrl handle
+ * @state_default: pinctrl default state handle
+ * @state_idle: pinctrl idle state handle
+ * @clk_provider: CAMCLK clock provider structure
+ * @user_subdev_api: true if subdevs are not configured by the host driver
+ * @slock: spinlock protecting @sensor array
+ */
+struct fimc_md {
+ struct fimc_csis_info csis[CSIS_MAX_ENTITIES];
+ struct fimc_sensor_info sensor[FIMC_MAX_SENSORS];
+ int num_sensors;
+ struct fimc_camclk_info camclk[FIMC_MAX_CAMCLKS];
+ struct clk *wbclk[FIMC_MAX_WBCLKS];
+ struct fimc_lite *fimc_lite[FIMC_LITE_MAX_DEVS];
+ struct fimc_dev *fimc[FIMC_MAX_DEVS];
+ struct fimc_is *fimc_is;
+ bool use_isp;
+ struct device *pmf;
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct platform_device *pdev;
+
+ struct fimc_pinctrl {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *state_default;
+ struct pinctrl_state *state_idle;
+ } pinctl;
+
+ struct cam_clk_provider {
+ struct clk *clks[FIMC_MAX_CAMCLKS];
+ struct clk_onecell_data clk_data;
+ struct device_node *of_node;
+ struct cam_clk camclk[FIMC_MAX_CAMCLKS];
+ int num_clocks;
+ } clk_provider;
+
+ struct v4l2_async_notifier subdev_notifier;
+ struct v4l2_async_subdev *async_subdevs[FIMC_MAX_SENSORS];
+
+ bool user_subdev_api;
+ spinlock_t slock;
+ struct list_head pipelines;
+};
+
+static inline
+struct fimc_sensor_info *source_to_sensor_info(struct fimc_source_info *si)
+{
+ return container_of(si, struct fimc_sensor_info, pdata);
+}
+
+static inline struct fimc_md *entity_to_fimc_mdev(struct media_entity *me)
+{
+ return me->parent == NULL ? NULL :
+ container_of(me->parent, struct fimc_md, media_dev);
+}
+
+static inline struct fimc_md *notifier_to_fimc_md(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct fimc_md, subdev_notifier);
+}
+
+static inline void fimc_md_graph_lock(struct exynos_video_entity *ve)
+{
+ mutex_lock(&ve->vdev.entity.parent->graph_mutex);
+}
+
+static inline void fimc_md_graph_unlock(struct exynos_video_entity *ve)
+{
+ mutex_unlock(&ve->vdev.entity.parent->graph_mutex);
+}
+
+int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
+
+#ifdef CONFIG_OF
+static inline bool fimc_md_is_isp_available(struct device_node *node)
+{
+ node = of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME);
+ return node ? of_device_is_available(node) : false;
+}
+#else
+#define fimc_md_is_isp_available(node) (false)
+#endif /* CONFIG_OF */
+
+static inline struct v4l2_subdev *__fimc_md_get_subdev(
+ struct exynos_media_pipeline *ep,
+ unsigned int index)
+{
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+
+ if (!p || index >= IDX_MAX)
+ return NULL;
+ else
+ return p->subdevs[index];
+}
+
+#endif
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
new file mode 100644
index 00000000000..ae54ef5f535
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -0,0 +1,1052 @@
+/*
+ * Samsung S5P/EXYNOS SoC series MIPI-CSI receiver driver
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <media/exynos-fimc.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-subdev.h>
+
+#include "mipi-csis.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* Register map definition */
+
+/* CSIS global control */
+#define S5PCSIS_CTRL 0x00
+#define S5PCSIS_CTRL_DPDN_DEFAULT (0 << 31)
+#define S5PCSIS_CTRL_DPDN_SWAP (1 << 31)
+#define S5PCSIS_CTRL_ALIGN_32BIT (1 << 20)
+#define S5PCSIS_CTRL_UPDATE_SHADOW (1 << 16)
+#define S5PCSIS_CTRL_WCLK_EXTCLK (1 << 8)
+#define S5PCSIS_CTRL_RESET (1 << 4)
+#define S5PCSIS_CTRL_ENABLE (1 << 0)
+
+/* D-PHY control */
+#define S5PCSIS_DPHYCTRL 0x04
+#define S5PCSIS_DPHYCTRL_HSS_MASK (0x1f << 27)
+#define S5PCSIS_DPHYCTRL_ENABLE (0x1f << 0)
+
+#define S5PCSIS_CONFIG 0x08
+#define S5PCSIS_CFG_FMT_YCBCR422_8BIT (0x1e << 2)
+#define S5PCSIS_CFG_FMT_RAW8 (0x2a << 2)
+#define S5PCSIS_CFG_FMT_RAW10 (0x2b << 2)
+#define S5PCSIS_CFG_FMT_RAW12 (0x2c << 2)
+/* User defined formats, x = 1...4 */
+#define S5PCSIS_CFG_FMT_USER(x) ((0x30 + x - 1) << 2)
+#define S5PCSIS_CFG_FMT_MASK (0x3f << 2)
+#define S5PCSIS_CFG_NR_LANE_MASK 3
+
+/* Interrupt mask */
+#define S5PCSIS_INTMSK 0x10
+#define S5PCSIS_INTMSK_EVEN_BEFORE (1 << 31)
+#define S5PCSIS_INTMSK_EVEN_AFTER (1 << 30)
+#define S5PCSIS_INTMSK_ODD_BEFORE (1 << 29)
+#define S5PCSIS_INTMSK_ODD_AFTER (1 << 28)
+#define S5PCSIS_INTMSK_FRAME_START (1 << 27)
+#define S5PCSIS_INTMSK_FRAME_END (1 << 26)
+#define S5PCSIS_INTMSK_ERR_SOT_HS (1 << 12)
+#define S5PCSIS_INTMSK_ERR_LOST_FS (1 << 5)
+#define S5PCSIS_INTMSK_ERR_LOST_FE (1 << 4)
+#define S5PCSIS_INTMSK_ERR_OVER (1 << 3)
+#define S5PCSIS_INTMSK_ERR_ECC (1 << 2)
+#define S5PCSIS_INTMSK_ERR_CRC (1 << 1)
+#define S5PCSIS_INTMSK_ERR_UNKNOWN (1 << 0)
+#define S5PCSIS_INTMSK_EXYNOS4_EN_ALL 0xf000103f
+#define S5PCSIS_INTMSK_EXYNOS5_EN_ALL 0xfc00103f
+
+/* Interrupt source */
+#define S5PCSIS_INTSRC 0x14
+#define S5PCSIS_INTSRC_EVEN_BEFORE (1 << 31)
+#define S5PCSIS_INTSRC_EVEN_AFTER (1 << 30)
+#define S5PCSIS_INTSRC_EVEN (0x3 << 30)
+#define S5PCSIS_INTSRC_ODD_BEFORE (1 << 29)
+#define S5PCSIS_INTSRC_ODD_AFTER (1 << 28)
+#define S5PCSIS_INTSRC_ODD (0x3 << 28)
+#define S5PCSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
+#define S5PCSIS_INTSRC_FRAME_START (1 << 27)
+#define S5PCSIS_INTSRC_FRAME_END (1 << 26)
+#define S5PCSIS_INTSRC_ERR_SOT_HS (0xf << 12)
+#define S5PCSIS_INTSRC_ERR_LOST_FS (1 << 5)
+#define S5PCSIS_INTSRC_ERR_LOST_FE (1 << 4)
+#define S5PCSIS_INTSRC_ERR_OVER (1 << 3)
+#define S5PCSIS_INTSRC_ERR_ECC (1 << 2)
+#define S5PCSIS_INTSRC_ERR_CRC (1 << 1)
+#define S5PCSIS_INTSRC_ERR_UNKNOWN (1 << 0)
+#define S5PCSIS_INTSRC_ERRORS 0xf03f
+
+/* Pixel resolution */
+#define S5PCSIS_RESOL 0x2c
+#define CSIS_MAX_PIX_WIDTH 0xffff
+#define CSIS_MAX_PIX_HEIGHT 0xffff
+
+/* Non-image packet data buffers */
+#define S5PCSIS_PKTDATA_ODD 0x2000
+#define S5PCSIS_PKTDATA_EVEN 0x3000
+#define S5PCSIS_PKTDATA_SIZE SZ_4K
+
+enum {
+ CSIS_CLK_MUX,
+ CSIS_CLK_GATE,
+};
+
+static char *csi_clock_name[] = {
+ [CSIS_CLK_MUX] = "sclk_csis",
+ [CSIS_CLK_GATE] = "csis",
+};
+#define NUM_CSIS_CLOCKS ARRAY_SIZE(csi_clock_name)
+#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+
+static const char * const csis_supply_name[] = {
+ "vddcore", /* CSIS Core (1.0V, 1.1V or 1.2V) suppply */
+ "vddio", /* CSIS I/O and PLL (1.8V) supply */
+};
+#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name)
+
+enum {
+ ST_POWERED = 1,
+ ST_STREAMING = 2,
+ ST_SUSPENDED = 4,
+};
+
+struct s5pcsis_event {
+ u32 mask;
+ const char * const name;
+ unsigned int counter;
+};
+
+static const struct s5pcsis_event s5pcsis_events[] = {
+ /* Errors */
+ { S5PCSIS_INTSRC_ERR_SOT_HS, "SOT Error" },
+ { S5PCSIS_INTSRC_ERR_LOST_FS, "Lost Frame Start Error" },
+ { S5PCSIS_INTSRC_ERR_LOST_FE, "Lost Frame End Error" },
+ { S5PCSIS_INTSRC_ERR_OVER, "FIFO Overflow Error" },
+ { S5PCSIS_INTSRC_ERR_ECC, "ECC Error" },
+ { S5PCSIS_INTSRC_ERR_CRC, "CRC Error" },
+ { S5PCSIS_INTSRC_ERR_UNKNOWN, "Unknown Error" },
+ /* Non-image data receive events */
+ { S5PCSIS_INTSRC_EVEN_BEFORE, "Non-image data before even frame" },
+ { S5PCSIS_INTSRC_EVEN_AFTER, "Non-image data after even frame" },
+ { S5PCSIS_INTSRC_ODD_BEFORE, "Non-image data before odd frame" },
+ { S5PCSIS_INTSRC_ODD_AFTER, "Non-image data after odd frame" },
+ /* Frame start/end */
+ { S5PCSIS_INTSRC_FRAME_START, "Frame Start" },
+ { S5PCSIS_INTSRC_FRAME_END, "Frame End" },
+};
+#define S5PCSIS_NUM_EVENTS ARRAY_SIZE(s5pcsis_events)
+
+struct csis_pktbuf {
+ u32 *data;
+ unsigned int len;
+};
+
+struct csis_drvdata {
+ /* Mask of all used interrupts in S5PCSIS_INTMSK register */
+ u32 interrupt_mask;
+};
+
+/**
+ * struct csis_state - the driver's internal state data structure
+ * @lock: mutex serializing the subdev and power management operations,
+ * protecting @format and @flags members
+ * @pads: CSIS pads array
+ * @sd: v4l2_subdev associated with CSIS device instance
+ * @index: the hardware instance index
+ * @pdev: CSIS platform device
+ * @phy: pointer to the CSIS generic PHY
+ * @regs: mmaped I/O registers memory
+ * @supplies: CSIS regulator supplies
+ * @clock: CSIS clocks
+ * @irq: requested s5p-mipi-csis irq number
+ * @interrupt_mask: interrupt mask of all the used interrupts
+ * @flags: the state variable for power and streaming control
+ * @clock_frequency: device bus clock frequency
+ * @hs_settle: HS-RX settle time
+ * @num_lanes: number of MIPI-CSI data lanes used
+ * @max_num_lanes: maximum number of MIPI-CSI data lanes supported
+ * @wclk_ext: CSI wrapper clock: 0 - bus clock, 1 - external SCLK_CAM
+ * @csis_fmt: current CSIS pixel format
+ * @format: common media bus format for the source and sink pad
+ * @slock: spinlock protecting structure members below
+ * @pkt_buf: the frame embedded (non-image) data buffer
+ * @events: MIPI-CSIS event (error) counters
+ */
+struct csis_state {
+ struct mutex lock;
+ struct media_pad pads[CSIS_PADS_NUM];
+ struct v4l2_subdev sd;
+ u8 index;
+ struct platform_device *pdev;
+ struct phy *phy;
+ void __iomem *regs;
+ struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES];
+ struct clk *clock[NUM_CSIS_CLOCKS];
+ int irq;
+ u32 interrupt_mask;
+ u32 flags;
+
+ u32 clk_frequency;
+ u32 hs_settle;
+ u32 num_lanes;
+ u32 max_num_lanes;
+ u8 wclk_ext;
+
+ const struct csis_pix_format *csis_fmt;
+ struct v4l2_mbus_framefmt format;
+
+ spinlock_t slock;
+ struct csis_pktbuf pkt_buf;
+ struct s5pcsis_event events[S5PCSIS_NUM_EVENTS];
+};
+
+/**
+ * struct csis_pix_format - CSIS pixel format description
+ * @pix_width_alignment: horizontal pixel alignment, width will be
+ * multiple of 2^pix_width_alignment
+ * @code: corresponding media bus code
+ * @fmt_reg: S5PCSIS_CONFIG register value
+ * @data_alignment: MIPI-CSI data alignment in bits
+ */
+struct csis_pix_format {
+ unsigned int pix_width_alignment;
+ enum v4l2_mbus_pixelcode code;
+ u32 fmt_reg;
+ u8 data_alignment;
+};
+
+static const struct csis_pix_format s5pcsis_formats[] = {
+ {
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 32,
+ }, {
+ .code = V4L2_MBUS_FMT_JPEG_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
+ .data_alignment = 32,
+ }, {
+ .code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
+ .data_alignment = 32,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW8,
+ .data_alignment = 24,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW10,
+ .data_alignment = 24,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW12,
+ .data_alignment = 24,
+ }
+};
+
+#define s5pcsis_write(__csis, __r, __v) writel(__v, __csis->regs + __r)
+#define s5pcsis_read(__csis, __r) readl(__csis->regs + __r)
+
+static struct csis_state *sd_to_csis_state(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csis_state, sd);
+}
+
+static const struct csis_pix_format *find_csis_format(
+ struct v4l2_mbus_framefmt *mf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5pcsis_formats); i++)
+ if (mf->code == s5pcsis_formats[i].code)
+ return &s5pcsis_formats[i];
+ return NULL;
+}
+
+static void s5pcsis_enable_interrupts(struct csis_state *state, bool on)
+{
+ u32 val = s5pcsis_read(state, S5PCSIS_INTMSK);
+ if (on)
+ val |= state->interrupt_mask;
+ else
+ val &= ~state->interrupt_mask;
+ s5pcsis_write(state, S5PCSIS_INTMSK, val);
+}
+
+static void s5pcsis_reset(struct csis_state *state)
+{
+ u32 val = s5pcsis_read(state, S5PCSIS_CTRL);
+
+ s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_RESET);
+ udelay(10);
+}
+
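+/*
+ * Enable or disable the CSIS core and its D-PHY; the D-PHY enable mask covers
+ * the clock lane plus the configured number of data lanes.
+ */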
+static void s5pcsis_system_enable(struct csis_state *state, int on)
+{
+ u32 val, mask;
+
+ val = s5pcsis_read(state, S5PCSIS_CTRL);
+ if (on)
+ val |= S5PCSIS_CTRL_ENABLE;
+ else
+ val &= ~S5PCSIS_CTRL_ENABLE;
+ s5pcsis_write(state, S5PCSIS_CTRL, val);
+
+ val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
+ val &= ~S5PCSIS_DPHYCTRL_ENABLE;
+ if (on) {
+ mask = (1 << (state->num_lanes + 1)) - 1;
+ val |= (mask & S5PCSIS_DPHYCTRL_ENABLE);
+ }
+ s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
+}
+
+/* Called with the state.lock mutex held */
+static void __s5pcsis_set_format(struct csis_state *state)
+{
+ struct v4l2_mbus_framefmt *mf = &state->format;
+ u32 val;
+
+ v4l2_dbg(1, debug, &state->sd, "fmt: %#x, %d x %d\n",
+ mf->code, mf->width, mf->height);
+
+ /* Color format */
+ val = s5pcsis_read(state, S5PCSIS_CONFIG);
+ val = (val & ~S5PCSIS_CFG_FMT_MASK) | state->csis_fmt->fmt_reg;
+ s5pcsis_write(state, S5PCSIS_CONFIG, val);
+
+ /* Pixel resolution */
+ val = (mf->width << 16) | mf->height;
+ s5pcsis_write(state, S5PCSIS_RESOL, val);
+}
+
+static void s5pcsis_set_hsync_settle(struct csis_state *state, int settle)
+{
+ u32 val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
+
+ val = (val & ~S5PCSIS_DPHYCTRL_HSS_MASK) | (settle << 27);
+ s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
+}
+
+static void s5pcsis_set_params(struct csis_state *state)
+{
+ u32 val;
+
+ val = s5pcsis_read(state, S5PCSIS_CONFIG);
+ val = (val & ~S5PCSIS_CFG_NR_LANE_MASK) | (state->num_lanes - 1);
+ s5pcsis_write(state, S5PCSIS_CONFIG, val);
+
+ __s5pcsis_set_format(state);
+ s5pcsis_set_hsync_settle(state, state->hs_settle);
+
+ val = s5pcsis_read(state, S5PCSIS_CTRL);
+ if (state->csis_fmt->data_alignment == 32)
+ val |= S5PCSIS_CTRL_ALIGN_32BIT;
+ else /* 24-bits */
+ val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
+
+ val &= ~S5PCSIS_CTRL_WCLK_EXTCLK;
+ if (state->wclk_ext)
+ val |= S5PCSIS_CTRL_WCLK_EXTCLK;
+ s5pcsis_write(state, S5PCSIS_CTRL, val);
+
+ /* Update the shadow register. */
+ val = s5pcsis_read(state, S5PCSIS_CTRL);
+ s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_UPDATE_SHADOW);
+}
+
+static void s5pcsis_clk_put(struct csis_state *state)
+{
+ int i;
+
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
+ if (IS_ERR(state->clock[i]))
+ continue;
+ clk_unprepare(state->clock[i]);
+ clk_put(state->clock[i]);
+ state->clock[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int s5pcsis_clk_get(struct csis_state *state)
+{
+ struct device *dev = &state->pdev->dev;
+ int i, ret;
+
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++)
+ state->clock[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
+ state->clock[i] = clk_get(dev, csi_clock_name[i]);
+ if (IS_ERR(state->clock[i])) {
+ ret = PTR_ERR(state->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(state->clock[i]);
+ if (ret < 0) {
+ clk_put(state->clock[i]);
+ state->clock[i] = ERR_PTR(-EINVAL);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ s5pcsis_clk_put(state);
+ dev_err(dev, "failed to get clock: %s\n", csi_clock_name[i]);
+ return ret;
+}
+
+static void dump_regs(struct csis_state *state, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CTRL" },
+ { 0x04, "DPHYCTRL" },
+ { 0x08, "CONFIG" },
+ { 0x0c, "DPHYSTS" },
+ { 0x10, "INTMSK" },
+ { 0x2c, "RESOL" },
+ { 0x38, "SDW_CONFIG" },
+ };
+ u32 i;
+
+ v4l2_info(&state->sd, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = s5pcsis_read(state, registers[i].offset);
+ v4l2_info(&state->sd, "%10s: 0x%08x\n", registers[i].name, cfg);
+ }
+}
+
+static void s5pcsis_start_stream(struct csis_state *state)
+{
+ s5pcsis_reset(state);
+ s5pcsis_set_params(state);
+ s5pcsis_system_enable(state, true);
+ s5pcsis_enable_interrupts(state, true);
+}
+
+static void s5pcsis_stop_stream(struct csis_state *state)
+{
+ s5pcsis_enable_interrupts(state, false);
+ s5pcsis_system_enable(state, false);
+}
+
+static void s5pcsis_clear_counters(struct csis_state *state)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&state->slock, flags);
+ for (i = 0; i < S5PCSIS_NUM_EVENTS; i++)
+ state->events[i].counter = 0;
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+static void s5pcsis_log_counters(struct csis_state *state, bool non_errors)
+{
+ int i = non_errors ? S5PCSIS_NUM_EVENTS : S5PCSIS_NUM_EVENTS - 4;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->slock, flags);
+
+ for (i--; i >= 0; i--) {
+ if (state->events[i].counter > 0 || debug)
+ v4l2_info(&state->sd, "%s events: %d\n",
+ state->events[i].name,
+ state->events[i].counter);
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+/*
+ * V4L2 subdev operations
+ */
+static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ struct device *dev = &state->pdev->dev;
+
+ if (on)
+ return pm_runtime_get_sync(dev);
+
+ return pm_runtime_put_sync(dev);
+}
+
+static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, sd, "%s: %d, state: 0x%x\n",
+ __func__, enable, state->flags);
+
+ if (enable) {
+ s5pcsis_clear_counters(state);
+ ret = pm_runtime_get_sync(&state->pdev->dev);
+ if (ret && ret != 1)
+ return ret;
+ }
+
+ mutex_lock(&state->lock);
+ if (enable) {
+ if (state->flags & ST_SUSPENDED) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ s5pcsis_start_stream(state);
+ state->flags |= ST_STREAMING;
+ } else {
+ s5pcsis_stop_stream(state);
+ state->flags &= ~ST_STREAMING;
+ if (debug > 0)
+ s5pcsis_log_counters(state, true);
+ }
+unlock:
+ mutex_unlock(&state->lock);
+ if (!enable)
+ pm_runtime_put(&state->pdev->dev);
+
+ return ret == 1 ? 0 : ret;
+}
+
+static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(s5pcsis_formats))
+ return -EINVAL;
+
+ code->code = s5pcsis_formats[code->index].code;
+ return 0;
+}
+
+static struct csis_pix_format const *s5pcsis_try_format(
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct csis_pix_format const *csis_fmt;
+
+ csis_fmt = find_csis_format(mf);
+ if (csis_fmt == NULL)
+ csis_fmt = &s5pcsis_formats[0];
+
+ mf->code = csis_fmt->code;
+ v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
+ csis_fmt->pix_width_alignment,
+ &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
+ 0);
+ return csis_fmt;
+}
+
+static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
+ struct csis_state *state, struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
+
+ return &state->format;
+}
+
+static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ struct csis_pix_format const *csis_fmt;
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = __s5pcsis_get_format(state, fh, fmt->which);
+
+ if (fmt->pad == CSIS_PAD_SOURCE) {
+ if (mf) {
+ mutex_lock(&state->lock);
+ fmt->format = *mf;
+ mutex_unlock(&state->lock);
+ }
+ return 0;
+ }
+ csis_fmt = s5pcsis_try_format(&fmt->format);
+ if (mf) {
+ mutex_lock(&state->lock);
+ *mf = fmt->format;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ state->csis_fmt = csis_fmt;
+ mutex_unlock(&state->lock);
+ }
+ return 0;
+}
+
+static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = __s5pcsis_get_format(state, fh, fmt->which);
+ if (!mf)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fmt->format = *mf;
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int s5pcsis_s_rx_buffer(struct v4l2_subdev *sd, void *buf,
+ unsigned int *size)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ unsigned long flags;
+
+ *size = min_t(unsigned int, *size, S5PCSIS_PKTDATA_SIZE);
+
+ spin_lock_irqsave(&state->slock, flags);
+ state->pkt_buf.data = buf;
+ state->pkt_buf.len = *size;
+ spin_unlock_irqrestore(&state->slock, flags);
+
+ return 0;
+}
+
+static int s5pcsis_log_status(struct v4l2_subdev *sd)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+
+ mutex_lock(&state->lock);
+ s5pcsis_log_counters(state, true);
+ if (debug && (state->flags & ST_POWERED))
+ dump_regs(state, __func__);
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int s5pcsis_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+
+ format->colorspace = V4L2_COLORSPACE_JPEG;
+ format->code = s5pcsis_formats[0].code;
+ format->width = S5PCSIS_DEF_PIX_WIDTH;
+ format->height = S5PCSIS_DEF_PIX_HEIGHT;
+ format->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops s5pcsis_sd_internal_ops = {
+ .open = s5pcsis_open,
+};
+
+static struct v4l2_subdev_core_ops s5pcsis_core_ops = {
+ .s_power = s5pcsis_s_power,
+ .log_status = s5pcsis_log_status,
+};
+
+static struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
+ .enum_mbus_code = s5pcsis_enum_mbus_code,
+ .get_fmt = s5pcsis_get_fmt,
+ .set_fmt = s5pcsis_set_fmt,
+};
+
+static struct v4l2_subdev_video_ops s5pcsis_video_ops = {
+ .s_rx_buffer = s5pcsis_s_rx_buffer,
+ .s_stream = s5pcsis_s_stream,
+};
+
+static struct v4l2_subdev_ops s5pcsis_subdev_ops = {
+ .core = &s5pcsis_core_ops,
+ .pad = &s5pcsis_pad_ops,
+ .video = &s5pcsis_video_ops,
+};
+
+static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
+{
+ struct csis_state *state = dev_id;
+ struct csis_pktbuf *pktbuf = &state->pkt_buf;
+ unsigned long flags;
+ u32 status;
+
+ status = s5pcsis_read(state, S5PCSIS_INTSRC);
+ spin_lock_irqsave(&state->slock, flags);
+
+ if ((status & S5PCSIS_INTSRC_NON_IMAGE_DATA) && pktbuf->data) {
+ u32 offset;
+
+ if (status & S5PCSIS_INTSRC_EVEN)
+ offset = S5PCSIS_PKTDATA_EVEN;
+ else
+ offset = S5PCSIS_PKTDATA_ODD;
+
+ memcpy(pktbuf->data, state->regs + offset, pktbuf->len);
+ pktbuf->data = NULL;
+ rmb();
+ }
+
+ /* Update the event/error counters */
+ if ((status & S5PCSIS_INTSRC_ERRORS) || debug) {
+ int i;
+ for (i = 0; i < S5PCSIS_NUM_EVENTS; i++) {
+ if (!(status & state->events[i].mask))
+ continue;
+ state->events[i].counter++;
+ v4l2_dbg(2, debug, &state->sd, "%s: %d\n",
+ state->events[i].name,
+ state->events[i].counter);
+ }
+ v4l2_dbg(2, debug, &state->sd, "status: %08x\n", status);
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+
+ s5pcsis_write(state, S5PCSIS_INTSRC, status);
+ return IRQ_HANDLED;
+}
+
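+/*
+ * Illustrative device tree fragment matching what s5pcsis_parse_dt() below
+ * expects (property values are examples only; see the Samsung FIMC device
+ * tree binding documentation for the authoritative description):
+ *
+ *   csis_0: csis@11880000 {
+ *       compatible = "samsung,exynos4210-csis";
+ *       clock-frequency = <166000000>;
+ *       bus-width = <4>;
+ *       port@3 {
+ *           reg = <3>;
+ *           csis0_ep: endpoint {
+ *               remote-endpoint = <&sensor_ep>;
+ *               data-lanes = <1 2>;
+ *               samsung,csis-hs-settle = <12>;
+ *               samsung,csis-wclk;
+ *           };
+ *       };
+ *   };
+ */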
+static int s5pcsis_parse_dt(struct platform_device *pdev,
+ struct csis_state *state)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct v4l2_of_endpoint endpoint;
+
+ if (of_property_read_u32(node, "clock-frequency",
+ &state->clk_frequency))
+ state->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+ if (of_property_read_u32(node, "bus-width",
+ &state->max_num_lanes))
+ return -EINVAL;
+
+ node = of_graph_get_next_endpoint(node, NULL);
+ if (!node) {
+ dev_err(&pdev->dev, "No port node at %s\n",
+ pdev->dev.of_node->full_name);
+ return -EINVAL;
+ }
+ /* Get port node and validate MIPI-CSI channel id. */
+ v4l2_of_parse_endpoint(node, &endpoint);
+
+ state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
+ if (state->index < 0 || state->index >= CSIS_MAX_ENTITIES)
+ return -ENXIO;
+
+ /* Get MIPI CSI-2 bus configuration from the endpoint node. */
+ of_property_read_u32(node, "samsung,csis-hs-settle",
+ &state->hs_settle);
+ state->wclk_ext = of_property_read_bool(node,
+ "samsung,csis-wclk");
+
+ state->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
+ of_node_put(node);
+
+ return 0;
+}
+
+static int s5pcsis_pm_resume(struct device *dev, bool runtime);
+static const struct of_device_id s5pcsis_of_match[];
+
+static int s5pcsis_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ const struct csis_drvdata *drv_data;
+ struct device *dev = &pdev->dev;
+ struct resource *mem_res;
+ struct csis_state *state;
+ int ret = -ENOMEM;
+ int i;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ spin_lock_init(&state->slock);
+ state->pdev = pdev;
+
+ of_id = of_match_node(s5pcsis_of_match, dev->of_node);
+ if (WARN_ON(of_id == NULL))
+ return -EINVAL;
+
+ drv_data = of_id->data;
+ state->interrupt_mask = drv_data->interrupt_mask;
+
+ ret = s5pcsis_parse_dt(pdev, state);
+ if (ret < 0)
+ return ret;
+
+ if (state->num_lanes == 0 || state->num_lanes > state->max_num_lanes) {
+ dev_err(dev, "Unsupported number of data lanes: %d (max. %d)\n",
+ state->num_lanes, state->max_num_lanes);
+ return -EINVAL;
+ }
+
+ state->phy = devm_phy_get(dev, "csis");
+ if (IS_ERR(state->phy))
+ return PTR_ERR(state->phy);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ state->regs = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+
+ state->irq = platform_get_irq(pdev, 0);
+ if (state->irq < 0) {
+ dev_err(dev, "Failed to get irq\n");
+ return state->irq;
+ }
+
+ for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
+ state->supplies[i].supply = csis_supply_name[i];
+
+ ret = devm_regulator_bulk_get(dev, CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ return ret;
+
+ ret = s5pcsis_clk_get(state);
+ if (ret < 0)
+ return ret;
+
+ if (state->clk_frequency)
+ ret = clk_set_rate(state->clock[CSIS_CLK_MUX],
+ state->clk_frequency);
+ else
+ dev_WARN(dev, "No clock frequency specified!\n");
+ if (ret < 0)
+ goto e_clkput;
+
+ ret = clk_enable(state->clock[CSIS_CLK_MUX]);
+ if (ret < 0)
+ goto e_clkput;
+
+ ret = devm_request_irq(dev, state->irq, s5pcsis_irq_handler,
+ 0, dev_name(dev), state);
+ if (ret) {
+ dev_err(dev, "Interrupt request failed\n");
+ goto e_clkdis;
+ }
+
+ v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
+ state->sd.owner = THIS_MODULE;
+ snprintf(state->sd.name, sizeof(state->sd.name), "%s.%d",
+ CSIS_SUBDEV_NAME, state->index);
+ state->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ state->csis_fmt = &s5pcsis_formats[0];
+
+ state->format.code = s5pcsis_formats[0].code;
+ state->format.width = S5PCSIS_DEF_PIX_WIDTH;
+ state->format.height = S5PCSIS_DEF_PIX_HEIGHT;
+
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&state->sd.entity,
+ CSIS_PADS_NUM, state->pads, 0);
+ if (ret < 0)
+ goto e_clkdis;
+
+ /* This allows the host driver to retrieve the platform device id. */
+ v4l2_set_subdevdata(&state->sd, pdev);
+
+ /* .. and a pointer to the subdev. */
+ platform_set_drvdata(pdev, &state->sd);
+ memcpy(state->events, s5pcsis_events, sizeof(state->events));
+
+ pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = s5pcsis_pm_resume(dev, true);
+ if (ret < 0)
+ goto e_m_ent;
+ }
+
+ dev_info(&pdev->dev, "lanes: %d, hs_settle: %d, wclk: %d, freq: %u\n",
+ state->num_lanes, state->hs_settle, state->wclk_ext,
+ state->clk_frequency);
+ return 0;
+
+e_m_ent:
+ media_entity_cleanup(&state->sd.entity);
+e_clkdis:
+ clk_disable(state->clock[CSIS_CLK_MUX]);
+e_clkput:
+ s5pcsis_clk_put(state);
+ return ret;
+}
+
+static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csis_state *state = sd_to_csis_state(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
+ __func__, state->flags);
+
+ mutex_lock(&state->lock);
+ if (state->flags & ST_POWERED) {
+ s5pcsis_stop_stream(state);
+ ret = phy_power_off(state->phy);
+ if (ret)
+ goto unlock;
+ ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ goto unlock;
+ clk_disable(state->clock[CSIS_CLK_GATE]);
+ state->flags &= ~ST_POWERED;
+ if (!runtime)
+ state->flags |= ST_SUSPENDED;
+ }
+ unlock:
+ mutex_unlock(&state->lock);
+ return ret ? -EAGAIN : 0;
+}
+
+static int s5pcsis_pm_resume(struct device *dev, bool runtime)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csis_state *state = sd_to_csis_state(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
+ __func__, state->flags);
+
+ mutex_lock(&state->lock);
+ if (!runtime && !(state->flags & ST_SUSPENDED))
+ goto unlock;
+
+ if (!(state->flags & ST_POWERED)) {
+ ret = regulator_bulk_enable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ goto unlock;
+ ret = phy_power_on(state->phy);
+ if (!ret) {
+ state->flags |= ST_POWERED;
+ } else {
+ regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ goto unlock;
+ }
+ clk_enable(state->clock[CSIS_CLK_GATE]);
+ }
+ if (state->flags & ST_STREAMING)
+ s5pcsis_start_stream(state);
+
+ state->flags &= ~ST_SUSPENDED;
+ unlock:
+ mutex_unlock(&state->lock);
+ return ret ? -EAGAIN : 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int s5pcsis_suspend(struct device *dev)
+{
+ return s5pcsis_pm_suspend(dev, false);
+}
+
+static int s5pcsis_resume(struct device *dev)
+{
+ return s5pcsis_pm_resume(dev, false);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int s5pcsis_runtime_suspend(struct device *dev)
+{
+ return s5pcsis_pm_suspend(dev, true);
+}
+
+static int s5pcsis_runtime_resume(struct device *dev)
+{
+ return s5pcsis_pm_resume(dev, true);
+}
+#endif
+
+static int s5pcsis_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csis_state *state = sd_to_csis_state(sd);
+
+ pm_runtime_disable(&pdev->dev);
+ s5pcsis_pm_suspend(&pdev->dev, true);
+ clk_disable(state->clock[CSIS_CLK_MUX]);
+ pm_runtime_set_suspended(&pdev->dev);
+ s5pcsis_clk_put(state);
+
+ media_entity_cleanup(&state->sd.entity);
+
+ return 0;
+}
+
+static const struct dev_pm_ops s5pcsis_pm_ops = {
+ SET_RUNTIME_PM_OPS(s5pcsis_runtime_suspend, s5pcsis_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(s5pcsis_suspend, s5pcsis_resume)
+};
+
+static const struct csis_drvdata exynos4_csis_drvdata = {
+ .interrupt_mask = S5PCSIS_INTMSK_EXYNOS4_EN_ALL,
+};
+
+static const struct csis_drvdata exynos5_csis_drvdata = {
+ .interrupt_mask = S5PCSIS_INTMSK_EXYNOS5_EN_ALL,
+};
+
+static const struct of_device_id s5pcsis_of_match[] = {
+ {
+ .compatible = "samsung,s5pv210-csis",
+ .data = &exynos4_csis_drvdata,
+ }, {
+ .compatible = "samsung,exynos4210-csis",
+ .data = &exynos4_csis_drvdata,
+ }, {
+ .compatible = "samsung,exynos5250-csis",
+ .data = &exynos5_csis_drvdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s5pcsis_of_match);
+
+static struct platform_driver s5pcsis_driver = {
+ .probe = s5pcsis_probe,
+ .remove = s5pcsis_remove,
+ .driver = {
+ .of_match_table = s5pcsis_of_match,
+ .name = CSIS_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &s5pcsis_pm_ops,
+ },
+};
+
+module_platform_driver(s5pcsis_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI-CSI2 receiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.h b/drivers/media/platform/exynos4-is/mipi-csis.h
new file mode 100644
index 00000000000..28c11c4085d
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/mipi-csis.h
@@ -0,0 +1,26 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef S5P_MIPI_CSIS_H_
+#define S5P_MIPI_CSIS_H_
+
+#define CSIS_DRIVER_NAME "s5p-mipi-csis"
+#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME
+#define CSIS_MAX_ENTITIES 2
+#define CSIS0_MAX_LANES 4
+#define CSIS1_MAX_LANES 2
+
+#define CSIS_PAD_SINK 0
+#define CSIS_PAD_SOURCE 1
+#define CSIS_PADS_NUM 2
+
+#define S5PCSIS_DEF_PIX_WIDTH 640
+#define S5PCSIS_DEF_PIX_HEIGHT 480
+
+#endif
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
new file mode 100644
index 00000000000..d5dc198502e
--- /dev/null
+++ b/drivers/media/platform/fsl-viu.c
@@ -0,0 +1,1693 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Freescale VIU video driver
+ *
+ * Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ * Porting to 2.6.35 by DENX Software Engineering,
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-dma-contig.h>
+
+#define DRV_NAME "fsl_viu"
+#define VIU_VERSION "0.5.1"
+
+#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+
+#define VIU_VID_MEM_LIMIT 4 /* Video memory limit, in MB */
+
+/* 7-bit I2C address of the video decoder chip (0x4A in 8-bit notation) */
+#define VIU_VIDEO_DECODER_ADDR 0x25
+
+/* supported controls */
+static struct v4l2_queryctrl viu_qctrl[] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 127,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 0x10,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 127,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -128,
+ .maximum = 127,
+ .step = 0x1,
+ .default_value = 0,
+ .flags = 0,
+ }
+};
+
+static int qctl_regs[ARRAY_SIZE(viu_qctrl)];
+
+static int info_level;
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (level <= info_level) \
+ printk(KERN_DEBUG "viu: " fmt , ## arg); \
+ } while (0)
+
+/*
+ * Basic structures
+ */
+struct viu_fmt {
+ char name[32];
+ u32 fourcc; /* v4l2 format id */
+ u32 pixelformat;
+ int depth;
+};
+
+static struct viu_fmt formats[] = {
+ {
+ .name = "RGB-16 (5/B-6/G-5/R)",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ }, {
+ .name = "RGB-32 (A-R-G-B)",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ }
+};
+
+struct viu_dev;
+struct viu_buf;
+
+/* buffer for one video frame */
+struct viu_buf {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+ struct viu_fmt *fmt;
+};
+
+struct viu_dmaqueue {
+ struct viu_dev *dev;
+ struct list_head active;
+ struct list_head queued;
+ struct timer_list timeout;
+};
+
+struct viu_status {
+ u32 field_irq;
+ u32 vsync_irq;
+ u32 hsync_irq;
+ u32 vstart_irq;
+ u32 dma_end_irq;
+ u32 error_irq;
+};
+
+struct viu_reg {
+ u32 status_cfg;
+ u32 luminance;
+ u32 chroma_r;
+ u32 chroma_g;
+ u32 chroma_b;
+ u32 field_base_addr;
+ u32 dma_inc;
+ u32 picture_count;
+ u32 req_alarm;
+ u32 alpha;
+} __attribute__ ((packed));
+
+struct viu_dev {
+ struct v4l2_device v4l2_dev;
+ struct mutex lock;
+ spinlock_t slock;
+ int users;
+
+ struct device *dev;
+ /* various device info */
+ struct video_device *vdev;
+ struct viu_dmaqueue vidq;
+ enum v4l2_field capfield;
+ int field;
+ int first;
+ int dma_done;
+
+ /* Hardware register area */
+ struct viu_reg *vr;
+
+ /* Interrupt vector */
+ int irq;
+ struct viu_status irqs;
+
+ /* video overlay */
+ struct v4l2_framebuffer ovbuf;
+ struct viu_fmt *ovfmt;
+ unsigned int ovenable;
+ enum v4l2_field ovfield;
+
+ /* crop */
+ struct v4l2_rect crop_current;
+
+ /* clock pointer */
+ struct clk *clk;
+
+ /* decoder */
+ struct v4l2_subdev *decoder;
+
+ v4l2_std_id std;
+};
+
+struct viu_fh {
+ struct viu_dev *dev;
+
+ /* video capture */
+ struct videobuf_queue vb_vidq;
+ spinlock_t vbq_lock; /* spinlock for the videobuf queue */
+
+ /* video overlay */
+ struct v4l2_window win;
+ struct v4l2_clip clips[1];
+
+ /* video capture */
+ struct viu_fmt *fmt;
+ int width, height, sizeimage;
+ enum v4l2_buf_type type;
+};
+
+static struct viu_reg reg_val;
+
+/*
+ * Macro definitions of VIU registers
+ */
+
+/* STATUS_CONFIG register */
+enum status_config {
+ SOFT_RST = 1 << 0,
+
+ ERR_MASK = 0x0f << 4, /* Error code mask */
+ ERR_NO = 0x00, /* No error */
+ ERR_DMA_V = 0x01 << 4, /* DMA in vertical active */
+ ERR_DMA_VB = 0x02 << 4, /* DMA in vertical blanking */
+ ERR_LINE_TOO_LONG = 0x04 << 4, /* Line too long */
+ ERR_TOO_MANG_LINES = 0x05 << 4, /* Too many lines in field */
+ ERR_LINE_TOO_SHORT = 0x06 << 4, /* Line too short */
+ ERR_NOT_ENOUGH_LINE = 0x07 << 4, /* Not enough lines in field */
+ ERR_FIFO_OVERFLOW = 0x08 << 4, /* FIFO overflow */
+ ERR_FIFO_UNDERFLOW = 0x09 << 4, /* FIFO underflow */
+ ERR_1bit_ECC = 0x0a << 4, /* One bit ECC error */
+ ERR_MORE_ECC = 0x0b << 4, /* Two/more bits ECC error */
+
+ INT_FIELD_EN = 0x01 << 8, /* Enable field interrupt */
+ INT_VSYNC_EN = 0x01 << 9, /* Enable vsync interrupt */
+ INT_HSYNC_EN = 0x01 << 10, /* Enable hsync interrupt */
+ INT_VSTART_EN = 0x01 << 11, /* Enable vstart interrupt */
+ INT_DMA_END_EN = 0x01 << 12, /* Enable DMA end interrupt */
+ INT_ERROR_EN = 0x01 << 13, /* Enable error interrupt */
+ INT_ECC_EN = 0x01 << 14, /* Enable ECC interrupt */
+
+ INT_FIELD_STATUS = 0x01 << 16, /* field interrupt status */
+ INT_VSYNC_STATUS = 0x01 << 17, /* vsync interrupt status */
+ INT_HSYNC_STATUS = 0x01 << 18, /* hsync interrupt status */
+ INT_VSTART_STATUS = 0x01 << 19, /* vstart interrupt status */
+ INT_DMA_END_STATUS = 0x01 << 20, /* DMA end interrupt status */
+ INT_ERROR_STATUS = 0x01 << 21, /* error interrupt status */
+
+ DMA_ACT = 0x01 << 27, /* Enable DMA transfer */
+ FIELD_NO = 0x01 << 28, /* Field number */
+ DITHER_ON = 0x01 << 29, /* Dithering is on */
+ ROUND_ON = 0x01 << 30, /* Round is on */
+ MODE_32BIT = 0x01 << 31, /* 1: data in RGBA8888, 0: data in RGB565 */
+};
+
+#define norm_maxw() 720
+#define norm_maxh() 576
+
+#define INT_ALL_STATUS (INT_FIELD_STATUS | INT_VSYNC_STATUS | \
+ INT_HSYNC_STATUS | INT_VSTART_STATUS | \
+ INT_DMA_END_STATUS | INT_ERROR_STATUS)
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static irqreturn_t viu_intr(int irq, void *dev_id);
+
+struct viu_fmt *format_by_fourcc(int fourcc)
+{
+ int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].pixelformat == fourcc)
+ return formats + i;
+ }
+
+ dprintk(0, "unknown pixelformat:'%4.4s'\n", (char *)&fourcc);
+ return NULL;
+}
+
+void viu_start_dma(struct viu_dev *dev)
+{
+ struct viu_reg *vr = dev->vr;
+
+ dev->field = 0;
+
+ /* Enable DMA operation */
+ out_be32(&vr->status_cfg, SOFT_RST);
+ out_be32(&vr->status_cfg, INT_FIELD_EN);
+}
+
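+/*
+ * Stop capture DMA: mask all interrupt/enable bits, acknowledge any
+ * pending status, and if a transfer is still active poll briefly for
+ * DMA completion, falling back to a soft reset on timeout.
+ */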
+void viu_stop_dma(struct viu_dev *dev)
+{
+ struct viu_reg *vr = dev->vr;
+ int cnt = 100;
+ u32 status_cfg;
+
+ out_be32(&vr->status_cfg, 0);
+
+ /* Clear pending interrupts */
+ status_cfg = in_be32(&vr->status_cfg);
+ if (status_cfg & 0x3f0000)
+ out_be32(&vr->status_cfg, status_cfg & 0x3f0000);
+
+ if (status_cfg & DMA_ACT) {
+ do {
+ status_cfg = in_be32(&vr->status_cfg);
+ if (status_cfg & INT_DMA_END_STATUS)
+ break;
+ } while (cnt--);
+
+ if (cnt < 0) {
+ /* timed out, issue soft reset */
+ out_be32(&vr->status_cfg, SOFT_RST);
+ out_be32(&vr->status_cfg, 0);
+ } else {
+ /* clear DMA_END and other pending irqs */
+ out_be32(&vr->status_cfg, status_cfg & 0x3f0000);
+ }
+ }
+
+ dev->field = 0;
+}
+
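+/*
+ * Timeout/error recovery: buffers still on the active list are returned
+ * to userspace with VIDEOBUF_ERROR; otherwise queued buffers of matching
+ * geometry and format are promoted to the active list and DMA is
+ * restarted for the first of them.
+ */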
+static int restart_video_queue(struct viu_dmaqueue *vidq)
+{
+ struct viu_buf *buf, *prev;
+
+ dprintk(1, "%s vidq=0x%08lx\n", __func__, (unsigned long)vidq);
+ if (!list_empty(&vidq->active)) {
+ buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
+ buf, buf->vb.i);
+
+ viu_stop_dma(vidq->dev);
+
+ /* cancel all outstanding capture requests */
+ list_for_each_entry_safe(buf, prev, &vidq->active, vb.queue) {
+ list_del(&buf->vb.queue);
+ buf->vb.state = VIDEOBUF_ERROR;
+ wake_up(&buf->vb.done);
+ }
+ mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+ return 0;
+ }
+
+ prev = NULL;
+ for (;;) {
+ if (list_empty(&vidq->queued))
+ return 0;
+ buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
+ if (prev == NULL) {
+ list_move_tail(&buf->vb.queue, &vidq->active);
+
+ dprintk(1, "Restarting video dma\n");
+ viu_stop_dma(vidq->dev);
+ viu_start_dma(vidq->dev);
+
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+ dprintk(2, "[%p/%d] restart_queue - first active\n",
+ buf, buf->vb.i);
+
+ } else if (prev->vb.width == buf->vb.width &&
+ prev->vb.height == buf->vb.height &&
+ prev->fmt == buf->fmt) {
+ list_move_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ dprintk(2, "[%p/%d] restart_queue - move to active\n",
+ buf, buf->vb.i);
+ } else {
+ return 0;
+ }
+ prev = buf;
+ }
+}
+
+static void viu_vid_timeout(unsigned long data)
+{
+ struct viu_dev *dev = (struct viu_dev *)data;
+ struct viu_buf *buf;
+ struct viu_dmaqueue *vidq = &dev->vidq;
+
+ while (!list_empty(&vidq->active)) {
+ buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
+ list_del(&buf->vb.queue);
+ buf->vb.state = VIDEOBUF_ERROR;
+ wake_up(&buf->vb.done);
+ dprintk(1, "viu/0: [%p/%d] timeout\n", buf, buf->vb.i);
+ }
+
+ restart_video_queue(vidq);
+}
+
+/*
+ * Videobuf operations
+ */
+static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct viu_fh *fh = vq->priv_data;
+
+ *size = fh->width * fh->height * fh->fmt->depth >> 3;
+ if (*count == 0)
+ *count = 32;
+
+ while (*size * *count > VIU_VID_MEM_LIMIT * 1024 * 1024)
+ (*count)--;
+
+ dprintk(1, "%s, count=%d, size=%d\n", __func__, *count, *size);
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct viu_buf *buf)
+{
+ struct videobuf_buffer *vb = &buf->vb;
+ void *vaddr = NULL;
+
+ BUG_ON(in_interrupt());
+
+ videobuf_waiton(vq, &buf->vb, 0, 0);
+
+ if (vq->int_ops && vq->int_ops->vaddr)
+ vaddr = vq->int_ops->vaddr(vb);
+
+ if (vaddr)
+ videobuf_dma_contig_free(vq, &buf->vb);
+
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
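+/*
+ * Program the capture DMA for one buffer: set the field base address,
+ * pick 16- or 32-bit mode and the per-line DMA increment from the pixel
+ * depth, and load picture_count with (height / 2, width) since the
+ * hardware transfers one field at a time.  For single-field formats the
+ * line increment is cleared.  The buffer timeout is (re)armed here too.
+ */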
+inline int buffer_activate(struct viu_dev *dev, struct viu_buf *buf)
+{
+ struct viu_reg *vr = dev->vr;
+ int bpp;
+
+ /* setup the DMA base address */
+ reg_val.field_base_addr = videobuf_to_dma_contig(&buf->vb);
+
+ dprintk(1, "buffer_activate [%p/%d]: dma addr 0x%lx\n",
+ buf, buf->vb.i, (unsigned long)reg_val.field_base_addr);
+
+ /* interlace is on by default, set horizontal DMA increment */
+ reg_val.status_cfg = 0;
+ bpp = buf->fmt->depth >> 3;
+ switch (bpp) {
+ case 2:
+ reg_val.status_cfg &= ~MODE_32BIT;
+ reg_val.dma_inc = buf->vb.width * 2;
+ break;
+ case 4:
+ reg_val.status_cfg |= MODE_32BIT;
+ reg_val.dma_inc = buf->vb.width * 4;
+ break;
+ default:
+ dprintk(0, "doesn't support color depth(%d)\n",
+ bpp * 8);
+ return -EINVAL;
+ }
+
+ /* setup picture_count register */
+ reg_val.picture_count = (buf->vb.height / 2) << 16 |
+ buf->vb.width;
+
+ reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
+
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ dev->capfield = buf->vb.field;
+
+ /* reset dma increment if needed */
+ if (!V4L2_FIELD_HAS_BOTH(buf->vb.field))
+ reg_val.dma_inc = 0;
+
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->picture_count, reg_val.picture_count);
+ out_be32(&vr->field_base_addr, reg_val.field_base_addr);
+ mod_timer(&dev->vidq.timeout, jiffies + BUFFER_TIMEOUT);
+ return 0;
+}
+
+static int buffer_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct viu_fh *fh = vq->priv_data;
+ struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
+ int rc;
+
+ BUG_ON(fh->fmt == NULL);
+
+ if (fh->width < 48 || fh->width > norm_maxw() ||
+ fh->height < 32 || fh->height > norm_maxh())
+ return -EINVAL;
+ buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
+ if (buf->vb.baddr != 0 && buf->vb.bsize < buf->vb.size)
+ return -EINVAL;
+
+ if (buf->fmt != fh->fmt ||
+ buf->vb.width != fh->width ||
+ buf->vb.height != fh->height ||
+ buf->vb.field != field) {
+ buf->fmt = fh->fmt;
+ buf->vb.width = fh->width;
+ buf->vb.height = fh->height;
+ buf->vb.field = field;
+ }
+
+ if (buf->vb.state == VIDEOBUF_NEEDS_INIT) {
+ rc = videobuf_iolock(vq, &buf->vb, NULL);
+ if (rc != 0)
+ goto fail;
+
+ buf->vb.width = fh->width;
+ buf->vb.height = fh->height;
+ buf->vb.field = field;
+ buf->fmt = fh->fmt;
+ }
+
+ buf->vb.state = VIDEOBUF_PREPARED;
+ return 0;
+
+fail:
+ free_buffer(vq, buf);
+ return rc;
+}
+
+static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
+ struct viu_fh *fh = vq->priv_data;
+ struct viu_dev *dev = fh->dev;
+ struct viu_dmaqueue *vidq = &dev->vidq;
+ struct viu_buf *prev;
+
+ if (!list_empty(&vidq->queued)) {
+ dprintk(1, "adding vb queue=0x%08lx\n",
+ (unsigned long)&buf->vb.queue);
+ dprintk(1, "vidq pointer 0x%p, queued 0x%p\n",
+ vidq, &vidq->queued);
+ dprintk(1, "dev %p, queued: self %p, next %p, head %p\n",
+ dev, &vidq->queued, vidq->queued.next,
+ vidq->queued.prev);
+ list_add_tail(&buf->vb.queue, &vidq->queued);
+ buf->vb.state = VIDEOBUF_QUEUED;
+ dprintk(2, "[%p/%d] buffer_queue - append to queued\n",
+ buf, buf->vb.i);
+ } else if (list_empty(&vidq->active)) {
+ dprintk(1, "adding vb active=0x%08lx\n",
+ (unsigned long)&buf->vb.queue);
+ list_add_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+ dprintk(2, "[%p/%d] buffer_queue - first active\n",
+ buf, buf->vb.i);
+
+ buffer_activate(dev, buf);
+ } else {
+ dprintk(1, "adding vb queue2=0x%08lx\n",
+ (unsigned long)&buf->vb.queue);
+ prev = list_entry(vidq->active.prev, struct viu_buf, vb.queue);
+ if (prev->vb.width == buf->vb.width &&
+ prev->vb.height == buf->vb.height &&
+ prev->fmt == buf->fmt) {
+ list_add_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ dprintk(2, "[%p/%d] buffer_queue - append to active\n",
+ buf, buf->vb.i);
+ } else {
+ list_add_tail(&buf->vb.queue, &vidq->queued);
+ buf->vb.state = VIDEOBUF_QUEUED;
+ dprintk(2, "[%p/%d] buffer_queue - first queued\n",
+ buf, buf->vb.i);
+ }
+ }
+}
+
+static void buffer_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
+ struct viu_fh *fh = vq->priv_data;
+ struct viu_dev *dev = (struct viu_dev *)fh->dev;
+
+ viu_stop_dma(dev);
+ free_buffer(vq, buf);
+}
+
+static struct videobuf_queue_ops viu_video_qops = {
+ .buf_setup = buffer_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .buf_release = buffer_release,
+};
+
+/*
+ * IOCTL vidioc handling
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "viu");
+ strcpy(cap->card, "viu");
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_OVERLAY |
+ V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ int index = f->index;
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ strlcpy(f->description, formats[index].name, sizeof(f->description));
+ f->pixelformat = formats[index].fourcc;
+ return 0;
+}
+
+static int vidioc_g_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+
+ f->fmt.pix.width = fh->width;
+ f->fmt.pix.height = fh->height;
+ f->fmt.pix.field = fh->vb_vidq.field;
+ f->fmt.pix.pixelformat = fh->fmt->pixelformat;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = fh->sizeimage;
+ return 0;
+}
+
+static int vidioc_try_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fmt *fmt;
+ enum v4l2_field field;
+ unsigned int maxw, maxh;
+
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (!fmt) {
+ dprintk(1, "Fourcc format (0x%08x) invalid.",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ field = f->fmt.pix.field;
+
+ if (field == V4L2_FIELD_ANY) {
+ field = V4L2_FIELD_INTERLACED;
+ } else if (field != V4L2_FIELD_INTERLACED) {
+ dprintk(1, "Field type invalid.\n");
+ return -EINVAL;
+ }
+
+ maxw = norm_maxw();
+ maxh = norm_maxh();
+
+ f->fmt.pix.field = field;
+ if (f->fmt.pix.height < 32)
+ f->fmt.pix.height = 32;
+ if (f->fmt.pix.height > maxh)
+ f->fmt.pix.height = maxh;
+ if (f->fmt.pix.width < 48)
+ f->fmt.pix.width = 48;
+ if (f->fmt.pix.width > maxw)
+ f->fmt.pix.width = maxw;
+ f->fmt.pix.width &= ~0x03;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) >> 3;
+
+ return 0;
+}
+
+static int vidioc_s_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+ int ret;
+
+ ret = vidioc_try_fmt_cap(file, fh, f);
+ if (ret < 0)
+ return ret;
+
+ fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ fh->width = f->fmt.pix.width;
+ fh->height = f->fmt.pix.height;
+ fh->sizeimage = f->fmt.pix.sizeimage;
+ fh->vb_vidq.field = f->fmt.pix.field;
+ fh->type = f->type;
+ dprintk(1, "set to pixelformat '%4.6s'\n", (char *)&fh->fmt->name);
+ return 0;
+}
+
+static int vidioc_g_fmt_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+
+ f->fmt.win = fh->win;
+ return 0;
+}
+
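+/*
+ * Validate an overlay window against the configured framebuffer and the
+ * current crop rectangle: pick interlaced or single-field output when
+ * the caller leaves the field as ANY, and clamp the window size to the
+ * available area.
+ */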
+static int verify_preview(struct viu_dev *dev, struct v4l2_window *win)
+{
+ enum v4l2_field field;
+ int maxw, maxh;
+
+ if (dev->ovbuf.base == NULL)
+ return -EINVAL;
+ if (dev->ovfmt == NULL)
+ return -EINVAL;
+ if (win->w.width < 48 || win->w.height < 32)
+ return -EINVAL;
+
+ field = win->field;
+ maxw = dev->crop_current.width;
+ maxh = dev->crop_current.height;
+
+ if (field == V4L2_FIELD_ANY) {
+ field = (win->w.height > maxh/2)
+ ? V4L2_FIELD_INTERLACED
+ : V4L2_FIELD_TOP;
+ }
+ switch (field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ maxh = maxh / 2;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ win->field = field;
+ if (win->w.width > maxw)
+ win->w.width = maxw;
+ if (win->w.height > maxh)
+ win->w.height = maxh;
+ return 0;
+}
+
+inline void viu_activate_overlay(struct viu_reg *viu_reg)
+{
+ struct viu_reg *vr = viu_reg;
+
+ out_be32(&vr->field_base_addr, reg_val.field_base_addr);
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->picture_count, reg_val.picture_count);
+}
+
+static int viu_setup_preview(struct viu_dev *dev, struct viu_fh *fh)
+{
+ int bpp;
+
+ dprintk(1, "%s %dx%d %s\n", __func__,
+ fh->win.w.width, fh->win.w.height, dev->ovfmt->name);
+
+ reg_val.status_cfg = 0;
+
+ /* setup window */
+ reg_val.picture_count = (fh->win.w.height / 2) << 16 |
+ fh->win.w.width;
+
+ /* setup color depth and dma increment */
+ bpp = dev->ovfmt->depth / 8;
+ switch (bpp) {
+ case 2:
+ reg_val.status_cfg &= ~MODE_32BIT;
+ reg_val.dma_inc = fh->win.w.width * 2;
+ break;
+ case 4:
+ reg_val.status_cfg |= MODE_32BIT;
+ reg_val.dma_inc = fh->win.w.width * 4;
+ break;
+ default:
+ dprintk(0, "device doesn't support color depth(%d)\n",
+ bpp * 8);
+ return -EINVAL;
+ }
+
+ dev->ovfield = fh->win.field;
+ if (!V4L2_FIELD_HAS_BOTH(dev->ovfield))
+ reg_val.dma_inc = 0;
+
+ reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
+
+ /* setup the base address of the overlay buffer */
+ reg_val.field_base_addr = (u32)dev->ovbuf.base;
+
+ return 0;
+}
+
+static int vidioc_s_fmt_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = (struct viu_dev *)fh->dev;
+ unsigned long flags;
+ int err;
+
+ err = verify_preview(dev, &f->fmt.win);
+ if (err)
+ return err;
+
+ fh->win = f->fmt.win;
+
+ spin_lock_irqsave(&dev->slock, flags);
+ viu_setup_preview(dev, fh);
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return 0;
+}
+
+static int vidioc_try_fmt_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return 0;
+}
+
+static int vidioc_overlay(struct file *file, void *priv, unsigned int on)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = (struct viu_dev *)fh->dev;
+ unsigned long flags;
+
+ if (on) {
+ spin_lock_irqsave(&dev->slock, flags);
+ viu_activate_overlay(dev->vr);
+ dev->ovenable = 1;
+
+ /* start dma */
+ viu_start_dma(dev);
+ spin_unlock_irqrestore(&dev->slock, flags);
+ } else {
+ viu_stop_dma(dev);
+ dev->ovenable = 0;
+ }
+
+ return 0;
+}
+
+int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *arg)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = fh->dev;
+ struct v4l2_framebuffer *fb = arg;
+
+ *fb = dev->ovbuf;
+ fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
+ return 0;
+}
+
+int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *arg)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = fh->dev;
+ const struct v4l2_framebuffer *fb = arg;
+ struct viu_fmt *fmt;
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /* check args */
+ fmt = format_by_fourcc(fb->fmt.pixelformat);
+ if (fmt == NULL)
+ return -EINVAL;
+
+ /* ok, accept it */
+ dev->ovbuf = *fb;
+ dev->ovfmt = fmt;
+ if (dev->ovbuf.fmt.bytesperline == 0) {
+ dev->ovbuf.fmt.bytesperline =
+ dev->ovbuf.fmt.width * fmt->depth / 8;
+ }
+ return 0;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_reqbufs(&fh->vb_vidq, p);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_querybuf(&fh->vb_vidq, p);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_qbuf(&fh->vb_vidq, p);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_dqbuf(&fh->vb_vidq, p,
+ file->f_flags & O_NONBLOCK);
+}
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (fh->type != i)
+ return -EINVAL;
+
+ if (dev->ovenable)
+ dev->ovenable = 0;
+
+ viu_start_dma(fh->dev);
+
+ return videobuf_streamon(&fh->vb_vidq);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct viu_fh *fh = priv;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (fh->type != i)
+ return -EINVAL;
+
+ viu_stop_dma(fh->dev);
+
+ return videobuf_streamoff(&fh->vb_vidq);
+}
+
+#define decoder_call(viu, o, f, args...) \
+ v4l2_subdev_call(viu->decoder, o, f, ##args)
+
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct viu_fh *fh = priv;
+
+ decoder_call(fh->dev, video, querystd, std_id);
+ return 0;
+}
+
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct viu_fh *fh = priv;
+
+ fh->dev->std = id;
+ decoder_call(fh->dev, video, s_std, id);
+ return 0;
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct viu_fh *fh = priv;
+
+ *std_id = fh->dev->std;
+ return 0;
+}
+
+/* only one input in this driver */
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct viu_fh *fh = priv;
+
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = fh->dev->vdev->tvnorms;
+ strcpy(inp->name, "Camera");
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct viu_fh *fh = priv;
+
+ if (i > 0)
+ return -EINVAL;
+
+ decoder_call(fh->dev, video, s_routing, i, 0, 0);
+ return 0;
+}
+
+/* Controls */
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
+ if (qc->id && qc->id == viu_qctrl[i].id) {
+ memcpy(qc, &(viu_qctrl[i]), sizeof(*qc));
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
+ if (ctrl->id == viu_qctrl[i].id) {
+ ctrl->value = qctl_regs[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
+ if (ctrl->id == viu_qctrl[i].id) {
+ if (ctrl->value < viu_qctrl[i].minimum
+ || ctrl->value > viu_qctrl[i].maximum)
+ return -ERANGE;
+ qctl_regs[i] = ctrl->value;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+inline void viu_activate_next_buf(struct viu_dev *dev,
+ struct viu_dmaqueue *viuq)
+{
+ struct viu_dmaqueue *vidq = viuq;
+ struct viu_buf *buf;
+
+ /* launch another DMA operation for an active/queued buffer */
+ if (!list_empty(&vidq->active)) {
+ buf = list_entry(vidq->active.next, struct viu_buf,
+ vb.queue);
+ dprintk(1, "start another queued buffer: 0x%p\n", buf);
+ buffer_activate(dev, buf);
+ } else if (!list_empty(&vidq->queued)) {
+ buf = list_entry(vidq->queued.next, struct viu_buf,
+ vb.queue);
+ list_del(&buf->vb.queue);
+
+ dprintk(1, "start another queued buffer: 0x%p\n", buf);
+ list_add_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ buffer_activate(dev, buf);
+ }
+}
+
+inline void viu_default_settings(struct viu_reg *viu_reg)
+{
+ struct viu_reg *vr = viu_reg;
+
+ out_be32(&vr->luminance, 0x9512A254);
+ out_be32(&vr->chroma_r, 0x03310000);
+ out_be32(&vr->chroma_g, 0x06600F38);
+ out_be32(&vr->chroma_b, 0x00000409);
+ out_be32(&vr->alpha, 0x000000ff);
+ out_be32(&vr->req_alarm, 0x00000090);
+ dprintk(1, "status reg: 0x%08x, field base: 0x%08x\n",
+ in_be32(&vr->status_cfg), in_be32(&vr->field_base_addr));
+}
+
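+/*
+ * Overlay interrupt path: once a field DMA has completed, point the
+ * hardware at the next field (the bottom field is offset by one line
+ * increment, selected via FIELD_NO) and re-arm DMA by writing back the
+ * acknowledged status together with the cached configuration bits.
+ */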
+static void viu_overlay_intr(struct viu_dev *dev, u32 status)
+{
+ struct viu_reg *vr = dev->vr;
+
+ if (status & INT_DMA_END_STATUS)
+ dev->dma_done = 1;
+
+ if (status & INT_FIELD_STATUS) {
+ if (dev->dma_done) {
+ u32 addr = reg_val.field_base_addr;
+
+ dev->dma_done = 0;
+ if (status & FIELD_NO)
+ addr += reg_val.dma_inc;
+
+ out_be32(&vr->field_base_addr, addr);
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) |
+ (status & INT_ALL_STATUS) |
+ reg_val.status_cfg);
+ } else if (status & INT_VSYNC_STATUS) {
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) |
+ (status & INT_ALL_STATUS) |
+ reg_val.status_cfg);
+ }
+ }
+}
+
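+/*
+ * Capture interrupt path: dev->field counts the fields whose DMA has
+ * completed for the current frame.  On each field interrupt the base
+ * address for the next field is programmed (the bottom field offset by
+ * one line increment), and once both fields of an interlaced frame are
+ * done the buffer is marked VIDEOBUF_DONE and the next one activated.
+ */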
+static void viu_capture_intr(struct viu_dev *dev, u32 status)
+{
+ struct viu_dmaqueue *vidq = &dev->vidq;
+ struct viu_reg *vr = dev->vr;
+ struct viu_buf *buf;
+ int field_num;
+ int need_two;
+ int dma_done = 0;
+
+ field_num = status & FIELD_NO;
+ need_two = V4L2_FIELD_HAS_BOTH(dev->capfield);
+
+ if (status & INT_DMA_END_STATUS) {
+ dma_done = 1;
+ if (((field_num == 0) && (dev->field == 0)) ||
+ (field_num && (dev->field == 1)))
+ dev->field++;
+ }
+
+ if (status & INT_FIELD_STATUS) {
+ dprintk(1, "irq: field %d, done %d\n",
+ !!field_num, dma_done);
+ if (unlikely(dev->first)) {
+ if (field_num == 0) {
+ dev->first = 0;
+ dprintk(1, "activate first buf\n");
+ viu_activate_next_buf(dev, vidq);
+ } else
+ dprintk(1, "wait field 0\n");
+ return;
+ }
+
+ /* setup buffer address for next dma operation */
+ if (!list_empty(&vidq->active)) {
+ u32 addr = reg_val.field_base_addr;
+
+ if (field_num && need_two) {
+ addr += reg_val.dma_inc;
+ dprintk(1, "field 1, 0x%lx, dev field %d\n",
+ (unsigned long)addr, dev->field);
+ }
+ out_be32(&vr->field_base_addr, addr);
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) |
+ (status & INT_ALL_STATUS) |
+ reg_val.status_cfg);
+ return;
+ }
+ }
+
+ if (dma_done && field_num && (dev->field == 2)) {
+ dev->field = 0;
+ buf = list_entry(vidq->active.next,
+ struct viu_buf, vb.queue);
+ dprintk(1, "viu/0: [%p/%d] 0x%lx/0x%lx: dma complete\n",
+ buf, buf->vb.i,
+ (unsigned long)videobuf_to_dma_contig(&buf->vb),
+ (unsigned long)in_be32(&vr->field_base_addr));
+
+ if (waitqueue_active(&buf->vb.done)) {
+ list_del(&buf->vb.queue);
+ v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ wake_up(&buf->vb.done);
+ }
+ /* activate next dma buffer */
+ viu_activate_next_buf(dev, vidq);
+ }
+}
+
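+/*
+ * Top-level interrupt handler: account the individual interrupt sources
+ * for debugging, acknowledge all pending status bits by writing them
+ * back, then dispatch to the overlay or capture handler depending on
+ * the current mode.
+ */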
+static irqreturn_t viu_intr(int irq, void *dev_id)
+{
+ struct viu_dev *dev = (struct viu_dev *)dev_id;
+ struct viu_reg *vr = dev->vr;
+ u32 status;
+ u32 error;
+
+ status = in_be32(&vr->status_cfg);
+
+ if (status & INT_ERROR_STATUS) {
+ dev->irqs.error_irq++;
+ error = status & ERR_MASK;
+ if (error)
+ dprintk(1, "Err: error(%d), times:%d!\n",
+ error >> 4, dev->irqs.error_irq);
+ /* Clear interrupt error bit and error flags */
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) | INT_ERROR_STATUS);
+ }
+
+ if (status & INT_DMA_END_STATUS) {
+ dev->irqs.dma_end_irq++;
+ dev->dma_done = 1;
+ dprintk(2, "VIU DMA end interrupt times: %d\n",
+ dev->irqs.dma_end_irq);
+ }
+
+ if (status & INT_HSYNC_STATUS)
+ dev->irqs.hsync_irq++;
+
+ if (status & INT_FIELD_STATUS) {
+ dev->irqs.field_irq++;
+ dprintk(2, "VIU field interrupt times: %d\n",
+ dev->irqs.field_irq);
+ }
+
+ if (status & INT_VSTART_STATUS)
+ dev->irqs.vstart_irq++;
+
+ if (status & INT_VSYNC_STATUS) {
+ dev->irqs.vsync_irq++;
+ dprintk(2, "VIU vsync interrupt times: %d\n",
+ dev->irqs.vsync_irq);
+ }
+
+ /* clear all pending irqs */
+ status = in_be32(&vr->status_cfg);
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) | (status & INT_ALL_STATUS));
+
+ if (dev->ovenable) {
+ viu_overlay_intr(dev, status);
+ return IRQ_HANDLED;
+ }
+
+ /* Capture mode */
+ viu_capture_intr(dev, status);
+ return IRQ_HANDLED;
+}
+
+/*
+ * File operations for the device
+ */
+static int viu_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct viu_dev *dev = video_get_drvdata(vdev);
+ struct viu_fh *fh;
+ struct viu_reg *vr;
+ int minor = vdev->minor;
+ u32 status_cfg;
+ int i;
+
+ dprintk(1, "viu: open (minor=%d)\n", minor);
+
+ dev->users++;
+ if (dev->users > 1) {
+ dev->users--;
+ return -EBUSY;
+ }
+
+ vr = dev->vr;
+
+ dprintk(1, "open minor=%d type=%s users=%d\n", minor,
+ v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
+
+ if (mutex_lock_interruptible(&dev->lock)) {
+ dev->users--;
+ return -ERESTARTSYS;
+ }
+
+ /* allocate and initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh) {
+ dev->users--;
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ file->private_data = fh;
+ fh->dev = dev;
+
+ fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fh->fmt = format_by_fourcc(V4L2_PIX_FMT_RGB32);
+ fh->width = norm_maxw();
+ fh->height = norm_maxh();
+ dev->crop_current.width = fh->width;
+ dev->crop_current.height = fh->height;
+
+ /* Put all controls at a sane state */
+ for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++)
+ qctl_regs[i] = viu_qctrl[i].default_value;
+
+ dprintk(1, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n",
+ (unsigned long)fh, (unsigned long)dev,
+ (unsigned long)&dev->vidq);
+ dprintk(1, "Open: list_empty queued=%d\n",
+ list_empty(&dev->vidq.queued));
+ dprintk(1, "Open: list_empty active=%d\n",
+ list_empty(&dev->vidq.active));
+
+ viu_default_settings(vr);
+
+ status_cfg = in_be32(&vr->status_cfg);
+ out_be32(&vr->status_cfg,
+ status_cfg & ~(INT_VSYNC_EN | INT_HSYNC_EN |
+ INT_FIELD_EN | INT_VSTART_EN |
+ INT_DMA_END_EN | INT_ERROR_EN | INT_ECC_EN));
+
+ status_cfg = in_be32(&vr->status_cfg);
+ out_be32(&vr->status_cfg, status_cfg | INT_ALL_STATUS);
+
+ spin_lock_init(&fh->vbq_lock);
+ videobuf_queue_dma_contig_init(&fh->vb_vidq, &viu_video_qops,
+ dev->dev, &fh->vbq_lock,
+ fh->type, V4L2_FIELD_INTERLACED,
+ sizeof(struct viu_buf), fh,
+ &fh->dev->lock);
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+static ssize_t viu_read(struct file *file, char __user *data, size_t count,
+ loff_t *ppos)
+{
+ struct viu_fh *fh = file->private_data;
+ struct viu_dev *dev = fh->dev;
+ int ret = 0;
+
+ dprintk(2, "%s\n", __func__);
+ if (dev->ovenable)
+ dev->ovenable = 0;
+
+ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+ viu_start_dma(dev);
+ ret = videobuf_read_stream(&fh->vb_vidq, data, count,
+ ppos, 0, file->f_flags & O_NONBLOCK);
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+ return 0;
+}
+
+static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct viu_fh *fh = file->private_data;
+ struct videobuf_queue *q = &fh->vb_vidq;
+ struct viu_dev *dev = fh->dev;
+ unsigned int res;
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
+ return POLLERR;
+
+ mutex_lock(&dev->lock);
+ res = videobuf_poll_stream(file, q, wait);
+ mutex_unlock(&dev->lock);
+ return res;
+}
+
+static int viu_release(struct file *file)
+{
+ struct viu_fh *fh = file->private_data;
+ struct viu_dev *dev = fh->dev;
+ int minor = video_devdata(file)->minor;
+
+ mutex_lock(&dev->lock);
+ viu_stop_dma(dev);
+ videobuf_stop(&fh->vb_vidq);
+ videobuf_mmap_free(&fh->vb_vidq);
+ mutex_unlock(&dev->lock);
+
+ kfree(fh);
+
+ dev->users--;
+ dprintk(1, "close (minor=%d, users=%d)\n",
+ minor, dev->users);
+ return 0;
+}
+
+void viu_reset(struct viu_reg *reg)
+{
+ out_be32(&reg->status_cfg, 0);
+ out_be32(&reg->luminance, 0x9512a254);
+ out_be32(&reg->chroma_r, 0x03310000);
+ out_be32(&reg->chroma_g, 0x06600f38);
+ out_be32(&reg->chroma_b, 0x00000409);
+ out_be32(&reg->field_base_addr, 0);
+ out_be32(&reg->dma_inc, 0);
+ out_be32(&reg->picture_count, 0x01e002d0);
+ out_be32(&reg->req_alarm, 0x00000090);
+ out_be32(&reg->alpha, 0x000000ff);
+}
+
+static int viu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct viu_fh *fh = file->private_data;
+ struct viu_dev *dev = fh->dev;
+ int ret;
+
+ dprintk(1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+ ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
+ mutex_unlock(&dev->lock);
+
+ dprintk(1, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ (unsigned long)vma->vm_start,
+ (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
+ ret);
+
+ return ret;
+}
+
+static struct v4l2_file_operations viu_fops = {
+ .owner = THIS_MODULE,
+ .open = viu_open,
+ .release = viu_release,
+ .read = viu_read,
+ .poll = viu_poll,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .mmap = viu_mmap,
+};
+
+static const struct v4l2_ioctl_ops viu_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_cap,
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_overlay,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_overlay,
+ .vidioc_overlay = vidioc_overlay,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_querystd = vidioc_querystd,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static struct video_device viu_template = {
+ .name = "FSL viu",
+ .fops = &viu_fops,
+ .minor = -1,
+ .ioctl_ops = &viu_ioctl_ops,
+ .release = video_device_release,
+
+ .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL,
+};
+
+static int viu_of_probe(struct platform_device *op)
+{
+ struct viu_dev *viu_dev;
+ struct video_device *vdev;
+ struct resource r;
+ struct viu_reg __iomem *viu_regs;
+ struct i2c_adapter *ad;
+ int ret, viu_irq;
+ struct clk *clk;
+
+ ret = of_address_to_resource(op->dev.of_node, 0, &r);
+ if (ret) {
+ dev_err(&op->dev, "Can't parse device node resource\n");
+ return -ENODEV;
+ }
+
+ viu_irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ if (viu_irq == NO_IRQ) {
+ dev_err(&op->dev, "Error while mapping the irq\n");
+ return -EINVAL;
+ }
+
+ /* request mem region */
+ if (!devm_request_mem_region(&op->dev, r.start,
+ sizeof(struct viu_reg), DRV_NAME)) {
+ dev_err(&op->dev, "Error while requesting mem region\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* remap registers */
+ viu_regs = devm_ioremap(&op->dev, r.start, sizeof(struct viu_reg));
+ if (!viu_regs) {
+ dev_err(&op->dev, "Can't map register set\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Prepare our private structure */
+ viu_dev = devm_kzalloc(&op->dev, sizeof(struct viu_dev), GFP_ATOMIC);
+ if (!viu_dev) {
+ dev_err(&op->dev, "Can't allocate private structure\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ viu_dev->vr = viu_regs;
+ viu_dev->irq = viu_irq;
+ viu_dev->dev = &op->dev;
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&viu_dev->vidq.active);
+ INIT_LIST_HEAD(&viu_dev->vidq.queued);
+
+ snprintf(viu_dev->v4l2_dev.name,
+ sizeof(viu_dev->v4l2_dev.name), "%s", "VIU");
+ ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
+ goto err;
+ }
+
+ ad = i2c_get_adapter(0);
+ viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
+ "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
+
+ viu_dev->vidq.timeout.function = viu_vid_timeout;
+ viu_dev->vidq.timeout.data = (unsigned long)viu_dev;
+ init_timer(&viu_dev->vidq.timeout);
+ viu_dev->std = V4L2_STD_NTSC_M;
+ viu_dev->first = 1;
+
+ /* Allocate memory for video device */
+ vdev = video_device_alloc();
+ if (vdev == NULL) {
+ ret = -ENOMEM;
+ goto err_vdev;
+ }
+
+ memcpy(vdev, &viu_template, sizeof(viu_template));
+
+ vdev->v4l2_dev = &viu_dev->v4l2_dev;
+
+ viu_dev->vdev = vdev;
+
+ /* initialize locks */
+ mutex_init(&viu_dev->lock);
+ viu_dev->vdev->lock = &viu_dev->lock;
+ spin_lock_init(&viu_dev->slock);
+
+ video_set_drvdata(viu_dev->vdev, viu_dev);
+
+ mutex_lock(&viu_dev->lock);
+
+ ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ video_device_release(viu_dev->vdev);
+ goto err_vdev;
+ }
+
+ /* enable VIU clock */
+ clk = devm_clk_get(&op->dev, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(&op->dev, "failed to lookup the clock!\n");
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&op->dev, "failed to enable the clock!\n");
+ goto err_clk;
+ }
+ viu_dev->clk = clk;
+
+ /* reset VIU module */
+ viu_reset(viu_dev->vr);
+
+ /* install interrupt handler */
+ if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
+ dev_err(&op->dev, "Request VIU IRQ failed.\n");
+ ret = -ENODEV;
+ goto err_irq;
+ }
+
+ mutex_unlock(&viu_dev->lock);
+
+ dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
+ return ret;
+
+err_irq:
+ clk_disable_unprepare(viu_dev->clk);
+err_clk:
+ video_unregister_device(viu_dev->vdev);
+err_vdev:
+ mutex_unlock(&viu_dev->lock);
+ i2c_put_adapter(ad);
+ v4l2_device_unregister(&viu_dev->v4l2_dev);
+err:
+ irq_dispose_mapping(viu_irq);
+ return ret;
+}
+
+static int viu_of_remove(struct platform_device *op)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+ struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+ struct v4l2_subdev *sdev = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sdev);
+
+ free_irq(dev->irq, (void *)dev);
+ irq_dispose_mapping(dev->irq);
+
+ clk_disable_unprepare(dev->clk);
+
+ video_unregister_device(dev->vdev);
+ i2c_put_adapter(client->adapter);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int viu_suspend(struct platform_device *op, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+ struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+
+ clk_disable(dev->clk);
+ return 0;
+}
+
+static int viu_resume(struct platform_device *op)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+ struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+
+ clk_enable(dev->clk);
+ return 0;
+}
+#endif
+
+/*
+ * Initialization and module stuff
+ */
+static struct of_device_id mpc512x_viu_of_match[] = {
+ {
+ .compatible = "fsl,mpc5121-viu",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match);
+
+static struct platform_driver viu_of_platform_driver = {
+ .probe = viu_of_probe,
+ .remove = viu_of_remove,
+#ifdef CONFIG_PM
+ .suspend = viu_suspend,
+ .resume = viu_resume,
+#endif
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc512x_viu_of_match,
+ },
+};
+
+module_platform_driver(viu_of_platform_driver);
+
+MODULE_DESCRIPTION("Freescale Video-In(VIU)");
+MODULE_AUTHOR("Hongjun Chen");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VIU_VERSION);
diff --git a/drivers/media/platform/indycam.c b/drivers/media/platform/indycam.c
new file mode 100644
index 00000000000..f1d192bbcb4
--- /dev/null
+++ b/drivers/media/platform/indycam.c
@@ -0,0 +1,378 @@
+/*
+ * indycam.c - Silicon Graphics IndyCam digital camera driver
+ *
+ * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
+ * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/* IndyCam decodes stream of photons into digital image representation ;-) */
+#include <linux/videodev2.h>
+#include <linux/i2c.h>
+#include <media/v4l2-device.h>
+
+#include "indycam.h"
+
+#define INDYCAM_MODULE_VERSION "0.0.5"
+
+MODULE_DESCRIPTION("SGI IndyCam driver");
+MODULE_VERSION(INDYCAM_MODULE_VERSION);
+MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
+MODULE_LICENSE("GPL");
+
+
+// #define INDYCAM_DEBUG
+
+#ifdef INDYCAM_DEBUG
+#define dprintk(x...) printk("IndyCam: " x);
+#define indycam_regdump(client) indycam_regdump_debug(client)
+#else
+#define dprintk(x...)
+#define indycam_regdump(client)
+#endif
+
+struct indycam {
+ struct v4l2_subdev sd;
+ u8 version;
+};
+
+static inline struct indycam *to_indycam(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct indycam, sd);
+}
+
+static const u8 initseq[] = {
+ INDYCAM_CONTROL_AGCENA, /* INDYCAM_CONTROL */
+ INDYCAM_SHUTTER_60, /* INDYCAM_SHUTTER */
+ INDYCAM_GAIN_DEFAULT, /* INDYCAM_GAIN */
+ 0x00, /* INDYCAM_BRIGHTNESS (read-only) */
+ INDYCAM_RED_BALANCE_DEFAULT, /* INDYCAM_RED_BALANCE */
+ INDYCAM_BLUE_BALANCE_DEFAULT, /* INDYCAM_BLUE_BALANCE */
+ INDYCAM_RED_SATURATION_DEFAULT, /* INDYCAM_RED_SATURATION */
+ INDYCAM_BLUE_SATURATION_DEFAULT,/* INDYCAM_BLUE_SATURATION */
+};
+
+/* IndyCam register handling */
+
+static int indycam_read_reg(struct v4l2_subdev *sd, u8 reg, u8 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (reg == INDYCAM_REG_RESET) {
+ dprintk("indycam_read_reg(): "
+ "skipping write-only register %d\n", reg);
+ *value = 0;
+ return 0;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0) {
+ printk(KERN_ERR "IndyCam: indycam_read_reg(): read failed, "
+ "register = 0x%02x\n", reg);
+ return ret;
+ }
+
+ *value = (u8)ret;
+
+ return 0;
+}
+
+static int indycam_write_reg(struct v4l2_subdev *sd, u8 reg, u8 value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int err;
+
+ if (reg == INDYCAM_REG_BRIGHTNESS || reg == INDYCAM_REG_VERSION) {
+ dprintk("indycam_write_reg(): "
+ "skipping read-only register %d\n", reg);
+ return 0;
+ }
+
+ dprintk("Writing Reg %d = 0x%02x\n", reg, value);
+ err = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (err) {
+ printk(KERN_ERR "IndyCam: indycam_write_reg(): write failed, "
+ "register = 0x%02x, value = 0x%02x\n", reg, value);
+ }
+ return err;
+}
+
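+/*
+ * Write 'length' consecutive registers starting at 'reg', one SMBus byte
+ * write per register; used to load the init sequence at probe time.
+ */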
+static int indycam_write_block(struct v4l2_subdev *sd, u8 reg,
+ u8 length, u8 *data)
+{
+ int i, err;
+
+ for (i = 0; i < length; i++) {
+ err = indycam_write_reg(sd, reg + i, data[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Helper functions */
+
+#ifdef INDYCAM_DEBUG
+static void indycam_regdump_debug(struct v4l2_subdev *sd)
+{
+ int i;
+ u8 val;
+
+ for (i = 0; i < 9; i++) {
+ indycam_read_reg(sd, i, &val);
+ dprintk("Reg %d = 0x%02x\n", i, val);
+ }
+}
+#endif
+
+static int indycam_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct indycam *camera = to_indycam(sd);
+ u8 reg;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
+ if (ret)
+ return -EIO;
+ if (ctrl->id == V4L2_CID_AUTOGAIN)
+ ctrl->value = (reg & INDYCAM_CONTROL_AGCENA)
+ ? 1 : 0;
+ else
+ ctrl->value = (reg & INDYCAM_CONTROL_AWBCTL)
+ ? 1 : 0;
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = indycam_read_reg(sd, INDYCAM_REG_SHUTTER, &reg);
+ if (ret)
+ return -EIO;
+ ctrl->value = ((s32)reg == 0x00) ? 0xff : ((s32)reg - 1);
+ break;
+ case V4L2_CID_GAIN:
+ ret = indycam_read_reg(sd, INDYCAM_REG_GAIN, &reg);
+ if (ret)
+ return -EIO;
+ ctrl->value = (s32)reg;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ret = indycam_read_reg(sd, INDYCAM_REG_RED_BALANCE, &reg);
+ if (ret)
+ return -EIO;
+ ctrl->value = (s32)reg;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ret = indycam_read_reg(sd, INDYCAM_REG_BLUE_BALANCE, &reg);
+ if (ret)
+ return -EIO;
+ ctrl->value = (s32)reg;
+ break;
+ case INDYCAM_CONTROL_RED_SATURATION:
+ ret = indycam_read_reg(sd,
+ INDYCAM_REG_RED_SATURATION, &reg);
+ if (ret)
+ return -EIO;
+ ctrl->value = (s32)reg;
+ break;
+ case INDYCAM_CONTROL_BLUE_SATURATION:
+ ret = indycam_read_reg(sd,
+ INDYCAM_REG_BLUE_SATURATION, &reg);
+ if (ret)
+ return -EIO;
+ ctrl->value = (s32)reg;
+ break;
+ case V4L2_CID_GAMMA:
+ if (camera->version == CAMERA_VERSION_MOOSE) {
+ ret = indycam_read_reg(sd,
+ INDYCAM_REG_GAMMA, &reg);
+ if (ret)
+ return -EIO;
+ ctrl->value = (s32)reg;
+ } else {
+ ctrl->value = INDYCAM_GAMMA_DEFAULT;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int indycam_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct indycam *camera = to_indycam(sd);
+ u8 reg;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
+ if (ret)
+ break;
+
+ if (ctrl->id == V4L2_CID_AUTOGAIN) {
+ if (ctrl->value)
+ reg |= INDYCAM_CONTROL_AGCENA;
+ else
+ reg &= ~INDYCAM_CONTROL_AGCENA;
+ } else {
+ if (ctrl->value)
+ reg |= INDYCAM_CONTROL_AWBCTL;
+ else
+ reg &= ~INDYCAM_CONTROL_AWBCTL;
+ }
+
+ ret = indycam_write_reg(sd, INDYCAM_REG_CONTROL, reg);
+ break;
+ case V4L2_CID_EXPOSURE:
+ reg = (ctrl->value == 0xff) ? 0x00 : (ctrl->value + 1);
+ ret = indycam_write_reg(sd, INDYCAM_REG_SHUTTER, reg);
+ break;
+ case V4L2_CID_GAIN:
+ ret = indycam_write_reg(sd, INDYCAM_REG_GAIN, ctrl->value);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ret = indycam_write_reg(sd, INDYCAM_REG_RED_BALANCE,
+ ctrl->value);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_BALANCE,
+ ctrl->value);
+ break;
+ case INDYCAM_CONTROL_RED_SATURATION:
+ ret = indycam_write_reg(sd, INDYCAM_REG_RED_SATURATION,
+ ctrl->value);
+ break;
+ case INDYCAM_CONTROL_BLUE_SATURATION:
+ ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_SATURATION,
+ ctrl->value);
+ break;
+ case V4L2_CID_GAMMA:
+ if (camera->version == CAMERA_VERSION_MOOSE) {
+ ret = indycam_write_reg(sd, INDYCAM_REG_GAMMA,
+ ctrl->value);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* I2C-interface */
+
+/* ----------------------------------------------------------------------- */
+
+static const struct v4l2_subdev_core_ops indycam_core_ops = {
+ .g_ctrl = indycam_g_ctrl,
+ .s_ctrl = indycam_s_ctrl,
+};
+
+static const struct v4l2_subdev_ops indycam_ops = {
+ .core = &indycam_core_ops,
+};
+
+static int indycam_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err = 0;
+ struct indycam *camera;
+ struct v4l2_subdev *sd;
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ camera = kzalloc(sizeof(struct indycam), GFP_KERNEL);
+ if (!camera)
+ return -ENOMEM;
+
+ sd = &camera->sd;
+ v4l2_i2c_subdev_init(sd, client, &indycam_ops);
+
+ camera->version = i2c_smbus_read_byte_data(client,
+ INDYCAM_REG_VERSION);
+ if (camera->version != CAMERA_VERSION_INDY &&
+ camera->version != CAMERA_VERSION_MOOSE) {
+ kfree(camera);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "IndyCam v%d.%d detected\n",
+ INDYCAM_VERSION_MAJOR(camera->version),
+ INDYCAM_VERSION_MINOR(camera->version));
+
+ indycam_regdump(sd);
+
+ // initialize
+ err = indycam_write_block(sd, 0, sizeof(initseq), (u8 *)&initseq);
+ if (err) {
+ printk(KERN_ERR "IndyCam initialization failed\n");
+ kfree(camera);
+ return -EIO;
+ }
+
+ indycam_regdump(sd);
+
+ // white balance
+ err = indycam_write_reg(sd, INDYCAM_REG_CONTROL,
+ INDYCAM_CONTROL_AGCENA | INDYCAM_CONTROL_AWBCTL);
+ if (err) {
+ printk(KERN_ERR "IndyCam: White balancing camera failed\n");
+ kfree(camera);
+ return -EIO;
+ }
+
+ indycam_regdump(sd);
+
+ printk(KERN_INFO "IndyCam initialized\n");
+
+ return 0;
+}
+
+static int indycam_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_device_unregister_subdev(sd);
+ kfree(to_indycam(sd));
+ return 0;
+}
+
+static const struct i2c_device_id indycam_id[] = {
+ { "indycam", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, indycam_id);
+
+static struct i2c_driver indycam_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "indycam",
+ },
+ .probe = indycam_probe,
+ .remove = indycam_remove,
+ .id_table = indycam_id,
+};
+
+module_i2c_driver(indycam_driver);
diff --git a/drivers/media/platform/indycam.h b/drivers/media/platform/indycam.h
new file mode 100644
index 00000000000..881f21c474c
--- /dev/null
+++ b/drivers/media/platform/indycam.h
@@ -0,0 +1,93 @@
+/*
+ * indycam.h - Silicon Graphics IndyCam digital camera driver
+ *
+ * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
+ * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _INDYCAM_H_
+#define _INDYCAM_H_
+
+/* I2C address for the Guinness Camera */
+#define INDYCAM_ADDR 0x56
+
+/* Camera version */
+#define CAMERA_VERSION_INDY 0x10 /* v1.0 */
+#define CAMERA_VERSION_MOOSE 0x12 /* v1.2 */
+#define INDYCAM_VERSION_MAJOR(x) (((x) & 0xf0) >> 4)
+#define INDYCAM_VERSION_MINOR(x) ((x) & 0x0f)
+
+/* Register bus addresses */
+#define INDYCAM_REG_CONTROL 0x00
+#define INDYCAM_REG_SHUTTER 0x01
+#define INDYCAM_REG_GAIN 0x02
+#define INDYCAM_REG_BRIGHTNESS 0x03 /* read-only */
+#define INDYCAM_REG_RED_BALANCE 0x04
+#define INDYCAM_REG_BLUE_BALANCE 0x05
+#define INDYCAM_REG_RED_SATURATION 0x06
+#define INDYCAM_REG_BLUE_SATURATION 0x07
+#define INDYCAM_REG_GAMMA 0x08
+#define INDYCAM_REG_VERSION 0x0e /* read-only */
+#define INDYCAM_REG_RESET 0x0f /* write-only */
+
+#define INDYCAM_REG_LED 0x46
+#define INDYCAM_REG_ORIENTATION 0x47
+#define INDYCAM_REG_BUTTON 0x48
+
+/* Field definitions of registers */
+#define INDYCAM_CONTROL_AGCENA (1<<0) /* automatic gain control */
+#define INDYCAM_CONTROL_AWBCTL (1<<1) /* automatic white balance */
+ /* 2-3 are reserved */
+#define INDYCAM_CONTROL_EVNFLD (1<<4) /* read-only */
+
+#define INDYCAM_SHUTTER_10000 0x02 /* 1/10000 second */
+#define INDYCAM_SHUTTER_4000 0x04 /* 1/4000 second */
+#define INDYCAM_SHUTTER_2000 0x08 /* 1/2000 second */
+#define INDYCAM_SHUTTER_1000 0x10 /* 1/1000 second */
+#define INDYCAM_SHUTTER_500 0x20 /* 1/500 second */
+#define INDYCAM_SHUTTER_250 0x3f /* 1/250 second */
+#define INDYCAM_SHUTTER_125 0x7e /* 1/125 second */
+#define INDYCAM_SHUTTER_100 0x9e /* 1/100 second */
+#define INDYCAM_SHUTTER_60 0x00 /* 1/60 second */
+
+#define INDYCAM_LED_ACTIVE 0x10
+#define INDYCAM_LED_INACTIVE 0x30
+#define INDYCAM_ORIENTATION_BOTTOM_TO_TOP 0x40
+#define INDYCAM_BUTTON_RELEASED 0x10
+
+/* Values for controls */
+#define INDYCAM_SHUTTER_MIN 0x00
+#define INDYCAM_SHUTTER_MAX 0xff
+#define INDYCAM_GAIN_MIN 0x00
+#define INDYCAM_GAIN_MAX 0xff
+#define INDYCAM_RED_BALANCE_MIN 0x00
+#define INDYCAM_RED_BALANCE_MAX 0xff
+#define INDYCAM_BLUE_BALANCE_MIN 0x00
+#define INDYCAM_BLUE_BALANCE_MAX 0xff
+#define INDYCAM_RED_SATURATION_MIN 0x00
+#define INDYCAM_RED_SATURATION_MAX 0xff
+#define INDYCAM_BLUE_SATURATION_MIN 0x00
+#define INDYCAM_BLUE_SATURATION_MAX 0xff
+#define INDYCAM_GAMMA_MIN 0x00
+#define INDYCAM_GAMMA_MAX 0xff
+
+#define INDYCAM_AGC_DEFAULT 1
+#define INDYCAM_AWB_DEFAULT 0
+#define INDYCAM_SHUTTER_DEFAULT 0xff
+#define INDYCAM_GAIN_DEFAULT 0x80
+#define INDYCAM_RED_BALANCE_DEFAULT 0x18
+#define INDYCAM_BLUE_BALANCE_DEFAULT 0xa4
+#define INDYCAM_RED_SATURATION_DEFAULT 0x80
+#define INDYCAM_BLUE_SATURATION_DEFAULT 0xc0
+#define INDYCAM_GAMMA_DEFAULT 0x80
+
+/* Driver interface definitions */
+
+#define INDYCAM_CONTROL_RED_SATURATION (V4L2_CID_PRIVATE_BASE + 0)
+#define INDYCAM_CONTROL_BLUE_SATURATION (V4L2_CID_PRIVATE_BASE + 1)
+
+#endif
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
new file mode 100644
index 00000000000..c21d14fd61d
--- /dev/null
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -0,0 +1,1111 @@
+/*
+ * V4L2 deinterlacing support.
+ *
+ * Copyright (c) 2012 Vista Silicon S.L.
+ * Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-deinterlace"
+
+MODULE_DESCRIPTION("mem2mem device which supports deinterlacing using dmaengine");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.1");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-deinterlace"
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+struct deinterlace_fmt {
+ char *name;
+ u32 fourcc;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct deinterlace_fmt formats[] = {
+ {
+ .name = "YUV 4:2:0 Planar",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .name = "YUYV 4:2:2",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Per-queue, driver-specific private data */
+struct deinterlace_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ struct deinterlace_fmt *fmt;
+ enum v4l2_field field;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+enum {
+ YUV420_DMA_Y_ODD,
+ YUV420_DMA_Y_EVEN,
+ YUV420_DMA_U_ODD,
+ YUV420_DMA_U_EVEN,
+ YUV420_DMA_V_ODD,
+ YUV420_DMA_V_EVEN,
+ YUV420_DMA_Y_ODD_DOUBLING,
+ YUV420_DMA_U_ODD_DOUBLING,
+ YUV420_DMA_V_ODD_DOUBLING,
+ YUYV_DMA_ODD,
+ YUYV_DMA_EVEN,
+ YUYV_DMA_EVEN_DOUBLING,
+};
+
+/* Source and destination queue data */
+static struct deinterlace_q_data q_data[2];
+
+static struct deinterlace_q_data *get_q_data(enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+static struct deinterlace_fmt *find_format(struct v4l2_format *f)
+{
+ struct deinterlace_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if ((fmt->types & f->type) &&
+ (fmt->fourcc == f->fmt.pix.pixelformat))
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct deinterlace_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+
+ atomic_t busy;
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ struct dma_chan *dma_chan;
+
+ struct v4l2_m2m_dev *m2m_dev;
+ struct vb2_alloc_ctx *alloc_ctx;
+};
+
+struct deinterlace_ctx {
+ struct deinterlace_dev *dev;
+
+ /* Abort requested by m2m */
+ int aborting;
+ enum v4l2_colorspace colorspace;
+ dma_cookie_t cookie;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct dma_interleaved_template *xt;
+};
+
+/*
+ * mem2mem callbacks
+ */
+static int deinterlace_job_ready(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *pcdev = ctx->dev;
+
+ if ((v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0)
+ && (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0)
+ && (atomic_read(&ctx->dev->busy) == 0)) {
+ dprintk(pcdev, "Task ready\n");
+ return 1;
+ }
+
+ dprintk(pcdev, "Task not ready to run\n");
+
+ return 0;
+}
+
+static void deinterlace_job_abort(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *pcdev = ctx->dev;
+
+ ctx->aborting = 1;
+
+ dprintk(pcdev, "Aborting task\n");
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void deinterlace_lock(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *pcdev = ctx->dev;
+ mutex_lock(&pcdev->dev_mutex);
+}
+
+static void deinterlace_unlock(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *pcdev = ctx->dev;
+ mutex_unlock(&pcdev->dev_mutex);
+}
+
+static void dma_callback(void *data)
+{
+ struct deinterlace_ctx *curr_ctx = data;
+ struct deinterlace_dev *pcdev = curr_ctx->dev;
+ struct vb2_buffer *src_vb, *dst_vb;
+
+ atomic_set(&pcdev->busy, 0);
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+
+ dprintk(pcdev, "dma transfers completed.\n");
+}
+
+static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
+ int do_callback)
+{
+ struct deinterlace_q_data *s_q_data;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct deinterlace_dev *pcdev = ctx->dev;
+ struct dma_chan *chan = pcdev->dma_chan;
+ struct dma_device *dmadev = chan->device;
+ struct dma_async_tx_descriptor *tx;
+ unsigned int s_width, s_height;
+ unsigned int s_size;
+ dma_addr_t p_in, p_out;
+ enum dma_ctrl_flags flags;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ s_width = s_q_data->width;
+ s_height = s_q_data->height;
+ s_size = s_width * s_height;
+
+ p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&pcdev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return;
+ }
+
+ switch (op) {
+ case YUV420_DMA_Y_ODD:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width;
+ ctx->xt->sgl[0].icg = s_width;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out;
+ break;
+ case YUV420_DMA_Y_EVEN:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width;
+ ctx->xt->sgl[0].icg = s_width;
+ ctx->xt->src_start = p_in + s_size / 2;
+ ctx->xt->dst_start = p_out + s_width;
+ break;
+ case YUV420_DMA_U_ODD:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + s_size;
+ ctx->xt->dst_start = p_out + s_size;
+ break;
+ case YUV420_DMA_U_EVEN:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (9 * s_size) / 8;
+ ctx->xt->dst_start = p_out + s_size + s_width / 2;
+ break;
+ case YUV420_DMA_V_ODD:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (5 * s_size) / 4;
+ ctx->xt->dst_start = p_out + (5 * s_size) / 4;
+ break;
+ case YUV420_DMA_V_EVEN:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (11 * s_size) / 8;
+ ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2;
+ break;
+ case YUV420_DMA_Y_ODD_DOUBLING:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width;
+ ctx->xt->sgl[0].icg = s_width;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out + s_width;
+ break;
+ case YUV420_DMA_U_ODD_DOUBLING:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + s_size;
+ ctx->xt->dst_start = p_out + s_size + s_width / 2;
+ break;
+ case YUV420_DMA_V_ODD_DOUBLING:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (5 * s_size) / 4;
+ ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2;
+ break;
+ case YUYV_DMA_ODD:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width * 2;
+ ctx->xt->sgl[0].icg = s_width * 2;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out;
+ break;
+ case YUYV_DMA_EVEN:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width * 2;
+ ctx->xt->sgl[0].icg = s_width * 2;
+ ctx->xt->src_start = p_in + s_size;
+ ctx->xt->dst_start = p_out + s_width * 2;
+ break;
+ case YUYV_DMA_EVEN_DOUBLING:
+ default:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width * 2;
+ ctx->xt->sgl[0].icg = s_width * 2;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out + s_width * 2;
+ break;
+ }
+
+	/* Common parameters for all transfers */
+ ctx->xt->frame_size = 1;
+ ctx->xt->dir = DMA_MEM_TO_MEM;
+ ctx->xt->src_sgl = false;
+ ctx->xt->dst_sgl = true;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
+ if (tx == NULL) {
+ v4l2_warn(&pcdev->v4l2_dev, "DMA interleaved prep error\n");
+ return;
+ }
+
+ if (do_callback) {
+ tx->callback = dma_callback;
+ tx->callback_param = ctx;
+ }
+
+ ctx->cookie = dmaengine_submit(tx);
+ if (dma_submit_error(ctx->cookie)) {
+ v4l2_warn(&pcdev->v4l2_dev,
+ "DMA submit error %d with src=0x%x dst=0x%x len=0x%x\n",
+ ctx->cookie, (unsigned)p_in, (unsigned)p_out,
+ s_size * 3/2);
+ return;
+ }
+
+ dma_async_issue_pending(chan);
+}
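
For reference, the offsets used in the switch above follow directly from the planar YUV420 layout of a W x H frame stored as two sequential fields (V4L2_FIELD_SEQ_TB): the luma plane occupies s_size = W*H bytes, each chroma plane a quarter of that, and each field half of its plane. The common template parameters then describe the weave itself: numf chunks of sgl[0].size bytes are copied and, per the dmaengine interleaved-template semantics with dst_sgl set, the destination skips sgl[0].icg bytes after each chunk while the source stays contiguous, so consecutive field lines land on every other output line. Below is a minimal sketch of that arithmetic; it is illustrative only, and the struct and function names are invented, not part of the driver.

#include <stddef.h>

struct yuv420_field_offsets {
	size_t top_y, top_u, top_v;	/* start of each plane's top field */
	size_t bot_y, bot_u, bot_v;	/* start of each plane's bottom field */
};

static struct yuv420_field_offsets yuv420_seq_tb_offsets(size_t w, size_t h)
{
	size_t s_size = w * h;		/* luma plane size, as in the driver */
	struct yuv420_field_offsets o;

	o.top_y = 0;			/* YUV420_DMA_Y_ODD source */
	o.bot_y = s_size / 2;		/* YUV420_DMA_Y_EVEN source */
	o.top_u = s_size;		/* YUV420_DMA_U_ODD source */
	o.bot_u = (9 * s_size) / 8;	/* YUV420_DMA_U_EVEN source */
	o.top_v = (5 * s_size) / 4;	/* YUV420_DMA_V_ODD source */
	o.bot_v = (11 * s_size) / 8;	/* YUV420_DMA_V_EVEN source */
	return o;
}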
+
+static void deinterlace_device_run(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_q_data *dst_q_data;
+
+ atomic_set(&ctx->dev->busy, 1);
+
+ dprintk(ctx->dev, "%s: DMA try issue.\n", __func__);
+
+ dst_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /*
+	 * Four field conversions are possible at the moment:
+ * V4L2_FIELD_SEQ_TB --> V4L2_FIELD_INTERLACED_TB:
+ * two separate fields in the same input buffer are interlaced
+ * in the output buffer using weaving. Top field comes first.
+ * V4L2_FIELD_SEQ_TB --> V4L2_FIELD_NONE:
+ * top field from the input buffer is copied to the output buffer
+ * using line doubling. Bottom field from the input buffer is discarded.
+ * V4L2_FIELD_SEQ_BT --> V4L2_FIELD_INTERLACED_BT:
+ * two separate fields in the same input buffer are interlaced
+ * in the output buffer using weaving. Bottom field comes first.
+ * V4L2_FIELD_SEQ_BT --> V4L2_FIELD_NONE:
+ * bottom field from the input buffer is copied to the output buffer
+ * using line doubling. Top field from the input buffer is discarded.
+ */
+ switch (dst_q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ switch (dst_q_data->field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ dprintk(ctx->dev, "%s: yuv420 interlaced tb.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_EVEN, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_EVEN, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_EVEN, 1);
+ break;
+ case V4L2_FIELD_NONE:
+ default:
+ dprintk(ctx->dev, "%s: yuv420 interlaced line doubling.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD_DOUBLING, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD_DOUBLING, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD_DOUBLING, 1);
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ switch (dst_q_data->field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ dprintk(ctx->dev, "%s: yuyv interlaced_tb.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0);
+ deinterlace_issue_dma(ctx, YUYV_DMA_EVEN, 1);
+ break;
+ case V4L2_FIELD_NONE:
+ default:
+ dprintk(ctx->dev, "%s: yuyv interlaced line doubling.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0);
+ deinterlace_issue_dma(ctx, YUYV_DMA_EVEN_DOUBLING, 1);
+ break;
+ }
+ break;
+ }
+
+ dprintk(ctx->dev, "%s: DMA issue done.\n", __func__);
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
+	strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct deinterlace_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+			/* index-th format of the requested type found? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
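
Seen from user space, the per-type enumeration above is just an index walk that stops once the driver returns -EINVAL. A minimal sketch follows, assuming a file descriptor obtained by opening the m2m video node; the helper name is invented for the example.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void list_capture_formats(int fd)
{
	struct v4l2_fmtdesc fmt;
	unsigned int i;

	for (i = 0; ; i++) {
		memset(&fmt, 0, sizeof(fmt));
		fmt.index = i;
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) < 0)
			break;	/* past the last format of this type */
		printf("%u: %s\n", fmt.index, (char *)fmt.description);
	}
}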
+
+static int vidioc_g_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct deinterlace_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = q_data->field;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+
+ switch (q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ f->fmt.pix.bytesperline = q_data->width * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ f->fmt.pix.bytesperline = q_data->width * 2;
+ }
+
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct deinterlace_fmt *fmt)
+{
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ }
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_fmt *fmt;
+ struct deinterlace_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE))
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT &&
+ f->fmt.pix.field != V4L2_FIELD_NONE)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_fmt *fmt;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT))
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+ if (!f->fmt.pix.colorspace)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ if (f->fmt.pix.field != V4L2_FIELD_SEQ_TB &&
+ f->fmt.pix.field != V4L2_FIELD_SEQ_BT)
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
+{
+ struct deinterlace_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = find_format(f);
+ if (!q_data->fmt) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Couldn't set format type %d, wxh: %dx%d. fmt: %d, field: %d\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.pixelformat, f->fmt.pix.field);
+ return -EINVAL;
+ }
+
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->field = f->fmt.pix.field;
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+ q_data->sizeimage = (q_data->width * q_data->height * 3) / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ q_data->sizeimage = q_data->width * q_data->height * 2;
+ }
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d, field: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
+ q_data->field);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(priv, f);
+ if (!ret)
+ ctx->colorspace = f->fmt.pix.colorspace;
+
+ return ret;
+}
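
Taken together, the try/set handlers pin the OUTPUT queue to sequential fields (SEQ_TB or SEQ_BT) and the CAPTURE queue to weaving (INTERLACED_TB/BT) or line doubling (NONE); vidioc_streamon() further down rejects any other combination. A minimal user-space sketch of that negotiation, assuming the same pixel format on both queues (the helper name is invented):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int setup_weave_yuv420(int fd, unsigned int w, unsigned int h)
{
	struct v4l2_format fmt;

	/* OUTPUT queue: the interlaced source, two sequential fields. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.width = w;
	fmt.fmt.pix.height = h;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
	fmt.fmt.pix.field = V4L2_FIELD_SEQ_TB;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return -1;

	/* CAPTURE queue: weave the fields; V4L2_FIELD_NONE would line-double. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = w;
	fmt.fmt.pix.height = h;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
	fmt.fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}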
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct deinterlace_q_data *s_q_data, *d_q_data;
+ struct deinterlace_ctx *ctx = priv;
+
+ s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /* Check that src and dst queues have the same pix format */
+ if (s_q_data->fmt->fourcc != d_q_data->fmt->fourcc) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "src and dst formats don't match.\n");
+ return -EINVAL;
+ }
+
+ /* Check that input and output deinterlacing types are compatible */
+ switch (s_q_data->field) {
+ case V4L2_FIELD_SEQ_BT:
+ if (d_q_data->field != V4L2_FIELD_NONE &&
+ d_q_data->field != V4L2_FIELD_INTERLACED_BT) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "src and dst field conversion [(%d)->(%d)] not supported.\n",
+ s_q_data->field, d_q_data->field);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ if (d_q_data->field != V4L2_FIELD_NONE &&
+ d_q_data->field != V4L2_FIELD_INTERLACED_TB) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "src and dst field conversion [(%d)->(%d)] not supported.\n",
+ s_q_data->field, d_q_data->field);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+
+/*
+ * Queue operations
+ */
+struct vb2_dc_conf {
+ struct device *dev;
+};
+
+static int deinterlace_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct deinterlace_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(vq->type);
+
+ switch (q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ size = q_data->width * q_data->height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ size = q_data->width * q_data->height * 2;
+ }
+
+ *nplanes = 1;
+ *nbuffers = count;
+ sizes[0] = size;
+
+ alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
+
+static int deinterlace_buf_prepare(struct vb2_buffer *vb)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct deinterlace_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+ return 0;
+}
+
+static void deinterlace_buf_queue(struct vb2_buffer *vb)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static struct vb2_ops deinterlace_qops = {
+ .queue_setup = deinterlace_queue_setup,
+ .buf_prepare = deinterlace_buf_prepare,
+ .buf_queue = deinterlace_buf_queue,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &deinterlace_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q_data[V4L2_M2M_SRC].fmt = &formats[0];
+ q_data[V4L2_M2M_SRC].width = 640;
+ q_data[V4L2_M2M_SRC].height = 480;
+ q_data[V4L2_M2M_SRC].sizeimage = (640 * 480 * 3) / 2;
+ q_data[V4L2_M2M_SRC].field = V4L2_FIELD_SEQ_TB;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &deinterlace_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q_data[V4L2_M2M_DST].fmt = &formats[0];
+ q_data[V4L2_M2M_DST].width = 640;
+ q_data[V4L2_M2M_DST].height = 480;
+ q_data[V4L2_M2M_DST].sizeimage = (640 * 480 * 3) / 2;
+	q_data[V4L2_M2M_DST].field = V4L2_FIELD_INTERLACED_TB;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int deinterlace_open(struct file *file)
+{
+ struct deinterlace_dev *pcdev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = NULL;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->dev = pcdev;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ int ret = PTR_ERR(ctx->m2m_ctx);
+
+ kfree(ctx);
+ return ret;
+ }
+
+ ctx->xt = kzalloc(sizeof(struct dma_interleaved_template) +
+ sizeof(struct data_chunk), GFP_KERNEL);
+ if (!ctx->xt) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+ dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+ return 0;
+}
+
+static int deinterlace_release(struct file *file)
+{
+ struct deinterlace_dev *pcdev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = file->private_data;
+
+ dprintk(pcdev, "Releasing instance %p\n", ctx);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ kfree(ctx->xt);
+ kfree(ctx);
+
+ return 0;
+}
+
+static unsigned int deinterlace_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct deinterlace_ctx *ctx = file->private_data;
+ int ret;
+
+ deinterlace_lock(ctx);
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ deinterlace_unlock(ctx);
+
+ return ret;
+}
+
+static int deinterlace_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct deinterlace_ctx *ctx = file->private_data;
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations deinterlace_fops = {
+ .owner = THIS_MODULE,
+ .open = deinterlace_open,
+ .release = deinterlace_release,
+ .poll = deinterlace_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = deinterlace_mmap,
+};
+
+static struct video_device deinterlace_videodev = {
+ .name = MEM2MEM_NAME,
+ .fops = &deinterlace_fops,
+ .ioctl_ops = &deinterlace_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = deinterlace_device_run,
+ .job_ready = deinterlace_job_ready,
+ .job_abort = deinterlace_job_abort,
+ .lock = deinterlace_lock,
+ .unlock = deinterlace_unlock,
+};
+
+static int deinterlace_probe(struct platform_device *pdev)
+{
+ struct deinterlace_dev *pcdev;
+ struct video_device *vfd;
+ dma_cap_mask_t mask;
+ int ret = 0;
+
+ pcdev = kzalloc(sizeof *pcdev, GFP_KERNEL);
+ if (!pcdev)
+ return -ENOMEM;
+
+ spin_lock_init(&pcdev->irqlock);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_INTERLEAVE, mask);
+ pcdev->dma_chan = dma_request_channel(mask, NULL, pcdev);
+ if (!pcdev->dma_chan)
+ goto free_dev;
+
+ if (!dma_has_cap(DMA_INTERLEAVE, pcdev->dma_chan->device->cap_mask)) {
+ v4l2_err(&pcdev->v4l2_dev, "DMA does not support INTERLEAVE\n");
+ goto rel_dma;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+ if (ret)
+ goto rel_dma;
+
+ atomic_set(&pcdev->busy, 0);
+ mutex_init(&pcdev->dev_mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ *vfd = deinterlace_videodev;
+ vfd->lock = &pcdev->dev_mutex;
+ vfd->v4l2_dev = &pcdev->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ video_set_drvdata(vfd, pcdev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", deinterlace_videodev.name);
+ pcdev->vfd = vfd;
+ v4l2_info(&pcdev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
+ " Device registered as /dev/video%d\n", vfd->num);
+
+ platform_set_drvdata(pdev, pcdev);
+
+ pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(pcdev->alloc_ctx)) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to alloc vb2 context\n");
+ ret = PTR_ERR(pcdev->alloc_ctx);
+ goto err_ctx;
+ }
+
+ pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(pcdev->m2m_dev)) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(pcdev->m2m_dev);
+ goto err_m2m;
+ }
+
+ return 0;
+
+ v4l2_m2m_release(pcdev->m2m_dev);
+err_m2m:
+ video_unregister_device(pcdev->vfd);
+err_ctx:
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+rel_vdev:
+ video_device_release(vfd);
+unreg_dev:
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+rel_dma:
+ dma_release_channel(pcdev->dma_chan);
+free_dev:
+ kfree(pcdev);
+
+ return ret;
+}
+
+static int deinterlace_remove(struct platform_device *pdev)
+{
+ struct deinterlace_dev *pcdev = platform_get_drvdata(pdev);
+
+ v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+ v4l2_m2m_release(pcdev->m2m_dev);
+ video_unregister_device(pcdev->vfd);
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+ dma_release_channel(pcdev->dma_chan);
+ kfree(pcdev);
+
+ return 0;
+}
+
+static struct platform_driver deinterlace_pdrv = {
+ .probe = deinterlace_probe,
+ .remove = deinterlace_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(deinterlace_pdrv);
+
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
new file mode 100644
index 00000000000..bf739e3b339
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -0,0 +1,23 @@
+config VIDEO_CAFE_CCIC
+ tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
+ depends on PCI && I2C && VIDEO_V4L2
+ select VIDEO_OV7670
+ select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a video4linux2 driver for the Marvell 88ALP01 integrated
+ CMOS camera controller. This is the controller found on first-
+ generation OLPC systems.
+
+config VIDEO_MMP_CAMERA
+ tristate "Marvell Armada 610 integrated camera controller support"
+ depends on ARCH_MMP && I2C && VIDEO_V4L2
+ select VIDEO_OV7670
+ select I2C_GPIO
+ select VIDEOBUF2_DMA_SG
+ ---help---
+ This is a Video4Linux2 driver for the integrated camera
+ controller found on Marvell Armada 610 application
+ processors (and likely beyond). This is the controller found
+ in OLPC XO 1.75 systems.
+
diff --git a/drivers/media/platform/marvell-ccic/Makefile b/drivers/media/platform/marvell-ccic/Makefile
new file mode 100644
index 00000000000..05a792c579a
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+cafe_ccic-y := cafe-driver.o mcam-core.o
+
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += mmp_camera.o
+mmp_camera-y := mmp-driver.o mcam-core.o
+
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
new file mode 100644
index 00000000000..56284536124
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -0,0 +1,656 @@
+/*
+ * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
+ * multifunction chip. Currently works with the Omnivision OV7670
+ * sensor.
+ *
+ * The data sheet for this device can be found at:
+ * http://www.marvell.com/products/pc_connectivity/88alp01/
+ *
+ * Copyright 2006-11 One Laptop Per Child Association, Inc.
+ * Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
+ *
+ * Written by Jonathan Corbet, corbet@lwn.net.
+ *
+ * v4l2_device/v4l2_subdev conversion by:
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "mcam-core.h"
+
+#define CAFE_VERSION 0x000002
+
+
+/*
+ * Parameters.
+ */
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Video");
+
+
+
+
+struct cafe_camera {
+ int registered; /* Fully initialized? */
+ struct mcam_camera mcam;
+ struct pci_dev *pdev;
+ wait_queue_head_t smbus_wait; /* Waiting on i2c events */
+};
+
+/*
+ * Most of the camera controller registers are defined in mcam-core.h,
+ * but the Cafe platform has some additional registers of its own;
+ * they are described here.
+ */
+
+/*
+ * "General purpose register" has a couple of GPIOs used for sensor
+ * power and reset on OLPC XO 1.0 systems.
+ */
+#define REG_GPR 0xb4
+#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
+#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
+#define GPR_C1 0x00000002 /* Control 1 value */
+/*
+ * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
+ * it is active low.
+ */
+#define GPR_C0 0x00000001 /* Control 0 value */
+
+/*
+ * These registers control the SMBUS module for communicating
+ * with the sensor.
+ */
+#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
+#define TWSIC0_EN 0x00000001 /* TWSI enable */
+#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
+#define TWSIC0_SID 0x000003fc /* Slave ID */
+/*
+ * Subtle trickery: the slave ID field starts with bit 2. But the
+ * Linux i2c stack wants to treat the bottommost bit as a separate
+ * read/write bit, which is why slave IDs are usually presented
+ * >>1. For consistency with that behavior, we shift over three
+ * bits instead of two.
+ */
+#define TWSIC0_SID_SHIFT 3
+#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
+#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
+#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
+
+#define REG_TWSIC1 0xbc /* TWSI control 1 */
+#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
+#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
+#define TWSIC1_ADDR_SHIFT 16
+#define TWSIC1_READ 0x01000000 /* Set for read op */
+#define TWSIC1_WSTAT 0x02000000 /* Write status */
+#define TWSIC1_RVALID 0x04000000 /* Read data valid */
+#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
+
+/*
+ * Here are the weird global control registers
+ */
+#define REG_GL_CSR 0x3004 /* Control/status register */
+#define GCSR_SRS 0x00000001 /* SW Reset set */
+#define GCSR_SRC 0x00000002 /* SW Reset clear */
+#define GCSR_MRS 0x00000004 /* Master reset set */
+#define GCSR_MRC 0x00000008 /* HW Reset clear */
+#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
+#define REG_GL_IMASK 0x300c /* Interrupt mask register */
+#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
+
+#define REG_GL_FCR 0x3038 /* GPIO functional control register */
+#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
+#define REG_GL_GPIOR 0x315c /* GPIO register */
+#define GGPIO_OUT 0x80000 /* GPIO output */
+#define GGPIO_VAL 0x00008 /* Output pin value */
+
+#define REG_LEN (REG_GL_IMASK + 4)
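
To make the TWSIC0_SID_SHIFT comment concrete, the value written to TWSIC0 for a sensor at a given 7-bit address can be sketched as below. This is illustrative only: the helper name is invented, and 0x21 is simply the OV7670's usual 7-bit address used as an example; the driver additionally ORs in TWSIC0_OVMAGIC for OV sensors.

static unsigned int twsic0_for_sensor(unsigned char addr7)
{
	/*
	 * The field nominally starts at bit 2, but the Linux i2c layer has
	 * already stripped the read/write bit from the address, so shifting
	 * a 7-bit address by 3 lands it in bits 3..9 of TWSIC0_SID and
	 * leaves the read/write position clear.
	 */
	return TWSIC0_EN | TWSIC0_CLKDIV |
	       ((addr7 << TWSIC0_SID_SHIFT) & TWSIC0_SID);
}

/* e.g. twsic0_for_sensor(0x21) for an OV7670 at 7-bit address 0x21 */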
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err(&(cam)->pdev->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn(&(cam)->pdev->dev, fmt, ##arg);
+
+/* -------------------------------------------------------------------- */
+/*
+ * The I2C/SMBUS interface to the camera itself starts here. The
+ * controller handles SMBUS itself, presenting a relatively simple register
+ * interface; all we have to do is to tell it where to route the data.
+ */
+#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
+
+static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
+{
+ struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
+ return container_of(m, struct cafe_camera, mcam);
+}
+
+
+static int cafe_smbus_write_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
+}
+
+static int cafe_smbus_write_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+ * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ /* Unfortunately, reading TWSIC1 too soon after sending a command
+ * causes the device to die.
+ * Use a busy-wait because we often send a large quantity of small
+ * commands at-once; using msleep() would cause a lot of context
+ * switches which take longer than 2ms, resulting in a noticeable
+ * boot-time and capture-start delays.
+ */
+ mdelay(2);
+
+ /*
+ * Another sad fact is that sometimes, commands silently complete but
+ * cafe_smbus_write_done() never becomes aware of this.
+	 * This happens at random and can apparently occur with any
+ * command.
+ * We don't understand why this is. We work around this issue
+ * with the timeout in the wait below, assuming that all commands
+ * complete within the timeout.
+ */
+ wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
+ CAFE_SMBUS_TIMEOUT);
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_WSTAT) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
+ command, value);
+ return -EIO;
+ }
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
+ command, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+
+static int cafe_smbus_read_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
+}
+
+
+
+static int cafe_smbus_read_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 *value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+	 * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ wait_event_timeout(cam->smbus_wait,
+ cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
+ return -EIO;
+ }
+ if (!(rval & TWSIC1_RVALID)) {
+ cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
+ command);
+ return -EIO;
+ }
+ *value = rval & 0xff;
+ return 0;
+}
+
+/*
+ * Perform a transfer over SMBUS. This thing is called under
+ * the i2c bus lock, so we shouldn't race with ourselves...
+ */
+static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char rw, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct cafe_camera *cam = i2c_get_adapdata(adapter);
+ int ret = -EINVAL;
+
+ /*
+ * This interface would appear to only do byte data ops. OK
+ * it can do word too, but the cam chip has no use for that.
+ */
+ if (size != I2C_SMBUS_BYTE_DATA) {
+ cam_err(cam, "funky xfer size %d\n", size);
+ return -EINVAL;
+ }
+
+ if (rw == I2C_SMBUS_WRITE)
+ ret = cafe_smbus_write_data(cam, addr, command, data->byte);
+ else if (rw == I2C_SMBUS_READ)
+ ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
+ return ret;
+}
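
For context, a sensor driver never calls these routines directly: it goes through the generic SMBus helpers on the adapter registered below, and the i2c core dispatches to cafe_smbus_xfer(). A minimal sketch, assuming a client bound to this adapter; the function names here are invented.

#include <linux/i2c.h>

static int example_sensor_write(struct i2c_client *client, u8 reg, u8 val)
{
	/* Ends up in cafe_smbus_write_data() with addr/command/value. */
	return i2c_smbus_write_byte_data(client, reg, val);
}

static int example_sensor_read(struct i2c_client *client, u8 reg)
{
	/* Ends up in cafe_smbus_read_data(); returns the byte or -errno. */
	return i2c_smbus_read_byte_data(client, reg);
}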
+
+
+static void cafe_smbus_enable_irq(struct cafe_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->mcam.dev_lock, flags);
+ mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
+ spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
+}
+
+static u32 cafe_smbus_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+}
+
+static struct i2c_algorithm cafe_smbus_algo = {
+ .smbus_xfer = cafe_smbus_xfer,
+ .functionality = cafe_smbus_func
+};
+
+static int cafe_smbus_setup(struct cafe_camera *cam)
+{
+ struct i2c_adapter *adap;
+ int ret;
+
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (adap == NULL)
+ return -ENOMEM;
+ cam->mcam.i2c_adapter = adap;
+ cafe_smbus_enable_irq(cam);
+ adap->owner = THIS_MODULE;
+ adap->algo = &cafe_smbus_algo;
+ strcpy(adap->name, "cafe_ccic");
+ adap->dev.parent = &cam->pdev->dev;
+ i2c_set_adapdata(adap, cam);
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ printk(KERN_ERR "Unable to register cafe i2c adapter\n");
+ return ret;
+}
+
+static void cafe_smbus_shutdown(struct cafe_camera *cam)
+{
+ i2c_del_adapter(cam->mcam.i2c_adapter);
+ kfree(cam->mcam.i2c_adapter);
+}
+
+
+/*
+ * Controller-level stuff
+ */
+
+static void cafe_ctlr_init(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ /*
+ * Added magic to bring up the hardware on the B-Test board
+ */
+ mcam_reg_write(mcam, 0x3038, 0x8);
+ mcam_reg_write(mcam, 0x315c, 0x80008);
+ /*
+ * Go through the dance needed to wake the device up.
+ * Note that these registers are global and shared
+ * with the NAND and SD devices. Interaction between the
+ * three still needs to be examined.
+ */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
+ /*
+ * Here we must wait a bit for the controller to come around.
+ */
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ msleep(5);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
+ mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
+ /*
+ * Mask all interrupts.
+ */
+ mcam_reg_write(mcam, REG_IRQMASK, 0);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+}
+
+
+static int cafe_ctlr_power_up(struct mcam_camera *mcam)
+{
+ /*
+ * Part one of the sensor dance: turn the global
+ * GPIO signal on.
+ */
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
+ /*
+ * Put the sensor into operational mode (assumes OLPC-style
+ * wiring). Control 0 is reset - set to 1 to operate.
+ * Control 1 is power down, set to 0 to operate.
+ */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
+
+ return 0;
+}
+
+static void cafe_ctlr_power_down(struct mcam_camera *mcam)
+{
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
+}
+
+
+
+/*
+ * The platform interrupt handler.
+ */
+static irqreturn_t cafe_irq(int irq, void *data)
+{
+ struct cafe_camera *cam = data;
+ struct mcam_camera *mcam = &cam->mcam;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = cam->registered && mccic_irq(mcam, irqs);
+ if (irqs & TWSIIRQS) {
+ mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
+ wake_up(&cam->smbus_wait);
+ handled = 1;
+ }
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+/* -------------------------------------------------------------------------- */
+/*
+ * PCI interface stuff.
+ */
+
+static int cafe_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ struct cafe_camera *cam;
+ struct mcam_camera *mcam;
+
+ /*
+ * Start putting together one of our big camera structures.
+ */
+ ret = -ENOMEM;
+ cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
+ if (cam == NULL)
+ goto out;
+ cam->pdev = pdev;
+ mcam = &cam->mcam;
+ mcam->chip_id = MCAM_CAFE;
+ spin_lock_init(&mcam->dev_lock);
+ init_waitqueue_head(&cam->smbus_wait);
+ mcam->plat_power_up = cafe_ctlr_power_up;
+ mcam->plat_power_down = cafe_ctlr_power_down;
+ mcam->dev = &pdev->dev;
+ /*
+ * Set the clock speed for the XO 1; I don't believe this
+ * driver has ever run anywhere else.
+ */
+ mcam->clock_speed = 45;
+ mcam->use_smbus = 1;
+ /*
+ * Vmalloc mode for buffers is traditional with this driver.
+ * We *might* be able to run DMA_contig, especially on a system
+ * with CMA in it.
+ */
+ mcam->buffer_mode = B_vmalloc;
+ /*
+ * Get set up on the PCI bus.
+ */
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_free;
+ pci_set_master(pdev);
+
+ ret = -EIO;
+ mcam->regs = pci_iomap(pdev, 0, 0);
+ if (!mcam->regs) {
+ printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
+ goto out_disable;
+ }
+ mcam->regs_size = pci_resource_len(pdev, 0);
+ ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
+ if (ret)
+ goto out_iounmap;
+
+ /*
+ * Initialize the controller and leave it powered up. It will
+ * stay that way until the sensor driver shows up.
+ */
+ cafe_ctlr_init(mcam);
+ cafe_ctlr_power_up(mcam);
+ /*
+ * Set up I2C/SMBUS communications. We have to drop the mutex here
+ * because the sensor could attach in this call chain, leading to
+ * unsightly deadlocks.
+ */
+ ret = cafe_smbus_setup(cam);
+ if (ret)
+ goto out_pdown;
+
+ ret = mccic_register(mcam);
+ if (ret == 0) {
+ cam->registered = 1;
+ return 0;
+ }
+
+ cafe_smbus_shutdown(cam);
+out_pdown:
+ cafe_ctlr_power_down(mcam);
+ free_irq(pdev->irq, cam);
+out_iounmap:
+ pci_iounmap(pdev, mcam->regs);
+out_disable:
+ pci_disable_device(pdev);
+out_free:
+ kfree(cam);
+out:
+ return ret;
+}
+
+
+/*
+ * Shut down an initialized device
+ */
+static void cafe_shutdown(struct cafe_camera *cam)
+{
+ mccic_shutdown(&cam->mcam);
+ cafe_smbus_shutdown(cam);
+ free_irq(cam->pdev->irq, cam);
+ pci_iounmap(cam->pdev, cam->mcam.regs);
+}
+
+
+static void cafe_pci_remove(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+
+ if (cam == NULL) {
+ printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
+ return;
+ }
+ cafe_shutdown(cam);
+ kfree(cam);
+}
+
+
+#ifdef CONFIG_PM
+/*
+ * Basic power management.
+ */
+static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+ mccic_suspend(&cam->mcam);
+ pci_disable_device(pdev);
+ return 0;
+}
+
+
+static int cafe_pci_resume(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret = 0;
+
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+
+ if (ret) {
+ cam_warn(cam, "Unable to re-enable device on resume!\n");
+ return ret;
+ }
+ cafe_ctlr_init(&cam->mcam);
+ return mccic_resume(&cam->mcam);
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_device_id cafe_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_ids);
+
+static struct pci_driver cafe_pci_driver = {
+ .name = "cafe1000-ccic",
+ .id_table = cafe_ids,
+ .probe = cafe_pci_probe,
+ .remove = cafe_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = cafe_pci_suspend,
+ .resume = cafe_pci_resume,
+#endif
+};
+
+
+
+
+static int __init cafe_init(void)
+{
+ int ret;
+
+ printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
+ CAFE_VERSION);
+ ret = pci_register_driver(&cafe_pci_driver);
+ if (ret) {
+ printk(KERN_ERR "Unable to register cafe_ccic driver\n");
+ goto out;
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+static void __exit cafe_exit(void)
+{
+ pci_unregister_driver(&cafe_pci_driver);
+}
+
+module_init(cafe_init);
+module_exit(cafe_exit);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
new file mode 100644
index 00000000000..be4b5121210
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -0,0 +1,2034 @@
+/*
+ * The Marvell camera core. This device appears in a number of settings,
+ * so it needs platform-specific support outside of the core.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/ov7670.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "mcam-core.h"
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Internal DMA buffer management. Since the controller cannot do S/G I/O,
+ * we must have physically contiguous buffers to bring frames into.
+ * These parameters control how many buffers we use, whether we
+ * allocate them at load time (better chance of success, but nails down
+ * memory) or when somebody tries to use the camera (riskier), and,
+ * for load-time allocation, how big they should be.
+ *
+ * The controller can cycle through three buffers. We could use
+ * more by flipping pointers around, but it probably makes little
+ * sense.
+ */
+
+static bool alloc_bufs_at_read;
+module_param(alloc_bufs_at_read, bool, 0444);
+MODULE_PARM_DESC(alloc_bufs_at_read,
+ "Non-zero value causes DMA buffers to be allocated when the "
+ "video capture device is read, rather than at module load "
+ "time. This saves memory, but decreases the chances of "
+ "successfully getting those buffers. This parameter is "
+ "only used in the vmalloc buffer mode");
+
+static int n_dma_bufs = 3;
+module_param(n_dma_bufs, uint, 0644);
+MODULE_PARM_DESC(n_dma_bufs,
+ "The number of DMA buffers to allocate. Can be either two "
+ "(saves memory, makes timing tighter) or three.");
+
+static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
+module_param(dma_buf_size, uint, 0444);
+MODULE_PARM_DESC(dma_buf_size,
+ "The size of the allocated DMA buffers. If actual operating "
+ "parameters require larger buffers, an attempt to reallocate "
+ "will be made.");
+#else /* MCAM_MODE_VMALLOC */
+static const bool alloc_bufs_at_read = 0;
+static const int n_dma_bufs = 3; /* Used by S/G_PARM */
+#endif /* MCAM_MODE_VMALLOC */
+
+static bool flip;
+module_param(flip, bool, 0444);
+MODULE_PARM_DESC(flip,
+ "If set, the sensor will be instructed to flip the image "
+ "vertically.");
+
+static int buffer_mode = -1;
+module_param(buffer_mode, int, 0444);
+MODULE_PARM_DESC(buffer_mode,
+ "Set the buffer mode to be used; default is to go with what "
+ "the platform driver asks for. Set to 0 for vmalloc, 1 for "
+ "DMA contiguous.");
+
+/*
+ * Status flags. Always manipulated with bit operations.
+ */
+#define CF_BUF0_VALID 0 /* Buffers valid - first three */
+#define CF_BUF1_VALID 1
+#define CF_BUF2_VALID 2
+#define CF_DMA_ACTIVE 3 /* A frame is incoming */
+#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
+#define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
+#define CF_SG_RESTART 6 /* SG restart needed */
+#define CF_FRAME_SOF0 7 /* Frame 0 started */
+#define CF_FRAME_SOF1 8
+#define CF_FRAME_SOF2 9
+
+#define sensor_call(cam, o, f, args...) \
+ v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+static struct mcam_format_struct {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ bool planar;
+ enum v4l2_mbus_pixelcode mbus_code;
+} mcam_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "UYVY 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "YUV 4:2:2 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = true,
+ },
+ {
+ .desc = "YUV 4:2:0 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = true,
+ },
+ {
+ .desc = "YVU 4:2:0 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = true,
+ },
+ {
+ .desc = "RGB 444",
+ .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "Raw RGB Bayer",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .bpp = 1,
+ .planar = false,
+ },
+};
+#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
+
+static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_MCAM_FMTS; i++)
+ if (mcam_formats[i].pixelformat == pixelformat)
+ return mcam_formats + i;
+ /* Not found? Then return the first format. */
+ return mcam_formats;
+}
+
+/*
+ * The default format we use until somebody says otherwise.
+ */
+static const struct v4l2_pix_format mcam_def_pix_format = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = VGA_WIDTH*2,
+ .sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
+};
+
+static const enum v4l2_mbus_pixelcode mcam_def_mbus_code =
+ V4L2_MBUS_FMT_YUYV8_2X8;
+
+
+/*
+ * The two-word DMA descriptor format used by the Armada 610 and the like.
+ * There is a three-word format as well (set C1_DESC_3WORD) where the third
+ * word is a pointer to the next descriptor, but we don't use it. Two-word
+ * descriptors have to be contiguous in memory.
+ */
+struct mcam_dma_desc {
+ u32 dma_addr;
+ u32 segment_len;
+};
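
As a sketch of what "two-word descriptors have to be contiguous in memory" means in practice: each mapped scatterlist entry collapses to an address/length pair in a flat array that the controller walks, which is what the dma_desc pointer in mcam_vb_buffer below is for. Illustrative only, assuming <linux/scatterlist.h>; the helper name is invented.

static void example_fill_dma_descs(struct mcam_dma_desc *descs,
				   struct scatterlist *sg, int nent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nent, i) {
		descs[i].dma_addr = sg_dma_address(s);	/* 32-bit engine */
		descs[i].segment_len = sg_dma_len(s);
	}
}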
+
+struct yuv_pointer_t {
+ dma_addr_t y;
+ dma_addr_t u;
+ dma_addr_t v;
+};
+
+/*
+ * Our buffer type for working with videobuf2. Note that the vb2
+ * developers have decreed that struct vb2_buffer must be at the
+ * beginning of this structure.
+ */
+struct mcam_vb_buffer {
+ struct vb2_buffer vb_buf;
+ struct list_head queue;
+ struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
+ dma_addr_t dma_desc_pa; /* Descriptor physical address */
+ int dma_desc_nent; /* Number of mapped descriptors */
+ struct yuv_pointer_t yuv_p;
+};
+
+static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct mcam_vb_buffer, vb_buf);
+}
+
+/*
+ * Hand a completed buffer back to user space.
+ */
+static void mcam_buffer_done(struct mcam_camera *cam, int frame,
+ struct vb2_buffer *vbuf)
+{
+ vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
+ vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
+ vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
+ vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
+}
+
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err((cam)->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn((cam)->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+ dev_dbg((cam)->dev, fmt, ##arg);
+
+
+/*
+ * Flag manipulation helpers
+ */
+static void mcam_reset_buffers(struct mcam_camera *cam)
+{
+ int i;
+
+ cam->next_buf = -1;
+ for (i = 0; i < cam->nbufs; i++) {
+ clear_bit(i, &cam->flags);
+ clear_bit(CF_FRAME_SOF0 + i, &cam->flags);
+ }
+}
+
+static inline int mcam_needs_config(struct mcam_camera *cam)
+{
+ return test_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
+{
+ if (needed)
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ else
+ clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+/* ------------------------------------------------------------------- */
+/*
+ * Make the controller start grabbing images. Everything must
+ * be set up before doing this.
+ */
+static void mcam_ctlr_start(struct mcam_camera *cam)
+{
+ /* set_bit performs a read, so no other barrier should be
+ needed here */
+ mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+static void mcam_ctlr_stop(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+static void mcam_enable_mipi(struct mcam_camera *mcam)
+{
+	/* MIPI mode is in use: program the DPHY and enable MIPI */
+ cam_dbg(mcam, "camera: DPHY3=0x%x, DPHY5=0x%x, DPHY6=0x%x\n",
+ mcam->dphy[0], mcam->dphy[1], mcam->dphy[2]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY3, mcam->dphy[0]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY5, mcam->dphy[1]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY6, mcam->dphy[2]);
+
+ if (!mcam->mipi_enabled) {
+ if (mcam->lane > 4 || mcam->lane <= 0) {
+ cam_warn(mcam, "lane number error\n");
+ mcam->lane = 1; /* set the default value */
+ }
+ /*
+		 * 0x41 activates 1 lane
+		 * 0x43 activates 2 lanes
+		 * 0x45 activates 3 lanes (never happens)
+		 * 0x47 activates 4 lanes
+ */
+ mcam_reg_write(mcam, REG_CSI2_CTRL0,
+ CSI2_C0_MIPI_EN | CSI2_C0_ACT_LANE(mcam->lane));
+ mcam_reg_write(mcam, REG_CLKCTRL,
+ (mcam->mclk_src << 29) | mcam->mclk_div);
+
+ mcam->mipi_enabled = true;
+ }
+}
+
+static void mcam_disable_mipi(struct mcam_camera *mcam)
+{
+	/* Using parallel mode or disabling MIPI */
+ mcam_reg_write(mcam, REG_CSI2_CTRL0, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY3, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY5, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY6, 0x0);
+ mcam->mipi_enabled = false;
+}
+
+/* ------------------------------------------------------------------- */
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Code specific to the vmalloc buffer mode.
+ */
+
+/*
+ * Allocate in-kernel DMA buffers for vmalloc mode.
+ */
+static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ int i;
+
+ mcam_set_config_needed(cam, 1);
+ if (loadtime)
+ cam->dma_buf_size = dma_buf_size;
+ else
+ cam->dma_buf_size = cam->pix_format.sizeimage;
+ if (n_dma_bufs > 3)
+ n_dma_bufs = 3;
+
+ cam->nbufs = 0;
+ for (i = 0; i < n_dma_bufs; i++) {
+ cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
+ cam->dma_buf_size, cam->dma_handles + i,
+ GFP_KERNEL);
+ if (cam->dma_bufs[i] == NULL) {
+ cam_warn(cam, "Failed to allocate DMA buffer\n");
+ break;
+ }
+ (cam->nbufs)++;
+ }
+
+ switch (cam->nbufs) {
+ case 1:
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[0], cam->dma_handles[0]);
+ cam->nbufs = 0;
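+		/* fall through */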
+ case 0:
+ cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
+ return -ENOMEM;
+
+ case 2:
+ if (n_dma_bufs > 2)
+ cam_warn(cam, "Will limp along with only 2 buffers\n");
+ break;
+ }
+ return 0;
+}
+
+static void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ int i;
+
+ for (i = 0; i < cam->nbufs; i++) {
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[i], cam->dma_handles[i]);
+ cam->dma_bufs[i] = NULL;
+ }
+ cam->nbufs = 0;
+}
+
+
+/*
+ * Set up DMA buffers when operating in vmalloc mode
+ */
+static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
+{
+ /*
+ * Store the first two Y buffers (we aren't supporting
+ * planar formats for now, so no UV bufs). Then either
+ * set the third if it exists, or tell the controller
+ * to just use two.
+ */
+ mcam_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
+ mcam_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
+ if (cam->nbufs > 2) {
+ mcam_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ } else
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ if (cam->chip_id == MCAM_CAFE)
+ mcam_reg_write(cam, REG_UBAR, 0); /* 32 bits only */
+}
+
+/*
+ * Copy data out to user space in the vmalloc case
+ */
+static void mcam_frame_tasklet(unsigned long data)
+{
+ struct mcam_camera *cam = (struct mcam_camera *) data;
+ int i;
+ unsigned long flags;
+ struct mcam_vb_buffer *buf;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ for (i = 0; i < cam->nbufs; i++) {
+ int bufno = cam->next_buf;
+
+ if (cam->state != S_STREAMING || bufno < 0)
+ break; /* I/O got stopped */
+ if (++(cam->next_buf) >= cam->nbufs)
+ cam->next_buf = 0;
+ if (!test_bit(bufno, &cam->flags))
+ continue;
+ if (list_empty(&cam->buffers)) {
+ cam->frame_state.singles++;
+ break; /* Leave it valid, hope for better later */
+ }
+ cam->frame_state.delivered++;
+ clear_bit(bufno, &cam->flags);
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+ queue);
+ list_del_init(&buf->queue);
+ /*
+ * Drop the lock during the big copy. This *should* be safe...
+ */
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
+ cam->pix_format.sizeimage);
+ mcam_buffer_done(cam, bufno, &buf->vb_buf);
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ }
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Make sure our allocated buffers are up to the task.
+ */
+static int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
+ mcam_free_dma_bufs(cam);
+ if (cam->nbufs == 0)
+ return mcam_alloc_dma_bufs(cam, 0);
+ return 0;
+}
+
+static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
+{
+ tasklet_schedule(&cam->s_tasklet);
+}
+
+#else /* MCAM_MODE_VMALLOC */
+
+static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ return 0;
+}
+
+static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ return;
+}
+
+static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ return 0;
+}
+
+
+
+#endif /* MCAM_MODE_VMALLOC */
+
+
+#ifdef MCAM_MODE_DMA_CONTIG
+/* ---------------------------------------------------------------------- */
+/*
+ * DMA-contiguous code.
+ */
+
+static bool mcam_fmt_is_planar(__u32 pfmt)
+{
+ struct mcam_format_struct *f;
+
+ f = mcam_find_format(pfmt);
+ return f->planar;
+}
+
+/*
+ * Set up a contiguous buffer for the given frame. Here also is where
+ * the underrun strategy is set: if there is no buffer available, reuse
+ * the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
+ * keep the interrupt handler from giving that buffer back to user
+ * space. In this way, we always have a buffer to DMA to and don't
+ * have to try to play games stopping and restarting the controller.
+ */
+static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf;
+ struct v4l2_pix_format *fmt = &cam->pix_format;
+ dma_addr_t dma_handle;
+ u32 pixel_count = fmt->width * fmt->height;
+ struct vb2_buffer *vb;
+
+ /*
+ * If there are no available buffers, go into single mode
+ */
+ if (list_empty(&cam->buffers)) {
+ buf = cam->vb_bufs[frame ^ 0x1];
+ set_bit(CF_SINGLE_BUFFER, &cam->flags);
+ cam->frame_state.singles++;
+ } else {
+ /*
+ * OK, we have a buffer we can use.
+ */
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+ queue);
+ list_del_init(&buf->queue);
+ clear_bit(CF_SINGLE_BUFFER, &cam->flags);
+ }
+
+ cam->vb_bufs[frame] = buf;
+ vb = &buf->vb_buf;
+
+ dma_handle = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf->yuv_p.y = dma_handle;
+
+ switch (cam->pix_format.pixelformat) {
+ case V4L2_PIX_FMT_YUV422P:
+ buf->yuv_p.u = buf->yuv_p.y + pixel_count;
+ buf->yuv_p.v = buf->yuv_p.u + pixel_count / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ buf->yuv_p.u = buf->yuv_p.y + pixel_count;
+ buf->yuv_p.v = buf->yuv_p.u + pixel_count / 4;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ buf->yuv_p.v = buf->yuv_p.y + pixel_count;
+ buf->yuv_p.u = buf->yuv_p.v + pixel_count / 4;
+ break;
+ default:
+ break;
+ }
+
+ mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR, buf->yuv_p.y);
+ if (mcam_fmt_is_planar(fmt->pixelformat)) {
+ mcam_reg_write(cam, frame == 0 ?
+ REG_U0BAR : REG_U1BAR, buf->yuv_p.u);
+ mcam_reg_write(cam, frame == 0 ?
+ REG_V0BAR : REG_V1BAR, buf->yuv_p.v);
+ }
+}
+
+/*
+ * Initial B_DMA_contig setup.
+ */
+static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
+{
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ cam->nbufs = 2;
+ mcam_set_contig_buffer(cam, 0);
+ mcam_set_contig_buffer(cam, 1);
+}
+
+/*
+ * Frame completion handling.
+ */
+static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
+
+ if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
+ cam->frame_state.delivered++;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+ }
+ mcam_set_contig_buffer(cam, frame);
+}
+
+#endif /* MCAM_MODE_DMA_CONTIG */
+
+#ifdef MCAM_MODE_DMA_SG
+/* ---------------------------------------------------------------------- */
+/*
+ * Scatter/gather-specific code.
+ */
+
+/*
+ * Set up the next buffer for S/G I/O; caller should be sure that
+ * the controller is stopped and a buffer is available.
+ */
+static void mcam_sg_next_buffer(struct mcam_camera *cam)
+{
+ struct mcam_vb_buffer *buf;
+
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+ list_del_init(&buf->queue);
+ /*
+ * Very Bad Not Good Things happen if you don't clear
+ * C1_DESC_ENA before making any descriptor changes.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
+ mcam_reg_write(cam, REG_DESC_LEN_Y,
+ buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+ mcam_reg_write(cam, REG_DESC_LEN_U, 0);
+ mcam_reg_write(cam, REG_DESC_LEN_V, 0);
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ cam->vb_bufs[0] = buf;
+}
+
+/*
+ * Initial B_DMA_sg setup
+ */
+static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
+{
+ /*
+ * The list-empty condition can hit us at resume time
+ * if the buffer list was empty when the system was suspended.
+ */
+ if (list_empty(&cam->buffers)) {
+ set_bit(CF_SG_RESTART, &cam->flags);
+ return;
+ }
+
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
+ mcam_sg_next_buffer(cam);
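+	/*
+	 * There is only one descriptor chain in S/G mode; nbufs just
+	 * bounds the frame loops in the interrupt handler, where the
+	 * frame number carries no real meaning.
+	 */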
+ cam->nbufs = 3;
+}
+
+
+/*
+ * Frame completion with S/G is trickier. We can't muck with
+ * a descriptor chain on the fly, since the controller buffers it
+ * internally. So we have to actually stop and restart; Marvell
+ * says this is the way to do it.
+ *
+ * Of course, stopping is easier said than done; experience shows
+ * that the controller can start a frame *after* C0_ENABLE has been
+ * cleared. So when running in S/G mode, the controller is "stopped"
+ * on receipt of the start-of-frame interrupt. That means we can
+ * safely change the DMA descriptor array here and restart things
+ * (assuming there's another buffer waiting to go).
+ */
+static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[0];
+
+ /*
+ * If we're no longer supposed to be streaming, don't do anything.
+ */
+ if (cam->state != S_STREAMING)
+ return;
+ /*
+ * If we have another buffer available, put it in and
+ * restart the engine.
+ */
+ if (!list_empty(&cam->buffers)) {
+ mcam_sg_next_buffer(cam);
+ mcam_ctlr_start(cam);
+ /*
+ * Otherwise set CF_SG_RESTART and the controller will
+ * be restarted once another buffer shows up.
+ */
+ } else {
+ set_bit(CF_SG_RESTART, &cam->flags);
+ cam->frame_state.singles++;
+ cam->vb_bufs[0] = NULL;
+ }
+ /*
+ * Now we can give the completed frame back to user space.
+ */
+ cam->frame_state.delivered++;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+}
+
+
+/*
+ * Scatter/gather mode requires stopping the controller between
+ * frames so we can put in a new DMA descriptor array. If no new
+ * buffer exists at frame completion, the controller is left stopped;
+ * this function is charged with getting things going again.
+ */
+static void mcam_sg_restart(struct mcam_camera *cam)
+{
+ mcam_ctlr_dma_sg(cam);
+ mcam_ctlr_start(cam);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+}
+
+#else /* MCAM_MODE_DMA_SG */
+
+static inline void mcam_sg_restart(struct mcam_camera *cam)
+{
+ return;
+}
+
+#endif /* MCAM_MODE_DMA_SG */
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Buffer-mode-independent controller code.
+ */
+
+/*
+ * Image format setup
+ */
+static void mcam_ctlr_image(struct mcam_camera *cam)
+{
+ struct v4l2_pix_format *fmt = &cam->pix_format;
+ u32 widthy = 0, widthuv = 0, imgsz_h, imgsz_w;
+
+ cam_dbg(cam, "camera: bytesperline = %d; height = %d\n",
+ fmt->bytesperline, fmt->sizeimage / fmt->bytesperline);
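+	/*
+	 * REG_IMGSIZE packs the line count into its upper half and the
+	 * line length in bytes into its lower half; width*2 corresponds
+	 * to the packed 16-bit-per-pixel formats handled below.
+	 */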
+ imgsz_h = (fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK;
+ imgsz_w = (fmt->width * 2) & IMGSZ_H_MASK;
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ widthy = fmt->width * 2;
+ widthuv = 0;
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ imgsz_h = (fmt->sizeimage / fmt->bytesperline) << IMGSZ_V_SHIFT;
+ widthy = fmt->bytesperline;
+ widthuv = 0;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ widthy = fmt->width;
+ widthuv = fmt->width / 2;
+ break;
+ default:
+ widthy = fmt->bytesperline;
+ widthuv = 0;
+ }
+
+ mcam_reg_write_mask(cam, REG_IMGPITCH, widthuv << 16 | widthy,
+ IMGP_YP_MASK | IMGP_UVP_MASK);
+ mcam_reg_write(cam, REG_IMGSIZE, imgsz_h | imgsz_w);
+ mcam_reg_write(cam, REG_IMGOFFSET, 0x0);
+
+ /*
+ * Tell the controller about the image format we are using.
+ */
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV422P:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PLANAR | C0_YUVE_YVYU, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_RGB444:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB | C0_RGBF_444 | C0_RGB4_XRGB, C0_DF_MASK);
+ /* Alpha value? */
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB | C0_RGBF_565 | C0_RGB5_BGGR, C0_DF_MASK);
+ break;
+ default:
+ cam_err(cam, "camera: unknown format: %#x\n", fmt->pixelformat);
+ break;
+ }
+
+ /*
+ * Make sure it knows we want to use hsync/vsync.
+ */
+ mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
+ /*
+	 * This field controls the generation of EOF (DVP only).
+ */
+ if (cam->bus_type != V4L2_MBUS_CSI2)
+ mcam_reg_set_bit(cam, REG_CTRL0,
+ C0_EOF_VSYNC | C0_VEDGE_CTRL);
+}
+
+
+/*
+ * Configure the controller for operation; caller holds the
+ * device mutex.
+ */
+static int mcam_ctlr_configure(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ cam->dma_setup(cam);
+ mcam_ctlr_image(cam);
+ mcam_set_config_needed(cam, 0);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
+{
+ /*
+ * Clear any pending interrupts, since we do not
+ * expect to have I/O active prior to enabling.
+ */
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
+ mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+
+
+static void mcam_ctlr_init(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * Make sure it's not powered down.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ /*
+ * Turn off the enable bit. It sure should be off anyway,
+ * but it's good to be sure.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+ /*
+ * Clock the sensor appropriately. Controller clock should
+ * be 48MHz, sensor "typical" value is half that.
+ */
+ mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Stop the controller, and don't return until we're really sure that no
+ * further DMA is going on.
+ */
+static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ /*
+ * Theory: stop the camera controller (whether it is operating
+ * or not). Delay briefly just in case we race with the SOF
+ * interrupt, then wait until no DMA is active.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ mcam_ctlr_stop(cam);
+ cam->state = S_IDLE;
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ /*
+ * This is a brutally long sleep, but experience shows that
+ * it can take the controller a while to get the message that
+ * it needs to stop grabbing frames. In particular, we can
+ * sometimes (on mmp) get a frame at the end WITHOUT the
+ * start-of-frame indication.
+ */
+ msleep(150);
+ if (test_bit(CF_DMA_ACTIVE, &cam->flags))
+ cam_err(cam, "Timeout waiting for DMA to end\n");
+ /* This would be bad news - what now? */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ mcam_ctlr_irq_disable(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/*
+ * Power up and down.
+ */
+static int mcam_ctlr_power_up(struct mcam_camera *cam)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ ret = cam->plat_power_up(cam);
+ if (ret) {
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return ret;
+ }
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ msleep(5); /* Just to be sure */
+ return 0;
+}
+
+static void mcam_ctlr_power_down(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * School of hard knocks department: be sure we do any register
+ * twiddling on the controller *before* calling the platform
+ * power down routine.
+ */
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
+ cam->plat_power_down(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/* -------------------------------------------------------------------- */
+/*
+ * Communications with the sensor.
+ */
+
+static int __mcam_cam_reset(struct mcam_camera *cam)
+{
+ return sensor_call(cam, core, reset, 0);
+}
+
+/*
+ * We have found the sensor on the I2C bus.  Let's try to have a
+ * conversation.
+ */
+static int mcam_cam_init(struct mcam_camera *cam)
+{
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->state != S_NOTREADY)
+		cam_warn(cam, "Cam init with device in funky state %d\n",
+ cam->state);
+ ret = __mcam_cam_reset(cam);
+ /* Get/set parameters? */
+ cam->state = S_IDLE;
+ mcam_ctlr_power_down(cam);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+/*
+ * Configure the sensor to match the parameters we have. Caller should
+ * hold s_mutex
+ */
+static int mcam_cam_set_flip(struct mcam_camera *cam)
+{
+ struct v4l2_control ctrl;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_VFLIP;
+ ctrl.value = flip;
+ return sensor_call(cam, core, s_ctrl, &ctrl);
+}
+
+
+static int mcam_cam_configure(struct mcam_camera *cam)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
+ ret = sensor_call(cam, core, init, 0);
+ if (ret == 0)
+ ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
+ /*
+ * OV7670 does weird things if flip is set *before* format...
+ */
+ ret += mcam_cam_set_flip(cam);
+ return ret;
+}
+
+/*
+ * Get everything ready, and start grabbing frames.
+ */
+static int mcam_read_setup(struct mcam_camera *cam)
+{
+ int ret;
+ unsigned long flags;
+
+ /*
+ * Configuration. If we still don't have DMA buffers,
+ * make one last, desperate attempt.
+ */
+ if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
+ mcam_alloc_dma_bufs(cam, 0))
+ return -ENOMEM;
+
+ if (mcam_needs_config(cam)) {
+ mcam_cam_configure(cam);
+ ret = mcam_ctlr_configure(cam);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Turn it loose.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_DMA_ACTIVE, &cam->flags);
+ mcam_reset_buffers(cam);
+ /*
+	 * Update the CSI2_DPHY values.
+ */
+ if (cam->calc_dphy)
+ cam->calc_dphy(cam);
+ cam_dbg(cam, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+ cam->dphy[0], cam->dphy[1], cam->dphy[2]);
+ if (cam->bus_type == V4L2_MBUS_CSI2)
+ mcam_enable_mipi(cam);
+ else
+ mcam_disable_mipi(cam);
+ mcam_ctlr_irq_enable(cam);
+ cam->state = S_STREAMING;
+ if (!test_bit(CF_SG_RESTART, &cam->flags))
+ mcam_ctlr_start(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/*
+ * Videobuf2 interface code.
+ */
+
+static int mcam_vb_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt, unsigned int *nbufs,
+ unsigned int *num_planes, unsigned int sizes[],
+ void *alloc_ctxs[])
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
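+	/*
+	 * DMA-contig keeps a buffer parked in each of the two BARs, so a
+	 * third buffer is needed for frames to make it out to user space.
+	 */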
+ int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
+
+ sizes[0] = cam->pix_format.sizeimage;
+	*num_planes = 1; /* Someday we will have to support planar formats... */
+ if (*nbufs < minbufs)
+ *nbufs = minbufs;
+ if (cam->buffer_mode == B_DMA_contig)
+ alloc_ctxs[0] = cam->vb_alloc_ctx;
+ return 0;
+}
+
+
+static void mcam_vb_buf_queue(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+ int start;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
+ list_add(&mvb->queue, &cam->buffers);
+ if (cam->state == S_STREAMING && test_bit(CF_SG_RESTART, &cam->flags))
+ mcam_sg_restart(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ if (start)
+ mcam_read_setup(cam);
+}
+
+
+/*
+ * vb2 uses these to release the mutex when waiting in dqbuf. I'm
+ * not actually sure we need to do this (I'm not sure that vb2_dqbuf() needs
+ * to be called with the mutex held), but better safe than sorry.
+ */
+static void mcam_vb_wait_prepare(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&cam->s_mutex);
+}
+
+static void mcam_vb_wait_finish(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ mutex_lock(&cam->s_mutex);
+}
+
+/*
+ * These need to be called with the mutex held from vb2
+ */
+static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ unsigned int frame;
+
+ if (cam->state != S_IDLE) {
+ INIT_LIST_HEAD(&cam->buffers);
+ return -EINVAL;
+ }
+ cam->sequence = 0;
+ /*
+ * Videobuf2 sneakily hoards all the buffers and won't
+ * give them to us until *after* streaming starts. But
+ * we can't actually start streaming until we have a
+ * destination. So go into a wait state and hope they
+ * give us buffers soon.
+ */
+ if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
+ cam->state = S_BUFWAIT;
+ return 0;
+ }
+
+ /*
+	 * Make sure any leftover frame flags are cleared
+	 * before we really start streaming.
+ */
+ for (frame = 0; frame < cam->nbufs; frame++)
+ clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+
+ return mcam_read_setup(cam);
+}
+
+static void mcam_vb_stop_streaming(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ unsigned long flags;
+
+ if (cam->state == S_BUFWAIT) {
+ /* They never gave us buffers */
+ cam->state = S_IDLE;
+ return;
+ }
+ if (cam->state != S_STREAMING)
+ return;
+ mcam_ctlr_stop_dma(cam);
+ /*
+	 * Reset the CCIC PHY after stopping streaming;
+	 * otherwise the CCIC may be unstable.
+ */
+ if (cam->ctlr_reset)
+ cam->ctlr_reset(cam);
+ /*
+ * VB2 reclaims the buffers, so we need to forget
+ * about them.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ INIT_LIST_HEAD(&cam->buffers);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+static const struct vb2_ops mcam_vb2_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_queue = mcam_vb_buf_queue,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = mcam_vb_wait_prepare,
+ .wait_finish = mcam_vb_wait_finish,
+};
+
+
+#ifdef MCAM_MODE_DMA_SG
+/*
+ * Scatter/gather mode uses all of the above functions plus a
+ * few extras to deal with DMA mapping.
+ */
+static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
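+	/*
+	 * Worst case: one descriptor per page of image data, plus one
+	 * for a final partial page.
+	 */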
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+ mvb->dma_desc = dma_alloc_coherent(cam->dev,
+ ndesc * sizeof(struct mcam_dma_desc),
+ &mvb->dma_desc_pa, GFP_KERNEL);
+ if (mvb->dma_desc == NULL) {
+ cam_err(cam, "Unable to get DMA descriptor array\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
+ struct mcam_dma_desc *desc = mvb->dma_desc;
+ struct scatterlist *sg;
+ int i;
+
+ mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
+ sg_table->nents, DMA_FROM_DEVICE);
+ if (mvb->dma_desc_nent <= 0)
+ return -EIO; /* Not sure what's right here */
+ for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
+ desc->dma_addr = sg_dma_address(sg);
+ desc->segment_len = sg_dma_len(sg);
+ desc++;
+ }
+ return 0;
+}
+
+static void mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
+
+ if (sg_table)
+ dma_unmap_sg(cam->dev, sg_table->sgl,
+ sg_table->nents, DMA_FROM_DEVICE);
+}
+
+static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+ dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
+ mvb->dma_desc, mvb->dma_desc_pa);
+}
+
+
+static const struct vb2_ops mcam_vb2_sg_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_init = mcam_vb_sg_buf_init,
+ .buf_prepare = mcam_vb_sg_buf_prepare,
+ .buf_queue = mcam_vb_buf_queue,
+ .buf_finish = mcam_vb_sg_buf_finish,
+ .buf_cleanup = mcam_vb_sg_buf_cleanup,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = mcam_vb_wait_prepare,
+ .wait_finish = mcam_vb_wait_finish,
+};
+
+#endif /* MCAM_MODE_DMA_SG */
+
+static int mcam_setup_vb2(struct mcam_camera *cam)
+{
+ struct vb2_queue *vq = &cam->vb_queue;
+
+ memset(vq, 0, sizeof(*vq));
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->drv_priv = cam;
+ INIT_LIST_HEAD(&cam->buffers);
+ switch (cam->buffer_mode) {
+ case B_DMA_contig:
+#ifdef MCAM_MODE_DMA_CONTIG
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+ cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
+ vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ cam->dma_setup = mcam_ctlr_dma_contig;
+ cam->frame_complete = mcam_dma_contig_done;
+#endif
+ break;
+ case B_DMA_sg:
+#ifdef MCAM_MODE_DMA_SG
+ vq->ops = &mcam_vb2_sg_ops;
+ vq->mem_ops = &vb2_dma_sg_memops;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+ vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ cam->dma_setup = mcam_ctlr_dma_sg;
+ cam->frame_complete = mcam_dma_sg_done;
+#endif
+ break;
+ case B_vmalloc:
+#ifdef MCAM_MODE_VMALLOC
+ tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
+ (unsigned long) cam);
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_vmalloc_memops;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+ vq->io_modes = VB2_MMAP;
+ cam->dma_setup = mcam_ctlr_dma_vmalloc;
+ cam->frame_complete = mcam_vmalloc_done;
+#endif
+ break;
+ }
+ return vb2_queue_init(vq);
+}
+
+static void mcam_cleanup_vb2(struct mcam_camera *cam)
+{
+ vb2_queue_release(&cam->vb_queue);
+#ifdef MCAM_MODE_DMA_CONTIG
+ if (cam->buffer_mode == B_DMA_contig)
+ vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
+#endif
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * The long list of V4L2 ioctl() operations.
+ */
+
+static int mcam_vidioc_streamon(struct file *filp, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_streamon(&cam->vb_queue, type);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_streamoff(struct file *filp, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_streamoff(&cam->vb_queue, type);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_reqbufs(struct file *filp, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_reqbufs(&cam->vb_queue, req);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_querybuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_querybuf(&cam->vb_queue, buf);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_qbuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_qbuf(&cam->vb_queue, buf);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "marvell_ccic");
+ strcpy(cap->card, "marvell_ccic");
+ cap->version = 1;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+
+static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
+ void *priv, struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index >= N_MCAM_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, mcam_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
+ return 0;
+}
+
+static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ f = mcam_find_format(pix->pixelformat);
+ pix->pixelformat = f->pixelformat;
+ v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
+ mutex_unlock(&cam->s_mutex);
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ switch (f->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
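+		/*
+		 * For the planar 4:2:0 formats, fold the chroma planes into
+		 * bytesperline so that the sizeimage calculation below
+		 * comes out right.
+		 */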
+ pix->bytesperline = pix->width * 3 / 2;
+ break;
+ default:
+ pix->bytesperline = pix->width * f->bpp;
+ break;
+ }
+ pix->sizeimage = pix->height * pix->bytesperline;
+ return ret;
+}
+
+static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ int ret;
+
+ /*
+	 * Can't do anything if the device is not idle.
+ * Also can't if there are streaming buffers in place.
+ */
+ if (cam->state != S_IDLE || cam->vb_queue.num_buffers > 0)
+ return -EBUSY;
+
+ f = mcam_find_format(fmt->fmt.pix.pixelformat);
+
+ /*
+ * See if the formatting works in principle.
+ */
+ ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
+ if (ret)
+ return ret;
+ /*
+ * Now we start to change things for real, so let's do it
+ * under lock.
+ */
+ mutex_lock(&cam->s_mutex);
+ cam->pix_format = fmt->fmt.pix;
+ cam->mbus_code = f->mbus_code;
+
+ /*
+ * Make sure we have appropriate DMA buffers.
+ */
+ if (cam->buffer_mode == B_vmalloc) {
+ ret = mcam_check_dma_buffers(cam);
+ if (ret)
+ goto out;
+ }
+ mcam_set_config_needed(cam, 1);
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+/*
+ * Return our stored notion of how the camera is/should be configured.
+ * The V4L2 spec wants us to be smarter, and actually get this from
+ * the camera (and not mess with it at open time). Someday.
+ */
+static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct mcam_camera *cam = priv;
+
+ f->fmt.pix = cam->pix_format;
+ return 0;
+}
+
+/*
+ * We only have one input - the sensor - so minimize the nonsense here.
+ */
+static int mcam_vidioc_enum_input(struct file *filp, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = V4L2_STD_ALL; /* Not sure what should go here */
+ strcpy(input->name, "Camera");
+ return 0;
+}
+
+static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/* from vivi.c */
+static int mcam_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id a)
+{
+ return 0;
+}
+
+static int mcam_vidioc_g_std(struct file *filp, void *priv, v4l2_std_id *a)
+{
+ *a = V4L2_STD_NTSC_M;
+ return 0;
+}
+
+/*
+ * G/S_PARM. Most of this is done by the sensor, but we are
+ * the level which controls the number of read buffers.
+ */
+static int mcam_vidioc_g_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parms)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, g_parm, parms);
+ mutex_unlock(&cam->s_mutex);
+ parms->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parms)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, s_parm, parms);
+ mutex_unlock(&cam->s_mutex);
+ parms->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_framesizes, sizes);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_frameintervals, interval);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mcam_vidioc_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = priv;
+
+ if (reg->reg > cam->regs_size - 4)
+ return -EINVAL;
+ reg->val = mcam_reg_read(cam, reg->reg);
+ reg->size = 4;
+ return 0;
+}
+
+static int mcam_vidioc_s_register(struct file *file, void *priv,
+ const struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = priv;
+
+ if (reg->reg > cam->regs_size - 4)
+ return -EINVAL;
+ mcam_reg_write(cam, reg->reg, reg->val);
+ return 0;
+}
+#endif
+
+static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
+ .vidioc_querycap = mcam_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = mcam_vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = mcam_vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = mcam_vidioc_g_fmt_vid_cap,
+ .vidioc_enum_input = mcam_vidioc_enum_input,
+ .vidioc_g_input = mcam_vidioc_g_input,
+ .vidioc_s_input = mcam_vidioc_s_input,
+ .vidioc_s_std = mcam_vidioc_s_std,
+ .vidioc_g_std = mcam_vidioc_g_std,
+ .vidioc_reqbufs = mcam_vidioc_reqbufs,
+ .vidioc_querybuf = mcam_vidioc_querybuf,
+ .vidioc_qbuf = mcam_vidioc_qbuf,
+ .vidioc_dqbuf = mcam_vidioc_dqbuf,
+ .vidioc_streamon = mcam_vidioc_streamon,
+ .vidioc_streamoff = mcam_vidioc_streamoff,
+ .vidioc_g_parm = mcam_vidioc_g_parm,
+ .vidioc_s_parm = mcam_vidioc_s_parm,
+ .vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = mcam_vidioc_g_register,
+ .vidioc_s_register = mcam_vidioc_s_register,
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Our various file operations.
+ */
+static int mcam_v4l_open(struct file *filp)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ int ret = 0;
+
+ filp->private_data = cam;
+
+ cam->frame_state.frames = 0;
+ cam->frame_state.singles = 0;
+ cam->frame_state.delivered = 0;
+ mutex_lock(&cam->s_mutex);
+ if (cam->users == 0) {
+ ret = mcam_setup_vb2(cam);
+ if (ret)
+ goto out;
+ ret = mcam_ctlr_power_up(cam);
+ if (ret)
+ goto out;
+ __mcam_cam_reset(cam);
+ mcam_set_config_needed(cam, 1);
+ }
+ (cam->users)++;
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_v4l_release(struct file *filp)
+{
+ struct mcam_camera *cam = filp->private_data;
+
+ cam_dbg(cam, "Release, %d frames, %d singles, %d delivered\n",
+ cam->frame_state.frames, cam->frame_state.singles,
+ cam->frame_state.delivered);
+ mutex_lock(&cam->s_mutex);
+ (cam->users)--;
+ if (cam->users == 0) {
+ mcam_ctlr_stop_dma(cam);
+ mcam_cleanup_vb2(cam);
+ mcam_disable_mipi(cam);
+ mcam_ctlr_power_down(cam);
+ if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
+ mcam_free_dma_bufs(cam);
+ }
+
+ mutex_unlock(&cam->s_mutex);
+ return 0;
+}
+
+static ssize_t mcam_v4l_read(struct file *filp,
+ char __user *buffer, size_t len, loff_t *pos)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_read(&cam->vb_queue, buffer, len, pos,
+ filp->f_flags & O_NONBLOCK);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static unsigned int mcam_v4l_poll(struct file *filp,
+ struct poll_table_struct *pt)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_poll(&cam->vb_queue, filp, pt);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_mmap(&cam->vb_queue, vma);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static const struct v4l2_file_operations mcam_v4l_fops = {
+ .owner = THIS_MODULE,
+ .open = mcam_v4l_open,
+ .release = mcam_v4l_release,
+ .read = mcam_v4l_read,
+ .poll = mcam_v4l_poll,
+ .mmap = mcam_v4l_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+
+/*
+ * This template device holds all of those v4l2 methods; we
+ * clone it for specific real devices.
+ */
+static struct video_device mcam_v4l_template = {
+ .name = "mcam",
+ .tvnorms = V4L2_STD_NTSC_M,
+
+ .fops = &mcam_v4l_fops,
+ .ioctl_ops = &mcam_v4l_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt handler stuff
+ */
+static void mcam_frame_complete(struct mcam_camera *cam, int frame)
+{
+ /*
+ * Basic frame housekeeping.
+ */
+ set_bit(frame, &cam->flags);
+ clear_bit(CF_DMA_ACTIVE, &cam->flags);
+ cam->next_buf = frame;
+ cam->buf_seq[frame] = ++(cam->sequence);
+ cam->frame_state.frames++;
+ /*
+ * "This should never happen"
+ */
+ if (cam->state != S_STREAMING)
+ return;
+ /*
+ * Process the frame and set up the next one.
+ */
+ cam->frame_complete(cam, frame);
+}
+
+
+/*
+ * The interrupt handler; this needs to be called from the
+ * platform irq handler with the lock held.
+ */
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
+{
+ unsigned int frame, handled = 0;
+
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
+ /*
+ * Handle any frame completions. There really should
+ * not be more than one of these, or we have fallen
+ * far behind.
+ *
+ * When running in S/G mode, the frame number lacks any
+ * real meaning - there's only one descriptor array - but
+ * the controller still picks a different one to signal
+ * each time.
+ */
+ for (frame = 0; frame < cam->nbufs; frame++)
+ if (irqs & (IRQ_EOF0 << frame) &&
+ test_bit(CF_FRAME_SOF0 + frame, &cam->flags)) {
+ mcam_frame_complete(cam, frame);
+ handled = 1;
+ clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+ if (cam->buffer_mode == B_DMA_sg)
+ break;
+ }
+ /*
+ * If a frame starts, note that we have DMA active. This
+ * code assumes that we won't get multiple frame interrupts
+ * at once; may want to rethink that.
+ */
+ for (frame = 0; frame < cam->nbufs; frame++) {
+ if (irqs & (IRQ_SOF0 << frame)) {
+ set_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+			handled = 1;
+ }
+ }
+
+	if (handled) {
+ set_bit(CF_DMA_ACTIVE, &cam->flags);
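+		/*
+		 * In S/G mode the controller is "stopped" at start of
+		 * frame so the descriptor chain can be changed safely;
+		 * mcam_dma_sg_done() restarts it once a new buffer is
+		 * in place.
+		 */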
+ if (cam->buffer_mode == B_DMA_sg)
+ mcam_ctlr_stop(cam);
+ }
+ return handled;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Registration and such.
+ */
+static struct ov7670_config sensor_cfg = {
+ /*
+ * Exclude QCIF mode, because it only captures a tiny portion
+ * of the sensor FOV
+ */
+ .min_width = 320,
+ .min_height = 240,
+};
+
+
+int mccic_register(struct mcam_camera *cam)
+{
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+ };
+ int ret;
+
+ /*
+ * Validate the requested buffer mode.
+ */
+ if (buffer_mode >= 0)
+ cam->buffer_mode = buffer_mode;
+ if (cam->buffer_mode == B_DMA_sg &&
+ cam->chip_id == MCAM_CAFE) {
+ printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
+ "attempting vmalloc mode instead\n");
+ cam->buffer_mode = B_vmalloc;
+ }
+ if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
+ printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
+ cam->buffer_mode);
+ return -EINVAL;
+ }
+ /*
+ * Register with V4L
+ */
+ ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&cam->s_mutex);
+ cam->state = S_NOTREADY;
+ mcam_set_config_needed(cam, 1);
+ cam->pix_format = mcam_def_pix_format;
+ cam->mbus_code = mcam_def_mbus_code;
+ INIT_LIST_HEAD(&cam->buffers);
+ mcam_ctlr_init(cam);
+
+ /*
+ * Try to find the sensor.
+ */
+ sensor_cfg.clock_speed = cam->clock_speed;
+ sensor_cfg.use_smbus = cam->use_smbus;
+ cam->sensor_addr = ov7670_info.addr;
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
+ cam->i2c_adapter, &ov7670_info, NULL);
+ if (cam->sensor == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+
+ ret = mcam_cam_init(cam);
+ if (ret)
+ goto out_unregister;
+ /*
+ * Get the v4l2 setup done.
+ */
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (ret)
+ goto out_unregister;
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
+
+ mutex_lock(&cam->s_mutex);
+ cam->vdev = mcam_v4l_template;
+ cam->vdev.debug = 0;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ video_set_drvdata(&cam->vdev, cam);
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto out;
+
+ /*
+ * If so requested, try to get our DMA buffers now.
+ */
+ if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
+ if (mcam_alloc_dma_bufs(cam, 1))
+			cam_warn(cam, "Unable to alloc DMA buffers at load;"
+					" will try again later\n");
+ }
+
+out:
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+out_unregister:
+ v4l2_device_unregister(&cam->v4l2_dev);
+ return ret;
+}
+
+
+void mccic_shutdown(struct mcam_camera *cam)
+{
+ /*
+ * If we have no users (and we really, really should have no
+ * users) the device will already be powered down. Trying to
+ * take it down again will wedge the machine, which is frowned
+ * upon.
+ */
+ if (cam->users > 0) {
+ cam_warn(cam, "Removing a device with users!\n");
+ mcam_ctlr_power_down(cam);
+ }
+ vb2_queue_release(&cam->vb_queue);
+ if (cam->buffer_mode == B_vmalloc)
+ mcam_free_dma_bufs(cam);
+ video_unregister_device(&cam->vdev);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ v4l2_device_unregister(&cam->v4l2_dev);
+}
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+void mccic_suspend(struct mcam_camera *cam)
+{
+ mutex_lock(&cam->s_mutex);
+ if (cam->users > 0) {
+ enum mcam_state cstate = cam->state;
+
+ mcam_ctlr_stop_dma(cam);
+ mcam_ctlr_power_down(cam);
+ cam->state = cstate;
+ }
+ mutex_unlock(&cam->s_mutex);
+}
+
+int mccic_resume(struct mcam_camera *cam)
+{
+ int ret = 0;
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->users > 0) {
+ ret = mcam_ctlr_power_up(cam);
+ if (ret) {
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+ }
+ __mcam_cam_reset(cam);
+ } else {
+ mcam_ctlr_power_down(cam);
+ }
+ mutex_unlock(&cam->s_mutex);
+
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ if (cam->state == S_STREAMING) {
+ /*
+ * If there was a buffer in the DMA engine at suspend
+ * time, put it back on the queue or we'll forget about it.
+ */
+ if (cam->buffer_mode == B_DMA_sg && cam->vb_bufs[0])
+ list_add(&cam->vb_bufs[0]->queue, &cam->buffers);
+ ret = mcam_read_setup(cam);
+ }
+ return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
new file mode 100644
index 00000000000..e0e628cb98f
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -0,0 +1,382 @@
+/*
+ * Marvell camera core structures.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#ifndef _MCAM_CORE_H
+#define _MCAM_CORE_H
+
+#include <linux/list.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+
+/*
+ * Create our own symbols for the supported buffer modes, but, for now,
+ * base them entirely on which videobuf2 options have been selected.
+ */
+#if IS_ENABLED(CONFIG_VIDEOBUF2_VMALLOC)
+#define MCAM_MODE_VMALLOC 1
+#endif
+
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_CONTIG)
+#define MCAM_MODE_DMA_CONTIG 1
+#endif
+
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_SG)
+#define MCAM_MODE_DMA_SG 1
+#endif
+
+#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
+ !defined(MCAM_MODE_DMA_SG)
+#error One of the videobuf buffer modes must be selected in the config
+#endif
+
+
+enum mcam_state {
+ S_NOTREADY, /* Not yet initialized */
+ S_IDLE, /* Just hanging around */
+ S_FLAKED, /* Some sort of problem */
+ S_STREAMING, /* Streaming data */
+	S_BUFWAIT	/* Streaming requested but no buffers yet */
+};
+#define MAX_DMA_BUFS 3
+
+/*
+ * Different platforms work best with different buffer modes, so we
+ * let the platform pick.
+ */
+enum mcam_buffer_mode {
+ B_vmalloc = 0,
+ B_DMA_contig = 1,
+ B_DMA_sg = 2
+};
+
+enum mcam_chip_id {
+ MCAM_CAFE,
+ MCAM_ARMADA610,
+};
+
+/*
+ * Is a given buffer mode supported by the current kernel configuration?
+ */
+static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
+{
+ switch (mode) {
+#ifdef MCAM_MODE_VMALLOC
+ case B_vmalloc:
+#endif
+#ifdef MCAM_MODE_DMA_CONTIG
+ case B_DMA_contig:
+#endif
+#ifdef MCAM_MODE_DMA_SG
+ case B_DMA_sg:
+#endif
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Basic frame statistics
+ */
+struct mcam_frame_state {
+ unsigned int frames;
+ unsigned int singles;
+ unsigned int delivered;
+};
+
+#define NR_MCAM_CLK 3
+
+/*
+ * A description of one of our devices.
+ * Locking: controlled by s_mutex. Certain fields, however, require
+ * the dev_lock spinlock; they are marked as such by comments.
+ * dev_lock is also required for access to device registers.
+ */
+struct mcam_camera {
+ /*
+ * These fields should be set by the platform code prior to
+ * calling mcam_register().
+ */
+ struct i2c_adapter *i2c_adapter;
+ unsigned char __iomem *regs;
+ unsigned regs_size; /* size in bytes of the register space */
+ spinlock_t dev_lock;
+ struct device *dev; /* For messages, dma alloc */
+ enum mcam_chip_id chip_id;
+ short int clock_speed; /* Sensor clock speed, default 30 */
+	short int use_smbus; /* SMBUS or straight I2C? */
+ enum mcam_buffer_mode buffer_mode;
+
+ int mclk_min; /* The minimal value of mclk */
+ int mclk_src; /* which clock source the mclk derives from */
+ int mclk_div; /* Clock Divider Value for MCLK */
+
+ int ccic_id;
+ enum v4l2_mbus_type bus_type;
+ /* MIPI support */
+	/*
+	 * The DPHY config values, allocated in the board file:
+	 *	dphy[0]: DPHY3
+	 *	dphy[1]: DPHY5
+	 *	dphy[2]: DPHY6
+	 */
+ int *dphy;
+	bool mipi_enabled; /* Whether MIPI has already been enabled */
+	int lane; /* Number of MIPI lanes in use */
+
+ /* clock tree support */
+ struct clk *clk[NR_MCAM_CLK];
+
+ /*
+ * Callbacks from the core to the platform code.
+ */
+ int (*plat_power_up) (struct mcam_camera *cam);
+ void (*plat_power_down) (struct mcam_camera *cam);
+ void (*calc_dphy) (struct mcam_camera *cam);
+ void (*ctlr_reset) (struct mcam_camera *cam);
+
+ /*
+ * Everything below here is private to the mcam core and
+ * should not be touched by the platform code.
+ */
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ enum mcam_state state;
+ unsigned long flags; /* Buffer status, mainly (dev_lock) */
+ int users; /* How many open FDs */
+
+ struct mcam_frame_state frame_state; /* Frame state counter */
+ /*
+ * Subsystem structures.
+ */
+ struct video_device vdev;
+ struct v4l2_subdev *sensor;
+ unsigned short sensor_addr;
+
+ /* Videobuf2 stuff */
+ struct vb2_queue vb_queue;
+ struct list_head buffers; /* Available frames */
+
+ unsigned int nbufs; /* How many are alloc'd */
+ int next_buf; /* Next to consume (dev_lock) */
+
+ /* DMA buffers - vmalloc mode */
+#ifdef MCAM_MODE_VMALLOC
+ unsigned int dma_buf_size; /* allocated size */
+ void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
+ dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
+ struct tasklet_struct s_tasklet;
+#endif
+ unsigned int sequence; /* Frame sequence number */
+ unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual bufs */
+
+ /* DMA buffers - DMA modes */
+ struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
+ struct vb2_alloc_ctx *vb_alloc_ctx;
+
+ /* Mode-specific ops, set at open time */
+ void (*dma_setup)(struct mcam_camera *cam);
+ void (*frame_complete)(struct mcam_camera *cam, int frame);
+
+ /* Current operating parameters */
+ struct v4l2_pix_format pix_format;
+ enum v4l2_mbus_pixelcode mbus_code;
+
+ /* Locks */
+ struct mutex s_mutex; /* Access to this structure */
+};
+
+
+/*
+ * Register I/O functions. These are here because the platform code
+ * may legitimately need to mess with the register space.
+ */
+/*
+ * Device register I/O
+ */
+static inline void mcam_reg_write(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val)
+{
+ iowrite32(val, cam->regs + reg);
+}
+
+static inline unsigned int mcam_reg_read(struct mcam_camera *cam,
+ unsigned int reg)
+{
+ return ioread32(cam->regs + reg);
+}
+
+
+static inline void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val, unsigned int mask)
+{
+ unsigned int v = mcam_reg_read(cam, reg);
+
+ v = (v & ~mask) | (val & mask);
+ mcam_reg_write(cam, reg, v);
+}
+
+static inline void mcam_reg_clear_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, 0, val);
+}
+
+static inline void mcam_reg_set_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, val, val);
+}
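+
+/*
+ * For example, the core uses the masked form to update just the clock
+ * divider field while leaving the "reserved" upper bits of CLKCTRL alone:
+ *
+ *	mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
+ */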
+
+/*
+ * Functions for use by platform code.
+ */
+int mccic_register(struct mcam_camera *cam);
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs);
+void mccic_shutdown(struct mcam_camera *cam);
+#ifdef CONFIG_PM
+void mccic_suspend(struct mcam_camera *cam);
+int mccic_resume(struct mcam_camera *cam);
+#endif
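+
+/*
+ * A minimal sketch (names hypothetical) of how platform code is expected
+ * to feed interrupts to the core: read the raw status and pass it to
+ * mccic_irq() with dev_lock held.
+ *
+ *	static irqreturn_t my_ccic_irq(int irq, void *data)
+ *	{
+ *		struct mcam_camera *mcam = data;
+ *		unsigned int irqs;
+ *		int handled;
+ *
+ *		spin_lock(&mcam->dev_lock);
+ *		irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ *		handled = mccic_irq(mcam, irqs);
+ *		spin_unlock(&mcam->dev_lock);
+ *		return IRQ_RETVAL(handled);
+ *	}
+ */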
+
+/*
+ * Register definitions for the m88alp01 camera interface. Offsets in bytes
+ * as given in the spec.
+ */
+#define REG_Y0BAR 0x00
+#define REG_Y1BAR 0x04
+#define REG_Y2BAR 0x08
+#define REG_U0BAR 0x0c
+#define REG_U1BAR 0x10
+#define REG_U2BAR 0x14
+#define REG_V0BAR 0x18
+#define REG_V1BAR 0x1C
+#define REG_V2BAR 0x20
+
+/*
+ * register definitions for MIPI support
+ */
+#define REG_CSI2_CTRL0 0x100
+#define CSI2_C0_MIPI_EN (0x1 << 0)
+#define CSI2_C0_ACT_LANE(n) ((n-1) << 1)
+#define REG_CSI2_DPHY3 0x12c
+#define REG_CSI2_DPHY5 0x134
+#define REG_CSI2_DPHY6 0x138
+
+/* ... */
+
+#define REG_IMGPITCH 0x24 /* Image pitch register */
+#define IMGP_YP_SHFT 2 /* Y pitch params */
+#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
+#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
+#define IMGP_UVP_MASK 0x3ffc0000
+#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
+#define IRQ_EOF0 0x00000001 /* End of frame 0 */
+#define IRQ_EOF1 0x00000002 /* End of frame 1 */
+#define IRQ_EOF2 0x00000004 /* End of frame 2 */
+#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
+#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
+#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
+#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
+#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
+#define IRQ_TWSIR 0x00020000 /* TWSI read */
+#define IRQ_TWSIE 0x00040000 /* TWSI error */
+#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
+#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
+#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
+#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
+#define REG_IRQSTAT 0x30 /* IRQ status / clear */
+
+#define REG_IMGSIZE 0x34 /* Image size */
+#define IMGSZ_V_MASK 0x1fff0000
+#define IMGSZ_V_SHIFT 16
+#define IMGSZ_H_MASK 0x00003fff
+#define REG_IMGOFFSET	0x38	/* Image offset */
+
+#define REG_CTRL0 0x3c /* Control 0 */
+#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
+
+/* Mask for all the format bits */
+#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
+
+/* RGB ordering */
+#define C0_RGB4_RGBX 0x00000000
+#define C0_RGB4_XRGB 0x00000004
+#define C0_RGB4_BGRX 0x00000008
+#define C0_RGB4_XBGR 0x0000000c
+#define C0_RGB5_RGGB 0x00000000
+#define C0_RGB5_GRBG 0x00000004
+#define C0_RGB5_GBRG 0x00000008
+#define C0_RGB5_BGGR 0x0000000c
+
+/* Spec has two fields for DIN and DOUT, but they must match, so
+ combine them here. */
+#define C0_DF_YUV 0x00000000 /* Data is YUV */
+#define C0_DF_RGB 0x000000a0 /* ... RGB */
+#define C0_DF_BAYER 0x00000140 /* ... Bayer */
+/* 8-8-8 must be missing from the below - ask */
+#define C0_RGBF_565 0x00000000
+#define C0_RGBF_444 0x00000800
+#define C0_RGB_BGR 0x00001000 /* Blue comes first */
+#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
+#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
+#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
+/* Think that 420 packed must be 111 - ask */
+#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
+#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
+#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
+#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
+#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
+#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
+#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
+#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
+/* Bayer bits 18,19 if needed */
+#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
+#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
+#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
+#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
+#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
+#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
+/* SIFMODE */
+#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
+#define C0_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
+#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
+
+/* Bits below C1_444ALPHA are not present in Cafe */
+#define REG_CTRL1 0x40 /* Control 1 */
+#define C1_CLKGATE 0x00000001 /* Sensor clock gate */
+#define C1_DESC_ENA 0x00000100 /* DMA descriptor enable */
+#define C1_DESC_3WORD 0x00000200 /* Three-word descriptors used */
+#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
+#define C1_ALPHA_SHFT 20
+#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
+#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
+#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
+#define C1_DMAB_MASK 0x06000000
+#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
+#define C1_PWRDWN 0x10000000 /* Power down */
+
+#define REG_CLKCTRL 0x88 /* Clock control */
+#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
+
+/* This appears to be a Cafe-only register */
+#define REG_UBAR 0xc4 /* Upper base address register */
+
+/* Armada 610 DMA descriptor registers */
+#define REG_DMA_DESC_Y 0x200
+#define REG_DMA_DESC_U 0x204
+#define REG_DMA_DESC_V 0x208
+#define REG_DESC_LEN_Y 0x20c /* Lengths are in bytes */
+#define REG_DESC_LEN_U 0x210
+#define REG_DESC_LEN_V 0x214
+
+/*
+ * Useful stuff that probably belongs somewhere global.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#endif /* _MCAM_CORE_H */
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
new file mode 100644
index 00000000000..054507f1673
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -0,0 +1,537 @@
+/*
+ * Support for the camera device found on Marvell MMP processors; known
+ * to work with the Armada 610 as used in the OLPC 1.75 system.
+ *
+ * Copyright 2011 Jonathan Corbet <corbet@lwn.net>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/mmp-camera.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+
+#include "mcam-core.h"
+
+MODULE_ALIAS("platform:mmp-camera");
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_LICENSE("GPL");
+
+static char *mcam_clks[] = {"CCICAXICLK", "CCICFUNCLK", "CCICPHYCLK"};
+
+struct mmp_camera {
+ void *power_regs;
+ struct platform_device *pdev;
+ struct mcam_camera mcam;
+ struct list_head devlist;
+ struct clk *mipi_clk;
+ int irq;
+};
+
+static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
+{
+ return container_of(mcam, struct mmp_camera, mcam);
+}
+
+/*
+ * A silly little infrastructure so we can keep track of our devices.
+ * Chances are that we will never have more than one of them, but
+ * the Armada 610 *does* have two controllers...
+ */
+
+static LIST_HEAD(mmpcam_devices);
+static struct mutex mmpcam_devices_lock;
+
+static void mmpcam_add_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_add(&cam->devlist, &mmpcam_devices);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+static void mmpcam_remove_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_del(&cam->devlist);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+/*
+ * Platform dev remove passes us a platform_device, and there's
+ * no handy unused drvdata to stash a backpointer in. So just
+ * dig it out of our list.
+ */
+static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+
+ mutex_lock(&mmpcam_devices_lock);
+ list_for_each_entry(cam, &mmpcam_devices, devlist) {
+ if (cam->pdev == pdev) {
+ mutex_unlock(&mmpcam_devices_lock);
+ return cam;
+ }
+ }
+ mutex_unlock(&mmpcam_devices_lock);
+ return NULL;
+}
+
+/*
+ * Power-related registers; this almost certainly belongs
+ * somewhere else.
+ *
+ * ARMADA 610 register manual, sec 7.2.1, p1842.
+ */
+#define CPU_SUBSYS_PMU_BASE 0xd4282800
+#define REG_CCIC_DCGCR 0x28 /* CCIC dyn clock gate ctrl reg */
+#define REG_CCIC_CRCR 0x50 /* CCIC clk reset ctrl reg */
+#define REG_CCIC2_CRCR 0xf4 /* CCIC2 clk reset ctrl reg */
+
+static void mcam_clk_enable(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_prepare_enable(mcam->clk[i]);
+ }
+}
+
+static void mcam_clk_disable(struct mcam_camera *mcam)
+{
+ int i;
+
+ for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_disable_unprepare(mcam->clk[i]);
+ }
+}
+
+/*
+ * Power control.
+ */
+static void mmpcam_power_up_ctlr(struct mmp_camera *cam)
+{
+ iowrite32(0x3f, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0x3805b, cam->power_regs + REG_CCIC_CRCR);
+ mdelay(1);
+}
+
+static int mmpcam_power_up(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+
+/*
+ * Turn on power and clocks to the controller.
+ */
+ mmpcam_power_up_ctlr(cam);
+/*
+ * Provide power to the sensor.
+ */
+ mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 1);
+ mdelay(5);
+ mcam_reg_clear_bit(mcam, REG_CTRL1, 0x10000000);
+ gpio_set_value(pdata->sensor_reset_gpio, 0); /* reset is active low */
+ mdelay(5);
+ gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
+ mdelay(5);
+
+ mcam_clk_enable(mcam);
+
+ return 0;
+}
+
+static void mmpcam_power_down(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+/*
+ * Turn off clocks and set reset lines
+ */
+ iowrite32(0, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0, cam->power_regs + REG_CCIC_CRCR);
+/*
+ * Shut down the sensor.
+ */
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 0);
+ gpio_set_value(pdata->sensor_reset_gpio, 0);
+
+ mcam_clk_disable(mcam);
+}
+
+void mcam_ctlr_reset(struct mcam_camera *mcam)
+{
+ unsigned long val;
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+
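+	/*
+	 * The reset is performed by clearing and then re-setting bit 1
+	 * of the CCIC clock/reset control register of the controller in
+	 * use (REG_CCIC_CRCR for CCIC1, REG_CCIC2_CRCR for CCIC2).
+	 */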
+ if (mcam->ccic_id) {
+ /*
+ * Using CCIC2
+ */
+ val = ioread32(cam->power_regs + REG_CCIC2_CRCR);
+ iowrite32(val & ~0x2, cam->power_regs + REG_CCIC2_CRCR);
+ iowrite32(val | 0x2, cam->power_regs + REG_CCIC2_CRCR);
+ } else {
+ /*
+ * Using CCIC1
+ */
+ val = ioread32(cam->power_regs + REG_CCIC_CRCR);
+ iowrite32(val & ~0x2, cam->power_regs + REG_CCIC_CRCR);
+ iowrite32(val | 0x2, cam->power_regs + REG_CCIC_CRCR);
+ }
+}
+
+/*
+ * Calculate the DPHY register values.
+ * Three DPHY registers are used:
+ * dphy[0] - CSI2_DPHY3
+ * dphy[1] - CSI2_DPHY5
+ * dphy[2] - CSI2_DPHY6
+ * CSI2_DPHY3 and CSI2_DPHY6 can either be set to a default value
+ * or be calculated dynamically.
+ */
+void mmpcam_calc_dphy(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata = cam->pdev->dev.platform_data;
+ struct device *dev = &cam->pdev->dev;
+ unsigned long tx_clk_esc;
+
+ /*
+ * If CSI2_DPHY3 is calculated dynamically,
+ * pdata->lane_clk should be already set
+ * either in the board driver statically
+ * or in the sensor driver dynamically.
+ */
+	/*
+	 * dphy[0] - CSI2_DPHY3:
+	 * bit 0 ~ bit 7: HS Term Enable.
+	 *		  Defines the time that the DPHY
+	 *		  waits before enabling the data
+	 *		  lane termination after detecting
+	 *		  that the sensor has driven the data
+	 *		  lanes to the LP00 bridge state.
+	 *		  The value is calculated by:
+	 *		  (Max T(D_TERM_EN)/Period(DDR)) - 1
+	 * bit 8 ~ bit 15: HS_SETTLE
+	 *		  Time interval during which the HS
+	 *		  receiver shall ignore any Data Lane
+	 *		  HS transitions.
+	 *		  The value has been calibrated on
+	 *		  different boards. It seems to work well.
+	 *
+	 * For more detail, please refer to the MIPI Alliance
+	 * D-PHY specification for an explanation of HS-SETTLE
+	 * and D-TERM-EN.
+	 */
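+	/*
+	 * Worked example (illustrative only, assuming pdata->lane_clk
+	 * has been set to 400 by the board or sensor driver): the
+	 * DPHY3_ALGO_PXA910 branch below gives
+	 *   HS_SETTLE      = 1 + 400 * 80 / 1000 = 33 (0x21)
+	 *   HS Term Enable = 1 + 400 * 35 / 1000 = 15 (0x0f)
+	 * so dphy[0] = (0x21 << 8) | 0x0f = 0x210f.
+	 */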
+ switch (pdata->dphy3_algo) {
+ case DPHY3_ALGO_PXA910:
+ /*
+ * Calculate CSI2_DPHY3 algo for PXA910
+ */
+ pdata->dphy[0] =
+ (((1 + (pdata->lane_clk * 80) / 1000) & 0xff) << 8)
+ | (1 + pdata->lane_clk * 35 / 1000);
+ break;
+ case DPHY3_ALGO_PXA2128:
+ /*
+ * Calculate CSI2_DPHY3 algo for PXA2128
+ */
+ pdata->dphy[0] =
+ (((2 + (pdata->lane_clk * 110) / 1000) & 0xff) << 8)
+ | (1 + pdata->lane_clk * 35 / 1000);
+ break;
+ default:
+ /*
+ * Use default CSI2_DPHY3 value for PXA688/PXA988
+ */
+ dev_dbg(dev, "camera: use the default CSI2_DPHY3 value\n");
+ }
+
+ /*
+ * mipi_clk will never be changed, it is a fixed value on MMP
+ */
+ if (IS_ERR(cam->mipi_clk))
+ return;
+
+ /* get the escape clk, this is hard coded */
+ clk_prepare_enable(cam->mipi_clk);
+ tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
+ clk_disable_unprepare(cam->mipi_clk);
+ /*
+ * dphy[2] - CSI2_DPHY6:
+ * bit 0 ~ bit 7: CK Term Enable
+ * Time for the Clock Lane receiver to enable the HS line
+ * termination. The value is calculated similarly with
+ * HS Term Enable
+ * bit 8 ~ bit 15: CK Settle
+ * Time interval during which the HS receiver shall ignore
+ * any Clock Lane HS transitions.
+ * The value is calibrated on the boards.
+ */
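+	/*
+	 * Worked example (illustrative only, assuming the MIPI clock
+	 * runs at 624 MHz): tx_clk_esc = 624 / 12 = 52, so
+	 *   CK Settle      = 534 * 52 / 2000 - 1 = 12 (0x0c)
+	 *   CK Term Enable =  38 * 52 / 1000 - 1 =  0
+	 * giving dphy[2] = (0x0c << 8) | 0x00 = 0x0c00.
+	 */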
+ pdata->dphy[2] =
+ ((((534 * tx_clk_esc) / 2000 - 1) & 0xff) << 8)
+ | (((38 * tx_clk_esc) / 1000 - 1) & 0xff);
+
+ dev_dbg(dev, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+ pdata->dphy[0], pdata->dphy[1], pdata->dphy[2]);
+}
+
+static irqreturn_t mmpcam_irq(int irq, void *data)
+{
+ struct mcam_camera *mcam = data;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = mccic_irq(mcam, irqs);
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+static void mcam_init_clk(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (mcam_clks[i] != NULL) {
+			/* Some clocks are not necessary on some boards;
+			 * we still try to run even if getting a clock fails.
+			 */
+ mcam->clk[i] = devm_clk_get(mcam->dev, mcam_clks[i]);
+ if (IS_ERR(mcam->clk[i]))
+ dev_warn(mcam->dev, "Could not get clk: %s\n",
+ mcam_clks[i]);
+ }
+ }
+}
+
+static int mmpcam_probe(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+ struct mcam_camera *mcam;
+ struct resource *res;
+ struct mmp_camera_platform_data *pdata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
+ if (cam == NULL)
+ return -ENOMEM;
+ cam->pdev = pdev;
+ INIT_LIST_HEAD(&cam->devlist);
+
+ mcam = &cam->mcam;
+ mcam->plat_power_up = mmpcam_power_up;
+ mcam->plat_power_down = mmpcam_power_down;
+ mcam->ctlr_reset = mcam_ctlr_reset;
+ mcam->calc_dphy = mmpcam_calc_dphy;
+ mcam->dev = &pdev->dev;
+ mcam->use_smbus = 0;
+ mcam->ccic_id = pdev->id;
+ mcam->mclk_min = pdata->mclk_min;
+ mcam->mclk_src = pdata->mclk_src;
+ mcam->mclk_div = pdata->mclk_div;
+ mcam->bus_type = pdata->bus_type;
+ mcam->dphy = pdata->dphy;
+ if (mcam->bus_type == V4L2_MBUS_CSI2) {
+ cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
+ if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
+ return PTR_ERR(cam->mipi_clk);
+ }
+ mcam->mipi_enabled = false;
+ mcam->lane = pdata->lane;
+ mcam->chip_id = MCAM_ARMADA610;
+ mcam->buffer_mode = B_DMA_sg;
+ spin_lock_init(&mcam->dev_lock);
+ /*
+ * Get our I/O memory.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mcam->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcam->regs))
+ return PTR_ERR(mcam->regs);
+ mcam->regs_size = resource_size(res);
+ /*
+ * Power/clock memory is elsewhere; get it too. Perhaps this
+ * should really be managed outside of this driver?
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cam->power_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(cam->power_regs))
+ return PTR_ERR(cam->power_regs);
+ /*
+ * Find the i2c adapter. This assumes, of course, that the
+ * i2c bus is already up and functioning.
+ */
+ mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
+ if (mcam->i2c_adapter == NULL) {
+ dev_err(&pdev->dev, "No i2c adapter\n");
+ return -ENODEV;
+ }
+ /*
+ * Sensor GPIO pins.
+ */
+ ret = devm_gpio_request(&pdev->dev, pdata->sensor_power_gpio,
+ "cam-power");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor power gpio %d",
+ pdata->sensor_power_gpio);
+ return ret;
+ }
+ gpio_direction_output(pdata->sensor_power_gpio, 0);
+ ret = devm_gpio_request(&pdev->dev, pdata->sensor_reset_gpio,
+ "cam-reset");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
+ pdata->sensor_reset_gpio);
+ return ret;
+ }
+ gpio_direction_output(pdata->sensor_reset_gpio, 0);
+
+ mcam_init_clk(mcam);
+
+ /*
+ * Power the device up and hand it off to the core.
+ */
+ ret = mmpcam_power_up(mcam);
+ if (ret)
+ return ret;
+ ret = mccic_register(mcam);
+ if (ret)
+ goto out_power_down;
+ /*
+ * Finally, set up our IRQ now that the core is ready to
+ * deal with it.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+ cam->irq = res->start;
+ ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
+ "mmp-camera", mcam);
+ if (ret == 0) {
+ mmpcam_add_device(cam);
+ return 0;
+ }
+
+out_unregister:
+ mccic_shutdown(mcam);
+out_power_down:
+ mmpcam_power_down(mcam);
+ return ret;
+}
+
+
+static int mmpcam_remove(struct mmp_camera *cam)
+{
+ struct mcam_camera *mcam = &cam->mcam;
+
+ mmpcam_remove_device(cam);
+ mccic_shutdown(mcam);
+ mmpcam_power_down(mcam);
+ return 0;
+}
+
+static int mmpcam_platform_remove(struct platform_device *pdev)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ if (cam == NULL)
+ return -ENODEV;
+ return mmpcam_remove(cam);
+}
+
+/*
+ * Suspend/resume support.
+ */
+#ifdef CONFIG_PM
+
+static int mmpcam_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ if (state.event != PM_EVENT_SUSPEND)
+ return 0;
+ mccic_suspend(&cam->mcam);
+ return 0;
+}
+
+static int mmpcam_resume(struct platform_device *pdev)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ /*
+ * Power up unconditionally just in case the core tries to
+ * touch a register even if nothing was active before; trust
+ * me, it's better this way.
+ */
+ mmpcam_power_up_ctlr(cam);
+ return mccic_resume(&cam->mcam);
+}
+
+#endif
+
+
+static struct platform_driver mmpcam_driver = {
+ .probe = mmpcam_probe,
+ .remove = mmpcam_platform_remove,
+#ifdef CONFIG_PM
+ .suspend = mmpcam_suspend,
+ .resume = mmpcam_resume,
+#endif
+ .driver = {
+ .name = "mmp-camera",
+ .owner = THIS_MODULE
+ }
+};
+
+
+static int __init mmpcam_init_module(void)
+{
+ mutex_init(&mmpcam_devices_lock);
+ return platform_driver_register(&mmpcam_driver);
+}
+
+static void __exit mmpcam_exit_module(void)
+{
+ platform_driver_unregister(&mmpcam_driver);
+ /*
+ * platform_driver_unregister() should have emptied the list
+ */
+ if (!list_empty(&mmpcam_devices))
+ printk(KERN_ERR "mmp_camera leaving devices behind\n");
+}
+
+module_init(mmpcam_init_module);
+module_exit(mmpcam_exit_module);
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
new file mode 100644
index 00000000000..0714070ed7f
--- /dev/null
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -0,0 +1,1090 @@
+/*
+ * A virtual v4l2-mem2mem example device.
+ *
+ * This is a virtual device driver for testing mem-to-mem videobuf framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination, processes the data and issues an "irq" (simulated by a timer).
+ * The device is capable of multi-instance, multi-buffer-per-transaction
+ * operation (via the mem2mem framework).
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
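+/*
+ * Illustrative userspace sequence (a sketch, not mandated by this driver):
+ * open the registered /dev/videoN node, then on that single fd
+ *   - VIDIOC_S_FMT on the OUTPUT queue (source frames) and on the
+ *     CAPTURE queue (processed frames)
+ *   - VIDIOC_REQBUFS and VIDIOC_QBUF on both queues
+ *   - VIDIOC_STREAMON on both queues
+ *   - poll() and VIDIOC_DQBUF to collect the results
+ * Each transaction of "translen" buffer pairs is run through
+ * device_run() and the timer-driven device_isr() below, and is
+ * completed via v4l2_m2m_job_finish().
+ */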
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
+
+MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
+MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.1");
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 640
+#define MAX_H 480
+#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-testdev"
+
+/* Per queue */
+#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
+
+/* Default transaction time in msec */
+#define MEM2MEM_DEF_TRANSTIME 40
+#define MEM2MEM_COLOR_STEP (0xff >> 4)
+#define MEM2MEM_NUM_TILES 8
+
+/* Flags that indicate processing mode */
+#define MEM2MEM_HFLIP (1 << 0)
+#define MEM2MEM_VFLIP (1 << 1)
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+static void m2mtest_dev_release(struct device *dev)
+{}
+
+static struct platform_device m2mtest_pdev = {
+ .name = MEM2MEM_NAME,
+ .dev.release = m2mtest_dev_release,
+};
+
+struct m2mtest_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct m2mtest_fmt formats[] = {
+ {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .depth = 16,
+ /* Both capture and output format */
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ /* Output-only format */
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Per-queue, driver-specific private data */
+struct m2mtest_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ unsigned int sequence;
+ struct m2mtest_fmt *fmt;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+#define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000)
+#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001)
+
+static struct m2mtest_fmt *find_format(struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct m2mtest_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+
+ atomic_t num_inst;
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ struct timer_list timer;
+
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct m2mtest_ctx {
+ struct v4l2_fh fh;
+ struct m2mtest_dev *dev;
+
+ struct v4l2_ctrl_handler hdl;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+ /* Transaction time (i.e. simulated processing time) in milliseconds */
+ u32 transtime;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ /* Processing mode */
+ int mode;
+
+ enum v4l2_colorspace colorspace;
+
+ /* Source and destination queue data */
+ struct m2mtest_q_data q_data[2];
+};
+
+static inline struct m2mtest_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct m2mtest_ctx, fh);
+}
+
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+
+static int device_process(struct m2mtest_ctx *ctx,
+ struct vb2_buffer *in_vb,
+ struct vb2_buffer *out_vb)
+{
+ struct m2mtest_dev *dev = ctx->dev;
+ struct m2mtest_q_data *q_data;
+ u8 *p_in, *p_out;
+ int x, y, t, w;
+ int tile_w, bytes_left;
+ int width, height, bytesperline;
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ width = q_data->width;
+ height = q_data->height;
+ bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+
+ p_in = vb2_plane_vaddr(in_vb, 0);
+ p_out = vb2_plane_vaddr(out_vb, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) {
+ v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
+ return -EINVAL;
+ }
+
+ tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
+ / MEM2MEM_NUM_TILES;
+ bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
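+	/*
+	 * For the default 640x480 RGB565 queues (16 bpp) this gives
+	 * bytesperline = 1280, tile_w = 1280 / 8 = 160 and bytes_left = 0.
+	 * Note that q_data[V4L2_M2M_DST] indexes past the OUTPUT entry
+	 * into the CAPTURE entry of ctx->q_data[].
+	 */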
+ w = 0;
+
+ out_vb->v4l2_buf.sequence = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
+ in_vb->v4l2_buf.sequence = q_data->sequence++;
+ memcpy(&out_vb->v4l2_buf.timestamp,
+ &in_vb->v4l2_buf.timestamp,
+ sizeof(struct timeval));
+ if (in_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
+ memcpy(&out_vb->v4l2_buf.timecode, &in_vb->v4l2_buf.timecode,
+ sizeof(struct v4l2_timecode));
+ out_vb->v4l2_buf.field = in_vb->v4l2_buf.field;
+ out_vb->v4l2_buf.flags = in_vb->v4l2_buf.flags &
+ (V4L2_BUF_FLAG_TIMECODE |
+ V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+ switch (ctx->mode) {
+ case MEM2MEM_HFLIP | MEM2MEM_VFLIP:
+ p_out += bytesperline * height - bytes_left;
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out -= bytes_left;
+ }
+ break;
+
+ case MEM2MEM_HFLIP:
+ for (y = 0; y < height; ++y) {
+ p_out += MEM2MEM_NUM_TILES * tile_w;
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x01) {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytesperline;
+ }
+ break;
+
+ case MEM2MEM_VFLIP:
+ p_out += bytesperline * (height - 1);
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left - 2 * bytesperline;
+ }
+ break;
+
+ default:
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left;
+ }
+ }
+
+ return 0;
+}
+
+static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
+{
+ dprintk(dev, "Scheduling a simulated irq\n");
+ mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
+ dprintk(ctx->dev, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This simulates all the immediate preparations required before starting
+ * a device. This will be called by the framework when it decides to schedule
+ * a particular instance.
+ */
+static void device_run(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_dev *dev = ctx->dev;
+ struct vb2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ device_process(ctx, src_buf, dst_buf);
+
+ /* Run a timer, which simulates a hardware irq */
+ schedule_irq(dev, ctx->transtime);
+}
+
+static void device_isr(unsigned long priv)
+{
+ struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
+ struct m2mtest_ctx *curr_ctx;
+ struct vb2_buffer *src_vb, *dst_vb;
+ unsigned long flags;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
+
+ if (NULL == curr_ctx) {
+ pr_err("Instance released before the end of transaction\n");
+ return;
+ }
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ curr_ctx->num_processed++;
+
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+
+ if (curr_ctx->num_processed == curr_ctx->translen
+ || curr_ctx->aborting) {
+ dprintk(curr_ctx->dev, "Finishing transaction\n");
+ curr_ctx->num_processed = 0;
+ v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
+ } else {
+ device_run(curr_ctx);
+ }
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", MEM2MEM_NAME);
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct m2mtest_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+			/* Is this the index-th format of the requested type? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct m2mtest_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+ f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
+{
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ if (f->fmt.pix.height < MIN_H)
+ f->fmt.pix.height = MIN_H;
+ else if (f->fmt.pix.height > MAX_H)
+ f->fmt.pix.height = MAX_H;
+
+ if (f->fmt.pix.width < MIN_W)
+ f->fmt.pix.width = MIN_W;
+ else if (f->fmt.pix.width > MAX_W)
+ f->fmt.pix.width = MAX_W;
+
+ f->fmt.pix.width &= ~DIM_ALIGN_MASK;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
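+	/*
+	 * Example (illustrative): a 701x500 RGB565 request is clamped to
+	 * 640x480; the width is already a multiple of 8, so
+	 * bytesperline = 640 * 16 / 8 = 1280 and sizeimage = 614400.
+	 * A 100-pixel-wide request would be rounded down to 96 by
+	 * ~DIM_ALIGN_MASK.
+	 */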
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = file2ctx(file);
+
+ fmt = find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_format(f);
+ }
+ if (!(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = file2ctx(file);
+
+ fmt = find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_format(f);
+ }
+ if (!(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+ if (!f->fmt.pix.colorspace)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct m2mtest_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = find_format(f);
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->sizeimage = q_data->width * q_data->height
+ * q_data->fmt->depth >> 3;
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(file2ctx(file), f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(file2ctx(file), f);
+ if (!ret)
+ ctx->colorspace = f->fmt.pix.colorspace;
+ return ret;
+}
+
+static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct m2mtest_ctx *ctx =
+ container_of(ctrl->handler, struct m2mtest_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ ctx->mode |= MEM2MEM_HFLIP;
+ else
+ ctx->mode &= ~MEM2MEM_HFLIP;
+ break;
+
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ ctx->mode |= MEM2MEM_VFLIP;
+ else
+ ctx->mode &= ~MEM2MEM_VFLIP;
+ break;
+
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctx->transtime = ctrl->val;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctx->translen = ctrl->val;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops m2mtest_ctrl_ops = {
+ .s_ctrl = m2mtest_s_ctrl,
+};
+
+
+static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static int m2mtest_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq);
+ struct m2mtest_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
+
+ while (size * count > MEM2MEM_VID_MEM_LIMIT)
+		count--;
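+	/*
+	 * E.g. with the default 640x480 RGB565 queues size is 614400
+	 * bytes, so the 16 MiB MEM2MEM_VID_MEM_LIMIT caps *nbuffers
+	 * at 27 per queue.
+	 */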
+
+ *nplanes = 1;
+ *nbuffers = count;
+ sizes[0] = size;
+
+ /*
+ * videobuf2-vmalloc allocator is context-less so no need to set
+ * alloc_ctxs array.
+ */
+
+ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
+
+static int m2mtest_buf_prepare(struct vb2_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct m2mtest_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
+ vb->v4l2_buf.field = V4L2_FIELD_NONE;
+ if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+ return 0;
+}
+
+static void m2mtest_buf_queue(struct vb2_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+}
+
+static int m2mtest_start_streaming(struct vb2_queue *q, unsigned count)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+ struct m2mtest_q_data *q_data = get_q_data(ctx, q->type);
+
+ q_data->sequence = 0;
+ return 0;
+}
+
+static void m2mtest_stop_streaming(struct vb2_queue *q)
+{
+ struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vb == NULL)
+ return;
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ }
+}
+
+static struct vb2_ops m2mtest_qops = {
+ .queue_setup = m2mtest_queue_setup,
+ .buf_prepare = m2mtest_buf_prepare,
+ .buf_queue = m2mtest_buf_queue,
+ .start_streaming = m2mtest_start_streaming,
+ .stop_streaming = m2mtest_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct m2mtest_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &m2mtest_qops;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &m2mtest_qops;
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = {
+ .ops = &m2mtest_ctrl_ops,
+ .id = V4L2_CID_TRANS_TIME_MSEC,
+ .name = "Transaction Time (msec)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = MEM2MEM_DEF_TRANSTIME,
+ .min = 1,
+ .max = 10001,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = {
+ .ops = &m2mtest_ctrl_ops,
+ .id = V4L2_CID_TRANS_NUM_BUFS,
+ .name = "Buffers Per Transaction",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = 1,
+ .min = 1,
+ .max = MEM2MEM_DEF_NUM_BUFS,
+ .step = 1,
+};
+
+/*
+ * File operations
+ */
+static int m2mtest_open(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = NULL;
+ struct v4l2_ctrl_handler *hdl;
+ int rc = 0;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto open_unlock;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_time_msec, NULL);
+ v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_num_bufs, NULL);
+ if (hdl->error) {
+ rc = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ goto open_unlock;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+ ctx->q_data[V4L2_M2M_SRC].width = 640;
+ ctx->q_data[V4L2_M2M_SRC].height = 480;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage =
+ ctx->q_data[V4L2_M2M_SRC].width *
+ ctx->q_data[V4L2_M2M_SRC].height *
+ (ctx->q_data[V4L2_M2M_SRC].fmt->depth >> 3);
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ rc = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx);
+ goto open_unlock;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+ atomic_inc(&dev->num_inst);
+
+ dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
+
+open_unlock:
+ mutex_unlock(&dev->dev_mutex);
+ return rc;
+}
+
+static int m2mtest_release(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = file2ctx(file);
+
+ dprintk(dev, "Releasing instance %p\n", ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(&dev->dev_mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->dev_mutex);
+ kfree(ctx);
+
+ atomic_dec(&dev->num_inst);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations m2mtest_fops = {
+ .owner = THIS_MODULE,
+ .open = m2mtest_open,
+ .release = m2mtest_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static struct video_device m2mtest_videodev = {
+ .name = MEM2MEM_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &m2mtest_fops,
+ .ioctl_ops = &m2mtest_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static int m2mtest_probe(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->irqlock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ atomic_set(&dev->num_inst, 0);
+ mutex_init(&dev->dev_mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ *vfd = m2mtest_videodev;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ video_set_drvdata(vfd, dev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ setup_timer(&dev->timer, device_isr, (long)dev);
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto err_m2m;
+ }
+
+ return 0;
+
+err_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(dev->vfd);
+rel_vdev:
+ video_device_release(vfd);
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int m2mtest_remove(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+ v4l2_m2m_release(dev->m2m_dev);
+ del_timer_sync(&dev->timer);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver m2mtest_pdrv = {
+ .probe = m2mtest_probe,
+ .remove = m2mtest_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static void __exit m2mtest_exit(void)
+{
+ platform_driver_unregister(&m2mtest_pdrv);
+ platform_device_unregister(&m2mtest_pdev);
+}
+
+static int __init m2mtest_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&m2mtest_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&m2mtest_pdrv);
+ if (ret)
+ platform_device_unregister(&m2mtest_pdev);
+
+ return 0;
+}
+
+module_init(m2mtest_init);
+module_exit(m2mtest_exit);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
new file mode 100644
index 00000000000..fa8f7cabe36
--- /dev/null
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -0,0 +1,1016 @@
+/*
+ * Support eMMa-PrP through mem2mem framework.
+ *
+ * eMMa-PrP is a piece of HW that fetches buffers from one memory
+ * location, performs several operations on them (such as scaling or
+ * format conversion) and produces, as a result, a new processed
+ * buffer in another memory location.
+ *
+ * Based on mem2mem_testdev.c by Pawel Osciak.
+ *
+ * Copyright (c) 2011 Vista Silicon S.L.
+ * Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+#include <asm/sizes.h>
+
+#define EMMAPRP_MODULE_NAME "mem2mem-emmaprp"
+
+MODULE_DESCRIPTION("Mem-to-mem device which supports eMMa-PrP present in mx2 SoCs");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.1");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 2040
+#define MAX_H 2046
+
+#define S_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN_YUV420 3 /* multiple of 8 */
+#define W_ALIGN_OTHERS 2 /* multiple of 4 */
+#define H_ALIGN 1 /* multiple of 2 */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-emmaprp"
+
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT SZ_16M
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/* EMMA PrP */
+#define PRP_CNTL 0x00
+#define PRP_INTR_CNTL 0x04
+#define PRP_INTRSTATUS 0x08
+#define PRP_SOURCE_Y_PTR 0x0c
+#define PRP_SOURCE_CB_PTR 0x10
+#define PRP_SOURCE_CR_PTR 0x14
+#define PRP_DEST_RGB1_PTR 0x18
+#define PRP_DEST_RGB2_PTR 0x1c
+#define PRP_DEST_Y_PTR 0x20
+#define PRP_DEST_CB_PTR 0x24
+#define PRP_DEST_CR_PTR 0x28
+#define PRP_SRC_FRAME_SIZE 0x2c
+#define PRP_DEST_CH1_LINE_STRIDE 0x30
+#define PRP_SRC_PIXEL_FORMAT_CNTL 0x34
+#define PRP_CH1_PIXEL_FORMAT_CNTL 0x38
+#define PRP_CH1_OUT_IMAGE_SIZE 0x3c
+#define PRP_CH2_OUT_IMAGE_SIZE 0x40
+#define PRP_SRC_LINE_STRIDE 0x44
+#define PRP_CSC_COEF_012 0x48
+#define PRP_CSC_COEF_345 0x4c
+#define PRP_CSC_COEF_678 0x50
+#define PRP_CH1_RZ_HORI_COEF1 0x54
+#define PRP_CH1_RZ_HORI_COEF2 0x58
+#define PRP_CH1_RZ_HORI_VALID 0x5c
+#define PRP_CH1_RZ_VERT_COEF1 0x60
+#define PRP_CH1_RZ_VERT_COEF2 0x64
+#define PRP_CH1_RZ_VERT_VALID 0x68
+#define PRP_CH2_RZ_HORI_COEF1 0x6c
+#define PRP_CH2_RZ_HORI_COEF2 0x70
+#define PRP_CH2_RZ_HORI_VALID 0x74
+#define PRP_CH2_RZ_VERT_COEF1 0x78
+#define PRP_CH2_RZ_VERT_COEF2 0x7c
+#define PRP_CH2_RZ_VERT_VALID 0x80
+
+#define PRP_CNTL_CH1EN (1 << 0)
+#define PRP_CNTL_CH2EN (1 << 1)
+#define PRP_CNTL_CSIEN (1 << 2)
+#define PRP_CNTL_DATA_IN_YUV420 (0 << 3)
+#define PRP_CNTL_DATA_IN_YUV422 (1 << 3)
+#define PRP_CNTL_DATA_IN_RGB16 (2 << 3)
+#define PRP_CNTL_DATA_IN_RGB32 (3 << 3)
+#define PRP_CNTL_CH1_OUT_RGB8 (0 << 5)
+#define PRP_CNTL_CH1_OUT_RGB16 (1 << 5)
+#define PRP_CNTL_CH1_OUT_RGB32 (2 << 5)
+#define PRP_CNTL_CH1_OUT_YUV422 (3 << 5)
+#define PRP_CNTL_CH2_OUT_YUV420 (0 << 7)
+#define PRP_CNTL_CH2_OUT_YUV422 (1 << 7)
+#define PRP_CNTL_CH2_OUT_YUV444 (2 << 7)
+#define PRP_CNTL_CH1_LEN (1 << 9)
+#define PRP_CNTL_CH2_LEN (1 << 10)
+#define PRP_CNTL_SKIP_FRAME (1 << 11)
+#define PRP_CNTL_SWRST (1 << 12)
+#define PRP_CNTL_CLKEN (1 << 13)
+#define PRP_CNTL_WEN (1 << 14)
+#define PRP_CNTL_CH1BYP (1 << 15)
+#define PRP_CNTL_IN_TSKIP(x) ((x) << 16)
+#define PRP_CNTL_CH1_TSKIP(x) ((x) << 19)
+#define PRP_CNTL_CH2_TSKIP(x) ((x) << 22)
+#define PRP_CNTL_INPUT_FIFO_LEVEL(x) ((x) << 25)
+#define PRP_CNTL_RZ_FIFO_LEVEL(x) ((x) << 27)
+#define PRP_CNTL_CH2B1EN (1 << 29)
+#define PRP_CNTL_CH2B2EN (1 << 30)
+#define PRP_CNTL_CH2FEN (1 << 31)
+
+#define PRP_SIZE_HEIGHT(x) (x)
+#define PRP_SIZE_WIDTH(x) ((x) << 16)
+
+/* IRQ Enable and status register */
+#define PRP_INTR_RDERR (1 << 0)
+#define PRP_INTR_CH1WERR (1 << 1)
+#define PRP_INTR_CH2WERR (1 << 2)
+#define PRP_INTR_CH1FC (1 << 3)
+#define PRP_INTR_CH2FC (1 << 5)
+#define PRP_INTR_LBOVF (1 << 7)
+#define PRP_INTR_CH2OVF (1 << 8)
+
+#define PRP_INTR_ST_RDERR (1 << 0)
+#define PRP_INTR_ST_CH1WERR (1 << 1)
+#define PRP_INTR_ST_CH2WERR (1 << 2)
+#define PRP_INTR_ST_CH2B2CI (1 << 3)
+#define PRP_INTR_ST_CH2B1CI (1 << 4)
+#define PRP_INTR_ST_CH1B2CI (1 << 5)
+#define PRP_INTR_ST_CH1B1CI (1 << 6)
+#define PRP_INTR_ST_LBOVF (1 << 7)
+#define PRP_INTR_ST_CH2OVF (1 << 8)
+
+struct emmaprp_fmt {
+ char *name;
+ u32 fourcc;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct emmaprp_fmt formats[] = {
+ {
+ .name = "YUV 4:2:0 Planar",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .types = MEM2MEM_CAPTURE,
+ },
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+
+/* Per-queue, driver-specific private data */
+struct emmaprp_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ struct emmaprp_fmt *fmt;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct emmaprp_fmt *find_format(struct v4l2_format *f)
+{
+ struct emmaprp_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct emmaprp_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ void __iomem *base_emma;
+ struct clk *clk_emma_ahb, *clk_emma_ipg;
+
+ struct v4l2_m2m_dev *m2m_dev;
+ struct vb2_alloc_ctx *alloc_ctx;
+};
+
+struct emmaprp_ctx {
+ struct emmaprp_dev *dev;
+ /* Abort requested by m2m */
+ int aborting;
+ struct emmaprp_q_data q_data[2];
+ struct v4l2_m2m_ctx *m2m_ctx;
+};
+
+static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &(ctx->q_data[V4L2_M2M_SRC]);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &(ctx->q_data[V4L2_M2M_DST]);
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/*
+ * mem2mem callbacks
+ */
+static void emmaprp_job_abort(void *priv)
+{
+ struct emmaprp_ctx *ctx = priv;
+ struct emmaprp_dev *pcdev = ctx->dev;
+
+ ctx->aborting = 1;
+
+ dprintk(pcdev, "Aborting task\n");
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void emmaprp_lock(void *priv)
+{
+ struct emmaprp_ctx *ctx = priv;
+ struct emmaprp_dev *pcdev = ctx->dev;
+ mutex_lock(&pcdev->dev_mutex);
+}
+
+static void emmaprp_unlock(void *priv)
+{
+ struct emmaprp_ctx *ctx = priv;
+ struct emmaprp_dev *pcdev = ctx->dev;
+ mutex_unlock(&pcdev->dev_mutex);
+}
+
+static inline void emmaprp_dump_regs(struct emmaprp_dev *pcdev)
+{
+ dprintk(pcdev,
+ "eMMa-PrP Registers:\n"
+ " SOURCE_Y_PTR = 0x%08X\n"
+ " SRC_FRAME_SIZE = 0x%08X\n"
+ " DEST_Y_PTR = 0x%08X\n"
+ " DEST_CR_PTR = 0x%08X\n"
+ " DEST_CB_PTR = 0x%08X\n"
+ " CH2_OUT_IMAGE_SIZE = 0x%08X\n"
+ " CNTL = 0x%08X\n",
+ readl(pcdev->base_emma + PRP_SOURCE_Y_PTR),
+ readl(pcdev->base_emma + PRP_SRC_FRAME_SIZE),
+ readl(pcdev->base_emma + PRP_DEST_Y_PTR),
+ readl(pcdev->base_emma + PRP_DEST_CR_PTR),
+ readl(pcdev->base_emma + PRP_DEST_CB_PTR),
+ readl(pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE),
+ readl(pcdev->base_emma + PRP_CNTL));
+}
+
+static void emmaprp_device_run(void *priv)
+{
+ struct emmaprp_ctx *ctx = priv;
+ struct emmaprp_q_data *s_q_data, *d_q_data;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct emmaprp_dev *pcdev = ctx->dev;
+ unsigned int s_width, s_height;
+ unsigned int d_width, d_height;
+ unsigned int d_size;
+ dma_addr_t p_in, p_out;
+ u32 tmp;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ s_width = s_q_data->width;
+ s_height = s_q_data->height;
+
+ d_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ d_width = d_q_data->width;
+ d_height = d_q_data->height;
+ d_size = d_width * d_height;
+
+ p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&pcdev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return;
+ }
+
+ /* Input frame parameters */
+ writel(p_in, pcdev->base_emma + PRP_SOURCE_Y_PTR);
+ writel(PRP_SIZE_WIDTH(s_width) | PRP_SIZE_HEIGHT(s_height),
+ pcdev->base_emma + PRP_SRC_FRAME_SIZE);
+
+ /* Output frame parameters */
+ writel(p_out, pcdev->base_emma + PRP_DEST_Y_PTR);
+ writel(p_out + d_size, pcdev->base_emma + PRP_DEST_CB_PTR);
+ writel(p_out + d_size + (d_size >> 2),
+ pcdev->base_emma + PRP_DEST_CR_PTR);
+ writel(PRP_SIZE_WIDTH(d_width) | PRP_SIZE_HEIGHT(d_height),
+ pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE);
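+	/*
+	 * The channel 2 destination is YUV420: a d_size byte Y plane
+	 * followed by quarter-size Cb and Cr planes. For an illustrative
+	 * 640x480 capture format (d_size = 307200) Cb therefore starts
+	 * at p_out + 307200 and Cr at p_out + 384000.
+	 */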
+
+ /* IRQ configuration */
+ tmp = readl(pcdev->base_emma + PRP_INTR_CNTL);
+ writel(tmp | PRP_INTR_RDERR |
+ PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC,
+ pcdev->base_emma + PRP_INTR_CNTL);
+
+ emmaprp_dump_regs(pcdev);
+
+ /* Enable transfer */
+ tmp = readl(pcdev->base_emma + PRP_CNTL);
+ writel(tmp | PRP_CNTL_CH2_OUT_YUV420 |
+ PRP_CNTL_DATA_IN_YUV422 |
+ PRP_CNTL_CH2EN,
+ pcdev->base_emma + PRP_CNTL);
+}
+
+static irqreturn_t emmaprp_irq(int irq_emma, void *data)
+{
+ struct emmaprp_dev *pcdev = data;
+ struct emmaprp_ctx *curr_ctx;
+ struct vb2_buffer *src_vb, *dst_vb;
+ unsigned long flags;
+ u32 irqst;
+
+ /* Check irq flags and clear irq */
+ irqst = readl(pcdev->base_emma + PRP_INTRSTATUS);
+ writel(irqst, pcdev->base_emma + PRP_INTRSTATUS);
+ dprintk(pcdev, "irqst = 0x%08x\n", irqst);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(pcdev->m2m_dev);
+ if (curr_ctx == NULL) {
+ pr_err("Instance released before the end of transaction\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!curr_ctx->aborting) {
+ if ((irqst & PRP_INTR_ST_RDERR) ||
+ (irqst & PRP_INTR_ST_CH2WERR)) {
+ pr_err("PrP bus error occurred, this transfer is probably corrupted\n");
+ writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
+ } else if (irqst & PRP_INTR_ST_CH2B1CI) { /* buffer ready */
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->v4l2_buf.flags &=
+ ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.flags |=
+ src_vb->v4l2_buf.flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+
+ spin_lock_irqsave(&pcdev->irqlock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&pcdev->irqlock, flags);
+ }
+ }
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct emmaprp_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+			/* Is this the index-th format of the requested type? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+		strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct emmaprp_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
+ f->fmt.pix.bytesperline = q_data->width * 3 / 2;
+ else /* YUYV */
+ f->fmt.pix.bytesperline = q_data->width * 2;
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f)
+{
+ enum v4l2_field field;
+
+ if (!find_format(f))
+ return -EINVAL;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ f->fmt.pix.field = field;
+
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W,
+ W_ALIGN_YUV420, &f->fmt.pix.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+ } else {
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W,
+ W_ALIGN_OTHERS, &f->fmt.pix.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ }
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
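+	/*
+	 * The alignment arguments are log2 values: W_ALIGN_YUV420 = 3
+	 * keeps the width a multiple of 8 and H_ALIGN = 1 keeps the
+	 * height a multiple of 2. For an illustrative 640x480 YUV420
+	 * frame this yields bytesperline = 960 and sizeimage = 460800.
+	 */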
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct emmaprp_fmt *fmt;
+ struct emmaprp_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct emmaprp_fmt *fmt;
+ struct emmaprp_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f);
+}
+
+static int vidioc_s_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
+{
+ struct emmaprp_q_data *q_data;
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = vidioc_try_fmt(f);
+ if (ret)
+ return ret;
+
+ q_data->fmt = find_format(f);
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ q_data->sizeimage = q_data->width * q_data->height * 3 / 2;
+ else /* YUYV */
+ q_data->sizeimage = q_data->width * q_data->height * 2;
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+
+/*
+ * Queue operations
+ */
+static int emmaprp_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct emmaprp_ctx *ctx = vb2_get_drv_priv(vq);
+ struct emmaprp_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ size = q_data->width * q_data->height * 3 / 2;
+ else
+ size = q_data->width * q_data->height * 2;
+
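+	/* Reduce the buffer count until the total memory needed stays
+	 * within the driver's video memory limit */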
+ while (size * count > MEM2MEM_VID_MEM_LIMIT)
+		count--;
+
+ *nplanes = 1;
+ *nbuffers = count;
+ sizes[0] = size;
+
+ alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
+
+static int emmaprp_buf_prepare(struct vb2_buffer *vb)
+{
+ struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct emmaprp_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+		dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+			__func__, vb2_plane_size(vb, 0),
+			(unsigned long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+ return 0;
+}
+
+static void emmaprp_buf_queue(struct vb2_buffer *vb)
+{
+ struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static struct vb2_ops emmaprp_qops = {
+ .queue_setup = emmaprp_queue_setup,
+ .buf_prepare = emmaprp_buf_prepare,
+ .buf_queue = emmaprp_buf_queue,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct emmaprp_ctx *ctx = priv;
+ int ret;
+
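+	/* The OUTPUT queue holds the source frames fed to the hardware,
+	 * the CAPTURE queue receives the processed frames */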
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &emmaprp_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &emmaprp_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int emmaprp_open(struct file *file)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->dev = pcdev;
+
+ if (mutex_lock_interruptible(&pcdev->dev_mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->m2m_ctx)) {
+ int ret = PTR_ERR(ctx->m2m_ctx);
+
+ mutex_unlock(&pcdev->dev_mutex);
+ kfree(ctx);
+ return ret;
+ }
+
+ clk_prepare_enable(pcdev->clk_emma_ipg);
+ clk_prepare_enable(pcdev->clk_emma_ahb);
+ ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1];
+ ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+ mutex_unlock(&pcdev->dev_mutex);
+
+ dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+ return 0;
+}
+
+static int emmaprp_release(struct file *file)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx = file->private_data;
+
+ dprintk(pcdev, "Releasing instance %p\n", ctx);
+
+ mutex_lock(&pcdev->dev_mutex);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ mutex_unlock(&pcdev->dev_mutex);
+ kfree(ctx);
+
+ return 0;
+}
+
+static unsigned int emmaprp_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx = file->private_data;
+ unsigned int res;
+
+ mutex_lock(&pcdev->dev_mutex);
+ res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&pcdev->dev_mutex);
+ return res;
+}
+
+static int emmaprp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx = file->private_data;
+ int ret;
+
+ if (mutex_lock_interruptible(&pcdev->dev_mutex))
+ return -ERESTARTSYS;
+ ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ mutex_unlock(&pcdev->dev_mutex);
+ return ret;
+}
+
+static const struct v4l2_file_operations emmaprp_fops = {
+ .owner = THIS_MODULE,
+ .open = emmaprp_open,
+ .release = emmaprp_release,
+ .poll = emmaprp_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = emmaprp_mmap,
+};
+
+static struct video_device emmaprp_videodev = {
+ .name = MEM2MEM_NAME,
+ .fops = &emmaprp_fops,
+ .ioctl_ops = &emmaprp_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = emmaprp_device_run,
+ .job_abort = emmaprp_job_abort,
+ .lock = emmaprp_lock,
+ .unlock = emmaprp_unlock,
+};
+
+static int emmaprp_probe(struct platform_device *pdev)
+{
+ struct emmaprp_dev *pcdev;
+ struct video_device *vfd;
+ struct resource *res;
+ int irq, ret;
+
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev)
+ return -ENOMEM;
+
+ spin_lock_init(&pcdev->irqlock);
+
+ pcdev->clk_emma_ipg = devm_clk_get(&pdev->dev, "ipg");
+	pcdev->clk_emma_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(pcdev->clk_emma_ipg))
+		return PTR_ERR(pcdev->clk_emma_ipg);
+
+ pcdev->clk_emma_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(pcdev->clk_emma_ahb))
+ return PTR_ERR(pcdev->clk_emma_ahb);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcdev->base_emma = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pcdev->base_emma))
+ return PTR_ERR(pcdev->base_emma);
+
+ ret = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&pcdev->dev_mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ *vfd = emmaprp_videodev;
+ vfd->lock = &pcdev->dev_mutex;
+ vfd->v4l2_dev = &pcdev->v4l2_dev;
+
+ video_set_drvdata(vfd, pcdev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", emmaprp_videodev.name);
+ pcdev->vfd = vfd;
+ v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
+ " Device registered as /dev/video%d\n", vfd->num);
+
+ platform_set_drvdata(pdev, pcdev);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
+ dev_name(&pdev->dev), pcdev);
+ if (ret)
+ goto rel_vdev;
+
+ pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(pcdev->alloc_ctx)) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to alloc vb2 context\n");
+ ret = PTR_ERR(pcdev->alloc_ctx);
+ goto rel_vdev;
+ }
+
+ pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(pcdev->m2m_dev)) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(pcdev->m2m_dev);
+ goto rel_ctx;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
+ goto rel_m2m;
+ }
+
+ return 0;
+
+rel_m2m:
+ v4l2_m2m_release(pcdev->m2m_dev);
+rel_ctx:
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+rel_vdev:
+ video_device_release(vfd);
+unreg_dev:
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+
+ mutex_destroy(&pcdev->dev_mutex);
+
+ return ret;
+}
+
+static int emmaprp_remove(struct platform_device *pdev)
+{
+ struct emmaprp_dev *pcdev = platform_get_drvdata(pdev);
+
+	v4l2_info(&pcdev->v4l2_dev, "Removing " EMMAPRP_MODULE_NAME "\n");
+
+ video_unregister_device(pcdev->vfd);
+ v4l2_m2m_release(pcdev->m2m_dev);
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+ mutex_destroy(&pcdev->dev_mutex);
+
+ return 0;
+}
+
+static struct platform_driver emmaprp_pdrv = {
+ .probe = emmaprp_probe,
+ .remove = emmaprp_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(emmaprp_pdrv);
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
new file mode 100644
index 00000000000..37ad446b35b
--- /dev/null
+++ b/drivers/media/platform/omap/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_OMAP2_VOUT_VRFB
+ bool
+
+config VIDEO_OMAP2_VOUT
+ tristate "OMAP2/OMAP3 V4L2-Display driver"
+ depends on ARCH_OMAP2 || ARCH_OMAP3
+ select VIDEOBUF_GEN
+ select VIDEOBUF_DMA_CONTIG
+ select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
+ select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
+ select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
+ default n
+ ---help---
+ V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap/Makefile b/drivers/media/platform/omap/Makefile
new file mode 100644
index 00000000000..d80df41fde2
--- /dev/null
+++ b/drivers/media/platform/omap/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the omap video device drivers.
+#
+
+# OMAP2/3 Display driver
+omap-vout-y += omap_vout.o omap_voutlib.o
+omap-vout-$(CONFIG_VIDEO_OMAP2_VOUT_VRFB) += omap_vout_vrfb.o
+obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
new file mode 100644
index 00000000000..9a726eacb29
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -0,0 +1,2294 @@
+/*
+ * omap_vout.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Leveraged code from the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * History:
+ * 20-APR-2006 Khasim Modified VRFB based Rotation,
+ * The image data is always read from 0 degree
+ * view and written
+ * to the virtual space of desired rotation angle
+ * 4-DEC-2006 Jian Changed to support better memory management
+ *
+ * 17-Nov-2008 Hardik Changed driver to use video_ioctl2
+ *
+ * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/videodev2.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include <video/omapvrfb.h>
+#include <video/omapdss.h>
+
+#include "omap_voutlib.h"
+#include "omap_voutdef.h"
+#include "omap_vout_vrfb.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
+MODULE_LICENSE("GPL");
+
+/* Driver Configuration macros */
+#define VOUT_NAME "omap_vout"
+
+enum omap_vout_channels {
+ OMAP_VIDEO1,
+ OMAP_VIDEO2,
+};
+
+static struct videobuf_queue_ops video_vbq_ops;
+/* Variables configurable through module params*/
+static u32 video1_numbuffers = 3;
+static u32 video2_numbuffers = 3;
+static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static bool vid1_static_vrfb_alloc;
+static bool vid2_static_vrfb_alloc;
+static bool debug;
+
+/* Module parameters */
+module_param(video1_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_numbuffers,
+ "Number of buffers to be allocated at init time for Video1 device.");
+
+module_param(video2_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_numbuffers,
+ "Number of buffers to be allocated at init time for Video2 device.");
+
+module_param(video1_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_bufsize,
+ "Size of the buffer to be allocated for video1 device");
+
+module_param(video2_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_bufsize,
+ "Size of the buffer to be allocated for video2 device");
+
+module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid1_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video1 device");
+
+module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid2_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video2 device");
+
+module_param(debug, bool, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+/* list of image formats supported by OMAP2 video pipelines */
+static const struct v4l2_fmtdesc omap_formats[] = {
+ {
+ /* Note: V4L2 defines RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
+ *
+ * We interpret RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
+ */
+ .description = "RGB565, le",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ },
+ {
+		/* Note: V4L2 defines RGB32 as RGB-8-8-8-8; we use
+		 * this for RGB24 unpacked mode, the last 8 bits are ignored
+		 */
+ .description = "RGB32, le",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ },
+ {
+		/* Note: V4L2 defines RGB24 as RGB-8-8-8; we use
+		 * this for RGB24 packed mode
+		 */
+ .description = "RGB24, le",
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ },
+ {
+ .description = "YUYV (YUV 4:2:2), packed",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .description = "UYVY, packed",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
+
+/*
+ * Try format
+ */
+static int omap_vout_try_format(struct v4l2_pix_format *pix)
+{
+ int ifmt, bpp = 0;
+
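+	/* Clamp the requested size to the limits supported by the pipeline */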
+ pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
+ (u32)VID_MAX_HEIGHT);
+ pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+ for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+ if (pix->pixelformat == omap_formats[ifmt].pixelformat)
+ break;
+ }
+
+ if (ifmt == NUM_OUTPUT_FORMATS)
+ ifmt = 0;
+
+ pix->pixelformat = omap_formats[ifmt].pixelformat;
+ pix->field = V4L2_FIELD_ANY;
+ pix->priv = 0;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ default:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = YUYV_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB565_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB24_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB32_BPP;
+ break;
+ }
+ pix->bytesperline = pix->width * bpp;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return bpp;
+}
+
+/*
+ * omap_vout_uservirt_to_phys: convert a user space virtual address to a
+ * physical address.
+ */
+static u32 omap_vout_uservirt_to_phys(u32 virtp)
+{
+ unsigned long physp = 0;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+
+ /* For kernel direct-mapped memory, take the easy way */
+ if (virtp >= PAGE_OFFSET)
+ return virt_to_phys((void *) virtp);
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(mm, virtp);
+ if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+		/* This will catch kernel-allocated, mmapped-to-usermode
+		   addresses */
+ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+ up_read(&current->mm->mmap_sem);
+ } else {
+ /* otherwise, use get_user_pages() for general userland pages */
+ int res, nr_pages = 1;
+ struct page *pages;
+
+ res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+ 0, &pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (res == nr_pages) {
+ physp = __pa(page_address(&pages[0]) +
+ (virtp & ~PAGE_MASK));
+ } else {
+			printk(KERN_WARNING VOUT_NAME
+			       ": get_user_pages failed\n");
+ return 0;
+ }
+ }
+
+ return physp;
+}
+
+/*
+ * Free the V4L2 buffers
+ */
+void omap_vout_free_buffers(struct omap_vout_device *vout)
+{
+ int i, numbuffers;
+
+	/* Work out how many buffers were allocated for this video device */
+ numbuffers = (vout->vid) ? video2_numbuffers : video1_numbuffers;
+ vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;
+
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_virt_addr[i] = 0;
+ }
+}
+
+/*
+ * Convert V4L2 rotation to DSS rotation
+ * V4L2 understands rotations of 0, 90, 180 and 270 degrees;
+ * convert these to the DSS values 0, 1, 2 and 3 respectively.
+ */
+static int v4l2_rot_to_dss_rot(int v4l2_rotation,
+ enum dss_rotation *rotation, bool mirror)
+{
+ int ret = 0;
+
+ switch (v4l2_rotation) {
+ case 90:
+ *rotation = dss_rotation_90_degree;
+ break;
+ case 180:
+ *rotation = dss_rotation_180_degree;
+ break;
+ case 270:
+ *rotation = dss_rotation_270_degree;
+ break;
+ case 0:
+ *rotation = dss_rotation_0_degree;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int omap_vout_calculate_offset(struct omap_vout_device *vout)
+{
+ struct omapvideo_info *ovid;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int ps = 2, line_length = 0;
+
+ ovid = &vout->vid_info;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_calculate_vrfb_offset(vout);
+ } else {
+ vout->line_length = line_length = pix->width;
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat)
+ ps = 2;
+ else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat)
+ ps = 4;
+ else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat)
+ ps = 3;
+
+ vout->ps = ps;
+
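+		/* Byte offset of the crop rectangle's top-left corner
+		 * within the buffer */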
+ *cropped_offset = (line_length * ps) *
+ crop->top + crop->left * ps;
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
+ __func__, vout->cropped_offset);
+
+ return 0;
+}
+
+/*
+ * Convert V4L2 pixel format to DSS pixel format
+ */
+static int video_mode_to_dss_mode(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_pix_format *pix = &vout->pix;
+ enum omap_color_mode mode;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ mode = OMAP_DSS_COLOR_YUV2;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mode = OMAP_DSS_COLOR_UYVY;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ mode = OMAP_DSS_COLOR_RGB16;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ mode = OMAP_DSS_COLOR_RGB24P;
+ break;
+ case V4L2_PIX_FMT_RGB32:
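+		/* The VIDEO1 pipeline has no alpha support, so use
+		 * unpacked RGB24 for it */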
+ mode = (ovl->id == OMAP_DSS_VIDEO1) ?
+ OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ mode = OMAP_DSS_COLOR_RGBX32;
+ break;
+ default:
+ mode = -EINVAL;
+ break;
+ }
+ return mode;
+}
+
+/*
+ * Setup the overlay
+ */
+static int omapvid_setup_overlay(struct omap_vout_device *vout,
+ struct omap_overlay *ovl, int posx, int posy, int outw,
+ int outh, u32 addr)
+{
+ int ret = 0;
+ struct omap_overlay_info info;
+ int cropheight, cropwidth, pixheight, pixwidth;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
+ (outw != vout->pix.width || outh != vout->pix.height)) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ vout->dss_mode = video_mode_to_dss_mode(vout);
+ if (vout->dss_mode == -EINVAL) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+	/* Set up the input plane parameters according to
+	 * the selected rotation value.
+ */
+ if (is_rotation_90_or_270(vout)) {
+ cropheight = vout->crop.width;
+ cropwidth = vout->crop.height;
+ pixheight = vout->pix.width;
+ pixwidth = vout->pix.height;
+ } else {
+ cropheight = vout->crop.height;
+ cropwidth = vout->crop.width;
+ pixheight = vout->pix.height;
+ pixwidth = vout->pix.width;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+ info.paddr = addr;
+ info.width = cropwidth;
+ info.height = cropheight;
+ info.color_mode = vout->dss_mode;
+ info.mirror = vout->mirror;
+ info.pos_x = posx;
+ info.pos_y = posy;
+ info.out_width = outw;
+ info.out_height = outh;
+ info.global_alpha = vout->win.global_alpha;
+ if (!is_rotation_enabled(vout)) {
+ info.rotation = 0;
+ info.rotation_type = OMAP_DSS_ROT_DMA;
+ info.screen_width = pixwidth;
+ } else {
+ info.rotation = vout->rotation;
+ info.rotation_type = OMAP_DSS_ROT_VRFB;
+ info.screen_width = 2048;
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+		"%s enable=%d addr=%x width=%d height=%d color_mode=%d\n"
+		"rotation=%d mirror=%d posx=%d posy=%d out_width=%d\n"
+ "out_height=%d rotation_type=%d screen_width=%d\n",
+ __func__, ovl->is_enabled(ovl), info.paddr, info.width, info.height,
+ info.color_mode, info.rotation, info.mirror, info.pos_x,
+ info.pos_y, info.out_width, info.out_height, info.rotation_type,
+ info.screen_width);
+
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ goto setup_ovl_err;
+
+ return 0;
+
+setup_ovl_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
+ return ret;
+}
+
+/*
+ * Initialize the overlay structure
+ */
+static int omapvid_init(struct omap_vout_device *vout, u32 addr)
+{
+ int ret = 0, i;
+ struct v4l2_window *win;
+ struct omap_overlay *ovl;
+ int posx, posy, outw, outh, temp;
+ struct omap_video_timings *timing;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ win = &vout->win;
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_dss_device *dssdev;
+
+ ovl = ovid->overlays[i];
+ dssdev = ovl->get_device(ovl);
+
+ if (!dssdev)
+ return -EINVAL;
+
+ timing = &dssdev->panel.timings;
+
+ outw = win->w.width;
+ outh = win->w.height;
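+		/* Translate the window position into the panel coordinate
+		 * space for the selected rotation */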
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+			/* Swap the output width and height for 90
+			 * and 270 degree rotation
+ */
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = (timing->y_res - win->w.width) - win->w.left;
+ posx = win->w.top;
+ break;
+
+ case dss_rotation_180_degree:
+ posx = (timing->x_res - win->w.width) - win->w.left;
+ posy = (timing->y_res - win->w.height) - win->w.top;
+ break;
+
+ case dss_rotation_270_degree:
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = win->w.left;
+ posx = (timing->x_res - win->w.height) - win->w.top;
+ break;
+
+ default:
+ posx = win->w.left;
+ posy = win->w.top;
+ break;
+ }
+
+ ret = omapvid_setup_overlay(vout, ovl, posx, posy,
+ outw, outh, addr);
+ if (ret)
+ goto omapvid_init_err;
+ }
+ return 0;
+
+omapvid_init_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
+ return ret;
+}
+
+/*
+ * Apply the changes and set the GO bit of the DSS
+ */
+static int omapvid_apply_changes(struct omap_vout_device *vout)
+{
+ int i;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_dss_device *dssdev;
+
+ ovl = ovid->overlays[i];
+ dssdev = ovl->get_device(ovl);
+ if (!dssdev)
+ return -EINVAL;
+ ovl->manager->apply(ovl->manager);
+ }
+
+ return 0;
+}
+
+static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
+ unsigned int irqstatus, struct timeval timevalue)
+{
+ u32 fid;
+
+ if (vout->first_int) {
+ vout->first_int = 0;
+ goto err;
+ }
+
+ if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+ fid = 1;
+ else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+ fid = 0;
+ else
+ goto err;
+
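+	/* Toggle the software field counter and check that it matches the
+	 * field signalled by the display controller */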
+ vout->field_id ^= 1;
+ if (fid != vout->field_id) {
+ if (fid == 0)
+ vout->field_id = fid;
+ } else if (0 == fid) {
+ if (vout->cur_frm == vout->next_frm)
+ goto err;
+
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ } else {
+ if (list_empty(&vout->dma_queue) ||
+ (vout->cur_frm != vout->next_frm))
+ goto err;
+ }
+
+ return vout->field_id;
+err:
+ return 0;
+}
+
+static void omap_vout_isr(void *arg, unsigned int irqstatus)
+{
+ int ret, fid, mgr_id;
+ u32 addr, irq;
+ struct omap_overlay *ovl;
+ struct timeval timevalue;
+ struct omapvideo_info *ovid;
+ struct omap_dss_device *cur_display;
+ struct omap_vout_device *vout = (struct omap_vout_device *)arg;
+
+ if (!vout->streaming)
+ return;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ mgr_id = ovl->manager->id;
+
+ /* get the display device attached to the overlay */
+ cur_display = ovl->get_device(ovl);
+
+ if (!cur_display)
+ return;
+
+ spin_lock(&vout->vbq_lock);
+ v4l2_get_timestamp(&timevalue);
+
+ switch (cur_display->type) {
+ case OMAP_DISPLAY_TYPE_DSI:
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DVI:
+ if (mgr_id == OMAP_DSS_CHANNEL_LCD)
+ irq = DISPC_IRQ_VSYNC;
+ else if (mgr_id == OMAP_DSS_CHANNEL_LCD2)
+ irq = DISPC_IRQ_VSYNC2;
+ else
+ goto vout_isr_err;
+
+ if (!(irqstatus & irq))
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_VENC:
+ fid = omapvid_handle_interlace_display(vout, irqstatus,
+ timevalue);
+ if (!fid)
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_HDMI:
+ if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
+ goto vout_isr_err;
+ break;
+ default:
+ goto vout_isr_err;
+ }
+
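+	/* Complete the frame that has just been displayed and make the
+	 * queued one current */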
+ if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ }
+
+ vout->first_int = 0;
+ if (list_empty(&vout->dma_queue))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+
+ addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+ + vout->cropped_offset;
+
+	/* First save the configuration in the overlay structure */
+	ret = omapvid_init(vout, addr);
+	if (ret) {
+		printk(KERN_ERR VOUT_NAME
+			": failed to set overlay info\n");
+ goto vout_isr_err;
+ }
+
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+		printk(KERN_ERR VOUT_NAME ": failed to change mode\n");
+
+vout_isr_err:
+ spin_unlock(&vout->vbq_lock);
+}
+
+/* Video buffer call backs */
+
+/*
+ * The buffer setup function is called by the videobuf layer when the REQBUF
+ * ioctl is called. It is used to set up buffers and return the size and count
+ * of the buffers allocated. After the call to this function, the videobuf
+ * layer will set up the buffer queue depending on the size and count of the
+ * buffers.
+ */
+static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ int startindex = 0, i, j;
+ u32 phy_addr = 0, virt_addr = 0;
+ struct omap_vout_device *vout = q->priv_data;
+ struct omapvideo_info *ovid = &vout->vid_info;
+ int vid_max_buf_size;
+
+ if (!vout)
+ return -EINVAL;
+
+ vid_max_buf_size = vout->vid == OMAP_VIDEO1 ? video1_bufsize :
+ video2_bufsize;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
+ return -EINVAL;
+
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
+ *count = startindex;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
+ return -ENOMEM;
+ }
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return 0;
+
+	/* Now allocate the V4L2 buffers */
+ *size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ /* Check the size of the buffer */
+ if (*size > vid_max_buf_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "buffer allocation mismatch [%u] [%u]\n",
+				*size, vid_max_buf_size);
+ return -ENOMEM;
+ }
+
+ for (i = startindex; i < *count; i++) {
+ vout->buffer_size = *size;
+
+ virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
+ &phy_addr);
+ if (!virt_addr) {
+ if (ovid->rotation_type == VOUT_ROT_NONE) {
+ break;
+ } else {
+ if (!is_rotation_enabled(vout))
+ break;
+ /* Free the VRFB buffers if no space for V4L2 buffers */
+ for (j = i; j < *count; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ }
+ }
+ vout->buf_virt_addr[i] = virt_addr;
+ vout->buf_phy_addr[i] = phy_addr;
+ }
+ *count = vout->buffer_allocated = i;
+
+ return 0;
+}
+
+/*
+ * Free the V4L2 buffers additionally allocated than default
+ * number of buffers
+ */
+static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
+{
+ int num_buffers = 0, i;
+
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ if (vout->buf_virt_addr[i])
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+}
+
+/*
+ * This function is called when the VIDIOC_QBUF ioctl is called.
+ * It prepares the buffers before they are handed out for display. It
+ * converts the user space virtual address into a physical address if the
+ * userptr memory exchange mechanism is used. If rotation is enabled, it
+ * copies the entire buffer into VRFB memory space before giving it to the DSS.
+ */
+static int omap_vout_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct omap_vout_device *vout = q->priv_data;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vout->pix.width;
+ vb->height = vout->pix.height;
+ vb->size = vb->width * vb->height * vout->bpp;
+ vb->field = field;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ /* if user pointer memory mechanism is used, get the physical
+ * address of the buffer
+ */
+ if (V4L2_MEMORY_USERPTR == vb->memory) {
+ if (0 == vb->baddr)
+ return -EINVAL;
+ /* Physical address */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ omap_vout_uservirt_to_phys(vb->baddr);
+ } else {
+ u32 addr, dma_addr;
+ unsigned long size;
+
+ addr = (unsigned long) vout->buf_virt_addr[vb->i];
+ size = (unsigned long) vb->size;
+
+ dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
+ v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
+
+ vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
+ }
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB)
+ return omap_vout_prepare_vrfb(vout, vb);
+ else
+ return 0;
+}
+
+/*
+ * The buffer queue function is called from the videobuf layer when the _QBUF
+ * ioctl is called. It is used to enqueue a buffer that is ready to be
+ * displayed.
+ */
+static void omap_vout_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+	/* The driver also maintains a queue, so enqueue the buffer in the
+	 * driver queue */
+ list_add_tail(&vb->queue, &vout->dma_queue);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * The buffer release function is called from the videobuf layer to release
+ * buffers which were already allocated
+ */
+static void omap_vout_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return;
+}
+
+/*
+ * File operations
+ */
+static unsigned int omap_vout_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ return videobuf_poll_stream(file, q, wait);
+}
+
+static void omap_vout_vm_open(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count++;
+}
+
+static void omap_vout_vm_close(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count--;
+}
+
+static struct vm_operations_struct omap_vout_vm_ops = {
+ .open = omap_vout_vm_open,
+ .close = omap_vout_vm_close,
+};
+
+static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int i;
+ void *pos;
+ unsigned long start = vma->vm_start;
+ unsigned long size = (vma->vm_end - vma->vm_start);
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ " %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
+ vma->vm_pgoff, vma->vm_start, vma->vm_end);
+
+ /* look for the buffer to map */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+ continue;
+ if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ if (VIDEO_MAX_FRAME == i) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ return -EINVAL;
+ }
+ /* Check the size of the buffer */
+ if (size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "insufficient memory [%lu] [%u]\n",
+ size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
+ q->bufs[i]->baddr = vma->vm_start;
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &omap_vout_vm_ops;
+ vma->vm_private_data = (void *) vout;
+ pos = (void *)vout->buf_virt_addr[i];
+ vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
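+	/* Remap the buffer into the caller's address space one page at a time */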
+ while (size > 0) {
+ unsigned long pfn;
+ pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ vout->mmap_count++;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+
+ return 0;
+}
+
+static int omap_vout_release(struct file *file)
+{
+ unsigned int ret, i;
+ struct videobuf_queue *q;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = file->private_data;
+
+	if (!vout)
+		return 0;
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+	ovid = &vout->vid_info;
+
+ q = &vout->vbq;
+	/* Disable all the overlays connected with this interface */
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_overlay *ovl = ovid->overlays[i];
+ struct omap_dss_device *dssdev = ovl->get_device(ovl);
+
+ if (dssdev)
+ ovl->disable(ovl);
+ }
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "Unable to apply changes\n");
+
+ /* Free all buffers */
+ omap_vout_free_extra_buffers(vout);
+
+ /* Free the VRFB buffers only if they are allocated
+ * during reqbufs. Don't free if init time allocated
+ */
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ if (!vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
+ videobuf_mmap_free(q);
+
+	/* Even if applying the changes fails, we should continue
+	   freeing the allocated memory */
+ if (vout->streaming) {
+ u32 mask = 0;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+ vout->streaming = 0;
+
+ videobuf_streamoff(q);
+ videobuf_queue_cancel(q);
+ }
+
+ if (vout->mmap_count != 0)
+ vout->mmap_count = 0;
+
+ vout->opened -= 1;
+ file->private_data = NULL;
+
+ if (vout->buffer_allocated)
+ videobuf_mmap_free(q);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return ret;
+}
+
+static int omap_vout_open(struct file *file)
+{
+ struct videobuf_queue *q;
+ struct omap_vout_device *vout = NULL;
+
+ vout = video_drvdata(file);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
+ if (vout == NULL)
+ return -ENODEV;
+
+ /* for now, we only support single open */
+ if (vout->opened)
+ return -EBUSY;
+
+ vout->opened += 1;
+
+ file->private_data = vout;
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
+ video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+ video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+ video_vbq_ops.buf_release = omap_vout_buffer_release;
+ video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+ &vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vout, NULL);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return 0;
+}
+
+/*
+ * V4L2 ioctls
+ */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct omap_vout_device *vout = fh;
+
+ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+ cap->bus_info[0] = '\0';
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+
+ f->fmt.pix = vout->pix;
+ return 0;
+
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+ struct omap_dss_device *dssdev;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ dssdev = ovl->get_device(ovl);
+
+ if (!dssdev)
+ return -EINVAL;
+
+ timing = &dssdev->panel.timings;
+
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+
+ omap_vout_try_format(&f->fmt.pix);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret, bpp;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+ struct omap_dss_device *dssdev;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ dssdev = ovl->get_device(ovl);
+
+ /* get the display device attached to the overlay */
+ if (!dssdev) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+ timing = &dssdev->panel.timings;
+
+	/* We don't support RGB24-packed mode if VRFB rotation
+	 * is enabled */
+ if ((is_rotation_enabled(vout)) &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+
+ /* get the framebuffer parameters */
+
+ if (is_rotation_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+	/* changing to a smaller size is OK */
+
+ bpp = omap_vout_try_format(&f->fmt.pix);
+ f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;
+
+ /* try & set the new output format */
+ vout->bpp = bpp;
+ vout->pix = f->fmt.pix;
+ vout->vrfb_bpp = 1;
+
+	/* If YUYV or UYVY then the VRFB bpp is 2, for others it is 1 */
+ if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
+ V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
+ vout->vrfb_bpp = 2;
+
+ /* set default crop and win */
+ omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ ret = 0;
+
+s_fmt_vid_out_exit:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ ret = omap_vout_try_window(&vout->fbuf, win);
+
+ if (!ret) {
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
+ win->global_alpha = 255;
+ else
+ win->global_alpha = f->fmt.win.global_alpha;
+ }
+
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
+ if (!ret) {
+ /* Video1 plane does not support global alpha on OMAP3 */
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
+ vout->win.global_alpha = 255;
+ else
+ vout->win.global_alpha = f->fmt.win.global_alpha;
+
+ vout->win.chromakey = f->fmt.win.chromakey;
+ }
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ u32 key_value = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ win->w = vout->win.w;
+ win->field = vout->win.field;
+ win->global_alpha = vout->win.global_alpha;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ key_value = info.trans_key;
+ }
+ win->chromakey = key_value;
+ return 0;
+}
+
+static int vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cropcap)
+{
+ struct omap_vout_device *vout = fh;
+ struct v4l2_pix_format *pix = &vout->pix;
+
+ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Width and height are always even */
+ cropcap->bounds.width = pix->width & ~1;
+ cropcap->bounds.height = pix->height & ~1;
+
+ omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
+ cropcap->pixelaspect.numerator = 1;
+ cropcap->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct omap_vout_device *vout = fh;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ crop->c = vout->crop;
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+{
+ int ret = -EINVAL;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+ struct omap_dss_device *dssdev;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ dssdev = ovl->get_device(ovl);
+
+ if (!dssdev) {
+ ret = -EINVAL;
+ goto s_crop_err;
+ }
+
+ timing = &dssdev->panel.timings;
+
+ if (is_rotation_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+ &vout->fbuf, &crop->c);
+
+s_crop_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *ctrl)
+{
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
+ break;
+ case V4L2_CID_BG_COLOR:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
+ break;
+ default:
+ ctrl->name[0] = '\0';
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ctrl->value = vout->control[0].value;
+ break;
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay_manager_info info;
+ struct omap_overlay *ovl;
+
+ ovl = vout->vid_info.overlays[0];
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ ctrl->value = info.default_color;
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ ctrl->value = vout->control[2].value;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (a->id) {
+ case V4L2_CID_ROTATE:
+ {
+ struct omapvideo_info *ovid;
+ int rotation = a->value;
+
+ ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+ if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
+
+ if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
+ vout->mirror)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[0].value = rotation;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay *ovl;
+ unsigned int color = a->value;
+ struct omap_overlay_manager_info info;
+
+ ovl = vout->vid_info.overlays[0];
+
+ mutex_lock(&vout->lock);
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.default_color = color;
+ if (ovl->manager->set_manager_info(ovl->manager, &info)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[1].value = color;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ {
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ unsigned int mirror = a->value;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ mutex_lock(&vout->lock);
+ if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
+
+ if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+ vout->mirror = mirror;
+ vout->control[2].value = mirror;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ int ret = 0;
+ unsigned int i, num_buffers = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
+ return -EINVAL;
+	/* if the memory is not mmap or userptr
+	   return an error */
+ if ((V4L2_MEMORY_MMAP != req->memory) &&
+ (V4L2_MEMORY_USERPTR != req->memory))
+ return -EINVAL;
+
+ mutex_lock(&vout->lock);
+ /* Cannot be requested when streaming is on */
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+
+ /* If buffers are already allocated free them */
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+ if (vout->mmap_count) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+ videobuf_mmap_free(q);
+ } else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
+ if (vout->buffer_allocated) {
+ videobuf_mmap_free(q);
+ for (i = 0; i < vout->buffer_allocated; i++) {
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ vout->buffer_allocated = 0;
+ }
+ }
+
+	/* store the memory type in the data structure */
+ vout->memory = req->memory;
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+
+ /* call videobuf_reqbufs api */
+ ret = videobuf_reqbufs(q, req);
+ if (ret < 0)
+ goto reqbuf_err;
+
+ vout->buffer_allocated = req->count;
+
+reqbuf_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+
+ return videobuf_querybuf(&vout->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buffer)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
+ (buffer->index >= vout->buffer_allocated) ||
+ (q->bufs[buffer->index]->memory != buffer->memory)) {
+ return -EINVAL;
+ }
+ if (V4L2_MEMORY_USERPTR == buffer->memory) {
+ if ((buffer->length < vout->pix.sizeimage) ||
+ (0 == buffer->m.userptr)) {
+ return -EINVAL;
+ }
+ }
+
+ if ((is_rotation_enabled(vout)) &&
+ vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "DMA Channel not allocated for Rotation\n");
+ return -EINVAL;
+ }
+
+ return videobuf_qbuf(q, buffer);
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ int ret;
+ u32 addr;
+ unsigned long size;
+ struct videobuf_buffer *vb;
+
+ vb = q->bufs[b->index];
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK)
+ /* Call videobuf_dqbuf for non blocking mode */
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+ else
+ /* Call videobuf_dqbuf for blocking mode */
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+
+ addr = (unsigned long) vout->buf_phy_addr[vb->i];
+ size = (unsigned long) vb->size;
+ dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
+ size, DMA_TO_DEVICE);
+ return ret;
+}
+
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ int ret = 0, j;
+ u32 addr = 0, mask = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto streamon_err;
+ }
+
+ ret = videobuf_streamon(q);
+ if (ret)
+ goto streamon_err;
+
+ if (list_empty(&vout->dma_queue)) {
+ ret = -EIO;
+ goto streamon_err1;
+ }
+
+ /* Get the next frame from the buffer queue */
+ vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&vout->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+	/* Initialize the field_id member */
+ vout->field_id = 0;
+
+ /* set flag here. Next QBUF will start DMA */
+ vout->streaming = 1;
+
+ vout->first_int = 1;
+
+ if (omap_vout_calculate_offset(vout)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
+ addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ + vout->cropped_offset;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
+
+	/* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to set overlay info\n");
+ goto streamon_err1;
+ }
+
+ omap_dispc_register_isr(omap_vout_isr, vout, mask);
+
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+ struct omap_dss_device *dssdev = ovl->get_device(ovl);
+
+ if (dssdev) {
+ ret = ovl->enable(ovl);
+ if (ret)
+ goto streamon_err1;
+ }
+ }
+
+ ret = 0;
+
+streamon_err1:
+ if (ret)
+ ret = videobuf_streamoff(q);
+streamon_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ u32 mask = 0;
+ int ret = 0, j;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ vout->streaming = 0;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+ struct omap_dss_device *dssdev = ovl->get_device(ovl);
+
+ if (dssdev)
+ ovl->disable(ovl);
+ }
+
+	/* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
+ " streamoff\n");
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+ ret = videobuf_streamoff(&vout->vbq);
+
+ return ret;
+}
+
+static int vidioc_s_fbuf(struct file *file, void *fh,
+ const struct v4l2_framebuffer *a)
+{
+ int enable = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* OMAP DSS doesn't support Source and Destination color
+ key together */
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
+ return -EINVAL;
+ /* OMAP DSS Doesn't support the Destination color key
+ and alpha blending together */
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
+ return -EINVAL;
+
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;
+
+ if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+ enable = 1;
+ else
+ enable = 0;
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.trans_enabled = enable;
+ info.trans_key_type = key_type;
+ info.trans_key = vout->win.chromakey;
+
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+ if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 1;
+ } else {
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 0;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ /* enable this only if there is no zorder cap */
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
+ info.partial_alpha_enabled = enable;
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* The video overlay must stay within the framebuffer and can't be
+ positioned independently. */
+ a->flags = V4L2_FBUF_FLAG_OVERLAY;
+ a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
+ | V4L2_FBUF_CAP_SRC_CHROMAKEY;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
+ a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
+ a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.partial_alpha_enabled)
+ a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ }
+
+ return 0;
+}
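
A minimal userspace sketch (not part of the patch) of how the two fbuf handlers above are exercised. The device path "/dev/video1" and the assumption that the overlay chroma-key value has already been programmed through the output overlay window format are mine.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Enable source chroma-keying on the video overlay */
	static int enable_src_chromakey(const char *dev)
	{
		struct v4l2_framebuffer fbuf;
		int ret, fd = open(dev, O_RDWR);

		if (fd < 0)
			return -1;

		memset(&fbuf, 0, sizeof(fbuf));
		ret = ioctl(fd, VIDIOC_G_FBUF, &fbuf);
		if (!ret) {
			/* Source and destination keying are mutually exclusive here */
			fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;
			fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
			ret = ioctl(fd, VIDIOC_S_FBUF, &fbuf);
		}
		close(fd);
		return ret;
	}

	int main(void)
	{
		return enable_src_chromakey("/dev/video1") ? 1 : 0;
	}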
+
+static const struct v4l2_ioctl_ops vout_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static const struct v4l2_file_operations omap_vout_fops = {
+ .owner = THIS_MODULE,
+ .poll = omap_vout_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = omap_vout_mmap,
+ .open = omap_vout_open,
+ .release = omap_vout_release,
+};
+
+/* Init functions used during driver initialization */
+/* Initial setup of video_data */
+static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+ struct v4l2_pix_format *pix;
+ struct v4l2_control *control;
+ struct omap_overlay *ovl = vout->vid_info.overlays[0];
+ struct omap_dss_device *display = ovl->get_device(ovl);
+
+ /* set the default pix */
+ pix = &vout->pix;
+
+	/* Set the default picture size to QQVGA */
+ pix->width = QQVGA_WIDTH;
+ pix->height = QQVGA_HEIGHT;
+
+ /* Default pixel format is RGB 5-6-5 */
+ pix->pixelformat = V4L2_PIX_FMT_RGB565;
+ pix->field = V4L2_FIELD_ANY;
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->priv = 0;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ vout->bpp = RGB565_BPP;
+ vout->fbuf.fmt.width = display->panel.timings.x_res;
+ vout->fbuf.fmt.height = display->panel.timings.y_res;
+
+	/* Set the data structures for the overlay parameters */
+ vout->win.global_alpha = 255;
+ vout->fbuf.flags = 0;
+ vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
+ vout->win.chromakey = 0;
+
+ omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
+
+	/* Initialize the control variables for
+	 * rotation, flipping and background color. */
+ control = vout->control;
+ control[0].id = V4L2_CID_ROTATE;
+ control[0].value = 0;
+ vout->rotation = 0;
+ vout->mirror = 0;
+ vout->control[2].id = V4L2_CID_HFLIP;
+ vout->control[2].value = 0;
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ vout->vrfb_bpp = 2;
+
+ control[1].id = V4L2_CID_BG_COLOR;
+ control[1].value = 0;
+
+ /* initialize the video_device struct */
+ vfd = vout->vfd = video_device_alloc();
+
+ if (!vfd) {
+ printk(KERN_ERR VOUT_NAME ": could not allocate"
+ " video device struct\n");
+ return -ENOMEM;
+ }
+ vfd->release = video_device_release;
+ vfd->ioctl_ops = &vout_ioctl_ops;
+
+ strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+
+ vfd->fops = &omap_vout_fops;
+ vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_TX;
+ mutex_init(&vout->lock);
+
+ vfd->minor = -1;
+ return 0;
+
+}
+
+/* Setup video buffers */
+static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
+ int vid_num)
+{
+ u32 numbuffers;
+ int ret = 0, i;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ ovid = &vout->vid_info;
+
+ numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
+ vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
+ dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
+
+ for (i = 0; i < numbuffers; i++) {
+ vout->buf_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->buffer_size,
+ (u32 *) &vout->buf_phy_addr[i]);
+ if (!vout->buf_virt_addr[i]) {
+ numbuffers = i;
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ vout->cropped_offset = 0;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ int static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+ ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
+ static_vrfb_allocation);
+ }
+
+ return ret;
+
+free_buffers:
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ return ret;
+
+}
+
+/* Create video out devices */
+static int __init omap_vout_create_video_devices(struct platform_device *pdev)
+{
+ int ret = 0, k;
+ struct omap_vout_device *vout;
+ struct video_device *vfd = NULL;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev,
+ struct omap2video_device, v4l2_dev);
+
+ for (k = 0; k < pdev->num_resources; k++) {
+
+ vout = kzalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
+ if (!vout) {
+ dev_err(&pdev->dev, ": could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vout->vid = k;
+ vid_dev->vouts[k] = vout;
+ vout->vid_dev = vid_dev;
+ /* Select video2 if only 1 overlay is controlled by V4L2 */
+ if (pdev->num_resources == 1)
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
+ else
+ /* Else select video1 and video2 one by one. */
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
+ vout->vid_info.num_overlays = 1;
+ vout->vid_info.id = k + 1;
+
+ /* Set VRFB as rotation_type for omap2 and omap3 */
+ if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
+ vout->vid_info.rotation_type = VOUT_ROT_VRFB;
+
+		/* Setup the default configuration for the video devices */
+ if (omap_vout_setup_video_data(vout) != 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Allocate default number of buffers for the video streaming
+ * and reserve the VRFB space for rotation
+ */
+ if (omap_vout_setup_video_bufs(pdev, k) != 0) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+		/* Register the video device with V4L2 */
+ vfd = vout->vfd;
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
+ dev_err(&pdev->dev, ": Could not register "
+ "Video for Linux device\n");
+ vfd->minor = -1;
+ ret = -ENODEV;
+ goto error2;
+ }
+ video_set_drvdata(vfd, vout);
+
+ dev_info(&pdev->dev, ": registered and initialized"
+ " video device %d\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+
+ continue;
+error2:
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+error1:
+ video_device_release(vfd);
+error:
+ kfree(vout);
+ return ret;
+ }
+
+ return -ENODEV;
+}
+/* Driver functions */
+static void omap_vout_cleanup_device(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+ struct omapvideo_info *ovid;
+
+ if (!vout)
+ return;
+
+ vfd = vout->vfd;
+ ovid = &vout->vid_info;
+ if (vfd) {
+ if (!video_is_registered(vfd)) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(vfd);
+ } else {
+ /*
+ * The unregister function will release the video_device
+ * struct as well as unregistering it.
+ */
+ video_unregister_device(vfd);
+ }
+ }
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_release_vrfb(vout);
+		/* Free the VRFB buffers if they were allocated
+		 * at init time
+		 */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
+ omap_vout_free_buffers(vout);
+
+ kfree(vout);
+}
+
+static int omap_vout_remove(struct platform_device *pdev)
+{
+ int k;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ for (k = 0; k < pdev->num_resources; k++)
+ omap_vout_cleanup_device(vid_dev->vouts[k]);
+
+ for (k = 0; k < vid_dev->num_displays; k++) {
+ if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
+ vid_dev->displays[k]->driver->disable(vid_dev->displays[k]);
+
+ omap_dss_put_device(vid_dev->displays[k]);
+ }
+ kfree(vid_dev);
+ return 0;
+}
+
+static int __init omap_vout_probe(struct platform_device *pdev)
+{
+ int ret = 0, i;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *def_display;
+ struct omap2video_device *vid_dev = NULL;
+
+ if (omapdss_is_initialized() == false)
+ return -EPROBE_DEFER;
+
+ ret = omapdss_compat_init();
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init dss\n");
+ return ret;
+ }
+
+ if (pdev->num_resources == 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ ret = -ENODEV;
+ goto err_dss_init;
+ }
+
+ vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
+ if (vid_dev == NULL) {
+ ret = -ENOMEM;
+ goto err_dss_init;
+ }
+
+ vid_dev->num_displays = 0;
+ for_each_dss_dev(dssdev) {
+ omap_dss_get_device(dssdev);
+
+ if (!dssdev->driver) {
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
+ }
+
+ vid_dev->displays[vid_dev->num_displays++] = dssdev;
+ }
+
+ if (vid_dev->num_displays == 0) {
+ dev_err(&pdev->dev, "no displays\n");
+ ret = -EINVAL;
+ goto probe_err0;
+ }
+
+ vid_dev->num_overlays = omap_dss_get_num_overlays();
+ for (i = 0; i < vid_dev->num_overlays; i++)
+ vid_dev->overlays[i] = omap_dss_get_overlay(i);
+
+ vid_dev->num_managers = omap_dss_get_num_overlay_managers();
+ for (i = 0; i < vid_dev->num_managers; i++)
+ vid_dev->managers[i] = omap_dss_get_overlay_manager(i);
+
+	/* Get the video1 and video2 overlays.
+	 * Set up the display attached to those overlays
+	 */
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ ovl = omap_dss_get_overlay(i);
+ dssdev = ovl->get_device(ovl);
+
+ if (dssdev) {
+ def_display = dssdev;
+ } else {
+ dev_warn(&pdev->dev, "cannot find display\n");
+ def_display = NULL;
+ }
+ if (def_display) {
+ struct omap_dss_driver *dssdrv = def_display->driver;
+
+ ret = dssdrv->enable(def_display);
+ if (ret) {
+				/* Here we do not consider this an error,
+				 * as the display may already be enabled
+				 * by the frame buffer driver
+				 */
+ dev_warn(&pdev->dev,
+ "'%s' Display already enabled\n",
+ def_display->name);
+ }
+ }
+ }
+
+ if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
+ dev_err(&pdev->dev, "v4l2_device_register failed\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+
+ ret = omap_vout_create_video_devices(pdev);
+ if (ret)
+ goto probe_err2;
+
+ for (i = 0; i < vid_dev->num_displays; i++) {
+ struct omap_dss_device *display = vid_dev->displays[i];
+
+ if (display->driver->update)
+ display->driver->update(display, 0, 0,
+ display->panel.timings.x_res,
+ display->panel.timings.y_res);
+ }
+ return 0;
+
+probe_err2:
+ v4l2_device_unregister(&vid_dev->v4l2_dev);
+probe_err1:
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ def_display = NULL;
+ ovl = omap_dss_get_overlay(i);
+ dssdev = ovl->get_device(ovl);
+
+ if (dssdev)
+ def_display = dssdev;
+
+ if (def_display && def_display->driver)
+ def_display->driver->disable(def_display);
+ }
+probe_err0:
+ kfree(vid_dev);
+err_dss_init:
+ omapdss_compat_uninit();
+ return ret;
+}
+
+static struct platform_driver omap_vout_driver = {
+ .driver = {
+ .name = VOUT_NAME,
+ },
+ .remove = omap_vout_remove,
+};
+
+static int __init omap_vout_init(void)
+{
+ if (platform_driver_probe(&omap_vout_driver, omap_vout_probe) != 0) {
+ printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void omap_vout_cleanup(void)
+{
+ platform_driver_unregister(&omap_vout_driver);
+}
+
+late_initcall(omap_vout_init);
+module_exit(omap_vout_cleanup);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
new file mode 100644
index 00000000000..62e7e5783ce
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -0,0 +1,393 @@
+/*
+ * omap_vout_vrfb.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include <linux/omap-dma.h>
+#include <video/omapvrfb.h>
+
+#include "omap_voutdef.h"
+#include "omap_voutlib.h"
+
+#define OMAP_DMA_NO_DEVICE 0
+
+/*
+ * Function for allocating video buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *) vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Free VRFB buffers
+ */
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < VRFB_NUM_BUFS; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+}
+
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ bool static_vrfb_allocation)
+{
+ int ret = 0, i, j;
+ struct omap_vout_device *vout;
+ struct video_device *vfd;
+ int image_width, image_height;
+ int vrfb_num_bufs = VRFB_NUM_BUFS;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ dev_info(&pdev->dev, ": VRFB allocation failed\n");
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ /* Calculate VRFB memory size */
+ /* allocate for worst case size */
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+ /*
+	 * Request and initialize DMA for DMA-based VRFB transfer
+ */
+ vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+ vout->vrfb_dma_tx.dma_ch = -1;
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+ ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+ omap_vout_vrfb_dma_tx_callback,
+ (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+ if (ret < 0) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
+ " video%d\n", vfd->minor);
+ }
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+	/* Static allocation of the VRFB buffers is done through
+	 * command line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ ret = -ENOMEM;
+ goto release_vrfb_ctx;
+ }
+ vout->vrfb_static_allocation = 1;
+ }
+ return 0;
+
+release_vrfb_ctx:
+ for (j = 0; j < VRFB_NUM_BUFS; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+free_buffers:
+ omap_vout_free_buffers(vout);
+
+ return ret;
+}
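
To make the worst-case sizing in omap_vout_setup_vrfb_bufs() above concrete, here is a small standalone sketch (not part of the patch) that reproduces the TILE_SIZE rounding and the 2 bytes/pixel times 2 (VRFB) factor, using the constants later defined in omap_voutdef.h; the printed figure is for the 1280x720 maximum.

	#include <stdio.h>

	#define TILE_SIZE      32
	#define VID_MAX_WIDTH  1280
	#define VID_MAX_HEIGHT 720
	#define PAGE_SIZE      4096UL
	#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		/* Round both dimensions up to a whole number of 32-pixel tiles */
		unsigned int w = ((VID_MAX_WIDTH  + TILE_SIZE - 1) / TILE_SIZE) * TILE_SIZE;
		unsigned int h = ((VID_MAX_HEIGHT + TILE_SIZE - 1) / TILE_SIZE) * TILE_SIZE;
		/* 2 bytes/pixel worst case, doubled again for the VRFB pixel size */
		unsigned long size = PAGE_ALIGN((unsigned long)w * h * 2 * 2);

		/* Prints: 1280x736 -> 3768320 bytes per shadow buffer */
		printf("%ux%u -> %lu bytes per shadow buffer\n", w, h, size);
		return 0;
	}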
+
+/*
+ * Release the VRFB context once the module exits
+ */
+void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+ int i;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ }
+}
+
+/*
+ * Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
+ *count = *count > VRFB_NUM_BUFS ? VRFB_NUM_BUFS : *count;
+
+ /* Allocate the VRFB buffers only if the buffers are not
+ * allocated during init time.
+ */
+ if (!vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+ for (i = 0; i < *count; i++)
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i], vout->pix.width,
+ vout->pix.height, vout->bpp, yuv_mode);
+
+ return 0;
+}
+
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb)
+{
+ dma_addr_t dmabuf;
+ struct vid_vrfb_dma *tx;
+ enum dss_rotation rotation;
+ u32 dest_frame_index = 0, src_element_index = 0;
+ u32 dest_element_index = 0, src_frame_index = 0;
+ u32 elem_count = 0, frame_count = 0, pixsize = 2;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ dmabuf = vout->buf_phy_addr[vb->i];
+	/* If rotation is enabled, copy the input buffer into the VRFB
+	 * memory space using DMA. The buffer is copied into the VRFB
+	 * context for the desired angle, and the DSS will read the
+	 * image from the VRFB memory at the 0 degree angle
+	 */
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ /*
+ * DMA transfer in double index mode
+ */
+
+ /* Frame index */
+ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
+
+ /* Source and destination parameters */
+ src_element_index = 0;
+ src_frame_index = 0;
+ dest_element_index = 1;
+ /* Number of elements per frame */
+ elem_count = vout->pix.width * vout->bpp;
+ frame_count = vout->pix.height;
+ tx = &vout->vrfb_dma_tx;
+ tx->tx_status = 0;
+ omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+ tx->dev_id, 0x0);
+ /* src_port required only for OMAP1 */
+ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dmabuf, src_element_index, src_frame_index);
+	/* Set DMA source burst mode for VRFB */
+ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ rotation = calc_rotation(vout);
+
+ /* dest_port required only for OMAP1 */
+ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+ vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+ dest_frame_index);
+	/* Set DMA destination burst mode for VRFB */
+ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+ omap_start_dma(tx->dma_ch);
+ wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1,
+ VRFB_TX_TIMEOUT);
+
+ if (tx->tx_status == 0) {
+ omap_stop_dma(tx->dma_ch);
+ return -EINVAL;
+ }
+	/* Store the buffer's physical address into an array. Addresses
+	 * from this array will be used to configure the DSS */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
+ return 0;
+}
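
An illustrative sketch (not part of the patch) of the double-index DMA parameters computed in omap_vout_prepare_vrfb() above, for a hypothetical 640x480 RGB565 source where bpp = 2 and vrfb_bpp = 1 (both are set elsewhere in the driver): each source line is transferred contiguously, then the destination pointer jumps to the start of the next 2048-pixel VRFB line.

	#include <stdio.h>

	#define MAX_PIXELS_PER_LINE 2048

	int main(void)
	{
		unsigned int width = 640, height = 480, bpp = 2, vrfb_bpp = 1;
		unsigned int pixsize = bpp * vrfb_bpp;

		/* Bytes of one source line, transferred back to back */
		unsigned int elem_count = width * bpp;
		unsigned int frame_count = height;
		/* Skip applied at each end-of-line to reach the next VRFB line */
		unsigned int dest_frame_index =
			(MAX_PIXELS_PER_LINE * pixsize) - (width * bpp) + 1;

		/* Prints: elem_count=1280 frame_count=480 dest_frame_index=2817 */
		printf("elem_count=%u frame_count=%u dest_frame_index=%u\n",
		       elem_count, frame_count, dest_frame_index);
		return 0;
	}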
+
+/*
+ * Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * the VRFB 32 pixels alignment with rotation.
+ */
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
+{
+ enum dss_rotation rotation;
+ bool mirroring = vout->mirror;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int vr_ps = 1, ps = 2, temp_ps = 2;
+ int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (is_rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+			 * vr_ps - Virtual pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+			ps = 2; /* otherwise the pixel size is 2 bytes */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+
+ if (is_rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (mirroring == 0) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (mirroring == 0) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+}
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.h b/drivers/media/platform/omap/omap_vout_vrfb.h
new file mode 100644
index 00000000000..ffde741e059
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout_vrfb.h
@@ -0,0 +1,40 @@
+/*
+ * omap_vout_vrfb.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUT_VRFB_H
+#define OMAP_VOUT_VRFB_H
+
+#ifdef CONFIG_VIDEO_OMAP2_VOUT_VRFB
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout);
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ u32 static_vrfb_allocation);
+void omap_vout_release_vrfb(struct omap_vout_device *vout);
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex);
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb);
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
+#else
+static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { }
+static inline int omap_vout_setup_vrfb_bufs(struct platform_device *pdev,
+		int vid_num, u32 static_vrfb_allocation)
+		{ return 0; }
+static inline void omap_vout_release_vrfb(struct omap_vout_device *vout) { }
+static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+		unsigned int *count, unsigned int startindex)
+		{ return 0; }
+static inline int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+		struct videobuf_buffer *vb)
+		{ return 0; }
+static inline void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { }
+#endif
+
+#endif
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
new file mode 100644
index 00000000000..9ccfe1f475a
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -0,0 +1,225 @@
+/*
+ * omap_voutdef.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef OMAP_VOUTDEF_H
+#define OMAP_VOUTDEF_H
+
+#include <video/omapdss.h>
+#include <video/omapvrfb.h>
+
+#define YUYV_BPP 2
+#define RGB565_BPP 2
+#define RGB24_BPP 3
+#define RGB32_BPP 4
+#define TILE_SIZE 32
+#define YUYV_VRFB_BPP 2
+#define RGB_VRFB_BPP 1
+#define MAX_CID 3
+#define MAC_VRFB_CTXS 4
+#define MAX_VOUT_DEV 2
+#define MAX_OVLS 3
+#define MAX_DISPLAYS 10
+#define MAX_MANAGERS 3
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH 1280 /* Largest width */
+#define VID_MAX_HEIGHT 720 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE 2048
+
+#define VRFB_TX_TIMEOUT 1000
+#define VRFB_NUM_BUFS 4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+enum dma_channel_state {
+ DMA_CHAN_NOT_ALLOTED,
+ DMA_CHAN_ALLOTED,
+};
+
+/* Enum for rotation:
+ * DSS expresses rotation as 0, 1, 2, 3,
+ * while the V4L2 driver expresses it as 0, 90, 180 and 270 degrees
+ */
+enum dss_rotation {
+ dss_rotation_0_degree = 0,
+ dss_rotation_90_degree = 1,
+ dss_rotation_180_degree = 2,
+ dss_rotation_270_degree = 3,
+};
+
+/* Enum for choosing the rotation type for vout.
+ * DSS2 doesn't understand "no rotation" as an
+ * option, while the V4L2 driver doesn't support
+ * rotation when VRFB is not built into the
+ * kernel
+ */
+enum vout_rotaion_type {
+ VOUT_ROT_NONE = 0,
+ VOUT_ROT_VRFB = 1,
+};
+
+/*
+ * This structure is used to store the DMA transfer parameters
+ * for VRFB hidden buffer
+ */
+struct vid_vrfb_dma {
+ int dev_id;
+ int dma_ch;
+ int req_status;
+ int tx_status;
+ wait_queue_head_t wait;
+};
+
+struct omapvideo_info {
+ int id;
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ enum vout_rotaion_type rotation_type;
+};
+
+struct omap2video_device {
+ struct mutex mtx;
+
+ int state;
+
+ struct v4l2_device v4l2_dev;
+ struct omap_vout_device *vouts[MAX_VOUT_DEV];
+
+ int num_displays;
+ struct omap_dss_device *displays[MAX_DISPLAYS];
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ int num_managers;
+ struct omap_overlay_manager *managers[MAX_MANAGERS];
+};
+
+/* per-device data structure */
+struct omap_vout_device {
+
+ struct omapvideo_info vid_info;
+ struct video_device *vfd;
+ struct omap2video_device *vid_dev;
+ int vid;
+ int opened;
+
+	/* we don't allow changing the image fmt/size once buffers
+	 * have been allocated
+	 */
+ int buffer_allocated;
+	/* allow reusing a previously allocated buffer if it is big enough */
+ int buffer_size;
+ /* keep buffer info across opens */
+ unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
+ unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+ enum omap_color_mode dss_mode;
+
+	/* we don't allow requesting new buffers while the old buffers
+	 * are still mmapped
+	 */
+ int mmap_count;
+
+ spinlock_t vbq_lock; /* spinlock for videobuf queues */
+ unsigned long field_count; /* field counter for videobuf_buffer */
+
+	/* true means streaming is in progress. */
+ bool streaming;
+
+ struct v4l2_pix_format pix;
+ struct v4l2_rect crop;
+ struct v4l2_window win;
+ struct v4l2_framebuffer fbuf;
+
+ /* Lock to protect the shared data structures in ioctl */
+ struct mutex lock;
+
+ /* V4L2 control structure for different control id */
+ struct v4l2_control control[MAX_CID];
+ enum dss_rotation rotation;
+ bool mirror;
+ int flicker_filter;
+
+ int bpp; /* bytes per pixel */
+ int vrfb_bpp; /* bytes per pixel with respect to VRFB */
+
+ struct vid_vrfb_dma vrfb_dma_tx;
+ unsigned int smsshado_phy_addr[MAC_VRFB_CTXS];
+ unsigned int smsshado_virt_addr[MAC_VRFB_CTXS];
+ struct vrfb vrfb_context[MAC_VRFB_CTXS];
+ bool vrfb_static_allocation;
+ unsigned int smsshado_size;
+ unsigned char pos;
+
+ int ps, vr_ps, line_length, first_int, field_id;
+ enum v4l2_memory memory;
+ struct videobuf_buffer *cur_frm, *next_frm;
+ struct list_head dma_queue;
+ u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ u32 cropped_offset;
+ s32 tv_field1_offset;
+ void *isr_handle;
+
+ /* Buffer queue variables */
+ struct omap_vout_device *vout;
+ enum v4l2_buf_type type;
+ struct videobuf_queue vbq;
+ int io_allowed;
+
+};
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int is_rotation_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int is_rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+ if (!vout->mirror)
+ return vout->rotation;
+
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+}
+
+void omap_vout_free_buffers(struct omap_vout_device *vout);
+#endif /* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
new file mode 100644
index 00000000000..80b0d88f125
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -0,0 +1,357 @@
+/*
+ * omap_voutlib.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Based on the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <linux/dma-mapping.h>
+
+#include <video/omapdss.h>
+
+#include "omap_voutlib.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video library");
+MODULE_LICENSE("GPL");
+
+/* Return the default overlay cropping rectangle in crop given the image
+ * size in pix and the video display size in fbuf. The default
+ * cropping rectangle is the largest rectangle no larger than the capture size
+ * that will fit on the display. The default cropping rectangle is centered in
+ * the image. All dimensions and offsets are rounded down to even numbers.
+ */
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
+{
+ crop->width = (pix->width < fbuf->fmt.width) ?
+ pix->width : fbuf->fmt.width;
+ crop->height = (pix->height < fbuf->fmt.height) ?
+ pix->height : fbuf->fmt.height;
+ crop->width &= ~1;
+ crop->height &= ~1;
+ crop->left = ((pix->width - crop->width) >> 1) & ~1;
+ crop->top = ((pix->height - crop->height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_default_crop);
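
An illustrative sketch (not part of the patch) of the default crop picked by omap_vout_default_crop() above; the 720x576 source and 640x480 display dimensions are assumptions chosen for the example.

	#include <stdio.h>

	int main(void)
	{
		unsigned int pix_w = 720, pix_h = 576;	/* image */
		unsigned int fb_w = 640, fb_h = 480;	/* display */

		/* Largest even-sized rectangle that fits, centered in the image */
		unsigned int w = (pix_w < fb_w ? pix_w : fb_w) & ~1u;
		unsigned int h = (pix_h < fb_h ? pix_h : fb_h) & ~1u;
		unsigned int left = ((pix_w - w) >> 1) & ~1u;
		unsigned int top = ((pix_h - h) >> 1) & ~1u;

		/* Prints: crop 640x480 at (40,48) */
		printf("crop %ux%u at (%u,%u)\n", w, h, left, top);
		return 0;
	}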
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The adjusted window parameters are
+ * returned in new_win.
+ * Returns zero if successful, or -EINVAL if the requested window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ struct v4l2_rect try_win;
+
+ /* make a working copy of the new_win rectangle */
+ try_win = new_win->w;
+
+ /* adjust the preview window so it fits on the display by clipping any
+ * offscreen areas
+ */
+ if (try_win.left < 0) {
+ try_win.width += try_win.left;
+ try_win.left = 0;
+ }
+ if (try_win.top < 0) {
+ try_win.height += try_win.top;
+ try_win.top = 0;
+ }
+ try_win.width = (try_win.width < fbuf->fmt.width) ?
+ try_win.width : fbuf->fmt.width;
+ try_win.height = (try_win.height < fbuf->fmt.height) ?
+ try_win.height : fbuf->fmt.height;
+ if (try_win.left + try_win.width > fbuf->fmt.width)
+ try_win.width = fbuf->fmt.width - try_win.left;
+ if (try_win.top + try_win.height > fbuf->fmt.height)
+ try_win.height = fbuf->fmt.height - try_win.top;
+ try_win.width &= ~1;
+ try_win.height &= ~1;
+
+ if (try_win.width <= 0 || try_win.height <= 0)
+ return -EINVAL;
+
+ /* We now have a valid preview window, so go with it */
+ new_win->w = try_win;
+ new_win->field = V4L2_FIELD_ANY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_try_window);
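
An illustrative sketch (not part of the patch) of what omap_vout_try_window() above does to a render window that hangs off the top-left corner; the 400x400 window at (-40,-40) and the 320x240 display are assumed example values.

	#include <stdio.h>

	int main(void)
	{
		int left = -40, top = -40, width = 400, height = 400;
		int fb_w = 320, fb_h = 240;

		/* Clip the offscreen part, then clamp to the display size */
		if (left < 0) { width += left; left = 0; }
		if (top < 0) { height += top; top = 0; }
		if (width > fb_w) width = fb_w;
		if (height > fb_h) height = fb_h;
		if (left + width > fb_w) width = fb_w - left;
		if (top + height > fb_h) height = fb_h - top;
		width &= ~1;
		height &= ~1;

		/* Prints: 320x240 at (0,0) */
		printf("%dx%d at (%d,%d)\n", width, height, left, top);
		return 0;
	}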
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The image cropping window in crop
+ * will also be adjusted if necessary. Preference is given to keeping
+ * the window as close to the requested configuration as possible. If
+ * successful, new_win, vout->win, and crop are updated.
+ * Returns zero if successful, or -EINVAL if the requested preview window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ int err;
+
+ err = omap_vout_try_window(fbuf, new_win);
+ if (err)
+ return err;
+
+ /* update our preview window */
+ win->w = new_win->w;
+ win->field = new_win->field;
+ win->chromakey = new_win->chromakey;
+
+ /* Adjust the cropping window to allow for resizing limitation */
+ if (omap_vout_dss_omap24xx()) {
+ /* For 24xx limit is 8x to 1/2x scaling. */
+ if ((crop->height/win->w.height) >= 2)
+ crop->height = win->w.height * 2;
+
+ if ((crop->width/win->w.width) >= 2)
+ crop->width = win->w.width * 2;
+
+ if (crop->width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is 768
+ * pixels wide. If the cropped image is wider than
+ * 768 pixels then it cannot be vertically resized.
+ */
+ if (crop->height != win->w.height)
+ crop->width = 768;
+ }
+ } else if (omap_vout_dss_omap34xx()) {
+ /* For 34xx limit is 8x to 1/4x scaling. */
+ if ((crop->height/win->w.height) >= 4)
+ crop->height = win->w.height * 4;
+
+ if ((crop->width/win->w.width) >= 4)
+ crop->width = win->w.width * 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_window);
+
+/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
+ * the nearest supported configuration. The image render window in win will
+ * also be adjusted if necessary. The preview window is adjusted such that the
+ * horizontal and vertical rescaling ratios stay constant. If the render
+ * window would fall outside the display boundaries, the cropping rectangle
+ * will also be adjusted to maintain the rescaling ratios. If successful, crop
+ * and win are updated.
+ * Returns zero if successful, or -EINVAL if the requested cropping rectangle is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
+{
+ struct v4l2_rect try_crop;
+ unsigned long vresize, hresize;
+
+ /* make a working copy of the new_crop rectangle */
+ try_crop = *new_crop;
+
+ /* adjust the cropping rectangle so it fits in the image */
+ if (try_crop.left < 0) {
+ try_crop.width += try_crop.left;
+ try_crop.left = 0;
+ }
+ if (try_crop.top < 0) {
+ try_crop.height += try_crop.top;
+ try_crop.top = 0;
+ }
+ try_crop.width = (try_crop.width < pix->width) ?
+ try_crop.width : pix->width;
+ try_crop.height = (try_crop.height < pix->height) ?
+ try_crop.height : pix->height;
+ if (try_crop.left + try_crop.width > pix->width)
+ try_crop.width = pix->width - try_crop.left;
+ if (try_crop.top + try_crop.height > pix->height)
+ try_crop.height = pix->height - try_crop.top;
+
+ try_crop.width &= ~1;
+ try_crop.height &= ~1;
+
+ if (try_crop.width <= 0 || try_crop.height <= 0)
+ return -EINVAL;
+
+ if (omap_vout_dss_omap24xx()) {
+ if (try_crop.height != win->w.height) {
+ /* If we're resizing vertically, we can't support a
+ * crop width wider than 768 pixels.
+ */
+ if (try_crop.width > 768)
+ try_crop.width = 768;
+ }
+ }
+ /* vertical resizing */
+ vresize = (1024 * try_crop.height) / win->w.height;
+ if (omap_vout_dss_omap24xx() && (vresize > 2048))
+ vresize = 2048;
+ else if (omap_vout_dss_omap34xx() && (vresize > 4096))
+ vresize = 4096;
+
+ win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
+ if (win->w.height == 0)
+ win->w.height = 2;
+ if (win->w.height + win->w.top > fbuf->fmt.height) {
+ /* We made the preview window extend below the bottom of the
+ * display, so clip it to the display boundary and resize the
+ * cropping height to maintain the vertical resizing ratio.
+ */
+ win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
+ if (try_crop.height == 0)
+ try_crop.height = 2;
+ }
+ /* horizontal resizing */
+ hresize = (1024 * try_crop.width) / win->w.width;
+ if (omap_vout_dss_omap24xx() && (hresize > 2048))
+ hresize = 2048;
+ else if (omap_vout_dss_omap34xx() && (hresize > 4096))
+ hresize = 4096;
+
+ win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
+ if (win->w.width == 0)
+ win->w.width = 2;
+ if (win->w.width + win->w.left > fbuf->fmt.width) {
+ /* We made the preview window extend past the right side of the
+ * display, so clip it to the display boundary and resize the
+ * cropping width to maintain the horizontal resizing ratio.
+ */
+ win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
+ if (try_crop.width == 0)
+ try_crop.width = 2;
+ }
+ if (omap_vout_dss_omap24xx()) {
+ if ((try_crop.height/win->w.height) >= 2)
+ try_crop.height = win->w.height * 2;
+
+ if ((try_crop.width/win->w.width) >= 2)
+ try_crop.width = win->w.width * 2;
+
+ if (try_crop.width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is
+ * 768 pixels wide. If the cropped image is wider
+ * than 768 pixels then it cannot be vertically resized.
+ */
+ if (try_crop.height != win->w.height)
+ try_crop.width = 768;
+ }
+ } else if (omap_vout_dss_omap34xx()) {
+ if ((try_crop.height/win->w.height) >= 4)
+ try_crop.height = win->w.height * 4;
+
+ if ((try_crop.width/win->w.width) >= 4)
+ try_crop.width = win->w.width * 4;
+ }
+ /* update our cropping rectangle and we're done */
+ *crop = try_crop;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_crop);
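
An illustrative sketch (not part of the patch) of the 1/1024 fixed-point vertical resize ratio used in omap_vout_new_crop() above, and of how the OMAP24xx 1/2x downscale limit pushes the window size back up; the 600-line crop and 200-line window are assumed example values.

	#include <stdio.h>

	int main(void)
	{
		unsigned long crop_h = 600, win_h = 200;
		unsigned long vresize = (1024 * crop_h) / win_h;	/* 3072 = 3x down */

		if (vresize > 2048)		/* 24xx: at most 1/2x downscaling */
			vresize = 2048;

		win_h = ((1024 * crop_h) / vresize) & ~1UL;	/* window grows to 300 */

		/* Prints: vresize=2048 new window height=300 */
		printf("vresize=%lu new window height=%lu\n", vresize, win_h);
		return 0;
	}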
+
+/* Given a new format in pix and fbuf, crop and win
+ * structures are initialized to default values. crop
+ * is initialized to the largest window size that will fit on the display. The
+ * crop window is centered in the image. win is initialized to
+ * the same size as crop and is centered on the display.
+ * All sizes and offsets are constrained to be even numbers.
+ */
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win)
+{
+ /* crop defines the preview source window in the image capture
+ * buffer
+ */
+ omap_vout_default_crop(pix, fbuf, crop);
+
+ /* win defines the preview target window on the display */
+ win->w.width = crop->width;
+ win->w.height = crop->height;
+ win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
+ win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_format);
+
+/*
+ * Allocate buffers
+ */
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ u32 order, size;
+ unsigned long virt_addr, addr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+ virt_addr = __get_free_pages(GFP_KERNEL, order);
+ addr = virt_addr;
+
+ if (virt_addr) {
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
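
A standalone sketch (not part of the patch) of how the allocation above behaves: __get_free_pages() hands out a power-of-two number of pages, so the worst-case OMAP_VOUT_MAX_BUF_SIZE (1280*720*4 bytes) actually consumes a 4 MiB block. The local get_order() mimics the kernel helper of the same name.

	#include <stdio.h>

	#define PAGE_SIZE     4096UL
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	/* Same idea as the kernel's get_order(): smallest order covering size */
	static unsigned int get_order(unsigned long size)
	{
		unsigned int order = 0;
		unsigned long pages = PAGE_ALIGN(size) / PAGE_SIZE;

		while ((1UL << order) < pages)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long size = 1280UL * 720 * 4;	/* 3686400 bytes */
		unsigned int order = get_order(size);

		/* Prints: order=10 -> 4194304 bytes allocated */
		printf("order=%u -> %lu bytes allocated\n", order,
		       (1UL << order) * PAGE_SIZE);
		return 0;
	}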
+
+/*
+ * Free buffers
+ */
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+ u32 order, size;
+ unsigned long addr = virtaddr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages((unsigned long) virtaddr, order);
+}
+
+bool omap_vout_dss_omap24xx(void)
+{
+ return omapdss_get_version() == OMAPDSS_VER_OMAP24xx;
+}
+
+bool omap_vout_dss_omap34xx(void)
+{
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/drivers/media/platform/omap/omap_voutlib.h b/drivers/media/platform/omap/omap_voutlib.h
new file mode 100644
index 00000000000..f9d1c0779f3
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutlib.h
@@ -0,0 +1,39 @@
+/*
+ * omap_voutlib.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUTLIB_H
+#define OMAP_VOUTLIB_H
+
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
+
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf,
+ const struct v4l2_rect *new_crop);
+
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win);
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
+
+bool omap_vout_dss_omap24xx(void);
+bool omap_vout_dss_omap34xx(void);
+#endif /* #ifndef OMAP_VOUTLIB_H */
+
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/omap3isp/Makefile
new file mode 100644
index 00000000000..254975a9174
--- /dev/null
+++ b/drivers/media/platform/omap3isp/Makefile
@@ -0,0 +1,11 @@
+# Makefile for OMAP3 ISP driver
+
+ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
+
+omap3-isp-objs += \
+ isp.o ispvideo.o \
+ ispcsiphy.o ispccp2.o ispcsi2.o \
+ ispccdc.o isppreview.o ispresizer.o \
+ ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
+
+obj-$(CONFIG_VIDEO_OMAP3) += omap3-isp.o
diff --git a/drivers/media/platform/omap3isp/cfa_coef_table.h b/drivers/media/platform/omap3isp/cfa_coef_table.h
new file mode 100644
index 00000000000..c84df0706f3
--- /dev/null
+++ b/drivers/media/platform/omap3isp/cfa_coef_table.h
@@ -0,0 +1,61 @@
+/*
+ * cfa_coef_table.h
+ *
+ * TI OMAP3 ISP - CFA coefficients table
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+{ 244, 0, 247, 0, 12, 27, 36, 247, 250, 0, 27, 0, 4, 250, 12, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 12, 250, 4, 0, 27, 0, 250,
+247, 36, 27, 12, 0, 247, 0, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 0, 247, 0, 12, 27, 36, 247, 250, 0, 27, 0, 4, 250, 12, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 12, 250, 4, 0, 27, 0, 250,
+247, 36, 27, 12, 0, 247, 0, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 0, 247, 0, 12, 27, 36, 247, 250, 0, 27, 0, 4, 250, 12, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 12, 250, 4, 0, 27, 0, 250,
+247, 36, 27, 12, 0, 247, 0, 244, 0, 0, 40, 0, 0, 0, 0, 248 },
+{ 0, 247, 0, 244, 247, 36, 27, 12, 0, 27, 0, 250, 244, 12, 250, 4,
+ 0, 0, 0, 248, 0, 0, 40, 0, 4, 250, 12, 244, 250, 0, 27, 0,
+ 12, 27, 36, 247, 244, 0, 247, 0, 0, 40, 0, 0, 248, 0, 0, 0,
+ 0, 247, 0, 244, 247, 36, 27, 12, 0, 27, 0, 250, 244, 12, 250, 4,
+ 0, 0, 0, 248, 0, 0, 40, 0, 4, 250, 12, 244, 250, 0, 27, 0,
+ 12, 27, 36, 247, 244, 0, 247, 0, 0, 40, 0, 0, 248, 0, 0, 0,
+ 0, 247, 0, 244, 247, 36, 27, 12, 0, 27, 0, 250, 244, 12, 250, 4,
+ 0, 0, 0, 248, 0, 0, 40, 0, 4, 250, 12, 244, 250, 0, 27, 0,
+ 12, 27, 36, 247, 244, 0, 247, 0, 0, 40, 0, 0, 248, 0, 0, 0 },
+{ 4, 250, 12, 244, 250, 0, 27, 0, 12, 27, 36, 247, 244, 0, 247, 0,
+ 0, 0, 0, 248, 0, 0, 40, 0, 0, 247, 0, 244, 247, 36, 27, 12,
+ 0, 27, 0, 250, 244, 12, 250, 4, 0, 40, 0, 0, 248, 0, 0, 0,
+ 4, 250, 12, 244, 250, 0, 27, 0, 12, 27, 36, 247, 244, 0, 247, 0,
+ 0, 0, 0, 248, 0, 0, 40, 0, 0, 247, 0, 244, 247, 36, 27, 12,
+ 0, 27, 0, 250, 244, 12, 250, 4, 0, 40, 0, 0, 248, 0, 0, 0,
+ 4, 250, 12, 244, 250, 0, 27, 0, 12, 27, 36, 247, 244, 0, 247, 0,
+ 0, 0, 0, 248, 0, 0, 40, 0, 0, 247, 0, 244, 247, 36, 27, 12,
+ 0, 27, 0, 250, 244, 12, 250, 4, 0, 40, 0, 0, 248, 0, 0, 0 },
+{ 244,12, 250, 4, 0, 27, 0, 250, 247, 36, 27, 12, 0, 247, 0, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 0, 247, 0, 12, 27, 36, 247,
+250, 0, 27, 0, 4, 250, 12, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 12, 250, 4, 0, 27, 0, 250, 247, 36, 27, 12, 0, 247, 0, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 0, 247, 0, 12, 27, 36, 247,
+250, 0, 27, 0, 4, 250, 12, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 12, 250, 4, 0, 27, 0, 250, 247, 36, 27, 12, 0, 247, 0, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 0, 247, 0, 12, 27, 36, 247,
+250, 0, 27, 0, 4, 250, 12, 244, 0, 0, 40, 0, 0, 0, 0, 248 },
diff --git a/drivers/media/platform/omap3isp/gamma_table.h b/drivers/media/platform/omap3isp/gamma_table.h
new file mode 100644
index 00000000000..78deebf7d96
--- /dev/null
+++ b/drivers/media/platform/omap3isp/gamma_table.h
@@ -0,0 +1,90 @@
+/*
+ * gamma_table.h
+ *
+ * TI OMAP3 ISP - Default gamma table for all components
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+ 0, 0, 1, 2, 3, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 36, 37, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 63, 64, 65, 66, 66, 67, 68, 69, 69, 70,
+ 71, 72, 72, 73, 74, 75, 75, 76, 77, 78, 78, 79, 80, 81, 81, 82,
+ 83, 84, 84, 85, 86, 87, 88, 88, 89, 90, 91, 91, 92, 93, 94, 94,
+ 95, 96, 97, 97, 98, 98, 99, 99, 100, 100, 101, 101, 102, 103, 104, 104,
+105, 106, 107, 108, 108, 109, 110, 111, 111, 112, 113, 114, 114, 115, 116, 117,
+117, 118, 119, 119, 120, 120, 121, 121, 122, 122, 123, 123, 124, 124, 125, 125,
+126, 126, 127, 127, 128, 128, 129, 129, 130, 130, 131, 131, 132, 132, 133, 133,
+134, 134, 135, 135, 136, 136, 137, 137, 138, 138, 139, 139, 140, 140, 141, 141,
+142, 142, 143, 143, 144, 144, 145, 145, 146, 146, 147, 147, 148, 148, 149, 149,
+150, 150, 151, 151, 152, 152, 153, 153, 153, 153, 154, 154, 154, 154, 155, 155,
+156, 156, 157, 157, 158, 158, 158, 159, 159, 159, 160, 160, 160, 161, 161, 162,
+162, 163, 163, 164, 164, 164, 164, 165, 165, 165, 165, 166, 166, 167, 167, 168,
+168, 169, 169, 170, 170, 170, 170, 171, 171, 171, 171, 172, 172, 173, 173, 174,
+174, 175, 175, 176, 176, 176, 176, 177, 177, 177, 177, 178, 178, 178, 178, 179,
+179, 179, 179, 180, 180, 180, 180, 181, 181, 181, 181, 182, 182, 182, 182, 183,
+183, 183, 183, 184, 184, 184, 184, 185, 185, 185, 185, 186, 186, 186, 186, 187,
+187, 187, 187, 188, 188, 188, 188, 189, 189, 189, 189, 190, 190, 190, 190, 191,
+191, 191, 191, 192, 192, 192, 192, 193, 193, 193, 193, 194, 194, 194, 194, 195,
+195, 195, 195, 196, 196, 196, 196, 197, 197, 197, 197, 198, 198, 198, 198, 199,
+199, 199, 199, 200, 200, 200, 200, 201, 201, 201, 201, 202, 202, 202, 203, 203,
+203, 203, 204, 204, 204, 204, 205, 205, 205, 205, 206, 206, 206, 206, 207, 207,
+207, 207, 208, 208, 208, 208, 209, 209, 209, 209, 210, 210, 210, 210, 210, 210,
+210, 210, 210, 210, 210, 210, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211,
+211, 212, 212, 212, 212, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
+213, 214, 214, 214, 214, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215,
+216, 216, 216, 216, 217, 217, 217, 217, 218, 218, 218, 218, 219, 219, 219, 219,
+219, 219, 219, 219, 219, 219, 219, 219, 220, 220, 220, 220, 221, 221, 221, 221,
+221, 221, 221, 221, 221, 221, 221, 222, 222, 222, 222, 223, 223, 223, 223, 223,
+223, 223, 223, 223, 223, 223, 223, 224, 224, 224, 224, 225, 225, 225, 225, 225,
+225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 226, 226,
+226, 226, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 228, 228,
+228, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 230, 230, 230,
+230, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 232, 232, 232,
+232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232,
+233, 233, 233, 233, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 235,
+235, 235, 235, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236,
+236, 236, 236, 236, 236, 236, 237, 237, 237, 237, 238, 238, 238, 238, 238, 238,
+238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238,
+238, 238, 238, 238, 238, 239, 239, 239, 239, 240, 240, 240, 240, 240, 240, 240,
+240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240,
+240, 240, 240, 240, 241, 241, 241, 241, 242, 242, 242, 242, 242, 242, 242, 242,
+242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242,
+242, 242, 243, 243, 243, 243, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
+244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
+244, 245, 245, 245, 245, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
+246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
+246, 246, 246, 246, 246, 246, 246, 247, 247, 247, 247, 248, 248, 248, 248, 248,
+248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248,
+248, 248, 248, 248, 248, 248, 249, 249, 249, 249, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 251, 251, 251, 251, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
new file mode 100644
index 00000000000..2c7aa672056
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -0,0 +1,2406 @@
+/*
+ * isp.c
+ *
+ * TI OMAP3 ISP - Core
+ *
+ * Copyright (C) 2006-2010 Nokia Corporation
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * Contributors:
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ * David Cohen <dacohen@gmail.com>
+ * Stanimir Varbanov <svarbanov@mm-sol.com>
+ * Vimarsh Zutshi <vimarsh.zutshi@gmail.com>
+ * Tuukka Toivonen <tuukkat76@gmail.com>
+ * Sergio Aguirre <saaguirre@ti.com>
+ * Antti Koskipaa <akoskipa@gmail.com>
+ * Ivan T. Ivanov <iivanov@mm-sol.com>
+ * RaniSuneela <r-m@ti.com>
+ * Atanas Filipov <afilipov@mm-sol.com>
+ * Gjorgji Rosikopulos <grosikopulos@mm-sol.com>
+ * Hiroshi DOYU <hiroshi.doyu@nokia.com>
+ * Nayden Kanchev <nkanchev@mm-sol.com>
+ * Phil Carmody <ext-phil.2.carmody@nokia.com>
+ * Artem Bityutskiy <artem.bityutskiy@nokia.com>
+ * Dominic Curran <dcurran@ti.com>
+ * Ilkka Myllyperkio <ilkka.myllyperkio@sofica.fi>
+ * Pallavi Kulkarni <p-kulkarni@ti.com>
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ * Mohit Jalori <mjalori@ti.com>
+ * Sameer Venkatraman <sameerv@ti.com>
+ * Senthilvadivu Guruswamy <svadivu@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ * Toni Leinonen <toni.leinonen@nokia.com>
+ * Troy Laramy <t-laramy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <asm/cacheflush.h>
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/omap-iommu.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <asm/dma-iommu.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccdc.h"
+#include "isppreview.h"
+#include "ispresizer.h"
+#include "ispcsi2.h"
+#include "ispccp2.h"
+#include "isph3a.h"
+#include "isphist.h"
+
+static unsigned int autoidle;
+module_param(autoidle, int, 0444);
+MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
+
+static void isp_save_ctx(struct isp_device *isp);
+
+static void isp_restore_ctx(struct isp_device *isp);
+
+static const struct isp_res_mapping isp_res_maps[] = {
+ {
+ .isp_rev = ISP_REVISION_2_0,
+ .map = 1 << OMAP3_ISP_IOMEM_MAIN |
+ 1 << OMAP3_ISP_IOMEM_CCP2 |
+ 1 << OMAP3_ISP_IOMEM_CCDC |
+ 1 << OMAP3_ISP_IOMEM_HIST |
+ 1 << OMAP3_ISP_IOMEM_H3A |
+ 1 << OMAP3_ISP_IOMEM_PREV |
+ 1 << OMAP3_ISP_IOMEM_RESZ |
+ 1 << OMAP3_ISP_IOMEM_SBL |
+ 1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
+ 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
+ 1 << OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
+ },
+ {
+ .isp_rev = ISP_REVISION_15_0,
+ .map = 1 << OMAP3_ISP_IOMEM_MAIN |
+ 1 << OMAP3_ISP_IOMEM_CCP2 |
+ 1 << OMAP3_ISP_IOMEM_CCDC |
+ 1 << OMAP3_ISP_IOMEM_HIST |
+ 1 << OMAP3_ISP_IOMEM_H3A |
+ 1 << OMAP3_ISP_IOMEM_PREV |
+ 1 << OMAP3_ISP_IOMEM_RESZ |
+ 1 << OMAP3_ISP_IOMEM_SBL |
+ 1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
+ 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
+ 1 << OMAP3_ISP_IOMEM_CSI2A_REGS2 |
+ 1 << OMAP3_ISP_IOMEM_CSI2C_REGS1 |
+ 1 << OMAP3_ISP_IOMEM_CSIPHY1 |
+ 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2 |
+ 1 << OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
+ },
+};
+
+/* Structure for saving/restoring ISP module registers */
+static struct isp_reg isp_reg_list[] = {
+ {OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
+ {OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
+ {0, ISP_TOK_TERM, 0}
+};
+
+/*
+ * omap3isp_flush - Post pending L3 bus writes by doing a register readback
+ * @isp: OMAP3 ISP device
+ *
+ * In order to force posting of pending writes, we need to write and
+ * read back the same register, in this case the revision register.
+ *
+ * See this link for reference:
+ * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ */
+void omap3isp_flush(struct isp_device *isp)
+{
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+}
+
+/* -----------------------------------------------------------------------------
+ * XCLK
+ */
+
+#define to_isp_xclk(_hw) container_of(_hw, struct isp_xclk, hw)
+
+static void isp_xclk_update(struct isp_xclk *xclk, u32 divider)
+{
+ switch (xclk->id) {
+ case ISP_XCLK_A:
+ isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
+ ISPTCTRL_CTRL_DIVA_MASK,
+ divider << ISPTCTRL_CTRL_DIVA_SHIFT);
+ break;
+ case ISP_XCLK_B:
+ isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
+ ISPTCTRL_CTRL_DIVB_MASK,
+ divider << ISPTCTRL_CTRL_DIVB_SHIFT);
+ break;
+ }
+}
+
+static int isp_xclk_prepare(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+
+ omap3isp_get(xclk->isp);
+
+ return 0;
+}
+
+static void isp_xclk_unprepare(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+
+ omap3isp_put(xclk->isp);
+}
+
+static int isp_xclk_enable(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xclk->lock, flags);
+ isp_xclk_update(xclk, xclk->divider);
+ xclk->enabled = true;
+ spin_unlock_irqrestore(&xclk->lock, flags);
+
+ return 0;
+}
+
+static void isp_xclk_disable(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xclk->lock, flags);
+ isp_xclk_update(xclk, 0);
+ xclk->enabled = false;
+ spin_unlock_irqrestore(&xclk->lock, flags);
+}
+
+static unsigned long isp_xclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+
+ return parent_rate / xclk->divider;
+}
+
+static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
+{
+ u32 divider;
+
+ if (*rate >= parent_rate) {
+ *rate = parent_rate;
+ return ISPTCTRL_CTRL_DIV_BYPASS;
+ }
+
+ divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
+ if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
+ divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;
+
+ *rate = parent_rate / divider;
+ return divider;
+}
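+
+/*
+ * Editor's note -- an illustrative sketch, not part of the original driver
+ * code. The divider selection above reduces to plain integer arithmetic;
+ * assuming a hypothetical 172.8 MHz parent clock and a 24 MHz request:
+ *
+ *	unsigned long parent = 172800000;
+ *	unsigned long rate = 24000000;
+ *	u32 div = DIV_ROUND_CLOSEST(parent, rate);	// 7
+ *	rate = parent / div;				// 24685714 Hz actually produced
+ *
+ * Requests at or above the parent rate are clamped to the parent rate and
+ * select the bypass divider instead.
+ */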
+
+static long isp_xclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ isp_xclk_calc_divider(&rate, *parent_rate);
+ return rate;
+}
+
+static int isp_xclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+ unsigned long flags;
+ u32 divider;
+
+ divider = isp_xclk_calc_divider(&rate, parent_rate);
+
+ spin_lock_irqsave(&xclk->lock, flags);
+
+ xclk->divider = divider;
+ if (xclk->enabled)
+ isp_xclk_update(xclk, divider);
+
+ spin_unlock_irqrestore(&xclk->lock, flags);
+
+ dev_dbg(xclk->isp->dev, "%s: cam_xclk%c set to %lu Hz (div %u)\n",
+ __func__, xclk->id == ISP_XCLK_A ? 'a' : 'b', rate, divider);
+ return 0;
+}
+
+static const struct clk_ops isp_xclk_ops = {
+ .prepare = isp_xclk_prepare,
+ .unprepare = isp_xclk_unprepare,
+ .enable = isp_xclk_enable,
+ .disable = isp_xclk_disable,
+ .recalc_rate = isp_xclk_recalc_rate,
+ .round_rate = isp_xclk_round_rate,
+ .set_rate = isp_xclk_set_rate,
+};
+
+static const char *isp_xclk_parent_name = "cam_mclk";
+
+static const struct clk_init_data isp_xclk_init_data = {
+ .name = "cam_xclk",
+ .ops = &isp_xclk_ops,
+ .parent_names = &isp_xclk_parent_name,
+ .num_parents = 1,
+};
+
+static int isp_xclk_init(struct isp_device *isp)
+{
+ struct isp_platform_data *pdata = isp->pdata;
+ struct clk_init_data init;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
+ isp->xclks[i].clk = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
+ struct isp_xclk *xclk = &isp->xclks[i];
+
+ xclk->isp = isp;
+ xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
+ xclk->divider = 1;
+ spin_lock_init(&xclk->lock);
+
+ init.name = i == 0 ? "cam_xclka" : "cam_xclkb";
+ init.ops = &isp_xclk_ops;
+ init.parent_names = &isp_xclk_parent_name;
+ init.num_parents = 1;
+
+ xclk->hw.init = &init;
+ /*
+		 * The first argument is NULL in order to avoid a circular
+		 * reference, as this driver takes a reference on the sensor
+		 * subdevice modules while the sensors would take a reference
+		 * on this module through clk_get().
+ */
+ xclk->clk = clk_register(NULL, &xclk->hw);
+ if (IS_ERR(xclk->clk))
+ return PTR_ERR(xclk->clk);
+
+ if (pdata->xclks[i].con_id == NULL &&
+ pdata->xclks[i].dev_id == NULL)
+ continue;
+
+ xclk->lookup = kzalloc(sizeof(*xclk->lookup), GFP_KERNEL);
+ if (xclk->lookup == NULL)
+ return -ENOMEM;
+
+ xclk->lookup->con_id = pdata->xclks[i].con_id;
+ xclk->lookup->dev_id = pdata->xclks[i].dev_id;
+ xclk->lookup->clk = xclk->clk;
+
+ clkdev_add(xclk->lookup);
+ }
+
+ return 0;
+}
+
+static void isp_xclk_cleanup(struct isp_device *isp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
+ struct isp_xclk *xclk = &isp->xclks[i];
+
+ if (!IS_ERR(xclk->clk))
+ clk_unregister(xclk->clk);
+
+ if (xclk->lookup)
+ clkdev_drop(xclk->lookup);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupts
+ */
+
+/*
+ * isp_enable_interrupts - Enable ISP interrupts.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_enable_interrupts(struct isp_device *isp)
+{
+ static const u32 irq = IRQ0ENABLE_CSIA_IRQ
+ | IRQ0ENABLE_CSIB_IRQ
+ | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
+ | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
+ | IRQ0ENABLE_CCDC_VD0_IRQ
+ | IRQ0ENABLE_CCDC_VD1_IRQ
+ | IRQ0ENABLE_HS_VS_IRQ
+ | IRQ0ENABLE_HIST_DONE_IRQ
+ | IRQ0ENABLE_H3A_AWB_DONE_IRQ
+ | IRQ0ENABLE_H3A_AF_DONE_IRQ
+ | IRQ0ENABLE_PRV_DONE_IRQ
+ | IRQ0ENABLE_RSZ_DONE_IRQ;
+
+ isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+ isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
+}
+
+/*
+ * isp_disable_interrupts - Disable ISP interrupts.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_disable_interrupts(struct isp_device *isp)
+{
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
+}
+
+/*
+ * isp_core_init - ISP core settings
+ * @isp: OMAP3 ISP device
+ * @idle: Select smart-standby (non-zero) or force-standby (zero) mode.
+ *
+ * Set the power settings for the ISP and SBL bus and configure the HS/VS
+ * interrupt source.
+ *
+ * We need to configure the HS/VS interrupt source before interrupts get
+ * enabled, as the sensor might be free-running and the ISP default setting
+ * (HS edge) would put an unnecessary burden on the CPU.
+ */
+static void isp_core_init(struct isp_device *isp, int idle)
+{
+ isp_reg_writel(isp,
+ ((idle ? ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY :
+ ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY) <<
+ ISP_SYSCONFIG_MIDLEMODE_SHIFT) |
+ ((isp->revision == ISP_REVISION_15_0) ?
+ ISP_SYSCONFIG_AUTOIDLE : 0),
+ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
+
+ isp_reg_writel(isp,
+ (isp->autoidle ? ISPCTRL_SBL_AUTOIDLE : 0) |
+ ISPCTRL_SYNC_DETECT_VSRISE,
+ OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+}
+
+/*
+ * Configure the bridge and lane shifter. Valid inputs are
+ *
+ * CCDC_INPUT_PARALLEL: Parallel interface
+ * CCDC_INPUT_CSI2A: CSI2a receiver
+ * CCDC_INPUT_CCP2B: CCP2b receiver
+ * CCDC_INPUT_CSI2C: CSI2c receiver
+ *
+ * The bridge and lane shifter are configured according to the selected input
+ * and the ISP platform data.
+ */
+void omap3isp_configure_bridge(struct isp_device *isp,
+ enum ccdc_input_entity input,
+ const struct isp_parallel_platform_data *pdata,
+ unsigned int shift, unsigned int bridge)
+{
+ u32 ispctrl_val;
+
+ ispctrl_val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+ ispctrl_val &= ~ISPCTRL_SHIFT_MASK;
+ ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
+ ispctrl_val &= ~ISPCTRL_PAR_SER_CLK_SEL_MASK;
+ ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
+ ispctrl_val |= bridge;
+
+ switch (input) {
+ case CCDC_INPUT_PARALLEL:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
+ ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
+ shift += pdata->data_lane_shift * 2;
+ break;
+
+ case CCDC_INPUT_CSI2A:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
+ break;
+
+ case CCDC_INPUT_CCP2B:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
+ break;
+
+ case CCDC_INPUT_CSI2C:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
+ break;
+
+ default:
+ return;
+ }
+
+ ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;
+
+ isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+}
+
+void omap3isp_hist_dma_done(struct isp_device *isp)
+{
+ if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
+ omap3isp_stat_pcr_busy(&isp->isp_hist)) {
+ /* Histogram cannot be enabled in this frame anymore */
+ atomic_set(&isp->isp_hist.buf_err, 1);
+ dev_dbg(isp->dev, "hist: Out of synchronization with "
+ "CCDC. Ignoring next buffer.\n");
+ }
+}
+
+static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
+{
+ static const char *name[] = {
+ "CSIA_IRQ",
+ "res1",
+ "res2",
+ "CSIB_LCM_IRQ",
+ "CSIB_IRQ",
+ "res5",
+ "res6",
+ "res7",
+ "CCDC_VD0_IRQ",
+ "CCDC_VD1_IRQ",
+ "CCDC_VD2_IRQ",
+ "CCDC_ERR_IRQ",
+ "H3A_AF_DONE_IRQ",
+ "H3A_AWB_DONE_IRQ",
+ "res14",
+ "res15",
+ "HIST_DONE_IRQ",
+ "CCDC_LSC_DONE",
+ "CCDC_LSC_PREFETCH_COMPLETED",
+ "CCDC_LSC_PREFETCH_ERROR",
+ "PRV_DONE_IRQ",
+ "CBUFF_IRQ",
+ "res22",
+ "res23",
+ "RSZ_DONE_IRQ",
+ "OVF_IRQ",
+ "res26",
+ "res27",
+ "MMU_ERR_IRQ",
+ "OCP_ERR_IRQ",
+ "SEC_ERR_IRQ",
+ "HS_VS_IRQ",
+ };
+ int i;
+
+ dev_dbg(isp->dev, "ISP IRQ: ");
+
+ for (i = 0; i < ARRAY_SIZE(name); i++) {
+ if ((1 << i) & irqstatus)
+ printk(KERN_CONT "%s ", name[i]);
+ }
+ printk(KERN_CONT "\n");
+}
+
+static void isp_isr_sbl(struct isp_device *isp)
+{
+ struct device *dev = isp->dev;
+ struct isp_pipeline *pipe;
+ u32 sbl_pcr;
+
+ /*
+ * Handle shared buffer logic overflows for video buffers.
+ * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
+ */
+ sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
+ isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
+ sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;
+
+ if (sbl_pcr)
+ dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
+
+ if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
+ | ISPSBL_PCR_RSZ2_WBL_OVF
+ | ISPSBL_PCR_RSZ3_WBL_OVF
+ | ISPSBL_PCR_RSZ4_WBL_OVF)) {
+ pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
+ omap3isp_stat_sbl_overflow(&isp->isp_af);
+
+ if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
+ omap3isp_stat_sbl_overflow(&isp->isp_aewb);
+}
+
+/*
+ * isp_isr - Interrupt Service Routine for Camera ISP module.
+ * @irq: Not used currently.
+ * @_isp: Pointer to the OMAP3 ISP device
+ *
+ * Dispatches the interrupt to the ISP submodules whose bits are set in the
+ * IRQ0 status register.
+ */
+static irqreturn_t isp_isr(int irq, void *_isp)
+{
+ static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
+ IRQ0STATUS_CCDC_LSC_DONE_IRQ |
+ IRQ0STATUS_CCDC_VD0_IRQ |
+ IRQ0STATUS_CCDC_VD1_IRQ |
+ IRQ0STATUS_HS_VS_IRQ;
+ struct isp_device *isp = _isp;
+ u32 irqstatus;
+
+ irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+ isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+
+ isp_isr_sbl(isp);
+
+ if (irqstatus & IRQ0STATUS_CSIA_IRQ)
+ omap3isp_csi2_isr(&isp->isp_csi2a);
+
+ if (irqstatus & IRQ0STATUS_CSIB_IRQ)
+ omap3isp_ccp2_isr(&isp->isp_ccp2);
+
+ if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
+ if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
+ omap3isp_preview_isr_frame_sync(&isp->isp_prev);
+ if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
+ omap3isp_resizer_isr_frame_sync(&isp->isp_res);
+ omap3isp_stat_isr_frame_sync(&isp->isp_aewb);
+ omap3isp_stat_isr_frame_sync(&isp->isp_af);
+ omap3isp_stat_isr_frame_sync(&isp->isp_hist);
+ }
+
+ if (irqstatus & ccdc_events)
+ omap3isp_ccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);
+
+ if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
+ if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
+ omap3isp_resizer_isr_frame_sync(&isp->isp_res);
+ omap3isp_preview_isr(&isp->isp_prev);
+ }
+
+ if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
+ omap3isp_resizer_isr(&isp->isp_res);
+
+ if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
+ omap3isp_stat_isr(&isp->isp_aewb);
+
+ if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
+ omap3isp_stat_isr(&isp->isp_af);
+
+ if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
+ omap3isp_stat_isr(&isp->isp_hist);
+
+ omap3isp_flush(isp);
+
+#if defined(DEBUG) && defined(ISP_ISR_DEBUG)
+ isp_isr_dbg(isp, irqstatus);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this, the entity use_count field is used to track the number of
+ * users. For entities corresponding to video device nodes, the use_count field
+ * stores the number of users of the node. For entities corresponding to
+ * subdevs, the use_count field stores the total number of users of all video
+ * device nodes in the pipeline.
+ *
+ * The omap3isp_pipeline_pm_use() function must be called in the open() and
+ * close() handlers of video device nodes. It increments or decrements the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities in the source and sink sides
+ * of the link.
+ */
+
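+/*
+ * Editor's note -- an illustrative sketch, not part of the original driver
+ * code. A video device node open() handler is expected to call the helper
+ * below roughly as follows (error handling trimmed, names hypothetical):
+ *
+ *	static int example_video_open(struct file *file)
+ *	{
+ *		struct isp_video *video = video_drvdata(file);
+ *		int ret;
+ *
+ *		ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
+ *		...
+ *	}
+ *
+ * with the matching release() handler passing 0 to drop the use count.
+ */
+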
+/*
+ * isp_pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int isp_pipeline_pm_use_count(struct media_entity *entity)
+{
+ struct media_entity_graph graph;
+ int use = 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * isp_pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev, update
+ * its power state by calling the core::s_power operation when the use count
+ * goes from 0 to non-zero or from non-zero to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int isp_pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * isp_pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int isp_pipeline_pm_power(struct media_entity *entity, int change)
+{
+ struct media_entity_graph graph;
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while (!ret && (entity = media_entity_graph_walk_next(&graph)))
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ ret = isp_pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, first);
+
+ while ((first = media_entity_graph_walk_next(&graph))
+ && first != entity)
+ if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
+ isp_pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+/*
+ * omap3isp_pipeline_pm_use - Update the use count of an entity
+ * @entity: The entity
+ * @use: Use (1) or stop using (0) the entity
+ *
+ * Update the use count of all entities in the pipeline and power entities on or
+ * off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. No failure can occur when the use parameter is
+ * set to 0.
+ */
+int omap3isp_pipeline_pm_use(struct media_entity *entity, int use)
+{
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&entity->parent->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = isp_pipeline_pm_power(entity, change);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&entity->parent->graph_mutex);
+
+ return ret;
+}
+
+/*
+ * isp_pipeline_link_notify - Link management notification callback
+ * @link: The link
+ * @flags: New link flags that will be applied
+ * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
+ *
+ * React to link management on powered pipelines by updating the use count of
+ * all entities in the source and sink sides of the link. Entities are powered
+ * on or off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. This function will not fail for disconnection
+ * events.
+ */
+static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use = isp_pipeline_pm_use_count(source);
+ int sink_use = isp_pipeline_pm_use_count(sink);
+ int ret;
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ isp_pipeline_pm_power(source, -sink_use);
+ isp_pipeline_pm_power(sink, -source_use);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+
+ ret = isp_pipeline_pm_power(source, sink_use);
+ if (ret < 0)
+ return ret;
+
+ ret = isp_pipeline_pm_power(sink, source_use);
+ if (ret < 0)
+ isp_pipeline_pm_power(source, -sink_use);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline stream management
+ */
+
+/*
+ * isp_pipeline_enable - Enable streaming on a pipeline
+ * @pipe: ISP pipeline
+ * @mode: Stream mode (single shot or continuous)
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int isp_pipeline_enable(struct isp_pipeline *pipe,
+ enum isp_pipeline_stream_state mode)
+{
+ struct isp_device *isp = pipe->output->isp;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ unsigned long flags;
+ int ret;
+
+ /* Refuse to start streaming if an entity included in the pipeline has
+ * crashed. This check must be performed before the loop below to avoid
+ * starting entities if the pipeline won't start anyway (those entities
+ * would then likely fail to stop, making the problem worse).
+ */
+ if (pipe->entities & isp->crashed)
+ return -EIO;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ pipe->do_propagation = false;
+
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, mode);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (subdev == &isp->isp_ccdc.subdev) {
+ v4l2_subdev_call(&isp->isp_aewb.subdev, video,
+ s_stream, mode);
+ v4l2_subdev_call(&isp->isp_af.subdev, video,
+ s_stream, mode);
+ v4l2_subdev_call(&isp->isp_hist.subdev, video,
+ s_stream, mode);
+ pipe->do_propagation = true;
+ }
+ }
+
+ return 0;
+}
+
+static int isp_pipeline_wait_resizer(struct isp_device *isp)
+{
+ return omap3isp_resizer_busy(&isp->isp_res);
+}
+
+static int isp_pipeline_wait_preview(struct isp_device *isp)
+{
+ return omap3isp_preview_busy(&isp->isp_prev);
+}
+
+static int isp_pipeline_wait_ccdc(struct isp_device *isp)
+{
+ return omap3isp_stat_busy(&isp->isp_af)
+ || omap3isp_stat_busy(&isp->isp_aewb)
+ || omap3isp_stat_busy(&isp->isp_hist)
+ || omap3isp_ccdc_busy(&isp->isp_ccdc);
+}
+
+#define ISP_STOP_TIMEOUT msecs_to_jiffies(1000)
+
+static int isp_pipeline_wait(struct isp_device *isp,
+ int(*busy)(struct isp_device *isp))
+{
+ unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;
+
+ while (!time_after(jiffies, timeout)) {
+ if (!busy(isp))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * isp_pipeline_disable - Disable streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain. Wait synchronously for the modules to be stopped if
+ * necessary.
+ *
+ * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
+ * can't be stopped (in which case a software reset of the ISP is probably
+ * necessary).
+ */
+static int isp_pipeline_disable(struct isp_pipeline *pipe)
+{
+ struct isp_device *isp = pipe->output->isp;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int failure = 0;
+ int ret;
+
+ /*
+ * We need to stop all the modules after CCDC first or they'll
+ * never stop since they may not get a full frame from CCDC.
+ */
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ if (subdev == &isp->isp_ccdc.subdev) {
+ v4l2_subdev_call(&isp->isp_aewb.subdev,
+ video, s_stream, 0);
+ v4l2_subdev_call(&isp->isp_af.subdev,
+ video, s_stream, 0);
+ v4l2_subdev_call(&isp->isp_hist.subdev,
+ video, s_stream, 0);
+ }
+
+ v4l2_subdev_call(subdev, video, s_stream, 0);
+
+ if (subdev == &isp->isp_res.subdev)
+ ret = isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
+ else if (subdev == &isp->isp_prev.subdev)
+ ret = isp_pipeline_wait(isp, isp_pipeline_wait_preview);
+ else if (subdev == &isp->isp_ccdc.subdev)
+ ret = isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
+ else
+ ret = 0;
+
+ /* Handle stop failures. An entity that fails to stop can
+ * usually just be restarted. Flag the stop failure nonetheless
+ * to trigger an ISP reset the next time the device is released,
+ * just in case.
+ *
+ * The preview engine is a special case. A failure to stop can
+ * mean a hardware crash. When that happens the preview engine
+ * won't respond to read/write operations on the L4 bus anymore,
+ * resulting in a bus fault and a kernel oops next time it gets
+ * accessed. Mark it as crashed to prevent pipelines including
+ * it from being started.
+ */
+ if (ret) {
+ dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
+ isp->stop_failure = true;
+ if (subdev == &isp->isp_prev.subdev)
+ isp->crashed |= 1U << subdev->entity.id;
+ failure = -ETIMEDOUT;
+ }
+ }
+
+ return failure;
+}
+
+/*
+ * omap3isp_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: ISP pipeline
+ * @state: Stream state (stopped, single shot or continuous)
+ *
+ * Set the pipeline to the given stream state. Pipelines can be started in
+ * single-shot or continuous mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. The pipeline state is not updated when the operation
+ * fails, except when stopping the pipeline.
+ */
+int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
+ enum isp_pipeline_stream_state state)
+{
+ int ret;
+
+ if (state == ISP_PIPELINE_STREAM_STOPPED)
+ ret = isp_pipeline_disable(pipe);
+ else
+ ret = isp_pipeline_enable(pipe, state);
+
+ if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED)
+ pipe->stream_state = state;
+
+ return ret;
+}
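+
+/*
+ * Editor's note -- an illustrative sketch, not part of the original driver
+ * code. The video node streaming code is expected to drive a pipeline with
+ * calls along these lines (the continuous-mode enumerator name is assumed
+ * here, as it does not appear in this excerpt):
+ *
+ *	ret = omap3isp_pipeline_set_stream(pipe,
+ *					   ISP_PIPELINE_STREAM_CONTINUOUS);
+ *	...
+ *	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
+ */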
+
+/*
+ * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
+{
+ if (pipe->input)
+ omap3isp_video_cancel_stream(pipe->input);
+ if (pipe->output)
+ omap3isp_video_cancel_stream(pipe->output);
+}
+
+/*
+ * isp_pipeline_resume - Resume streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Resume video output and input and re-enable pipeline.
+ */
+static void isp_pipeline_resume(struct isp_pipeline *pipe)
+{
+ int singleshot = pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT;
+
+ omap3isp_video_resume(pipe->output, !singleshot);
+ if (singleshot)
+ omap3isp_video_resume(pipe->input, 0);
+ isp_pipeline_enable(pipe, pipe->stream_state);
+}
+
+/*
+ * isp_pipeline_suspend - Suspend streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Suspend pipeline.
+ */
+static void isp_pipeline_suspend(struct isp_pipeline *pipe)
+{
+ isp_pipeline_disable(pipe);
+}
+
+/*
+ * isp_pipeline_is_last - Verify if entity has an enabled link to the output
+ * video node
+ * @me: ISP module's media entity
+ *
+ * Returns 1 if the entity has an enabled link to the output video node, or 0
+ * otherwise. This only holds as long as a pipeline cannot have more than one
+ * output node.
+ */
+static int isp_pipeline_is_last(struct media_entity *me)
+{
+ struct isp_pipeline *pipe;
+ struct media_pad *pad;
+
+ if (!me->pipe)
+ return 0;
+ pipe = to_isp_pipeline(me);
+ if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+ pad = media_entity_remote_pad(&pipe->output->pad);
+ return pad->entity == me;
+}
+
+/*
+ * isp_suspend_module_pipeline - Suspend the pipeline to which the module belongs
+ * @me: ISP module's media entity
+ *
+ * Suspend the whole pipeline if the module's entity has an enabled link to the
+ * output video node. This works only as long as a pipeline cannot have more
+ * than one output node.
+ */
+static void isp_suspend_module_pipeline(struct media_entity *me)
+{
+ if (isp_pipeline_is_last(me))
+ isp_pipeline_suspend(to_isp_pipeline(me));
+}
+
+/*
+ * isp_resume_module_pipeline - Resume the pipeline to which the module belongs
+ * @me: ISP module's media entity
+ *
+ * Resume the whole pipeline if the module's entity has an enabled link to the
+ * output video node. This works only as long as a pipeline cannot have more
+ * than one output node.
+ */
+static void isp_resume_module_pipeline(struct media_entity *me)
+{
+ if (isp_pipeline_is_last(me))
+ isp_pipeline_resume(to_isp_pipeline(me));
+}
+
+/*
+ * isp_suspend_modules - Suspend ISP submodules.
+ * @isp: OMAP3 ISP device
+ *
+ * Returns 0 if the suspend left all the submodules properly idle, or 1 if a
+ * general reset is required to suspend the submodules.
+ */
+static int isp_suspend_modules(struct isp_device *isp)
+{
+ unsigned long timeout;
+
+ omap3isp_stat_suspend(&isp->isp_aewb);
+ omap3isp_stat_suspend(&isp->isp_af);
+ omap3isp_stat_suspend(&isp->isp_hist);
+ isp_suspend_module_pipeline(&isp->isp_res.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_prev.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_ccdc.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_csi2a.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_ccp2.subdev.entity);
+
+ timeout = jiffies + ISP_STOP_TIMEOUT;
+ while (omap3isp_stat_busy(&isp->isp_af)
+ || omap3isp_stat_busy(&isp->isp_aewb)
+ || omap3isp_stat_busy(&isp->isp_hist)
+ || omap3isp_preview_busy(&isp->isp_prev)
+ || omap3isp_resizer_busy(&isp->isp_res)
+ || omap3isp_ccdc_busy(&isp->isp_ccdc)) {
+ if (time_after(jiffies, timeout)) {
+ dev_info(isp->dev, "can't stop modules.\n");
+ return 1;
+ }
+ msleep(1);
+ }
+
+ return 0;
+}
+
+/*
+ * isp_resume_modules - Resume ISP submodules.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_resume_modules(struct isp_device *isp)
+{
+ omap3isp_stat_resume(&isp->isp_aewb);
+ omap3isp_stat_resume(&isp->isp_af);
+ omap3isp_stat_resume(&isp->isp_hist);
+ isp_resume_module_pipeline(&isp->isp_res.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_prev.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_ccdc.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_csi2a.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_ccp2.subdev.entity);
+}
+
+/*
+ * isp_reset - Reset ISP with a timeout wait for idle.
+ * @isp: OMAP3 ISP device
+ */
+static int isp_reset(struct isp_device *isp)
+{
+ unsigned long timeout = 0;
+
+ isp_reg_writel(isp,
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG)
+ | ISP_SYSCONFIG_SOFTRESET,
+ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
+ while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN,
+ ISP_SYSSTATUS) & 0x1)) {
+ if (timeout++ > 10000) {
+ dev_alert(isp->dev, "cannot reset ISP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ isp->stop_failure = false;
+ isp->crashed = 0;
+ return 0;
+}
+
+/*
+ * isp_save_context - Saves the values of the ISP module registers.
+ * @isp: OMAP3 ISP device
+ * @reg_list: Array of register address/value pairs to save, terminated by an
+ * ISP_TOK_TERM entry.
+ */
+static void
+isp_save_context(struct isp_device *isp, struct isp_reg *reg_list)
+{
+ struct isp_reg *next = reg_list;
+
+ for (; next->reg != ISP_TOK_TERM; next++)
+ next->val = isp_reg_readl(isp, next->mmio_range, next->reg);
+}
+
+/*
+ * isp_restore_context - Restores the values of the ISP module registers.
+ * @isp: OMAP3 ISP device
+ * @reg_list: Array of register address/value pairs to restore, terminated by
+ * an ISP_TOK_TERM entry.
+ */
+static void
+isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
+{
+ struct isp_reg *next = reg_list;
+
+ for (; next->reg != ISP_TOK_TERM; next++)
+ isp_reg_writel(isp, next->val, next->mmio_range, next->reg);
+}
+
+/*
+ * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
+ * @isp: OMAP3 ISP device
+ *
+ * Routine for saving the context of each module in the ISP.
+ * CCDC, HIST, H3A, PREV, RESZ and MMU.
+ */
+static void isp_save_ctx(struct isp_device *isp)
+{
+ isp_save_context(isp, isp_reg_list);
+ omap_iommu_save_ctx(isp->dev);
+}
+
+/*
+ * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
+ * @isp: OMAP3 ISP device
+ *
+ * Routine for restoring the context of each module in the ISP.
+ * CCDC, HIST, H3A, PREV, RESZ and MMU.
+ */
+static void isp_restore_ctx(struct isp_device *isp)
+{
+ isp_restore_context(isp, isp_reg_list);
+ omap_iommu_restore_ctx(isp->dev);
+ omap3isp_ccdc_restore_context(isp);
+ omap3isp_preview_restore_context(isp);
+}
+
+/* -----------------------------------------------------------------------------
+ * SBL resources management
+ */
+#define OMAP3_ISP_SBL_READ (OMAP3_ISP_SBL_CSI1_READ | \
+ OMAP3_ISP_SBL_CCDC_LSC_READ | \
+ OMAP3_ISP_SBL_PREVIEW_READ | \
+ OMAP3_ISP_SBL_RESIZER_READ)
+#define OMAP3_ISP_SBL_WRITE (OMAP3_ISP_SBL_CSI1_WRITE | \
+ OMAP3_ISP_SBL_CSI2A_WRITE | \
+ OMAP3_ISP_SBL_CSI2C_WRITE | \
+ OMAP3_ISP_SBL_CCDC_WRITE | \
+ OMAP3_ISP_SBL_PREVIEW_WRITE)
+
+void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
+{
+ u32 sbl = 0;
+
+ isp->sbl_resources |= res;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
+ sbl |= ISPCTRL_SBL_SHARED_RPORTA;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
+ sbl |= ISPCTRL_SBL_SHARED_RPORTB;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
+ sbl |= ISPCTRL_SBL_SHARED_WPORTC;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
+ sbl |= ISPCTRL_SBL_WR0_RAM_EN;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
+ sbl |= ISPCTRL_SBL_WR1_RAM_EN;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
+ sbl |= ISPCTRL_SBL_RD_RAM_EN;
+
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
+}
+
+void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
+{
+ u32 sbl = 0;
+
+ isp->sbl_resources &= ~res;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
+ sbl |= ISPCTRL_SBL_SHARED_RPORTA;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
+ sbl |= ISPCTRL_SBL_SHARED_RPORTB;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
+ sbl |= ISPCTRL_SBL_SHARED_WPORTC;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
+ sbl |= ISPCTRL_SBL_WR0_RAM_EN;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
+ sbl |= ISPCTRL_SBL_WR1_RAM_EN;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
+ sbl |= ISPCTRL_SBL_RD_RAM_EN;
+
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
+}
+
+/*
+ * omap3isp_module_sync_idle - Helper to sync a module with its idle state
+ * @me: ISP submodule's media entity
+ * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag which tells module wants to stop
+ *
+ * This function checks if the ISP submodule needs to wait for the next
+ * interrupt. If so, it makes the caller sleep while waiting for that event.
+ */
+int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(me);
+
+ if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED ||
+ (pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT &&
+ !isp_pipeline_ready(pipe)))
+ return 0;
+
+ /*
+	 * atomic_set() does not imply a memory barrier on ARM in SMP
+	 * configurations, so issue one explicitly to avoid race conditions.
+ */
+ atomic_set(stopping, 1);
+ smp_mb();
+
+ /*
+	 * If the module is the last one in the pipeline, it writes to memory.
+	 * In this case it is necessary to check whether the module is already
+	 * paused due to a DMA queue underrun, or whether it has to wait for
+	 * the next interrupt to become idle.
+	 * If it isn't the last one, the function won't sleep, but *stopping
+	 * will still be set so that the next interrupt handled by the
+	 * submodule knows the module wants to become idle.
+ */
+ if (isp_pipeline_is_last(me)) {
+ struct isp_video *video = pipe->output;
+ unsigned long flags;
+ spin_lock_irqsave(&video->irqlock, flags);
+ if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ atomic_set(stopping, 0);
+ smp_mb();
+ return 0;
+ }
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ if (!wait_event_timeout(*wait, !atomic_read(stopping),
+ msecs_to_jiffies(1000))) {
+ atomic_set(stopping, 0);
+ smp_mb();
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * omap3isp_module_sync_is_stopping - Helper to verify if module was stopping
+ * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag which tells module wants to stop
+ *
+ * This function checks if the ISP submodule was stopping. If so, it notifies
+ * the caller by setting *stopping to 0 and waking up the wait queue.
+ * Returns 1 if the module was stopping, or 0 otherwise.
+ */
+int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ if (atomic_cmpxchg(stopping, 1, 0)) {
+ wake_up(wait);
+ return 1;
+ }
+
+ return 0;
+}
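+
+/*
+ * Editor's note -- an illustrative sketch, not part of the original driver
+ * code. A submodule typically pairs the two helpers above along these
+ * lines (identifiers are hypothetical):
+ *
+ *	// in the submodule's stream-off path
+ *	ret = omap3isp_module_sync_idle(&subdev->entity, &module->wait,
+ *					&module->stopping);
+ *
+ *	// in the submodule's interrupt handler
+ *	if (omap3isp_module_sync_is_stopping(&module->wait,
+ *					     &module->stopping))
+ *		return;
+ */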
+
+/* --------------------------------------------------------------------------
+ * Clock management
+ */
+
+#define ISPCTRL_CLKS_MASK (ISPCTRL_H3A_CLK_EN | \
+ ISPCTRL_HIST_CLK_EN | \
+ ISPCTRL_RSZ_CLK_EN | \
+ (ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN) | \
+ (ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN))
+
+static void __isp_subclk_update(struct isp_device *isp)
+{
+ u32 clk = 0;
+
+ /* AEWB and AF share the same clock. */
+ if (isp->subclk_resources &
+ (OMAP3_ISP_SUBCLK_AEWB | OMAP3_ISP_SUBCLK_AF))
+ clk |= ISPCTRL_H3A_CLK_EN;
+
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_HIST)
+ clk |= ISPCTRL_HIST_CLK_EN;
+
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_RESIZER)
+ clk |= ISPCTRL_RSZ_CLK_EN;
+
+	/* NOTE: For the CCDC & Preview submodules, the internal RAM enable
+	 * bits need to be set as well.
+ */
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_CCDC)
+ clk |= ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN;
+
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_PREVIEW)
+ clk |= ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN;
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
+ ISPCTRL_CLKS_MASK, clk);
+}
+
+void omap3isp_subclk_enable(struct isp_device *isp,
+ enum isp_subclk_resource res)
+{
+ isp->subclk_resources |= res;
+
+ __isp_subclk_update(isp);
+}
+
+void omap3isp_subclk_disable(struct isp_device *isp,
+ enum isp_subclk_resource res)
+{
+ isp->subclk_resources &= ~res;
+
+ __isp_subclk_update(isp);
+}
+
+/*
+ * isp_enable_clocks - Enable ISP clocks
+ * @isp: OMAP3 ISP device
+ *
+ * Return 0 if successful, or clk_prepare_enable return value if any of them
+ * fails.
+ */
+static int isp_enable_clocks(struct isp_device *isp)
+{
+ int r;
+ unsigned long rate;
+
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ if (r) {
+ dev_err(isp->dev, "failed to enable cam_ick clock\n");
+ goto out_clk_enable_ick;
+ }
+ r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
+ if (r) {
+ dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
+ goto out_clk_enable_mclk;
+ }
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
+ if (r) {
+ dev_err(isp->dev, "failed to enable cam_mclk clock\n");
+ goto out_clk_enable_mclk;
+ }
+ rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
+ if (rate != CM_CAM_MCLK_HZ)
+ dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
+ " expected : %d\n"
+ " actual : %ld\n", CM_CAM_MCLK_HZ, rate);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
+ if (r) {
+ dev_err(isp->dev, "failed to enable csi2_fck clock\n");
+ goto out_clk_enable_csi2_fclk;
+ }
+ return 0;
+
+out_clk_enable_csi2_fclk:
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
+out_clk_enable_mclk:
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
+out_clk_enable_ick:
+ return r;
+}
+
+/*
+ * isp_disable_clocks - Disable ISP clocks
+ * @isp: OMAP3 ISP device
+ */
+static void isp_disable_clocks(struct isp_device *isp)
+{
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
+}
+
+static const char *isp_clocks[] = {
+ "cam_ick",
+ "cam_mclk",
+ "csi2_96m_fck",
+ "l3_ick",
+};
+
+static int isp_get_clocks(struct isp_device *isp)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
+ clk = devm_clk_get(isp->dev, isp_clocks[i]);
+ if (IS_ERR(clk)) {
+ dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
+ return PTR_ERR(clk);
+ }
+
+ isp->clock[i] = clk;
+ }
+
+ return 0;
+}
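+
+/*
+ * Editor's note -- illustrative, not part of the original driver code. The
+ * loop above fills isp->clock[] by array index, so the clock index
+ * constants used elsewhere in this file are assumed to follow the same
+ * order as isp_clocks[]:
+ *
+ *	isp->clock[ISP_CLK_CAM_ICK]	corresponds to "cam_ick"
+ *	isp->clock[ISP_CLK_CAM_MCLK]	corresponds to "cam_mclk"
+ *	isp->clock[ISP_CLK_CSI2_FCK]	corresponds to "csi2_96m_fck"
+ *	(a fourth index, not referenced in this excerpt, covers "l3_ick")
+ */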
+
+/*
+ * omap3isp_get - Acquire the ISP resource.
+ *
+ * Initializes the clocks for the first acquire.
+ *
+ * Increment the reference count on the ISP. If the first reference is taken,
+ * enable clocks and power-up all submodules.
+ *
+ * Return a pointer to the ISP device structure, or NULL if an error occurred.
+ */
+static struct isp_device *__omap3isp_get(struct isp_device *isp, bool irq)
+{
+ struct isp_device *__isp = isp;
+
+ if (isp == NULL)
+ return NULL;
+
+ mutex_lock(&isp->isp_mutex);
+ if (isp->ref_count > 0)
+ goto out;
+
+ if (isp_enable_clocks(isp) < 0) {
+ __isp = NULL;
+ goto out;
+ }
+
+ /* We don't want to restore context before saving it! */
+ if (isp->has_context)
+ isp_restore_ctx(isp);
+
+ if (irq)
+ isp_enable_interrupts(isp);
+
+out:
+ if (__isp != NULL)
+ isp->ref_count++;
+ mutex_unlock(&isp->isp_mutex);
+
+ return __isp;
+}
+
+struct isp_device *omap3isp_get(struct isp_device *isp)
+{
+ return __omap3isp_get(isp, true);
+}
+
+/*
+ * omap3isp_put - Release the ISP
+ *
+ * Decrement the reference count on the ISP. If the last reference is released,
+ * power-down all submodules, disable clocks and free temporary buffers.
+ */
+static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
+{
+ if (isp == NULL)
+ return;
+
+ mutex_lock(&isp->isp_mutex);
+ BUG_ON(isp->ref_count == 0);
+ if (--isp->ref_count == 0) {
+ isp_disable_interrupts(isp);
+ if (save_ctx) {
+ isp_save_ctx(isp);
+ isp->has_context = 1;
+ }
+ /* Reset the ISP if an entity has failed to stop. This is the
+ * only way to recover from such conditions.
+ */
+ if (isp->crashed || isp->stop_failure)
+ isp_reset(isp);
+ isp_disable_clocks(isp);
+ }
+ mutex_unlock(&isp->isp_mutex);
+}
+
+void omap3isp_put(struct isp_device *isp)
+{
+ __omap3isp_put(isp, true);
+}
+
+/* --------------------------------------------------------------------------
+ * Platform device driver
+ */
+
+/*
+ * omap3isp_print_status - Prints the values of the ISP Control Module registers
+ * @isp: OMAP3 ISP device
+ */
+#define ISP_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
+#define SBL_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
+
+void omap3isp_print_status(struct isp_device *isp)
+{
+ dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
+
+ ISP_PRINT_REGISTER(isp, SYSCONFIG);
+ ISP_PRINT_REGISTER(isp, SYSSTATUS);
+ ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
+ ISP_PRINT_REGISTER(isp, IRQ0STATUS);
+ ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
+ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
+ ISP_PRINT_REGISTER(isp, CTRL);
+ ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
+ ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
+ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
+ ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
+ ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
+ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
+ ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
+ ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
+
+ SBL_PRINT_REGISTER(isp, PCR);
+ SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * Power management support.
+ *
+ * As the ISP can't properly handle an input video stream interruption on a
+ * non-frame boundary, the ISP pipelines need to be stopped before sensors get
+ * suspended. However, as suspending the sensors can require a running clock,
+ * which can be provided by the ISP, the ISP can't be completely suspended
+ * before the sensor.
+ *
+ * To solve this problem, power management support is split into
+ * prepare/complete and suspend/resume operations. The pipelines are stopped in
+ * prepare() and the ISP clocks get disabled in suspend(). Similarly, the clocks
+ * are re-enabled in resume(), and the pipelines are restarted in complete().
+ *
+ * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
+ * yet.
+ */
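+
+/*
+ * Editor's note -- an illustrative sketch, not part of the original driver
+ * code. The four callbacks below are presumably wired into a dev_pm_ops
+ * structure referenced from the platform driver (the actual structure lies
+ * outside this excerpt):
+ *
+ *	static const struct dev_pm_ops omap3isp_pm_ops = {
+ *		.prepare = isp_pm_prepare,
+ *		.suspend = isp_pm_suspend,
+ *		.resume = isp_pm_resume,
+ *		.complete = isp_pm_complete,
+ *	};
+ */
+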
+static int isp_pm_prepare(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+ int reset;
+
+ WARN_ON(mutex_is_locked(&isp->isp_mutex));
+
+ if (isp->ref_count == 0)
+ return 0;
+
+ reset = isp_suspend_modules(isp);
+ isp_disable_interrupts(isp);
+ isp_save_ctx(isp);
+ if (reset)
+ isp_reset(isp);
+
+ return 0;
+}
+
+static int isp_pm_suspend(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+
+ WARN_ON(mutex_is_locked(&isp->isp_mutex));
+
+ if (isp->ref_count)
+ isp_disable_clocks(isp);
+
+ return 0;
+}
+
+static int isp_pm_resume(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+
+ if (isp->ref_count == 0)
+ return 0;
+
+ return isp_enable_clocks(isp);
+}
+
+static void isp_pm_complete(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+
+ if (isp->ref_count == 0)
+ return;
+
+ isp_restore_ctx(isp);
+ isp_enable_interrupts(isp);
+ isp_resume_modules(isp);
+}
+
+#else
+
+#define isp_pm_prepare NULL
+#define isp_pm_suspend NULL
+#define isp_pm_resume NULL
+#define isp_pm_complete NULL
+
+#endif /* CONFIG_PM */
+
+static void isp_unregister_entities(struct isp_device *isp)
+{
+ omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
+ omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
+ omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
+ omap3isp_preview_unregister_entities(&isp->isp_prev);
+ omap3isp_resizer_unregister_entities(&isp->isp_res);
+ omap3isp_stat_unregister_entities(&isp->isp_aewb);
+ omap3isp_stat_unregister_entities(&isp->isp_af);
+ omap3isp_stat_unregister_entities(&isp->isp_hist);
+
+ v4l2_device_unregister(&isp->v4l2_dev);
+ media_device_unregister(&isp->media_dev);
+}
+
+/*
+ * isp_register_subdev_group - Register a group of subdevices
+ * @isp: OMAP3 ISP device
+ * @board_info: I2C subdevs board information array
+ *
+ * Register all I2C subdevices in the board_info array. The array must be
+ * terminated by a NULL entry, and the first entry must be the sensor.
+ *
+ * Return a pointer to the sensor's v4l2_subdev if it has been successfully
+ * registered, or NULL otherwise.
+ */
+static struct v4l2_subdev *
+isp_register_subdev_group(struct isp_device *isp,
+ struct isp_subdev_i2c_board_info *board_info)
+{
+ struct v4l2_subdev *sensor = NULL;
+ unsigned int first;
+
+ if (board_info->board_info == NULL)
+ return NULL;
+
+ for (first = 1; board_info->board_info; ++board_info, first = 0) {
+ struct v4l2_subdev *subdev;
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_get_adapter(board_info->i2c_adapter_id);
+ if (adapter == NULL) {
+ dev_err(isp->dev, "%s: Unable to get I2C adapter %d for "
+ "device %s\n", __func__,
+ board_info->i2c_adapter_id,
+ board_info->board_info->type);
+ continue;
+ }
+
+ subdev = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
+ board_info->board_info, NULL);
+ if (subdev == NULL) {
+ dev_err(isp->dev, "%s: Unable to register subdev %s\n",
+ __func__, board_info->board_info->type);
+ continue;
+ }
+
+ if (first)
+ sensor = subdev;
+ }
+
+ return sensor;
+}
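+
+/*
+ * Editor's note -- an illustrative sketch, not part of the original driver
+ * code. Board code is expected to pass an array shaped roughly like the
+ * following (device names and adapter number are hypothetical), with the
+ * sensor first and a terminating entry whose board_info is NULL:
+ *
+ *	static struct i2c_board_info example_sensor_info = {
+ *		I2C_BOARD_INFO("example-sensor", 0x10),
+ *	};
+ *
+ *	static struct isp_subdev_i2c_board_info example_subdevs[] = {
+ *		{
+ *			.board_info = &example_sensor_info,
+ *			.i2c_adapter_id = 2,
+ *		},
+ *		{ .board_info = NULL, },
+ *	};
+ */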
+
+static int isp_register_entities(struct isp_device *isp)
+{
+ struct isp_platform_data *pdata = isp->pdata;
+ struct isp_v4l2_subdevs_group *subdevs;
+ int ret;
+
+ isp->media_dev.dev = isp->dev;
+ strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
+ sizeof(isp->media_dev.model));
+ isp->media_dev.hw_revision = isp->revision;
+ isp->media_dev.link_notify = isp_pipeline_link_notify;
+ ret = media_device_register(&isp->media_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "%s: Media device registration failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ isp->v4l2_dev.mdev = &isp->media_dev;
+ ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto done;
+ }
+
+ /* Register internal entities */
+ ret = omap3isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_preview_register_entities(&isp->isp_prev,
+ &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_stat_register_entities(&isp->isp_aewb, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_stat_register_entities(&isp->isp_af, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_stat_register_entities(&isp->isp_hist, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ /* Register external entities */
+ for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
+ struct v4l2_subdev *sensor;
+ struct media_entity *input;
+ unsigned int flags;
+ unsigned int pad;
+ unsigned int i;
+
+ sensor = isp_register_subdev_group(isp, subdevs->subdevs);
+ if (sensor == NULL)
+ continue;
+
+ sensor->host_priv = subdevs;
+
+ /* Connect the sensor to the correct interface module. Parallel
+ * sensors are connected directly to the CCDC, while serial
+ * sensors are connected to the CSI2a, CCP2b or CSI2c receiver
+ * through CSIPHY1 or CSIPHY2.
+ */
+ switch (subdevs->interface) {
+ case ISP_INTERFACE_PARALLEL:
+ input = &isp->isp_ccdc.subdev.entity;
+ pad = CCDC_PAD_SINK;
+ flags = 0;
+ break;
+
+ case ISP_INTERFACE_CSI2A_PHY2:
+ input = &isp->isp_csi2a.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE
+ | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ case ISP_INTERFACE_CCP2B_PHY1:
+ case ISP_INTERFACE_CCP2B_PHY2:
+ input = &isp->isp_ccp2.subdev.entity;
+ pad = CCP2_PAD_SINK;
+ flags = 0;
+ break;
+
+ case ISP_INTERFACE_CSI2C_PHY1:
+ input = &isp->isp_csi2c.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE
+ | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ default:
+ dev_err(isp->dev, "%s: invalid interface type %u\n",
+ __func__, subdevs->interface);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ for (i = 0; i < sensor->entity.num_pads; i++) {
+ if (sensor->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == sensor->entity.num_pads) {
+ dev_err(isp->dev,
+ "%s: no source pad in external entity\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = media_entity_create_link(&sensor->entity, i, input, pad,
+ flags);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+
+done:
+ if (ret < 0)
+ isp_unregister_entities(isp);
+
+ return ret;
+}
+
+static void isp_cleanup_modules(struct isp_device *isp)
+{
+ omap3isp_h3a_aewb_cleanup(isp);
+ omap3isp_h3a_af_cleanup(isp);
+ omap3isp_hist_cleanup(isp);
+ omap3isp_resizer_cleanup(isp);
+ omap3isp_preview_cleanup(isp);
+ omap3isp_ccdc_cleanup(isp);
+ omap3isp_ccp2_cleanup(isp);
+ omap3isp_csi2_cleanup(isp);
+}
+
+static int isp_initialize_modules(struct isp_device *isp)
+{
+ int ret;
+
+ ret = omap3isp_csiphy_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "CSI PHY initialization failed\n");
+ goto error_csiphy;
+ }
+
+ ret = omap3isp_csi2_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "CSI2 initialization failed\n");
+ goto error_csi2;
+ }
+
+ ret = omap3isp_ccp2_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "CCP2 initialization failed\n");
+ goto error_ccp2;
+ }
+
+ ret = omap3isp_ccdc_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "CCDC initialization failed\n");
+ goto error_ccdc;
+ }
+
+ ret = omap3isp_preview_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "Preview initialization failed\n");
+ goto error_preview;
+ }
+
+ ret = omap3isp_resizer_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "Resizer initialization failed\n");
+ goto error_resizer;
+ }
+
+ ret = omap3isp_hist_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "Histogram initialization failed\n");
+ goto error_hist;
+ }
+
+ ret = omap3isp_h3a_aewb_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "H3A AEWB initialization failed\n");
+ goto error_h3a_aewb;
+ }
+
+ ret = omap3isp_h3a_af_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "H3A AF initialization failed\n");
+ goto error_h3a_af;
+ }
+
+ /* Connect the submodules. */
+ ret = media_entity_create_link(
+ &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE,
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
+ &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
+ &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_aewb.subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_af.subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_hist.subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap3isp_h3a_af_cleanup(isp);
+error_h3a_af:
+ omap3isp_h3a_aewb_cleanup(isp);
+error_h3a_aewb:
+ omap3isp_hist_cleanup(isp);
+error_hist:
+ omap3isp_resizer_cleanup(isp);
+error_resizer:
+ omap3isp_preview_cleanup(isp);
+error_preview:
+ omap3isp_ccdc_cleanup(isp);
+error_ccdc:
+ omap3isp_ccp2_cleanup(isp);
+error_ccp2:
+ omap3isp_csi2_cleanup(isp);
+error_csi2:
+error_csiphy:
+ return ret;
+}
+
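+/*
+ * isp_detach_iommu - Release the IOMMU resources
+ * @isp: OMAP3 ISP device
+ *
+ * Release the ARM DMA mapping and remove the device from its IOMMU group.
+ */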
+static void isp_detach_iommu(struct isp_device *isp)
+{
+ arm_iommu_release_mapping(isp->mapping);
+ isp->mapping = NULL;
+ iommu_group_remove_device(isp->dev);
+}
+
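+/*
+ * isp_attach_iommu - Set up IOMMU-backed DMA for the ISP
+ * @isp: OMAP3 ISP device
+ *
+ * Add the device to an IOMMU group, create an ARM DMA IOMMU mapping and
+ * attach the device to it so that DMA allocations go through the ISP MMU.
+ */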
+static int isp_attach_iommu(struct isp_device *isp)
+{
+ struct dma_iommu_mapping *mapping;
+ struct iommu_group *group;
+ int ret;
+
+ /* Create a device group and add the device to it. */
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(isp->dev, "failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+
+ ret = iommu_group_add_device(group, isp->dev);
+ iommu_group_put(group);
+
+ if (ret < 0) {
+ dev_err(isp->dev, "failed to add device to IPMMU group\n");
+ return ret;
+ }
+
+ /*
+ * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+ * VAs. This will allocate a corresponding IOMMU domain.
+ */
+ mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
+ if (IS_ERR(mapping)) {
+ dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
+ ret = PTR_ERR(mapping);
+ goto error;
+ }
+
+ isp->mapping = mapping;
+
+ /* Attach the ARM VA mapping to the device. */
+ ret = arm_iommu_attach_device(isp->dev, mapping);
+ if (ret < 0) {
+ dev_err(isp->dev, "failed to attach device to VA mapping\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ isp_detach_iommu(isp);
+ return ret;
+}
+
+/*
+ * isp_remove - Remove ISP platform device
+ * @pdev: Pointer to ISP platform device
+ *
+ * Always returns 0.
+ */
+static int isp_remove(struct platform_device *pdev)
+{
+ struct isp_device *isp = platform_get_drvdata(pdev);
+
+ isp_unregister_entities(isp);
+ isp_cleanup_modules(isp);
+ isp_xclk_cleanup(isp);
+
+ __omap3isp_get(isp, false);
+ isp_detach_iommu(isp);
+ __omap3isp_put(isp, false);
+
+ return 0;
+}
+
+static int isp_map_mem_resource(struct platform_device *pdev,
+ struct isp_device *isp,
+ enum isp_mem_resources res)
+{
+ struct resource *mem;
+
+ /* request the mem region for the camera registers */
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
+
+ /* map the region */
+ isp->mmio_base[res] = devm_ioremap_resource(isp->dev, mem);
+ if (IS_ERR(isp->mmio_base[res]))
+ return PTR_ERR(isp->mmio_base[res]);
+
+ isp->mmio_base_phys[res] = mem->start;
+
+ return 0;
+}
+
+/*
+ * isp_probe - Probe ISP platform device
+ * @pdev: Pointer to ISP platform device
+ *
+ * Returns 0 if successful,
+ * -ENOMEM if no memory is available,
+ * -ENODEV if no platform device resources are found
+ * or there is no space for remapping registers,
+ * -EINVAL if the ISR cannot be installed,
+ * or the error value returned by clk_get.
+ */
+static int isp_probe(struct platform_device *pdev)
+{
+ struct isp_platform_data *pdata = pdev->dev.platform_data;
+ struct isp_device *isp;
+ int ret;
+ int i, m;
+
+ if (pdata == NULL)
+ return -EINVAL;
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp) {
+ dev_err(&pdev->dev, "could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ isp->autoidle = autoidle;
+
+ mutex_init(&isp->isp_mutex);
+ spin_lock_init(&isp->stat_lock);
+
+ isp->dev = &pdev->dev;
+ isp->pdata = pdata;
+ isp->ref_count = 0;
+
+ ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, isp);
+
+ /* Regulators */
+ isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY1");
+ isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY2");
+
+ /* Clocks
+ *
+ * The ISP clock tree is revision-dependent. We thus need to enable ICLK
+ * manually to read the revision before calling __omap3isp_get().
+ */
+ ret = isp_map_mem_resource(pdev, isp, OMAP3_ISP_IOMEM_MAIN);
+ if (ret < 0)
+ goto error;
+
+ ret = isp_get_clocks(isp);
+ if (ret < 0)
+ goto error;
+
+ ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ if (ret < 0)
+ goto error;
+
+ isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+ dev_info(isp->dev, "Revision %d.%d found\n",
+ (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
+
+ clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+
+ if (__omap3isp_get(isp, false) == NULL) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ret = isp_reset(isp);
+ if (ret < 0)
+ goto error_isp;
+
+ ret = isp_xclk_init(isp);
+ if (ret < 0)
+ goto error_isp;
+
+ /* Memory resources */
+ for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
+ if (isp->revision == isp_res_maps[m].isp_rev)
+ break;
+
+ if (m == ARRAY_SIZE(isp_res_maps)) {
+ dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
+ (isp->revision & 0xf0) >> 4, isp->revision & 0xf);
+ ret = -ENODEV;
+ goto error_isp;
+ }
+
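+ /* OMAP3_ISP_IOMEM_MAIN (index 0) has already been mapped above to read
+ * the revision register, so start from the second resource.
+ */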
+ for (i = 1; i < OMAP3_ISP_IOMEM_LAST; i++) {
+ if (isp_res_maps[m].map & 1 << i) {
+ ret = isp_map_mem_resource(pdev, isp, i);
+ if (ret)
+ goto error_isp;
+ }
+ }
+
+ /* IOMMU */
+ ret = isp_attach_iommu(isp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to attach to IOMMU\n");
+ goto error_isp;
+ }
+
+ /* Interrupt */
+ isp->irq_num = platform_get_irq(pdev, 0);
+ if (isp->irq_num <= 0) {
+ dev_err(isp->dev, "No IRQ resource\n");
+ ret = -ENODEV;
+ goto error_iommu;
+ }
+
+ if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
+ "OMAP3 ISP", isp)) {
+ dev_err(isp->dev, "Unable to request IRQ\n");
+ ret = -EINVAL;
+ goto error_iommu;
+ }
+
+ /* Entities */
+ ret = isp_initialize_modules(isp);
+ if (ret < 0)
+ goto error_iommu;
+
+ ret = isp_register_entities(isp);
+ if (ret < 0)
+ goto error_modules;
+
+ isp_core_init(isp, 1);
+ omap3isp_put(isp);
+
+ return 0;
+
+error_modules:
+ isp_cleanup_modules(isp);
+error_iommu:
+ isp_detach_iommu(isp);
+error_isp:
+ isp_xclk_cleanup(isp);
+ __omap3isp_put(isp, false);
+error:
+ mutex_destroy(&isp->isp_mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops omap3isp_pm_ops = {
+ .prepare = isp_pm_prepare,
+ .suspend = isp_pm_suspend,
+ .resume = isp_pm_resume,
+ .complete = isp_pm_complete,
+};
+
+static struct platform_device_id omap3isp_id_table[] = {
+ { "omap3isp", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
+
+static struct platform_driver omap3isp_driver = {
+ .probe = isp_probe,
+ .remove = isp_remove,
+ .id_table = omap3isp_id_table,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "omap3isp",
+ .pm = &omap3isp_pm_ops,
+ },
+};
+
+module_platform_driver(omap3isp_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("TI OMAP3 ISP driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
new file mode 100644
index 00000000000..2c314eea125
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -0,0 +1,356 @@
+/*
+ * isp.h
+ *
+ * TI OMAP3 ISP - Core
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CORE_H
+#define OMAP3_ISP_CORE_H
+
+#include <media/omap3isp.h>
+#include <media/v4l2-device.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include "ispstat.h"
+#include "ispccdc.h"
+#include "ispreg.h"
+#include "ispresizer.h"
+#include "isppreview.h"
+#include "ispcsiphy.h"
+#include "ispcsi2.h"
+#include "ispccp2.h"
+
+#define ISP_TOK_TERM 0xFFFFFFFF /*
+ * terminating token for ISP
+ * modules reg list
+ */
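+
+/*
+ * to_isp_device - Return the isp_device embedding an ISP submodule
+ *
+ * Relies on the isp_<module> field naming convention used for the submodules
+ * in struct isp_device.
+ */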
+#define to_isp_device(ptr_module) \
+ container_of(ptr_module, struct isp_device, isp_##ptr_module)
+#define to_device(ptr_module) \
+ (to_isp_device(ptr_module)->dev)
+
+enum isp_mem_resources {
+ OMAP3_ISP_IOMEM_MAIN,
+ OMAP3_ISP_IOMEM_CCP2,
+ OMAP3_ISP_IOMEM_CCDC,
+ OMAP3_ISP_IOMEM_HIST,
+ OMAP3_ISP_IOMEM_H3A,
+ OMAP3_ISP_IOMEM_PREV,
+ OMAP3_ISP_IOMEM_RESZ,
+ OMAP3_ISP_IOMEM_SBL,
+ OMAP3_ISP_IOMEM_CSI2A_REGS1,
+ OMAP3_ISP_IOMEM_CSIPHY2,
+ OMAP3_ISP_IOMEM_CSI2A_REGS2,
+ OMAP3_ISP_IOMEM_CSI2C_REGS1,
+ OMAP3_ISP_IOMEM_CSIPHY1,
+ OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
+ OMAP3_ISP_IOMEM_LAST
+};
+
+enum isp_sbl_resource {
+ OMAP3_ISP_SBL_CSI1_READ = 0x1,
+ OMAP3_ISP_SBL_CSI1_WRITE = 0x2,
+ OMAP3_ISP_SBL_CSI2A_WRITE = 0x4,
+ OMAP3_ISP_SBL_CSI2C_WRITE = 0x8,
+ OMAP3_ISP_SBL_CCDC_LSC_READ = 0x10,
+ OMAP3_ISP_SBL_CCDC_WRITE = 0x20,
+ OMAP3_ISP_SBL_PREVIEW_READ = 0x40,
+ OMAP3_ISP_SBL_PREVIEW_WRITE = 0x80,
+ OMAP3_ISP_SBL_RESIZER_READ = 0x100,
+ OMAP3_ISP_SBL_RESIZER_WRITE = 0x200,
+};
+
+enum isp_subclk_resource {
+ OMAP3_ISP_SUBCLK_CCDC = (1 << 0),
+ OMAP3_ISP_SUBCLK_AEWB = (1 << 1),
+ OMAP3_ISP_SUBCLK_AF = (1 << 2),
+ OMAP3_ISP_SUBCLK_HIST = (1 << 3),
+ OMAP3_ISP_SUBCLK_PREVIEW = (1 << 4),
+ OMAP3_ISP_SUBCLK_RESIZER = (1 << 5),
+};
+
+/* ISP: OMAP 34xx ES 1.0 */
+#define ISP_REVISION_1_0 0x10
+/* ISP2: OMAP 34xx ES 2.0, 2.1 and 3.0 */
+#define ISP_REVISION_2_0 0x20
+/* ISP2P: OMAP 36xx */
+#define ISP_REVISION_15_0 0xF0
+
+/*
+ * struct isp_res_mapping - Map ISP io resources to ISP revision.
+ * @isp_rev: ISP_REVISION_x_x
+ * @map: bitmap for enum isp_mem_resources
+ */
+struct isp_res_mapping {
+ u32 isp_rev;
+ u32 map;
+};
+
+/*
+ * struct isp_reg - Structure for ISP register values.
+ * @reg: 32-bit Register address.
+ * @val: 32-bit Register value.
+ */
+struct isp_reg {
+ enum isp_mem_resources mmio_range;
+ u32 reg;
+ u32 val;
+};
+
+enum isp_xclk_id {
+ ISP_XCLK_A,
+ ISP_XCLK_B,
+};
+
+struct isp_xclk {
+ struct isp_device *isp;
+ struct clk_hw hw;
+ struct clk_lookup *lookup;
+ struct clk *clk;
+ enum isp_xclk_id id;
+
+ spinlock_t lock; /* Protects enabled and divider */
+ bool enabled;
+ unsigned int divider;
+};
+
+/*
+ * struct isp_device - ISP device structure.
+ * @dev: Device pointer specific to the OMAP3 ISP.
+ * @revision: Stores current ISP module revision.
+ * @irq_num: Currently used IRQ number.
+ * @mmio_base: Array with kernel base addresses for ioremapped ISP register
+ * regions.
+ * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
+ * regions.
+ * @mapping: IOMMU mapping
+ * @stat_lock: Spinlock for handling statistics
+ * @isp_mutex: Mutex for serializing requests to ISP.
+ * @stop_failure: Indicates that an entity failed to stop.
+ * @crashed: Bitmask of crashed entities (indexed by entity ID)
+ * @has_context: Context has been saved at least once and can be restored.
+ * @ref_count: Reference count for handling multiple ISP requests.
+ * @cam_ick: Pointer to camera interface clock structure.
+ * @cam_mclk: Pointer to camera functional clock structure.
+ * @csi2_fck: Pointer to camera CSI2 complexIO clock structure.
+ * @l3_ick: Pointer to OMAP3 L3 bus interface clock.
+ * @xclks: External clocks provided by the ISP
+ * @irq: Currently attached ISP ISR callbacks information structure.
+ * @isp_af: Pointer to current settings for ISP AutoFocus SCM.
+ * @isp_hist: Pointer to current settings for ISP Histogram SCM.
+ * @isp_h3a: Pointer to current settings for ISP Auto Exposure and
+ * White Balance SCM.
+ * @isp_res: Pointer to current settings for ISP Resizer.
+ * @isp_prev: Pointer to current settings for ISP Preview.
+ * @isp_ccdc: Pointer to current settings for ISP CCDC.
+ * @platform_cb: ISP driver callback function pointers for platform code
+ *
+ * This structure is used to store the OMAP ISP Information.
+ */
+struct isp_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct device *dev;
+ u32 revision;
+
+ /* platform HW resources */
+ struct isp_platform_data *pdata;
+ unsigned int irq_num;
+
+ void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
+ unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
+
+ struct dma_iommu_mapping *mapping;
+
+ /* ISP Obj */
+ spinlock_t stat_lock; /* common lock for statistic drivers */
+ struct mutex isp_mutex; /* For handling ref_count field */
+ bool stop_failure;
+ u32 crashed;
+ int has_context;
+ int ref_count;
+ unsigned int autoidle;
+#define ISP_CLK_CAM_ICK 0
+#define ISP_CLK_CAM_MCLK 1
+#define ISP_CLK_CSI2_FCK 2
+#define ISP_CLK_L3_ICK 3
+ struct clk *clock[4];
+ struct isp_xclk xclks[2];
+
+ /* ISP modules */
+ struct ispstat isp_af;
+ struct ispstat isp_aewb;
+ struct ispstat isp_hist;
+ struct isp_res_device isp_res;
+ struct isp_prev_device isp_prev;
+ struct isp_ccdc_device isp_ccdc;
+ struct isp_csi2_device isp_csi2a;
+ struct isp_csi2_device isp_csi2c;
+ struct isp_ccp2_device isp_ccp2;
+ struct isp_csiphy isp_csiphy1;
+ struct isp_csiphy isp_csiphy2;
+
+ unsigned int sbl_resources;
+ unsigned int subclk_resources;
+};
+
+#define v4l2_dev_to_isp_device(dev) \
+ container_of(dev, struct isp_device, v4l2_dev)
+
+void omap3isp_hist_dma_done(struct isp_device *isp);
+
+void omap3isp_flush(struct isp_device *isp);
+
+int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
+ enum isp_pipeline_stream_state state);
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe);
+void omap3isp_configure_bridge(struct isp_device *isp,
+ enum ccdc_input_entity input,
+ const struct isp_parallel_platform_data *pdata,
+ unsigned int shift, unsigned int bridge);
+
+struct isp_device *omap3isp_get(struct isp_device *isp);
+void omap3isp_put(struct isp_device *isp);
+
+void omap3isp_print_status(struct isp_device *isp);
+
+void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res);
+void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res);
+
+void omap3isp_subclk_enable(struct isp_device *isp,
+ enum isp_subclk_resource res);
+void omap3isp_subclk_disable(struct isp_device *isp,
+ enum isp_subclk_resource res);
+
+int omap3isp_pipeline_pm_use(struct media_entity *entity, int use);
+
+int omap3isp_register_entities(struct platform_device *pdev,
+ struct v4l2_device *v4l2_dev);
+void omap3isp_unregister_entities(struct platform_device *pdev);
+
+/*
+ * isp_reg_readl - Read value of an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @isp_mmio_range: Range to which the register offset refers.
+ * @reg_offset: Register offset to read from.
+ *
+ * Returns an unsigned 32 bit value with the required register contents.
+ */
+static inline
+u32 isp_reg_readl(struct isp_device *isp, enum isp_mem_resources isp_mmio_range,
+ u32 reg_offset)
+{
+ return __raw_readl(isp->mmio_base[isp_mmio_range] + reg_offset);
+}
+
+/*
+ * isp_reg_writel - Write value to an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @reg_value: 32 bit value to write to the register.
+ * @isp_mmio_range: Range to which the register offset refers.
+ * @reg_offset: Register offset to write into.
+ */
+static inline
+void isp_reg_writel(struct isp_device *isp, u32 reg_value,
+ enum isp_mem_resources isp_mmio_range, u32 reg_offset)
+{
+ __raw_writel(reg_value, isp->mmio_base[isp_mmio_range] + reg_offset);
+}
+
+/*
+ * isp_reg_clr - Clear individual bits in an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @clr_bits: 32 bit value which would be cleared in the register.
+ */
+static inline
+void isp_reg_clr(struct isp_device *isp, enum isp_mem_resources mmio_range,
+ u32 reg, u32 clr_bits)
+{
+ u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+ isp_reg_writel(isp, v & ~clr_bits, mmio_range, reg);
+}
+
+/*
+ * isp_reg_set - Set individual bits in an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @set_bits: 32 bit value which would be set in the register.
+ */
+static inline
+void isp_reg_set(struct isp_device *isp, enum isp_mem_resources mmio_range,
+ u32 reg, u32 set_bits)
+{
+ u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+ isp_reg_writel(isp, v | set_bits, mmio_range, reg);
+}
+
+/*
+ * isp_reg_clr_set - Clear and set individual bits in an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @clr_bits: 32 bit value which would be cleared in the register.
+ * @set_bits: 32 bit value which would be set in the register.
+ *
+ * The clear operation is done first, and then the set operation.
+ */
+static inline
+void isp_reg_clr_set(struct isp_device *isp, enum isp_mem_resources mmio_range,
+ u32 reg, u32 clr_bits, u32 set_bits)
+{
+ u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+ isp_reg_writel(isp, (v & ~clr_bits) | set_bits, mmio_range, reg);
+}
+
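+/*
+ * isp_pad_buffer_type - Return the buffer type matching a subdev pad
+ * @subdev: The sub-device
+ * @pad: Pad index
+ *
+ * Sink pads are fed from memory through a V4L2 video output queue, while
+ * source pads feed memory through a V4L2 video capture queue. Returns 0 if
+ * the pad index is out of range.
+ */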
+static inline enum v4l2_buf_type
+isp_pad_buffer_type(const struct v4l2_subdev *subdev, int pad)
+{
+ if (pad >= subdev->entity.num_pads)
+ return 0;
+
+ if (subdev->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)
+ return V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ else
+ return V4L2_BUF_TYPE_VIDEO_CAPTURE;
+}
+
+#endif /* OMAP3_ISP_CORE_H */
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
new file mode 100644
index 00000000000..9f727d20f06
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -0,0 +1,2586 @@
+/*
+ * ispccdc.c
+ *
+ * TI OMAP3 ISP - CCDC module
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/v4l2-event.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccdc.h"
+
+#define CCDC_MIN_WIDTH 32
+#define CCDC_MIN_HEIGHT 32
+
+static struct v4l2_mbus_framefmt *
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which);
+
+static const unsigned int ccdc_fmts[] = {
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_Y10_1X10,
+ V4L2_MBUS_FMT_Y12_1X12,
+ V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_SRGGB12_1X12,
+ V4L2_MBUS_FMT_SBGGR12_1X12,
+ V4L2_MBUS_FMT_SGBRG12_1X12,
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+};
+
+/*
+ * ccdc_print_status - Print current CCDC Module register values.
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Also prints other debug information stored in the CCDC module.
+ */
+#define CCDC_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###CCDC " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_##name))
+
+static void ccdc_print_status(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ dev_dbg(isp->dev, "-------------CCDC Register dump-------------\n");
+
+ CCDC_PRINT_REGISTER(isp, PCR);
+ CCDC_PRINT_REGISTER(isp, SYN_MODE);
+ CCDC_PRINT_REGISTER(isp, HD_VD_WID);
+ CCDC_PRINT_REGISTER(isp, PIX_LINES);
+ CCDC_PRINT_REGISTER(isp, HORZ_INFO);
+ CCDC_PRINT_REGISTER(isp, VERT_START);
+ CCDC_PRINT_REGISTER(isp, VERT_LINES);
+ CCDC_PRINT_REGISTER(isp, CULLING);
+ CCDC_PRINT_REGISTER(isp, HSIZE_OFF);
+ CCDC_PRINT_REGISTER(isp, SDOFST);
+ CCDC_PRINT_REGISTER(isp, SDR_ADDR);
+ CCDC_PRINT_REGISTER(isp, CLAMP);
+ CCDC_PRINT_REGISTER(isp, DCSUB);
+ CCDC_PRINT_REGISTER(isp, COLPTN);
+ CCDC_PRINT_REGISTER(isp, BLKCMP);
+ CCDC_PRINT_REGISTER(isp, FPC);
+ CCDC_PRINT_REGISTER(isp, FPC_ADDR);
+ CCDC_PRINT_REGISTER(isp, VDINT);
+ CCDC_PRINT_REGISTER(isp, ALAW);
+ CCDC_PRINT_REGISTER(isp, REC656IF);
+ CCDC_PRINT_REGISTER(isp, CFG);
+ CCDC_PRINT_REGISTER(isp, FMTCFG);
+ CCDC_PRINT_REGISTER(isp, FMT_HORZ);
+ CCDC_PRINT_REGISTER(isp, FMT_VERT);
+ CCDC_PRINT_REGISTER(isp, PRGEVEN0);
+ CCDC_PRINT_REGISTER(isp, PRGEVEN1);
+ CCDC_PRINT_REGISTER(isp, PRGODD0);
+ CCDC_PRINT_REGISTER(isp, PRGODD1);
+ CCDC_PRINT_REGISTER(isp, VP_OUT);
+ CCDC_PRINT_REGISTER(isp, LSC_CONFIG);
+ CCDC_PRINT_REGISTER(isp, LSC_INITIAL);
+ CCDC_PRINT_REGISTER(isp, LSC_TABLE_BASE);
+ CCDC_PRINT_REGISTER(isp, LSC_TABLE_OFFSET);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * omap3isp_ccdc_busy - Get busy state of the CCDC.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+int omap3isp_ccdc_busy(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) &
+ ISPCCDC_PCR_BUSY;
+}
+
+/* -----------------------------------------------------------------------------
+ * Lens Shading Compensation
+ */
+
+/*
+ * ccdc_lsc_validate_config - Check that LSC configuration is valid.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @lsc_cfg: the LSC configuration to check.
+ *
+ * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid.
+ */
+static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_lsc_config *lsc_cfg)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct v4l2_mbus_framefmt *format;
+ unsigned int paxel_width, paxel_height;
+ unsigned int paxel_shift_x, paxel_shift_y;
+ unsigned int min_width, min_height, min_size;
+ unsigned int input_width, input_height;
+
+ paxel_shift_x = lsc_cfg->gain_mode_m;
+ paxel_shift_y = lsc_cfg->gain_mode_n;
+
+ if ((paxel_shift_x < 2) || (paxel_shift_x > 6) ||
+ (paxel_shift_y < 2) || (paxel_shift_y > 6)) {
+ dev_dbg(isp->dev, "CCDC: LSC: Invalid paxel size\n");
+ return -EINVAL;
+ }
+
+ if (lsc_cfg->offset & 3) {
+ dev_dbg(isp->dev, "CCDC: LSC: Offset must be a multiple of "
+ "4\n");
+ return -EINVAL;
+ }
+
+ if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) {
+ dev_dbg(isp->dev, "CCDC: LSC: initial_x and y must be even\n");
+ return -EINVAL;
+ }
+
+ format = __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ input_width = format->width;
+ input_height = format->height;
+
+ /* Calculate minimum bytesize for validation */
+ paxel_width = 1 << paxel_shift_x;
+ min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1)
+ >> paxel_shift_x) + 1;
+
+ paxel_height = 1 << paxel_shift_y;
+ min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1)
+ >> paxel_shift_y) + 1;
+
+ min_size = 4 * min_width * min_height;
+ if (min_size > lsc_cfg->size) {
+ dev_dbg(isp->dev, "CCDC: LSC: too small table\n");
+ return -EINVAL;
+ }
+ if (lsc_cfg->offset < (min_width * 4)) {
+ dev_dbg(isp->dev, "CCDC: LSC: Offset is too small\n");
+ return -EINVAL;
+ }
+ if ((lsc_cfg->size / lsc_cfg->offset) < min_height) {
+ dev_dbg(isp->dev, "CCDC: LSC: Wrong size/offset combination\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * ccdc_lsc_program_table - Program Lens Shading Compensation table address.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc,
+ dma_addr_t addr)
+{
+ isp_reg_writel(to_isp_device(ccdc), addr,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
+}
+
+/*
+ * ccdc_lsc_setup_regs - Configures the lens shading compensation module
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_lsc_setup_regs(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_lsc_config *cfg)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ int reg;
+
+ isp_reg_writel(isp, cfg->offset, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_LSC_TABLE_OFFSET);
+
+ reg = 0;
+ reg |= cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT;
+ reg |= cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT;
+ reg |= cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT;
+ isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG);
+
+ reg = 0;
+ reg &= ~ISPCCDC_LSC_INITIAL_X_MASK;
+ reg |= cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT;
+ reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK;
+ reg |= cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT;
+ isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_LSC_INITIAL);
+}
+
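+/*
+ * ccdc_lsc_wait_prefetch - Wait for the LSC table prefetch to complete
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Poll the prefetch completion interrupt status for up to 1 ms. Returns 0 on
+ * completion or -ETIMEDOUT otherwise.
+ */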
+static int ccdc_lsc_wait_prefetch(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ unsigned int wait;
+
+ isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+
+ /* timeout 1 ms */
+ for (wait = 0; wait < 1000; wait++) {
+ if (isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS) &
+ IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ) {
+ isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+ return 0;
+ }
+
+ rmb();
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * __ccdc_lsc_enable - Enables/Disables the Lens Shading Compensation module.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @enable: 0 Disables LSC, 1 Enables LSC.
+ */
+static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct v4l2_mbus_framefmt *format =
+ __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+
+ if ((format->code != V4L2_MBUS_FMT_SGRBG10_1X10) &&
+ (format->code != V4L2_MBUS_FMT_SRGGB10_1X10) &&
+ (format->code != V4L2_MBUS_FMT_SBGGR10_1X10) &&
+ (format->code != V4L2_MBUS_FMT_SGBRG10_1X10))
+ return -EINVAL;
+
+ if (enable)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_LSC_READ);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
+ ISPCCDC_LSC_ENABLE, enable ? ISPCCDC_LSC_ENABLE : 0);
+
+ if (enable) {
+ if (ccdc_lsc_wait_prefetch(ccdc) < 0) {
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE);
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+ dev_warn(to_device(ccdc), "LSC prefetch timeout\n");
+ return -ETIMEDOUT;
+ }
+ ccdc->lsc.state = LSC_STATE_RUNNING;
+ } else {
+ ccdc->lsc.state = LSC_STATE_STOPPING;
+ }
+
+ return 0;
+}
+
+static int ccdc_lsc_busy(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG) &
+ ISPCCDC_LSC_BUSY;
+}
+
+/* __ccdc_lsc_configure - Apply a new configuration to the LSC engine
+ * @ccdc: Pointer to ISP CCDC device
+ * @req: New configuration request
+ *
+ * context: in_interrupt()
+ */
+static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
+ struct ispccdc_lsc_config_req *req)
+{
+ if (!req->enable)
+ return -EINVAL;
+
+ if (ccdc_lsc_validate_config(ccdc, &req->config) < 0) {
+ dev_dbg(to_device(ccdc), "Discard LSC configuration\n");
+ return -EINVAL;
+ }
+
+ if (ccdc_lsc_busy(ccdc))
+ return -EBUSY;
+
+ ccdc_lsc_setup_regs(ccdc, &req->config);
+ ccdc_lsc_program_table(ccdc, req->table.dma);
+ return 0;
+}
+
+/*
+ * ccdc_lsc_error_handler - Handle LSC prefetch error scenario.
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Disables LSC, deferring enablement to the next shadow register update.
+ */
+static void ccdc_lsc_error_handler(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ /*
+ * From OMAP3 TRM: When this event is pending, the module
+ * goes into transparent mode (output =input). Normal
+ * operation can be resumed at the start of the next frame
+ * after:
+ * 1) Clearing this event
+ * 2) Disabling the LSC module
+ * 3) Enabling it
+ */
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
+ ISPCCDC_LSC_ENABLE);
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+}
+
+static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
+ struct ispccdc_lsc_config_req *req)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ if (req == NULL)
+ return;
+
+ if (req->table.addr) {
+ sg_free_table(&req->table.sgt);
+ dma_free_coherent(isp->dev, req->config.size, req->table.addr,
+ req->table.dma);
+ }
+
+ kfree(req);
+}
+
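+/*
+ * ccdc_lsc_free_queue - Free all LSC requests on a queue
+ * @ccdc: Pointer to ISP CCDC device.
+ * @queue: List of requests to free
+ *
+ * The request lock is released around each free operation so that the table
+ * memory can be freed without holding the spinlock.
+ */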
+static void ccdc_lsc_free_queue(struct isp_ccdc_device *ccdc,
+ struct list_head *queue)
+{
+ struct ispccdc_lsc_config_req *req, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ list_for_each_entry_safe(req, n, queue, list) {
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+ ccdc_lsc_free_request(ccdc, req);
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ }
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+static void ccdc_lsc_free_table_work(struct work_struct *work)
+{
+ struct isp_ccdc_device *ccdc;
+ struct ispccdc_lsc *lsc;
+
+ lsc = container_of(work, struct ispccdc_lsc, table_work);
+ ccdc = container_of(lsc, struct isp_ccdc_device, lsc);
+
+ ccdc_lsc_free_queue(ccdc, &lsc->free_queue);
+}
+
+/*
+ * ccdc_lsc_config - Configure the LSC module from a userspace request
+ *
+ * Store the requested LSC configuration in the LSC engine request pointer. The
+ * configuration will be applied to the hardware when the CCDC is enabled,
+ * or at the next LSC interrupt if the CCDC is already running.
+ */
+static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_update_config *config)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct ispccdc_lsc_config_req *req;
+ unsigned long flags;
+ u16 update;
+ int ret;
+
+ update = config->update &
+ (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC);
+ if (!update)
+ return 0;
+
+ if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) {
+ dev_dbg(to_device(ccdc), "%s: Both LSC configuration and table "
+ "need to be supplied\n", __func__);
+ return -EINVAL;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (req == NULL)
+ return -ENOMEM;
+
+ if (config->flag & OMAP3ISP_CCDC_CONFIG_LSC) {
+ if (copy_from_user(&req->config, config->lsc_cfg,
+ sizeof(req->config))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ req->enable = 1;
+
+ req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
+ &req->table.dma,
+ GFP_KERNEL);
+ if (req->table.addr == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = dma_get_sgtable(isp->dev, &req->table.sgt,
+ req->table.addr, req->table.dma,
+ req->config.size);
+ if (ret < 0)
+ goto done;
+
+ dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
+ req->table.sgt.nents, DMA_TO_DEVICE);
+
+ if (copy_from_user(req->table.addr, config->lsc,
+ req->config.size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
+ req->table.sgt.nents, DMA_TO_DEVICE);
+ }
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ if (ccdc->lsc.request) {
+ list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
+ schedule_work(&ccdc->lsc.table_work);
+ }
+ ccdc->lsc.request = req;
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+
+ ret = 0;
+
+done:
+ if (ret < 0)
+ ccdc_lsc_free_request(ccdc, req);
+
+ return ret;
+}
+
+static inline int ccdc_lsc_is_configured(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ if (ccdc->lsc.active) {
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+ return 0;
+}
+
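+/*
+ * ccdc_lsc_enable - Apply the pending LSC request and start the LSC engine
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Move the pending request to the active slot and enable LSC. If the request
+ * can't be applied it is moved to the free queue, which is then drained by
+ * the table work.
+ */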
+static int ccdc_lsc_enable(struct isp_ccdc_device *ccdc)
+{
+ struct ispccdc_lsc *lsc = &ccdc->lsc;
+
+ if (lsc->state != LSC_STATE_STOPPED)
+ return -EINVAL;
+
+ if (lsc->active) {
+ list_add_tail(&lsc->active->list, &lsc->free_queue);
+ lsc->active = NULL;
+ }
+
+ if (__ccdc_lsc_configure(ccdc, lsc->request) < 0) {
+ omap3isp_sbl_disable(to_isp_device(ccdc),
+ OMAP3_ISP_SBL_CCDC_LSC_READ);
+ list_add_tail(&lsc->request->list, &lsc->free_queue);
+ lsc->request = NULL;
+ goto done;
+ }
+
+ lsc->active = lsc->request;
+ lsc->request = NULL;
+ __ccdc_lsc_enable(ccdc, 1);
+
+done:
+ if (!list_empty(&lsc->free_queue))
+ schedule_work(&lsc->table_work);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Parameters configuration
+ */
+
+/*
+ * ccdc_configure_clamp - Configure optical-black or digital clamping
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * The CCDC performs either optical-black or digital clamping. Configure and
+ * enable the selected clamping method.
+ */
+static void ccdc_configure_clamp(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ u32 clamp;
+
+ if (ccdc->obclamp) {
+ clamp = ccdc->clamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT;
+ clamp |= ccdc->clamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT;
+ clamp |= ccdc->clamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT;
+ clamp |= ccdc->clamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT;
+ isp_reg_writel(isp, clamp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP);
+ } else {
+ isp_reg_writel(isp, ccdc->clamp.dcsubval,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB);
+ }
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP,
+ ISPCCDC_CLAMP_CLAMPEN,
+ ccdc->obclamp ? ISPCCDC_CLAMP_CLAMPEN : 0);
+}
+
+/*
+ * ccdc_configure_fpc - Configure Faulty Pixel Correction
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, ISPCCDC_FPC_FPCEN);
+
+ if (!ccdc->fpc_en)
+ return;
+
+ isp_reg_writel(isp, ccdc->fpc.dma, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_FPC_ADDR);
+ /* The FPNUM field must be set before enabling FPC. */
+ isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
+ isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT) |
+ ISPCCDC_FPC_FPCEN, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
+}
+
+/*
+ * ccdc_configure_black_comp - Configure Black Level Compensation.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_black_comp(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ u32 blcomp;
+
+ blcomp = ccdc->blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT;
+ blcomp |= ccdc->blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT;
+ blcomp |= ccdc->blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT;
+ blcomp |= ccdc->blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT;
+
+ isp_reg_writel(isp, blcomp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_BLKCMP);
+}
+
+/*
+ * ccdc_configure_lpf - Configure Low-Pass Filter (LPF).
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_lpf(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE,
+ ISPCCDC_SYN_MODE_LPF,
+ ccdc->lpf ? ISPCCDC_SYN_MODE_LPF : 0);
+}
+
+/*
+ * ccdc_configure_alaw - Configure A-law compression.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_alaw(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct isp_format_info *info;
+ u32 alaw = 0;
+
+ info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);
+
+ switch (info->width) {
+ case 8:
+ return;
+
+ case 10:
+ alaw = ISPCCDC_ALAW_GWDI_9_0;
+ break;
+ case 11:
+ alaw = ISPCCDC_ALAW_GWDI_10_1;
+ break;
+ case 12:
+ alaw = ISPCCDC_ALAW_GWDI_11_2;
+ break;
+ case 13:
+ alaw = ISPCCDC_ALAW_GWDI_12_3;
+ break;
+ }
+
+ if (ccdc->alaw)
+ alaw |= ISPCCDC_ALAW_CCDTBL;
+
+ isp_reg_writel(isp, alaw, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW);
+}
+
+/*
+ * ccdc_config_imgattr - Configure sensor image specific attributes.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @colptn: Color pattern of the sensor.
+ */
+static void ccdc_config_imgattr(struct isp_ccdc_device *ccdc, u32 colptn)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_writel(isp, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN);
+}
+
+/*
+ * ccdc_config - Set CCDC configuration from userspace
+ * @ccdc: Pointer to ISP CCDC device.
+ * @ccdc_struct: Structure containing CCDC configuration sent from userspace.
+ *
+ * Returns 0 if successful, or a negative error code (-EFAULT if copying from
+ * user space fails, -EBUSY if the CCDC is streaming when an FPC update is
+ * requested, -ENOMEM on allocation failure, or -EINVAL for an invalid LSC
+ * update request).
+ */
+static int ccdc_config(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_update_config *ccdc_struct)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccdc->lock, flags);
+ ccdc->shadow_update = 1;
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ if (OMAP3ISP_CCDC_ALAW & ccdc_struct->update) {
+ ccdc->alaw = !!(OMAP3ISP_CCDC_ALAW & ccdc_struct->flag);
+ ccdc->update |= OMAP3ISP_CCDC_ALAW;
+ }
+
+ if (OMAP3ISP_CCDC_LPF & ccdc_struct->update) {
+ ccdc->lpf = !!(OMAP3ISP_CCDC_LPF & ccdc_struct->flag);
+ ccdc->update |= OMAP3ISP_CCDC_LPF;
+ }
+
+ if (OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->update) {
+ if (copy_from_user(&ccdc->clamp, ccdc_struct->bclamp,
+ sizeof(ccdc->clamp))) {
+ ccdc->shadow_update = 0;
+ return -EFAULT;
+ }
+
+ ccdc->obclamp = !!(OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->flag);
+ ccdc->update |= OMAP3ISP_CCDC_BLCLAMP;
+ }
+
+ if (OMAP3ISP_CCDC_BCOMP & ccdc_struct->update) {
+ if (copy_from_user(&ccdc->blcomp, ccdc_struct->blcomp,
+ sizeof(ccdc->blcomp))) {
+ ccdc->shadow_update = 0;
+ return -EFAULT;
+ }
+
+ ccdc->update |= OMAP3ISP_CCDC_BCOMP;
+ }
+
+ ccdc->shadow_update = 0;
+
+ if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
+ struct omap3isp_ccdc_fpc fpc;
+ struct ispccdc_fpc fpc_old = { .addr = NULL, };
+ struct ispccdc_fpc fpc_new;
+ u32 size;
+
+ if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);
+
+ if (ccdc->fpc_en) {
+ if (copy_from_user(&fpc, ccdc_struct->fpc, sizeof(fpc)))
+ return -EFAULT;
+
+ size = fpc.fpnum * 4;
+
+ /*
+ * The table address must be 64-byte aligned, which is
+ * guaranteed by dma_alloc_coherent().
+ */
+ fpc_new.fpnum = fpc.fpnum;
+ fpc_new.addr = dma_alloc_coherent(isp->dev, size,
+ &fpc_new.dma,
+ GFP_KERNEL);
+ if (fpc_new.addr == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(fpc_new.addr,
+ (__force void __user *)fpc.fpcaddr,
+ size)) {
+ dma_free_coherent(isp->dev, size, fpc_new.addr,
+ fpc_new.dma);
+ return -EFAULT;
+ }
+
+ fpc_old = ccdc->fpc;
+ ccdc->fpc = fpc_new;
+ }
+
+ ccdc_configure_fpc(ccdc);
+
+ if (fpc_old.addr != NULL)
+ dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
+ fpc_old.addr, fpc_old.dma);
+ }
+
+ return ccdc_lsc_config(ccdc, ccdc_struct);
+}
+
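+/*
+ * ccdc_apply_controls - Apply the pending shadow updates to the hardware
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Write the A-law, LPF, clamp and black compensation settings that have been
+ * flagged for update, then clear the corresponding update bits.
+ */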
+static void ccdc_apply_controls(struct isp_ccdc_device *ccdc)
+{
+ if (ccdc->update & OMAP3ISP_CCDC_ALAW) {
+ ccdc_configure_alaw(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_ALAW;
+ }
+
+ if (ccdc->update & OMAP3ISP_CCDC_LPF) {
+ ccdc_configure_lpf(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_LPF;
+ }
+
+ if (ccdc->update & OMAP3ISP_CCDC_BLCLAMP) {
+ ccdc_configure_clamp(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_BLCLAMP;
+ }
+
+ if (ccdc->update & OMAP3ISP_CCDC_BCOMP) {
+ ccdc_configure_black_comp(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_BCOMP;
+ }
+}
+
+/*
+ * omap3isp_ccdc_restore_context - Restore values of the CCDC module registers
+ * @isp: Pointer to ISP device
+ */
+void omap3isp_ccdc_restore_context(struct isp_device *isp)
+{
+ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ISPCCDC_CFG_VDLC);
+
+ ccdc->update = OMAP3ISP_CCDC_ALAW | OMAP3ISP_CCDC_LPF
+ | OMAP3ISP_CCDC_BLCLAMP | OMAP3ISP_CCDC_BCOMP;
+ ccdc_apply_controls(ccdc);
+ ccdc_configure_fpc(ccdc);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * ccdc_config_vp - Configure the Video Port.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct isp_format_info *info;
+ unsigned long l3_ick = pipe->l3_ick;
+ unsigned int max_div = isp->revision == ISP_REVISION_15_0 ? 64 : 8;
+ unsigned int div = 0;
+ u32 fmtcfg_vp;
+
+ fmtcfg_vp = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG)
+ & ~(ISPCCDC_FMTCFG_VPIN_MASK | ISPCCDC_FMTCFG_VPIF_FRQ_MASK);
+
+ info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);
+
+ switch (info->width) {
+ case 8:
+ case 10:
+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0;
+ break;
+ case 11:
+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1;
+ break;
+ case 12:
+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2;
+ break;
+ case 13:
+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3;
+ break;
+ }
+
+ if (pipe->input)
+ div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
+ else if (pipe->external_rate)
+ div = l3_ick / pipe->external_rate;
+
+ div = clamp(div, 2U, max_div);
+ fmtcfg_vp |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
+
+ isp_reg_writel(isp, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
+}
+
+/*
+ * ccdc_enable_vp - Enable Video Port.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @enable: 0 Disables VP, 1 Enables VP
+ *
+ * This is needed for outputting the image to the Preview, H3A and HIST ISP submodules.
+ */
+static void ccdc_enable_vp(struct isp_ccdc_device *ccdc, u8 enable)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG,
+ ISPCCDC_FMTCFG_VPEN, enable ? ISPCCDC_FMTCFG_VPEN : 0);
+}
+
+/*
+ * ccdc_config_outlineoffset - Configure memory saving output line offset
+ * @ccdc: Pointer to ISP CCDC device.
+ * @offset: Address offset to start a new line. Must be twice the
+ * output width and aligned on a 32 byte boundary.
+ * @oddeven: Specifies the odd/even line pattern to be chosen to store the
+ * output.
+ * @numlines: Set to 0-3 for +1 to +4 lines, or 4-7 for -1 to -4 lines.
+ *
+ * - Configures the output line offset when stored in memory
+ * - Sets the odd/even line pattern to store the output
+ * (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4))
+ * - Configures the number of even and odd line fields in case of rearranging
+ * the lines.
+ */
+static void ccdc_config_outlineoffset(struct isp_ccdc_device *ccdc,
+ u32 offset, u8 oddeven, u8 numlines)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_writel(isp, offset & 0xffff,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF);
+
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+ ISPCCDC_SDOFST_FINV);
+
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+ ISPCCDC_SDOFST_FOFST_4L);
+
+ switch (oddeven) {
+ case EVENEVEN:
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT);
+ break;
+ case ODDEVEN:
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT);
+ break;
+ case EVENODD:
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT);
+ break;
+ case ODDODD:
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * ccdc_set_outaddr - Set memory address to save output image
+ * @ccdc: Pointer to ISP CCDC device.
+ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void ccdc_set_outaddr(struct isp_ccdc_device *ccdc, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR);
+}
+
+/*
+ * omap3isp_ccdc_max_rate - Calculate maximum input data rate based on the input
+ * @ccdc: Pointer to ISP CCDC device.
+ * @max_rate: Maximum calculated data rate.
+ *
+ * Returns in *max_rate less value between calculated and passed
+ */
+void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
+ unsigned int *max_rate)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ unsigned int rate;
+
+ if (pipe == NULL)
+ return;
+
+ /*
+ * TRM says that for parallel sensors the maximum data rate
+ * should be 90% of the L3/2 clock, otherwise just L3/2.
+ */
+ if (ccdc->input == CCDC_INPUT_PARALLEL)
+ rate = pipe->l3_ick / 2 * 9 / 10;
+ else
+ rate = pipe->l3_ick / 2;
+
+ *max_rate = min(*max_rate, rate);
+}
+
+/*
+ * ccdc_config_sync_if - Set CCDC sync interface configuration
+ * @ccdc: Pointer to ISP CCDC device.
+ * @pdata: Parallel interface platform data (may be NULL)
+ * @data_size: Data size
+ */
+static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
+ struct isp_parallel_platform_data *pdata,
+ unsigned int data_size)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct v4l2_mbus_framefmt *format;
+ u32 syn_mode = ISPCCDC_SYN_MODE_VDHDEN;
+
+ format = &ccdc->formats[CCDC_PAD_SINK];
+
+ if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ /* The bridge is enabled for YUV8 formats. Configure the input
+ * mode accordingly.
+ */
+ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16;
+ }
+
+ switch (data_size) {
+ case 8:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8;
+ break;
+ case 10:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10;
+ break;
+ case 11:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11;
+ break;
+ case 12:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12;
+ break;
+ }
+
+ if (pdata && pdata->data_pol)
+ syn_mode |= ISPCCDC_SYN_MODE_DATAPOL;
+
+ if (pdata && pdata->hs_pol)
+ syn_mode |= ISPCCDC_SYN_MODE_HDPOL;
+
+ if (pdata && pdata->vs_pol)
+ syn_mode |= ISPCCDC_SYN_MODE_VDPOL;
+
+ isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+ /* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
+ * hardware seems to ignore it in all other input modes.
+ */
+ if (format->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_Y8POS);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_Y8POS);
+
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
+ ISPCCDC_REC656IF_R656ON);
+}
+
+/* CCDC formats descriptions */
+static const u32 ccdc_sgrbg_pattern =
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_srggb_pattern =
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_sbggr_pattern =
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_sgbrg_pattern =
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
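+/*
+ * ccdc_configure - Apply the current pipeline configuration to the CCDC
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Configure the lane shifter and bridge, the sync interface, the Bayer
+ * pattern, the VD interrupt lines, the output crop and line offset, the video
+ * port output size and any pending LSC request, then apply the shadow
+ * controls.
+ */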
+static void ccdc_configure(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct isp_parallel_platform_data *pdata = NULL;
+ struct v4l2_subdev *sensor;
+ struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ const struct isp_format_info *fmt_info;
+ struct v4l2_subdev_format fmt_src;
+ unsigned int depth_out;
+ unsigned int depth_in = 0;
+ struct media_pad *pad;
+ unsigned long flags;
+ unsigned int bridge;
+ unsigned int shift;
+ u32 syn_mode;
+ u32 ccdc_pattern;
+
+ pad = media_entity_remote_pad(&ccdc->pads[CCDC_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ if (ccdc->input == CCDC_INPUT_PARALLEL)
+ pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv)
+ ->bus.parallel;
+
+ /* Compute the lane shifter shift value and enable the bridge when the
+ * input format is YUV.
+ */
+ fmt_src.pad = pad->index;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) {
+ fmt_info = omap3isp_video_format_info(fmt_src.format.code);
+ depth_in = fmt_info->width;
+ }
+
+ fmt_info = omap3isp_video_format_info
+ (isp->isp_ccdc.formats[CCDC_PAD_SINK].code);
+ depth_out = fmt_info->width;
+ shift = depth_in - depth_out;
+
+ if (fmt_info->code == V4L2_MBUS_FMT_YUYV8_2X8)
+ bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
+ else if (fmt_info->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
+ else
+ bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
+
+ omap3isp_configure_bridge(isp, ccdc->input, pdata, shift, bridge);
+
+ ccdc_config_sync_if(ccdc, pdata, depth_out);
+
+ syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+ /* Use the raw, unprocessed data when writing to memory. The H3A and
+ * histogram modules are still fed with lens shading corrected data.
+ */
+ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR;
+
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ syn_mode |= ISPCCDC_SYN_MODE_WEN;
+ else
+ syn_mode &= ~ISPCCDC_SYN_MODE_WEN;
+
+ if (ccdc->output & CCDC_OUTPUT_RESIZER)
+ syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ;
+ else
+ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ;
+
+ /* CCDC_PAD_SINK */
+ format = &ccdc->formats[CCDC_PAD_SINK];
+
+ /* Mosaic filter */
+ switch (format->code) {
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case V4L2_MBUS_FMT_SRGGB12_1X12:
+ ccdc_pattern = ccdc_srggb_pattern;
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SBGGR12_1X12:
+ ccdc_pattern = ccdc_sbggr_pattern;
+ break;
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case V4L2_MBUS_FMT_SGBRG12_1X12:
+ ccdc_pattern = ccdc_sgbrg_pattern;
+ break;
+ default:
+ /* Use GRBG */
+ ccdc_pattern = ccdc_sgrbg_pattern;
+ break;
+ }
+ ccdc_config_imgattr(ccdc, ccdc_pattern);
+
+ /* Generate VD0 on the last line of the image and VD1 on the
+ * 2/3 height line.
+ */
+ isp_reg_writel(isp, ((format->height - 2) << ISPCCDC_VDINT_0_SHIFT) |
+ ((format->height * 2 / 3) << ISPCCDC_VDINT_1_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);
+
+ /* CCDC_PAD_SOURCE_OF */
+ format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
+ crop = &ccdc->crop;
+
+ isp_reg_writel(isp, (crop->left << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
+ ((crop->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
+ isp_reg_writel(isp, crop->top << ISPCCDC_VERT_START_SLV0_SHIFT,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
+ isp_reg_writel(isp, (crop->height - 1)
+ << ISPCCDC_VERT_LINES_NLV_SHIFT,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);
+
+ ccdc_config_outlineoffset(ccdc, ccdc->video_out.bpl_value, 0, 0);
+
+ /* The CCDC outputs data in UYVY order by default. Swap bytes to get
+ * YUYV.
+ */
+ if (format->code == V4L2_MBUS_FMT_YUYV8_1X16)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_BSWD);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_BSWD);
+
+ /* Use PACK8 mode for 1byte per pixel formats. */
+ if (omap3isp_video_format_info(format->code)->width <= 8)
+ syn_mode |= ISPCCDC_SYN_MODE_PACK8;
+ else
+ syn_mode &= ~ISPCCDC_SYN_MODE_PACK8;
+
+ isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+ /* CCDC_PAD_SOURCE_VP */
+ format = &ccdc->formats[CCDC_PAD_SOURCE_VP];
+
+ isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
+ (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
+ isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
+ ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);
+
+ isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
+ (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);
+
+ /* Lens shading correction. */
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ if (ccdc->lsc.request == NULL)
+ goto unlock;
+
+ WARN_ON(ccdc->lsc.active);
+
+ /* Get last good LSC configuration. If it is not supported for
+ * the current active resolution discard it.
+ */
+ if (ccdc->lsc.active == NULL &&
+ __ccdc_lsc_configure(ccdc, ccdc->lsc.request) == 0) {
+ ccdc->lsc.active = ccdc->lsc.request;
+ } else {
+ list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
+ schedule_work(&ccdc->lsc.table_work);
+ }
+
+ ccdc->lsc.request = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+
+ ccdc_apply_controls(ccdc);
+}
+
+static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
+ ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0);
+}
+
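+/*
+ * ccdc_disable - Stop the CCDC and release the LSC resources
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Request the stop sequence and wait up to two seconds for it to complete,
+ * then release the LSC request and free queue. Returns 0 on success or
+ * -ETIMEDOUT if the CCDC failed to stop in time.
+ */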
+static int ccdc_disable(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ccdc->lock, flags);
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS)
+ ccdc->stopping = CCDC_STOP_REQUEST;
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ ret = wait_event_timeout(ccdc->wait,
+ ccdc->stopping == CCDC_STOP_FINISHED,
+ msecs_to_jiffies(2000));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ dev_warn(to_device(ccdc), "CCDC stop timeout!\n");
+ }
+
+ omap3isp_sbl_disable(to_isp_device(ccdc), OMAP3_ISP_SBL_CCDC_LSC_READ);
+
+ mutex_lock(&ccdc->ioctl_lock);
+ ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
+ ccdc->lsc.request = ccdc->lsc.active;
+ ccdc->lsc.active = NULL;
+ cancel_work_sync(&ccdc->lsc.table_work);
+ ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
+ mutex_unlock(&ccdc->ioctl_lock);
+
+ ccdc->stopping = CCDC_STOP_NOT_REQUESTED;
+
+ return ret > 0 ? 0 : ret;
+}
+
+static void ccdc_enable(struct isp_ccdc_device *ccdc)
+{
+ if (ccdc_lsc_is_configured(ccdc))
+ __ccdc_lsc_enable(ccdc, 1);
+ __ccdc_enable(ccdc, 1);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * ccdc_sbl_busy - Poll idle state of CCDC and related SBL memory write bits
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Returns zero only when the CCDC is idle and the image has been completely
+ * written to memory.
+ */
+static int ccdc_sbl_busy(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
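+	/* OR together the CCDC busy bit and the DATA_READY bits of the four
+	 * SBL CCDC write channels; a non-zero result means the image is still
+	 * being written to memory.
+	 */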
+ return omap3isp_ccdc_busy(ccdc)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) &
+ ISPSBL_CCDC_WR_0_DATA_READY)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) &
+ ISPSBL_CCDC_WR_0_DATA_READY)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) &
+ ISPSBL_CCDC_WR_0_DATA_READY)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) &
+ ISPSBL_CCDC_WR_0_DATA_READY);
+}
+
+/*
+ * ccdc_sbl_wait_idle - Wait until the CCDC and related SBL are idle
+ * @ccdc: Pointer to ISP CCDC device.
+ * @max_wait: Maximum number of 1 us retries to wait for the busy-to-idle transition.
+ */
+static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
+ unsigned int max_wait)
+{
+ unsigned int wait = 0;
+
+ if (max_wait == 0)
+ max_wait = 10000; /* 10 ms */
+
+ for (wait = 0; wait <= max_wait; wait++) {
+ if (!ccdc_sbl_busy(ccdc))
+ return 0;
+
+ rmb();
+ udelay(1);
+ }
+
+ return -EBUSY;
+}
+
+/* __ccdc_handle_stopping - Handle CCDC and/or LSC stopping sequence
+ * @ccdc: Pointer to ISP CCDC device.
+ * @event: Event that triggered the handler
+ *
+ * Return 1 when the event and stopping request combination is satisfied,
+ * zero otherwise.
+ */
+static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
+{
+ int rval = 0;
+
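+	/* The low two bits of ccdc->stopping hold the REQUEST/EXECUTED flags
+	 * while the event codes use higher bits, so ORing them produces a
+	 * unique case value for every state/event combination.
+	 */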
+ switch ((ccdc->stopping & 3) | event) {
+ case CCDC_STOP_REQUEST | CCDC_EVENT_VD1:
+ if (ccdc->lsc.state != LSC_STATE_STOPPED)
+ __ccdc_lsc_enable(ccdc, 0);
+ __ccdc_enable(ccdc, 0);
+ ccdc->stopping = CCDC_STOP_EXECUTED;
+ return 1;
+
+ case CCDC_STOP_EXECUTED | CCDC_EVENT_VD0:
+ ccdc->stopping |= CCDC_STOP_CCDC_FINISHED;
+ if (ccdc->lsc.state == LSC_STATE_STOPPED)
+ ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
+ rval = 1;
+ break;
+
+ case CCDC_STOP_EXECUTED | CCDC_EVENT_LSC_DONE:
+ ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
+ rval = 1;
+ break;
+
+ case CCDC_STOP_EXECUTED | CCDC_EVENT_VD1:
+ return 1;
+ }
+
+ if (ccdc->stopping == CCDC_STOP_FINISHED) {
+ wake_up(&ccdc->wait);
+ rval = 1;
+ }
+
+ return rval;
+}
+
+static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct video_device *vdev = ccdc->subdev.devnode;
+ struct v4l2_event event;
+
+ /* Frame number propagation */
+ atomic_inc(&pipe->frame_number);
+
+ memset(&event, 0, sizeof(event));
+ event.type = V4L2_EVENT_FRAME_SYNC;
+ event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);
+
+ v4l2_event_queue(vdev, &event);
+}
+
+/*
+ * ccdc_lsc_isr - Handle LSC events
+ * @ccdc: Pointer to ISP CCDC device.
+ * @events: LSC events
+ */
+static void ccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
+{
+ unsigned long flags;
+
+ if (events & IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ) {
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&ccdc->subdev.entity);
+
+ ccdc_lsc_error_handler(ccdc);
+ pipe->error = true;
+ dev_dbg(to_device(ccdc), "lsc prefetch error\n");
+ }
+
+ if (!(events & IRQ0STATUS_CCDC_LSC_DONE_IRQ))
+ return;
+
+	/* The LSC_DONE interrupt occurs in two cases:
+	 * 1. stopping for reconfiguration
+	 * 2. stopping because of a STREAM OFF command
+ */
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+
+ if (ccdc->lsc.state == LSC_STATE_STOPPING)
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+
+ if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_LSC_DONE))
+ goto done;
+
+ if (ccdc->lsc.state != LSC_STATE_RECONFIG)
+ goto done;
+
+ /* LSC is in STOPPING state, change to the new state */
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+
+	/* This is an exception: the start of frame and the LSC_DONE interrupt
+	 * have been received at the same time. Skip this event and wait for
+	 * the next one.
+ */
+ if (events & IRQ0STATUS_HS_VS_IRQ)
+ goto done;
+
+ /* The LSC engine is stopped at this point. Enable it if there's a
+ * pending request.
+ */
+ if (ccdc->lsc.request == NULL)
+ goto done;
+
+ ccdc_lsc_enable(ccdc);
+
+done:
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct isp_buffer *buffer;
+ int restart = 0;
+
+ /* The CCDC generates VD0 interrupts even when disabled (the datasheet
+ * doesn't explicitly state if that's supposed to happen or not, so it
+ * can be considered as a hardware bug or as a feature, but we have to
+ * deal with it anyway). Disabling the CCDC when no buffer is available
+	 * would thus not be enough; we need to handle the situation explicitly.
+ */
+ if (list_empty(&ccdc->video_out.dmaqueue))
+ goto done;
+
+ /* We're in continuous mode, and memory writes were disabled due to a
+ * buffer underrun. Reenable them now that we have a buffer. The buffer
+ * address has been set in ccdc_video_queue.
+ */
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
+ restart = 1;
+ ccdc->underrun = 0;
+ goto done;
+ }
+
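+	/* Wait for the CCDC and SBL to become idle before switching buffers.
+	 * If they don't within 1000us, mark the entity as crashed and cancel
+	 * the stream.
+	 */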
+ if (ccdc_sbl_wait_idle(ccdc, 1000)) {
+ dev_info(isp->dev, "CCDC won't become idle!\n");
+ isp->crashed |= 1U << ccdc->subdev.entity.id;
+ omap3isp_pipeline_cancel_stream(pipe);
+ goto done;
+ }
+
+ buffer = omap3isp_video_buffer_next(&ccdc->video_out);
+ if (buffer != NULL) {
+ ccdc_set_outaddr(ccdc, buffer->dma);
+ restart = 1;
+ }
+
+ pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
+
+ if (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT &&
+ isp_pipeline_ready(pipe))
+ omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_SINGLESHOT);
+
+done:
+ return restart;
+}
+
+/*
+ * ccdc_vd0_isr - Handle VD0 event
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Executes LSC deferred enablement before next frame starts.
+ */
+static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+ int restart = 0;
+
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ restart = ccdc_isr_buffer(ccdc);
+
+ spin_lock_irqsave(&ccdc->lock, flags);
+ if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+ return;
+ }
+
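+	/* Apply the pending control updates now, unless userspace is in the
+	 * middle of a shadow update (both paths are serialized by ccdc->lock).
+	 */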
+ if (!ccdc->shadow_update)
+ ccdc_apply_controls(ccdc);
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ if (restart)
+ ccdc_enable(ccdc);
+}
+
+/*
+ * ccdc_vd1_isr - Handle VD1 event
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+
+ /*
+	 * Depending on the CCDC pipeline state, CCDC stopping must be
+	 * handled differently. In SINGLESHOT mode we emulate an internal
+	 * CCDC stop because the CCDC hardware works only in continuous
+	 * mode. In CONTINUOUS mode, when the CCDC writes its data to
+	 * memory, the CCDC and LSC are stopped immediately without going
+	 * through the CCDC stopping state machine. The stopping state
+	 * machine should only be used when a stop request is received
+	 * from userspace (SINGLESHOT being the exception).
+ */
+ switch (ccdc->state) {
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ ccdc->stopping = CCDC_STOP_REQUEST;
+ break;
+
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (ccdc->output & CCDC_OUTPUT_MEMORY) {
+ if (ccdc->lsc.state != LSC_STATE_STOPPED)
+ __ccdc_lsc_enable(ccdc, 0);
+ __ccdc_enable(ccdc, 0);
+ }
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ break;
+ }
+
+ if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1))
+ goto done;
+
+ if (ccdc->lsc.request == NULL)
+ goto done;
+
+ /*
+	 * The LSC needs to be reconfigured. Stop it here; the appropriate
+	 * register changes will be applied on the next LSC_DONE IRQ.
+ */
+ if (ccdc->lsc.state == LSC_STATE_RUNNING) {
+ __ccdc_lsc_enable(ccdc, 0);
+ ccdc->lsc.state = LSC_STATE_RECONFIG;
+ goto done;
+ }
+
+	/* The LSC is in the STOPPED state, enable it. */
+ if (ccdc->lsc.state == LSC_STATE_STOPPED)
+ ccdc_lsc_enable(ccdc);
+
+done:
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+/*
+ * omap3isp_ccdc_isr - Configure CCDC during interframe time.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @events: CCDC events
+ */
+int omap3isp_ccdc_isr(struct isp_ccdc_device *ccdc, u32 events)
+{
+ if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ if (events & IRQ0STATUS_CCDC_VD1_IRQ)
+ ccdc_vd1_isr(ccdc);
+
+ ccdc_lsc_isr(ccdc, events);
+
+ if (events & IRQ0STATUS_CCDC_VD0_IRQ)
+ ccdc_vd0_isr(ccdc);
+
+ if (events & IRQ0STATUS_HS_VS_IRQ)
+ ccdc_hs_vs_isr(ccdc);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+ struct isp_ccdc_device *ccdc = &video->isp->isp_ccdc;
+
+ if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
+ return -ENODEV;
+
+ ccdc_set_outaddr(ccdc, buffer->dma);
+
+	/* We now have a buffer queued on the output; restart the pipeline
+ * on the next CCDC interrupt if running in continuous mode (or when
+ * starting the stream).
+ */
+ ccdc->underrun = 1;
+
+ return 0;
+}
+
+static const struct isp_video_operations ccdc_video_ops = {
+ .queue = ccdc_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * ccdc_ioctl - CCDC module private ioctls
+ * @sd: ISP CCDC V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long ccdc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ int ret;
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_CCDC_CFG:
+ mutex_lock(&ccdc->ioctl_lock);
+ ret = ccdc_config(ccdc, arg);
+ mutex_unlock(&ccdc->ioctl_lock);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return ret;
+}
+
+static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* line number is zero at frame start */
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
+}
+
+static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+/*
+ * ccdc_set_stream - Enable/Disable streaming on the CCDC module
+ * @sd: ISP CCDC V4L2 subdevice
+ * @enable: Enable/disable stream
+ *
+ * When writing to memory, the CCDC hardware can't be enabled without a memory
+ * buffer to write to. As the s_stream operation is called in response to a
+ * STREAMON call without any buffer queued yet, just update the enabled field
+ * and return immediately. The CCDC will be enabled in ccdc_isr_buffer().
+ *
+ * When not writing to memory enable the CCDC immediately.
+ */
+static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = to_isp_device(ccdc);
+ int ret = 0;
+
+ if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_CCDC);
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_VDLC);
+
+ ccdc_configure(ccdc);
+
+ /* TODO: Don't configure the video port if all of its output
+ * links are inactive.
+ */
+ ccdc_config_vp(ccdc);
+ ccdc_enable_vp(ccdc, 1);
+ ccdc_print_status(ccdc);
+ }
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+
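+		/* Enable the CCDC only if a buffer has already been queued
+		 * (the queue operation sets the underrun flag) or when not
+		 * writing to memory; otherwise enabling is deferred to
+		 * ccdc_isr_buffer().
+		 */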
+ if (ccdc->underrun || !(ccdc->output & CCDC_OUTPUT_MEMORY))
+ ccdc_enable(ccdc);
+
+ ccdc->underrun = 0;
+ break;
+
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ if (ccdc->output & CCDC_OUTPUT_MEMORY &&
+ ccdc->state != ISP_PIPELINE_STREAM_SINGLESHOT)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+
+ ccdc_enable(ccdc);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ ret = ccdc_disable(ccdc);
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+ omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_CCDC);
+ ccdc->underrun = 0;
+ break;
+ }
+
+ ccdc->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &ccdc->formats[pad];
+}
+
+static struct v4l2_rect *
+__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(fh, CCDC_PAD_SOURCE_OF);
+ else
+ return &ccdc->crop;
+}
+
+/*
+ * ccdc_try_format - Try video format on a pad
+ * @ccdc: ISP CCDC device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ const struct isp_format_info *info;
+ enum v4l2_mbus_pixelcode pixelcode;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ struct v4l2_rect *crop;
+ unsigned int i;
+
+ switch (pad) {
+ case CCDC_PAD_SINK:
+ for (i = 0; i < ARRAY_SIZE(ccdc_fmts); i++) {
+ if (fmt->code == ccdc_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(ccdc_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 32, 4096);
+ fmt->height = clamp_t(u32, height, 32, 4096);
+ break;
+
+ case CCDC_PAD_SOURCE_OF:
+ pixelcode = fmt->code;
+ *fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
+
+ /* YUV formats are converted from 2X8 to 1X16 by the bridge and
+ * can be byte-swapped.
+ */
+ if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ /* Use the user requested format if YUV. */
+ if (pixelcode == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ pixelcode == V4L2_MBUS_FMT_UYVY8_2X8 ||
+ pixelcode == V4L2_MBUS_FMT_YUYV8_1X16 ||
+ pixelcode == V4L2_MBUS_FMT_UYVY8_1X16)
+ fmt->code = pixelcode;
+
+ if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8)
+ fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
+ else if (fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ }
+
+ /* Hardcode the output size to the crop rectangle size. */
+ crop = __ccdc_get_crop(ccdc, fh, which);
+ fmt->width = crop->width;
+ fmt->height = crop->height;
+ break;
+
+ case CCDC_PAD_SOURCE_VP:
+ *fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
+
+ /* The video port interface truncates the data to 10 bits. */
+ info = omap3isp_video_format_info(fmt->code);
+ fmt->code = info->truncated;
+
+ /* YUV formats are not supported by the video port. */
+ if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ fmt->code = 0;
+
+ /* The number of lines that can be clocked out from the video
+ * port output must be at least one line less than the number
+ * of input lines.
+ */
+ fmt->width = clamp_t(u32, width, 32, fmt->width);
+ fmt->height = clamp_t(u32, height, 32, fmt->height - 1);
+ break;
+ }
+
+	/* Data is written to memory unpacked: each 10-bit or 12-bit pixel is
+	 * stored in 2 bytes.
+ */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ccdc_try_crop - Validate a crop rectangle
+ * @ccdc: ISP CCDC device
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ */
+static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
+ const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ const struct isp_format_info *info;
+ unsigned int max_width;
+
+ /* For Bayer formats, restrict left/top and width/height to even values
+ * to keep the Bayer pattern.
+ */
+ info = omap3isp_video_format_info(sink->code);
+ if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ crop->left &= ~1;
+ crop->top &= ~1;
+ }
+
+ crop->left = clamp_t(u32, crop->left, 0, sink->width - CCDC_MIN_WIDTH);
+ crop->top = clamp_t(u32, crop->top, 0, sink->height - CCDC_MIN_HEIGHT);
+
+ /* The data formatter truncates the number of horizontal output pixels
+ * to a multiple of 16. To avoid clipping data, allow callers to request
+ * an output size bigger than the input size up to the nearest multiple
+ * of 16.
+ */
+ max_width = (sink->width - crop->left + 15) & ~15;
+ crop->width = clamp_t(u32, crop->width, CCDC_MIN_WIDTH, max_width)
+ & ~15;
+ crop->height = clamp_t(u32, crop->height, CCDC_MIN_HEIGHT,
+ sink->height - crop->top);
+
+ /* Odd width/height values don't make sense for Bayer formats. */
+ if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ crop->width &= ~1;
+ crop->height &= ~1;
+ }
+}
+
+/*
+ * ccdc_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ switch (code->pad) {
+ case CCDC_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ccdc_fmts))
+ return -EINVAL;
+
+ code->code = ccdc_fmts[code->index];
+ break;
+
+ case CCDC_PAD_SOURCE_OF:
+ format = __ccdc_get_format(ccdc, fh, code->pad,
+ V4L2_SUBDEV_FORMAT_TRY);
+
+ if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ /* In YUV mode the CCDC can swap bytes. */
+ if (code->index == 0)
+ code->code = V4L2_MBUS_FMT_YUYV8_1X16;
+ else if (code->index == 1)
+ code->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ else
+ return -EINVAL;
+ } else {
+			/* In raw mode, no configurable format conversion is
+ * available.
+ */
+ if (code->index == 0)
+ code->code = format->code;
+ else
+ return -EINVAL;
+ }
+ break;
+
+ case CCDC_PAD_SOURCE_VP:
+ /* The CCDC supports no configurable format conversion
+ * compatible with the video port. Enumerate a single output
+ * format code.
+ */
+ if (code->index != 0)
+ return -EINVAL;
+
+ format = __ccdc_get_format(ccdc, fh, code->pad,
+ V4L2_SUBDEV_FORMAT_TRY);
+
+ /* A pixel code equal to 0 means that the video port doesn't
+ * support the input format. Don't enumerate any pixel code.
+ */
+ if (format->code == 0)
+ return -EINVAL;
+
+ code->code = format->code;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
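+	/* Run a 1x1 format through ccdc_try_format() to find the minimum
+	 * size supported on this pad.
+	 */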
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
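+	/* Repeat with the largest possible size (-1 wraps around to the
+	 * maximum u32 value) to find the maximum supported size.
+	 */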
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ccdc_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the output formatter
+ * source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ccdc_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the output
+ * formatter source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->target != V4L2_SEL_TGT_CROP ||
+ sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ /* The crop rectangle can't be changed while streaming. */
+ if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ /* Modifying the crop rectangle always changes the format on the source
+ * pad. If the KEEP_CONFIG flag is set, just return the current crop
+ * rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ return 0;
+ }
+
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ *__ccdc_get_crop(ccdc, fh, sel->which) = sel->r;
+
+ /* Update the source format. */
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, sel->which);
+ ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, sel->which);
+
+ return 0;
+}
+
+/*
+ * ccdc_get_format - Retrieve the video format on a pad
+ * @sd : ISP CCDC V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ccdc_set_format - Set the video format on a pad
+ * @sd : ISP CCDC V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ccdc_try_format(ccdc, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CCDC_PAD_SINK) {
+ /* Reset the crop rectangle. */
+ crop = __ccdc_get_crop(ccdc, fh, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
+ ccdc_try_crop(ccdc, &fmt->format, crop);
+
+ /* Update the source formats. */
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF,
+ fmt->which);
+ *format = fmt->format;
+ ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format,
+ fmt->which);
+
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_VP,
+ fmt->which);
+ *format = fmt->format;
+ ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_VP, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * ccdc_is_shiftable - Decide whether the desired output pixel code can be
+ * obtained with the lane shifter by shifting the input pixel code.
+ * @in: input pixelcode to shifter
+ * @out: output pixelcode from shifter
+ * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
+ *
+ * return true if the combination is possible
+ * return false otherwise
+ */
+static bool ccdc_is_shiftable(enum v4l2_mbus_pixelcode in,
+ enum v4l2_mbus_pixelcode out,
+ unsigned int additional_shift)
+{
+ const struct isp_format_info *in_info, *out_info;
+
+ if (in == out)
+ return true;
+
+ in_info = omap3isp_video_format_info(in);
+ out_info = omap3isp_video_format_info(out);
+
+ if ((in_info->flavor == 0) || (out_info->flavor == 0))
+ return false;
+
+ if (in_info->flavor != out_info->flavor)
+ return false;
+
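+	/* The lane shifter can compensate a difference of at most 6 bits
+	 * between the input and output bit widths, including the offset of
+	 * the sensor's LSB from CAMEXT[0].
+	 */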
+ return in_info->width - out_info->width + additional_shift <= 6;
+}
+
+static int ccdc_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ unsigned long parallel_shift;
+
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ /* We've got a parallel sensor here. */
+ if (ccdc->input == CCDC_INPUT_PARALLEL) {
+ struct isp_parallel_platform_data *pdata =
+ &((struct isp_v4l2_subdevs_group *)
+ media_entity_to_v4l2_subdev(link->source->entity)
+ ->host_priv)->bus.parallel;
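+		/* Each data_lane_shift step corresponds to two bits on the
+		 * parallel bus; convert it to a bit count for the
+		 * shiftability check.
+		 */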
+ parallel_shift = pdata->data_lane_shift * 2;
+ } else {
+ parallel_shift = 0;
+ }
+
+ /* Lane shifter may be used to drop bits on CCDC sink pad */
+ if (!ccdc_is_shiftable(source_fmt->format.code,
+ sink_fmt->format.code, parallel_shift))
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * ccdc_init_formats - Initialize formats on all pads
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CCDC_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ccdc_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = {
+ .ioctl = ccdc_ioctl,
+ .subscribe_event = ccdc_subscribe_event,
+ .unsubscribe_event = ccdc_unsubscribe_event,
+};
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ccdc_v4l2_video_ops = {
+ .s_stream = ccdc_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = {
+ .enum_mbus_code = ccdc_enum_mbus_code,
+ .enum_frame_size = ccdc_enum_frame_size,
+ .get_fmt = ccdc_get_format,
+ .set_fmt = ccdc_set_format,
+ .get_selection = ccdc_get_selection,
+ .set_selection = ccdc_set_selection,
+ .link_validate = ccdc_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ccdc_v4l2_ops = {
+ .core = &ccdc_v4l2_core_ops,
+ .video = &ccdc_v4l2_video_ops,
+ .pad = &ccdc_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ccdc_v4l2_internal_ops = {
+ .open = ccdc_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ccdc_link_setup - Setup CCDC connections
+ * @entity: CCDC media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ccdc_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case CCDC_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Read from the sensor (parallel interface), CCP2, CSI2a or
+ * CSI2c.
+ */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ccdc->input = CCDC_INPUT_NONE;
+ break;
+ }
+
+ if (ccdc->input != CCDC_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &isp->isp_ccp2.subdev.entity)
+ ccdc->input = CCDC_INPUT_CCP2B;
+ else if (remote->entity == &isp->isp_csi2a.subdev.entity)
+ ccdc->input = CCDC_INPUT_CSI2A;
+ else if (remote->entity == &isp->isp_csi2c.subdev.entity)
+ ccdc->input = CCDC_INPUT_CSI2C;
+ else
+ ccdc->input = CCDC_INPUT_PARALLEL;
+
+ break;
+
+ /*
+ * The ISP core doesn't support pipelines with multiple video outputs.
+	 * Revisit this once it is implemented, and return -EBUSY for now.
+ */
+
+ case CCDC_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Write to preview engine, histogram and H3A. When none of
+ * those links are active, the video port can be disabled.
+ */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccdc->output & ~CCDC_OUTPUT_PREVIEW)
+ return -EBUSY;
+ ccdc->output |= CCDC_OUTPUT_PREVIEW;
+ } else {
+ ccdc->output &= ~CCDC_OUTPUT_PREVIEW;
+ }
+ break;
+
+ case CCDC_PAD_SOURCE_OF | MEDIA_ENT_T_DEVNODE:
+ /* Write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccdc->output & ~CCDC_OUTPUT_MEMORY)
+ return -EBUSY;
+ ccdc->output |= CCDC_OUTPUT_MEMORY;
+ } else {
+ ccdc->output &= ~CCDC_OUTPUT_MEMORY;
+ }
+ break;
+
+ case CCDC_PAD_SOURCE_OF | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Write to resizer */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccdc->output & ~CCDC_OUTPUT_RESIZER)
+ return -EBUSY;
+ ccdc->output |= CCDC_OUTPUT_RESIZER;
+ } else {
+ ccdc->output &= ~CCDC_OUTPUT_RESIZER;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ccdc_media_ops = {
+ .link_setup = ccdc_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
+{
+ v4l2_device_unregister_subdev(&ccdc->subdev);
+ omap3isp_video_unregister(&ccdc->video_out);
+}
+
+int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&ccdc->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_ccdc_unregister_entities(ccdc);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CCDC initialisation and cleanup
+ */
+
+/*
+ * ccdc_init_entities - Initialize V4L2 subdev and media entity
+ * @ccdc: ISP CCDC module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
+{
+ struct v4l2_subdev *sd = &ccdc->subdev;
+ struct media_pad *pads = ccdc->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ccdc->input = CCDC_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &ccdc_v4l2_ops);
+ sd->internal_ops = &ccdc_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP CCDC", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, ccdc);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ccdc_media_ops;
+ ret = media_entity_init(me, CCDC_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ ccdc_init_formats(sd, NULL);
+
+ ccdc->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ccdc->video_out.ops = &ccdc_video_ops;
+ ccdc->video_out.isp = to_isp_device(ccdc);
+ ccdc->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+ ccdc->video_out.bpl_alignment = 32;
+
+ ret = omap3isp_video_init(&ccdc->video_out, "CCDC");
+ if (ret < 0)
+ goto error_video;
+
+ /* Connect the CCDC subdev to the video node. */
+ ret = media_entity_create_link(&ccdc->subdev.entity, CCDC_PAD_SOURCE_OF,
+ &ccdc->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap3isp_video_cleanup(&ccdc->video_out);
+error_video:
+ media_entity_cleanup(me);
+ return ret;
+}
+
+/*
+ * omap3isp_ccdc_init - CCDC module initialization.
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap3isp_ccdc_init(struct isp_device *isp)
+{
+ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+ int ret;
+
+ spin_lock_init(&ccdc->lock);
+ init_waitqueue_head(&ccdc->wait);
+ mutex_init(&ccdc->ioctl_lock);
+
+ ccdc->stopping = CCDC_STOP_NOT_REQUESTED;
+
+ INIT_WORK(&ccdc->lsc.table_work, ccdc_lsc_free_table_work);
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+ INIT_LIST_HEAD(&ccdc->lsc.free_queue);
+ spin_lock_init(&ccdc->lsc.req_lock);
+
+ ccdc->clamp.oblen = 0;
+ ccdc->clamp.dcsubval = 0;
+
+ ccdc->update = OMAP3ISP_CCDC_BLCLAMP;
+ ccdc_apply_controls(ccdc);
+
+ ret = ccdc_init_entities(ccdc);
+ if (ret < 0) {
+ mutex_destroy(&ccdc->ioctl_lock);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * omap3isp_ccdc_cleanup - CCDC module cleanup.
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ */
+void omap3isp_ccdc_cleanup(struct isp_device *isp)
+{
+ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+
+ omap3isp_video_cleanup(&ccdc->video_out);
+ media_entity_cleanup(&ccdc->subdev.entity);
+
+ /* Free LSC requests. As the CCDC is stopped there's no active request,
+ * so only the pending request and the free queue need to be handled.
+ */
+ ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
+ cancel_work_sync(&ccdc->lsc.table_work);
+ ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
+
+ if (ccdc->fpc.addr != NULL)
+ dma_free_coherent(isp->dev, ccdc->fpc.fpnum * 4, ccdc->fpc.addr,
+ ccdc->fpc.dma);
+
+ mutex_destroy(&ccdc->ioctl_lock);
+}
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
new file mode 100644
index 00000000000..f65061602c7
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -0,0 +1,176 @@
+/*
+ * ispccdc.h
+ *
+ * TI OMAP3 ISP - CCDC module
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CCDC_H
+#define OMAP3_ISP_CCDC_H
+
+#include <linux/omap3isp.h>
+#include <linux/workqueue.h>
+
+#include "ispvideo.h"
+
+enum ccdc_input_entity {
+ CCDC_INPUT_NONE,
+ CCDC_INPUT_PARALLEL,
+ CCDC_INPUT_CSI2A,
+ CCDC_INPUT_CCP2B,
+ CCDC_INPUT_CSI2C
+};
+
+#define CCDC_OUTPUT_MEMORY (1 << 0)
+#define CCDC_OUTPUT_PREVIEW (1 << 1)
+#define CCDC_OUTPUT_RESIZER (1 << 2)
+
+#define OMAP3ISP_CCDC_NEVENTS 16
+
+struct ispccdc_fpc {
+ void *addr;
+ dma_addr_t dma;
+ unsigned int fpnum;
+};
+
+enum ispccdc_lsc_state {
+ LSC_STATE_STOPPED = 0,
+ LSC_STATE_STOPPING = 1,
+ LSC_STATE_RUNNING = 2,
+ LSC_STATE_RECONFIG = 3,
+};
+
+struct ispccdc_lsc_config_req {
+ struct list_head list;
+ struct omap3isp_ccdc_lsc_config config;
+ unsigned char enable;
+
+ struct {
+ void *addr;
+ dma_addr_t dma;
+ struct sg_table sgt;
+ } table;
+};
+
+/*
+ * ispccdc_lsc - CCDC LSC parameters
+ */
+struct ispccdc_lsc {
+ enum ispccdc_lsc_state state;
+ struct work_struct table_work;
+
+ /* LSC queue of configurations */
+ spinlock_t req_lock;
+ struct ispccdc_lsc_config_req *request; /* requested configuration */
+ struct ispccdc_lsc_config_req *active; /* active configuration */
+ struct list_head free_queue; /* configurations for freeing */
+};
+
+#define CCDC_STOP_NOT_REQUESTED 0x00
+#define CCDC_STOP_REQUEST 0x01
+#define CCDC_STOP_EXECUTED (0x02 | CCDC_STOP_REQUEST)
+#define CCDC_STOP_CCDC_FINISHED 0x04
+#define CCDC_STOP_LSC_FINISHED 0x08
+#define CCDC_STOP_FINISHED \
+ (CCDC_STOP_EXECUTED | CCDC_STOP_CCDC_FINISHED | CCDC_STOP_LSC_FINISHED)
+
+#define CCDC_EVENT_VD1 0x10
+#define CCDC_EVENT_VD0 0x20
+#define CCDC_EVENT_LSC_DONE 0x40
+
+/* Sink and source CCDC pads */
+#define CCDC_PAD_SINK 0
+#define CCDC_PAD_SOURCE_OF 1
+#define CCDC_PAD_SOURCE_VP 2
+#define CCDC_PADS_NUM 3
+
+/*
+ * struct isp_ccdc_device - Structure for the CCDC module to store its own
+ * information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @crop: Active crop rectangle on the OF source pad
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @alaw: A-law compression enabled (1) or disabled (0)
+ * @lpf: Low pass filter enabled (1) or disabled (0)
+ * @obclamp: Optical-black clamp enabled (1) or disabled (0)
+ * @fpc_en: Faulty pixels correction enabled (1) or disabled (0)
+ * @blcomp: Black level compensation configuration
+ * @clamp: Optical-black or digital clamp configuration
+ * @fpc: Faulty pixels correction configuration
+ * @lsc: Lens shading compensation configuration
+ * @update: Bitmask of controls to update during the next interrupt
+ * @shadow_update: Controls update in progress by userspace
+ * @underrun: A buffer underrun occurred and a new buffer has been queued
+ * @state: Streaming state
+ * @lock: Serializes shadow_update with interrupt handler
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ * @ioctl_lock: Serializes ioctl calls and LSC requests freeing
+ */
+struct isp_ccdc_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CCDC_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CCDC_PADS_NUM];
+ struct v4l2_rect crop;
+
+ enum ccdc_input_entity input;
+ unsigned int output;
+ struct isp_video video_out;
+
+ unsigned int alaw:1,
+ lpf:1,
+ obclamp:1,
+ fpc_en:1;
+ struct omap3isp_ccdc_blcomp blcomp;
+ struct omap3isp_ccdc_bclamp clamp;
+ struct ispccdc_fpc fpc;
+ struct ispccdc_lsc lsc;
+ unsigned int update;
+ unsigned int shadow_update;
+
+ unsigned int underrun:1;
+ enum isp_pipeline_stream_state state;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ unsigned int stopping;
+ struct mutex ioctl_lock;
+};
+
+struct isp_device;
+
+int omap3isp_ccdc_init(struct isp_device *isp);
+void omap3isp_ccdc_cleanup(struct isp_device *isp);
+int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+ struct v4l2_device *vdev);
+void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc);
+
+int omap3isp_ccdc_busy(struct isp_ccdc_device *isp_ccdc);
+int omap3isp_ccdc_isr(struct isp_ccdc_device *isp_ccdc, u32 events);
+void omap3isp_ccdc_restore_context(struct isp_device *isp);
+void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
+ unsigned int *max_rate);
+
+#endif /* OMAP3_ISP_CCDC_H */
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
new file mode 100644
index 00000000000..f3801db9095
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -0,0 +1,1179 @@
+/*
+ * ispccp2.c
+ *
+ * TI OMAP3 ISP - CCP2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccp2.h"
+
+/* Number of LCX channels */
+#define CCP2_LCx_CHANS_NUM 3
+/* Max/Min size for CCP2 video port */
+#define ISPCCP2_DAT_START_MIN 0
+#define ISPCCP2_DAT_START_MAX 4095
+#define ISPCCP2_DAT_SIZE_MIN 0
+#define ISPCCP2_DAT_SIZE_MAX 4095
+#define ISPCCP2_VPCLK_FRACDIV 65536
+#define ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP 0x12
+#define ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP 0x16
+/* Max/Min size for CCP2 memory channel */
+#define ISPCCP2_LCM_HSIZE_COUNT_MIN 16
+#define ISPCCP2_LCM_HSIZE_COUNT_MAX 8191
+#define ISPCCP2_LCM_HSIZE_SKIP_MIN 0
+#define ISPCCP2_LCM_HSIZE_SKIP_MAX 8191
+#define ISPCCP2_LCM_VSIZE_MIN 1
+#define ISPCCP2_LCM_VSIZE_MAX 8191
+#define ISPCCP2_LCM_HWORDS_MIN 1
+#define ISPCCP2_LCM_HWORDS_MAX 4095
+#define ISPCCP2_LCM_CTRL_BURST_SIZE_32X 5
+#define ISPCCP2_LCM_CTRL_READ_THROTTLE_FULL 0
+#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 2
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 2
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 3
+#define ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 3
+#define ISPCCP2_LCM_CTRL_DST_PORT_VP 0
+#define ISPCCP2_LCM_CTRL_DST_PORT_MEM 1
+
+/* Set only the required bits */
+#define BIT_SET(var, shift, mask, val) \
+ do { \
+ var = ((var) & ~((mask) << (shift))) \
+ | ((val) << (shift)); \
+ } while (0)
+
+/*
+ * ccp2_print_status - Print current CCP2 module register values.
+ */
+#define CCP2_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###CCP2 " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_##name))
+
+static void ccp2_print_status(struct isp_ccp2_device *ccp2)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
+ dev_dbg(isp->dev, "-------------CCP2 Register dump-------------\n");
+
+ CCP2_PRINT_REGISTER(isp, SYSCONFIG);
+ CCP2_PRINT_REGISTER(isp, SYSSTATUS);
+ CCP2_PRINT_REGISTER(isp, LC01_IRQENABLE);
+ CCP2_PRINT_REGISTER(isp, LC01_IRQSTATUS);
+ CCP2_PRINT_REGISTER(isp, LC23_IRQENABLE);
+ CCP2_PRINT_REGISTER(isp, LC23_IRQSTATUS);
+ CCP2_PRINT_REGISTER(isp, LCM_IRQENABLE);
+ CCP2_PRINT_REGISTER(isp, LCM_IRQSTATUS);
+ CCP2_PRINT_REGISTER(isp, CTRL);
+ CCP2_PRINT_REGISTER(isp, LCx_CTRL(0));
+ CCP2_PRINT_REGISTER(isp, LCx_CODE(0));
+ CCP2_PRINT_REGISTER(isp, LCx_STAT_START(0));
+ CCP2_PRINT_REGISTER(isp, LCx_STAT_SIZE(0));
+ CCP2_PRINT_REGISTER(isp, LCx_SOF_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_EOF_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_START(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_SIZE(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_PING_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_PONG_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_OFST(0));
+ CCP2_PRINT_REGISTER(isp, LCM_CTRL);
+ CCP2_PRINT_REGISTER(isp, LCM_VSIZE);
+ CCP2_PRINT_REGISTER(isp, LCM_HSIZE);
+ CCP2_PRINT_REGISTER(isp, LCM_PREFETCH);
+ CCP2_PRINT_REGISTER(isp, LCM_SRC_ADDR);
+ CCP2_PRINT_REGISTER(isp, LCM_SRC_OFST);
+ CCP2_PRINT_REGISTER(isp, LCM_DST_ADDR);
+ CCP2_PRINT_REGISTER(isp, LCM_DST_OFST);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * ccp2_reset - Reset the CCP2
+ * @ccp2: pointer to ISP CCP2 device
+ */
+static void ccp2_reset(struct isp_ccp2_device *ccp2)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ int i = 0;
+
+ /* Reset the CSI1/CCP2B and wait for reset to complete */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG,
+ ISPCCP2_SYSCONFIG_SOFT_RESET);
+ while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSSTATUS) &
+ ISPCCP2_SYSSTATUS_RESET_DONE)) {
+ udelay(10);
+		if (i++ > 10) { /* try reading 10 times */
+ dev_warn(isp->dev,
+ "omap3_isp: timeout waiting for ccp2 reset\n");
+ break;
+ }
+ }
+}
+
+/*
+ * ccp2_pwr_cfg - Configure the power mode settings
+ * @ccp2: pointer to ISP CCP2 device
+ */
+static void ccp2_pwr_cfg(struct isp_ccp2_device *ccp2)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
+ isp_reg_writel(isp, ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SMART |
+ ((isp->revision == ISP_REVISION_15_0 && isp->autoidle) ?
+ ISPCCP2_SYSCONFIG_AUTO_IDLE : 0),
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG);
+}
+
+/*
+ * ccp2_if_enable - Enable CCP2 interface.
+ * @ccp2: pointer to ISP CCP2 device
+ * @enable: enable/disable flag
+ */
+static int ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ int ret;
+ int i;
+
+ if (enable && ccp2->vdds_csib) {
+ ret = regulator_enable(ccp2->vdds_csib);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable/Disable all the LCx channels */
+ for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
+ ISPCCP2_LCx_CTRL_CHAN_EN,
+ enable ? ISPCCP2_LCx_CTRL_CHAN_EN : 0);
+
+ /* Enable/Disable ccp2 interface in ccp2 mode */
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+ ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN,
+ enable ? (ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN) : 0);
+
+ if (!enable && ccp2->vdds_csib)
+ regulator_disable(ccp2->vdds_csib);
+
+ return 0;
+}
+
+/*
+ * ccp2_mem_enable - Enable CCP2 memory interface.
+ * @ccp2: pointer to ISP CCP2 device
+ * @enable: enable/disable flag
+ */
+static void ccp2_mem_enable(struct isp_ccp2_device *ccp2, u8 enable)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
+ if (enable)
+ ccp2_if_enable(ccp2, 0);
+
+ /* Enable/Disable ccp2 interface in ccp2 mode */
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+ ISPCCP2_CTRL_MODE, enable ? ISPCCP2_CTRL_MODE : 0);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL,
+ ISPCCP2_LCM_CTRL_CHAN_EN,
+ enable ? ISPCCP2_LCM_CTRL_CHAN_EN : 0);
+}
+
+/*
+ * ccp2_phyif_config - Initialize CCP2 phy interface config
+ * @ccp2: Pointer to ISP CCP2 device
+ * @pdata: CCP2 platform data
+ *
+ * Configure the CCP2 physical interface module from platform data.
+ *
+ * Returns -EIO if strobe is chosen in CSI1 mode, or 0 on success.
+ */
+static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
+ const struct isp_ccp2_platform_data *pdata)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 val;
+
+ /* CCP2B mode */
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL) |
+ ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE;
+ /* Data/strobe physical layer */
+ BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK,
+ pdata->phy_layer);
+ BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK,
+ pdata->strobe_clk_pol);
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+
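+	/* Read the control register back: if the MODE bit did not stick,
+	 * CCP2 mode isn't available on this ISP.
+	 */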
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+ if (!(val & ISPCCP2_CTRL_MODE)) {
+ if (pdata->ccp2_mode == ISP_CCP2_MODE_CCP2)
+ dev_warn(isp->dev, "OMAP3 CCP2 bus not available\n");
+ if (pdata->phy_layer == ISP_CCP2_PHY_DATA_STROBE)
+ /* Strobe mode requires CCP2 */
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * ccp2_vp_config - Initialize CCP2 video port interface.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @vpclk_div: Video port divisor
+ *
+ * Configure the CCP2 video port with the given clock divisor. The valid divisor
+ * values depend on the ISP revision:
+ *
+ * - revision 1.0 and 2.0 1 to 4
+ * - revision 15.0 1 to 65536
+ *
+ * The exact divisor value used might differ from the requested value, as ISP
+ * revision 15.0 represents the divisor as 65536 divided by an integer.
+ */
+static void ccp2_vp_config(struct isp_ccp2_device *ccp2,
+ unsigned int vpclk_div)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 val;
+
+ /* ISPCCP2_CTRL Video port */
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+ val |= ISPCCP2_CTRL_VP_ONLY_EN; /* Disable the memory write port */
+
+ if (isp->revision == ISP_REVISION_15_0) {
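+		/* Revision 15.0 programs a fractional divisor: the register
+		 * field holds 65536 / divisor.
+		 */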
+ vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 65536);
+ vpclk_div = min(ISPCCP2_VPCLK_FRACDIV / vpclk_div, 65535U);
+ BIT_SET(val, ISPCCP2_CTRL_VPCLK_DIV_SHIFT,
+ ISPCCP2_CTRL_VPCLK_DIV_MASK, vpclk_div);
+ } else {
+ vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 4);
+ BIT_SET(val, ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT,
+ ISPCCP2_CTRL_VP_OUT_CTRL_MASK, vpclk_div - 1);
+ }
+
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+}
+
+/*
+ * ccp2_lcx_config - Initialize CCP2 logical channel interface.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @config: Pointer to ISP LCx config structure.
+ *
+ * This will analyze the parameters passed through the interface config
+ * structure and configure the CSI1/CCP2 logical channel accordingly.
+ */
+static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
+ struct isp_interface_lcx_config *config)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 val, format;
+
+ switch (config->format) {
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP;
+ break;
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ default:
+ format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP; /* RAW10+VP */
+ break;
+ }
+ /* ISPCCP2_LCx_CTRL logical channel #0 */
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0))
+ | (ISPCCP2_LCx_CTRL_REGION_EN); /* Region */
+
+ if (isp->revision == ISP_REVISION_15_0) {
+ /* CRC */
+ BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0,
+ ISPCCP2_LCx_CTRL_CRC_MASK,
+ config->crc);
+ /* Format = RAW10+VP or RAW8+DPCM10+VP*/
+ BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0,
+ ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0, format);
+ } else {
+ BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT,
+ ISPCCP2_LCx_CTRL_CRC_MASK,
+ config->crc);
+
+ BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT,
+ ISPCCP2_LCx_CTRL_FORMAT_MASK, format);
+ }
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0));
+
+ /* ISPCCP2_DAT_START for logical channel #0 */
+ isp_reg_writel(isp, config->data_start << ISPCCP2_LCx_DAT_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_START(0));
+
+ /* ISPCCP2_DAT_SIZE for logical channel #0 */
+ isp_reg_writel(isp, config->data_size << ISPCCP2_LCx_DAT_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_SIZE(0));
+
+ /* Enable error IRQs for logical channel #0 */
+ val = ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
+
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQSTATUS);
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQENABLE, val);
+}
+
+/*
+ * ccp2_if_configure - Configure ccp2 with data from sensor
+ * @ccp2: Pointer to ISP CCP2 device
+ *
+ * Return 0 on success or a negative error code
+ */
+static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
+{
+ const struct isp_v4l2_subdevs_group *pdata;
+ struct v4l2_mbus_framefmt *format;
+ struct media_pad *pad;
+ struct v4l2_subdev *sensor;
+ u32 lines = 0;
+ int ret;
+
+ ccp2_pwr_cfg(ccp2);
+
+ pad = media_entity_remote_pad(&ccp2->pads[CCP2_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ pdata = sensor->host_priv;
+
+ ret = ccp2_phyif_config(ccp2, &pdata->bus.ccp2);
+ if (ret < 0)
+ return ret;
+
+ ccp2_vp_config(ccp2, pdata->bus.ccp2.vpclk_div + 1);
+
+ v4l2_subdev_call(sensor, sensor, g_skip_top_lines, &lines);
+
+ format = &ccp2->formats[CCP2_PAD_SINK];
+
+ ccp2->if_cfg.data_start = lines;
+ ccp2->if_cfg.crc = pdata->bus.ccp2.crc;
+ ccp2->if_cfg.format = format->code;
+ ccp2->if_cfg.data_size = format->height;
+
+ ccp2_lcx_config(ccp2, &ccp2->if_cfg);
+
+ return 0;
+}
+
+static int ccp2_adjust_bandwidth(struct isp_ccp2_device *ccp2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccp2);
+ const struct v4l2_mbus_framefmt *ofmt = &ccp2->formats[CCP2_PAD_SOURCE];
+ unsigned long l3_ick = pipe->l3_ick;
+ struct v4l2_fract *timeperframe;
+ unsigned int vpclk_div = 2;
+ unsigned int value;
+ u64 bound;
+ u64 area;
+
+ /* Compute the minimum clock divisor, based on the pipeline maximum
+ * data rate. This is an absolute lower bound if we don't want SBL
+ * overflows, so round the value up.
+ */
+ vpclk_div = max_t(unsigned int, DIV_ROUND_UP(l3_ick, pipe->max_rate),
+ vpclk_div);
+
+ /* Compute the maximum clock divisor, based on the requested frame rate.
+ * This is a soft lower bound to achieve a frame rate equal or higher
+ * than the requested value, so round the value down.
+ */
+ timeperframe = &pipe->max_timeperframe;
+
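+	/* width * height * fps gives the required pixel rate; dividing the
+	 * L3 clock by that rate (capped at the L3 clock itself) gives a
+	 * divisor that achieves a frame rate no lower than requested.
+	 */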
+ if (timeperframe->numerator) {
+ area = ofmt->width * ofmt->height;
+ bound = div_u64(area * timeperframe->denominator,
+ timeperframe->numerator);
+ value = min_t(u64, bound, l3_ick);
+ vpclk_div = max_t(unsigned int, l3_ick / value, vpclk_div);
+ }
+
+ dev_dbg(isp->dev, "%s: minimum clock divisor = %u\n", __func__,
+ vpclk_div);
+
+ return vpclk_div;
+}
+
+/*
+ * ccp2_mem_configure - Initialize CCP2 memory input/output interface
+ * @ccp2: Pointer to ISP CCP2 device
+ * @config: Pointer to ISP mem interface config structure
+ *
+ * This will analyze the parameters passed by the interface config
+ * structure, and configure the respective registers for proper
+ * CSI1/CCP2 memory input.
+ */
+static void ccp2_mem_configure(struct isp_ccp2_device *ccp2,
+ struct isp_interface_mem_config *config)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 sink_pixcode = ccp2->formats[CCP2_PAD_SINK].code;
+ u32 source_pixcode = ccp2->formats[CCP2_PAD_SOURCE].code;
+ unsigned int dpcm_decompress = 0;
+ u32 val, hwords;
+
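+	/* When the sink pad receives DPCM-compressed RAW8 data but the source
+	 * pad is configured for a different format, the memory channel must
+	 * decompress the data on the fly.
+	 */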
+ if (sink_pixcode != source_pixcode &&
+ sink_pixcode == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8)
+ dpcm_decompress = 1;
+
+ ccp2_pwr_cfg(ccp2);
+
+ /* Hsize, Skip */
+ isp_reg_writel(isp, ISPCCP2_LCM_HSIZE_SKIP_MIN |
+ (config->hsize_count << ISPCCP2_LCM_HSIZE_SHIFT),
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_HSIZE);
+
+ /* Vsize, no. of lines */
+ isp_reg_writel(isp, config->vsize_count << ISPCCP2_LCM_VSIZE_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_VSIZE);
+
+ if (ccp2->video_in.bpl_padding == 0)
+ config->src_ofst = 0;
+ else
+ config->src_ofst = ccp2->video_in.bpl_value;
+
+ isp_reg_writel(isp, config->src_ofst, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LCM_SRC_OFST);
+
+ /* Source and Destination formats */
+ val = ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 <<
+ ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT;
+
+ if (dpcm_decompress) {
+ /* source format is RAW8 */
+ val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 <<
+ ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
+
+ /* RAW8 + DPCM10 - simple predictor */
+ val |= ISPCCP2_LCM_CTRL_SRC_DPCM_PRED;
+
+ /* enable source DPCM decompression */
+ val |= ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 <<
+ ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT;
+ } else {
+ /* source format is RAW10 */
+ val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 <<
+ ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
+ }
+
+ /* Burst size to 32x64 */
+ val |= ISPCCP2_LCM_CTRL_BURST_SIZE_32X <<
+ ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT;
+
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL);
+
+ /* Prefetch setup */
+ if (dpcm_decompress)
+ hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
+ config->hsize_count) >> 3;
+ else
+ hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
+ config->hsize_count) >> 2;
+
+ isp_reg_writel(isp, hwords << ISPCCP2_LCM_PREFETCH_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_PREFETCH);
+
+ /* Video port */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+ ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE);
+ ccp2_vp_config(ccp2, ccp2_adjust_bandwidth(ccp2));
+
+ /* Clear LCM interrupts */
+ isp_reg_writel(isp, ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ |
+ ISPCCP2_LCM_IRQSTATUS_EOF_IRQ,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQSTATUS);
+
+ /* Enable LCM interrupts */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQENABLE,
+ ISPCCP2_LCM_IRQSTATUS_EOF_IRQ |
+ ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ);
+}
+
+/*
+ * ccp2_set_inaddr - Set the memory address of the input frame.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ *
+ * Configures the memory address from which the input frame is to be read.
+ */
+static void ccp2_set_inaddr(struct isp_ccp2_device *ccp2, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
+ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_SRC_ADDR);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
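+/*
+ * ccp2_isr_buffer - Handle a memory input end-of-frame interrupt
+ * @ccp2: Pointer to ISP CCP2 device
+ *
+ * Program the next queued buffer as the read source and, in single-shot
+ * mode, restart the pipeline when all its entities are ready.
+ */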
+static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+ struct isp_buffer *buffer;
+
+ buffer = omap3isp_video_buffer_next(&ccp2->video_in);
+ if (buffer != NULL)
+ ccp2_set_inaddr(ccp2, buffer->dma);
+
+ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+
+ if (ccp2->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
+ if (isp_pipeline_ready(pipe))
+ omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_SINGLESHOT);
+ }
+}
+
+/*
+ * omap3isp_ccp2_isr - Handle ISP CCP2 interrupts
+ * @ccp2: Pointer to ISP CCP2 device
+ *
+ * Handle the CCP2 LC01 and LCM interrupts.
+ */
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccp2);
+ static const u32 ISPCCP2_LC01_ERROR =
+ ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
+ u32 lcx_irqstatus, lcm_irqstatus;
+
+ /* First clear the interrupts */
+ lcx_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LC01_IRQSTATUS);
+ isp_reg_writel(isp, lcx_irqstatus, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LC01_IRQSTATUS);
+
+ lcm_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LCM_IRQSTATUS);
+ isp_reg_writel(isp, lcm_irqstatus, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LCM_IRQSTATUS);
+ /* Errors */
+ if (lcx_irqstatus & ISPCCP2_LC01_ERROR) {
+ pipe->error = true;
+ dev_dbg(isp->dev, "CCP2 err:%x\n", lcx_irqstatus);
+ return;
+ }
+
+ if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ) {
+ pipe->error = true;
+ dev_dbg(isp->dev, "CCP2 OCP err:%x\n", lcm_irqstatus);
+ }
+
+ if (omap3isp_module_sync_is_stopping(&ccp2->wait, &ccp2->stopping))
+ return;
+
+ /* Handle queued buffers on frame end interrupts */
+ if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
+ ccp2_isr_buffer(ccp2);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static const unsigned int ccp2_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+};
+
+/*
+ * __ccp2_get_format - helper function for getting ccp2 format
+ * @ccp2 : Pointer to ISP CCP2 device
+ * @fh : V4L2 subdev file handle
+ * @pad : pad number
+ * @which : wanted subdev format
+ * return format structure or NULL on error
+ */
+static struct v4l2_mbus_framefmt *
+__ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &ccp2->formats[pad];
+}
+
+/*
+ * ccp2_try_format - Handle try format by pad subdev method
+ * @ccp2 : Pointer to ISP CCP2 device
+ * @fh : V4L2 subdev file handle
+ * @pad : pad num
+ * @fmt : pointer to v4l2 mbus format structure
+ * @which : wanted subdev format
+ */
+static void ccp2_try_format(struct isp_ccp2_device *ccp2,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ switch (pad) {
+ case CCP2_PAD_SINK:
+ if (fmt->code != V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8)
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ if (ccp2->input == CCP2_INPUT_SENSOR) {
+ fmt->width = clamp_t(u32, fmt->width,
+ ISPCCP2_DAT_START_MIN,
+ ISPCCP2_DAT_START_MAX);
+ fmt->height = clamp_t(u32, fmt->height,
+ ISPCCP2_DAT_SIZE_MIN,
+ ISPCCP2_DAT_SIZE_MAX);
+ } else if (ccp2->input == CCP2_INPUT_MEMORY) {
+ fmt->width = clamp_t(u32, fmt->width,
+ ISPCCP2_LCM_HSIZE_COUNT_MIN,
+ ISPCCP2_LCM_HSIZE_COUNT_MAX);
+ fmt->height = clamp_t(u32, fmt->height,
+ ISPCCP2_LCM_VSIZE_MIN,
+ ISPCCP2_LCM_VSIZE_MAX);
+ }
+ break;
+
+ case CCP2_PAD_SOURCE:
+ /* Source format - copy sink format and change pixel code
+ * to SGRBG10_1X10 as we don't support CCP2 write to memory.
+		 * This should be changed when the CCP2 write-to-memory
+		 * feature is added.
+ */
+ format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/*
+ * ccp2_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == CCP2_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(ccp2_fmts))
+ return -EINVAL;
+
+ code->code = ccp2_fmts[code->index];
+ } else {
+ if (code->index != 0)
+ return -EINVAL;
+
+ format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
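+/*
+ * ccp2_enum_frame_size - Handle frame size enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fse : pointer to v4l2_subdev_frame_size_enum structure
+ * return -EINVAL or zero on success
+ */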
+static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ccp2_try_format(ccp2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ccp2_try_format(ccp2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ccp2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ccp2_get_format(ccp2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ccp2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt : pointer to v4l2 subdev format structure
+ * returns zero
+ */
+static int ccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ccp2_get_format(ccp2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ccp2_try_format(ccp2, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CCP2_PAD_SINK) {
+ format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ ccp2_try_format(ccp2, fh, CCP2_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * ccp2_init_formats - Initialize formats on all pads
+ * @sd: ISP CCP2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CCP2_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ccp2_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/*
+ * ccp2_s_stream - Enable/Disable streaming on ccp2 subdev
+ * @sd : pointer to v4l2 subdev structure
+ * @enable: ISP pipeline stream state (ISP_PIPELINE_STREAM_*)
+ * return zero on success or a negative error code otherwise
+ */
+static int ccp2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = to_isp_device(ccp2);
+ struct device *dev = to_device(ccp2);
+ int ret;
+
+ if (ccp2->state == ISP_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+ atomic_set(&ccp2->stopping, 0);
+ }
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (ccp2->phy) {
+ ret = omap3isp_csiphy_acquire(ccp2->phy);
+ if (ret < 0)
+ return ret;
+ }
+
+ ccp2_if_configure(ccp2);
+ ccp2_print_status(ccp2);
+
+ /* Enable CSI1/CCP2 interface */
+ ret = ccp2_if_enable(ccp2, 1);
+ if (ret < 0) {
+ if (ccp2->phy)
+ omap3isp_csiphy_release(ccp2->phy);
+ return ret;
+ }
+ break;
+
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ if (ccp2->state != ISP_PIPELINE_STREAM_SINGLESHOT) {
+ struct v4l2_mbus_framefmt *format;
+
+ format = &ccp2->formats[CCP2_PAD_SINK];
+
+ ccp2->mem_cfg.hsize_count = format->width;
+ ccp2->mem_cfg.vsize_count = format->height;
+ ccp2->mem_cfg.src_ofst = 0;
+
+ ccp2_mem_configure(ccp2, &ccp2->mem_cfg);
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI1_READ);
+ ccp2_print_status(ccp2);
+ }
+ ccp2_mem_enable(ccp2, 1);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ if (omap3isp_module_sync_idle(&sd->entity, &ccp2->wait,
+ &ccp2->stopping))
+ dev_dbg(dev, "%s: module stop timeout.\n", sd->name);
+ if (ccp2->input == CCP2_INPUT_MEMORY) {
+ ccp2_mem_enable(ccp2, 0);
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI1_READ);
+ } else if (ccp2->input == CCP2_INPUT_SENSOR) {
+ /* Disable CSI1/CCP2 interface */
+ ccp2_if_enable(ccp2, 0);
+ if (ccp2->phy)
+ omap3isp_csiphy_release(ccp2->phy);
+ }
+ break;
+ }
+
+ ccp2->state = enable;
+ return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops ccp2_sd_video_ops = {
+ .s_stream = ccp2_s_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops ccp2_sd_pad_ops = {
+ .enum_mbus_code = ccp2_enum_mbus_code,
+ .enum_frame_size = ccp2_enum_frame_size,
+ .get_fmt = ccp2_get_format,
+ .set_fmt = ccp2_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops ccp2_sd_ops = {
+ .video = &ccp2_sd_video_ops,
+ .pad = &ccp2_sd_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops ccp2_sd_internal_ops = {
+ .open = ccp2_init_formats,
+};
+
+/* --------------------------------------------------------------------------
+ * ISP ccp2 video device node
+ */
+
+/*
+ * ccp2_video_queue - Queue video buffer.
+ * @video : Pointer to isp video structure
+ * @buffer: Pointer to isp_buffer structure
+ * return zero
+ */
+static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+ struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
+
+ ccp2_set_inaddr(ccp2, buffer->dma);
+ return 0;
+}
+
+static const struct isp_video_operations ccp2_video_ops = {
+ .queue = ccp2_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ccp2_link_setup - Setup ccp2 connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags : Link flags
+ * return -EINVAL or -EBUSY on error, or zero on success
+ */
+static int ccp2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case CCP2_PAD_SINK | MEDIA_ENT_T_DEVNODE:
+ /* read from memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccp2->input == CCP2_INPUT_SENSOR)
+ return -EBUSY;
+ ccp2->input = CCP2_INPUT_MEMORY;
+ } else {
+ if (ccp2->input == CCP2_INPUT_MEMORY)
+ ccp2->input = CCP2_INPUT_NONE;
+ }
+ break;
+
+ case CCP2_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* read from sensor/phy */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccp2->input == CCP2_INPUT_MEMORY)
+ return -EBUSY;
+ ccp2->input = CCP2_INPUT_SENSOR;
+ } else {
+ if (ccp2->input == CCP2_INPUT_SENSOR)
+ ccp2->input = CCP2_INPUT_NONE;
+		}
+		break;
+
+ case CCP2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* write to video port/ccdc */
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ ccp2->output = CCP2_OUTPUT_CCDC;
+ else
+ ccp2->output = CCP2_OUTPUT_NONE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ccp2_media_ops = {
+ .link_setup = ccp2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * omap3isp_ccp2_unregister_entities - Unregister media entities: subdev
+ * @ccp2: Pointer to ISP CCP2 device
+ */
+void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2)
+{
+ v4l2_device_unregister_subdev(&ccp2->subdev);
+ omap3isp_video_unregister(&ccp2->video_in);
+}
+
+/*
+ * omap3isp_ccp2_register_entities - Register the subdev media entity
+ * @ccp2: Pointer to ISP CCP2 device
+ * @vdev: Pointer to v4l device
+ * return negative error code or zero on success
+ */
+int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&ccp2->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_ccp2_unregister_entities(ccp2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP ccp2 initialisation and cleanup
+ */
+
+/*
+ * ccp2_init_entities - Initialize ccp2 subdev and media entity.
+ * @ccp2: Pointer to ISP CCP2 device
+ * return negative error code or zero on success
+ */
+static int ccp2_init_entities(struct isp_ccp2_device *ccp2)
+{
+ struct v4l2_subdev *sd = &ccp2->subdev;
+ struct media_pad *pads = ccp2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ccp2->input = CCP2_INPUT_NONE;
+ ccp2->output = CCP2_OUTPUT_NONE;
+
+ v4l2_subdev_init(sd, &ccp2_sd_ops);
+ sd->internal_ops = &ccp2_sd_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP CCP2", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, ccp2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ pads[CCP2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ccp2_media_ops;
+ ret = media_entity_init(me, CCP2_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ ccp2_init_formats(sd, NULL);
+
+ /*
+ * The CCP2 has weird line alignment requirements, possibly caused by
+ * DPCM8 decompression. Line length for data read from memory must be a
+ * multiple of 128 bits (16 bytes) in continuous mode (when no padding
+ * is present at end of lines). Additionally, if padding is used, the
+ * padded line length must be a multiple of 32 bytes. To simplify the
+	 * implementation we use a fixed 32-byte alignment regardless of the
+	 * input format and width. If strict 128-bit alignment support is
+	 * required, ispvideo will need to be made aware of these special dual
+	 * alignment requirements.
+ */
+ ccp2->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ccp2->video_in.bpl_alignment = 32;
+ ccp2->video_in.bpl_max = 0xffffffe0;
+ ccp2->video_in.isp = to_isp_device(ccp2);
+ ccp2->video_in.ops = &ccp2_video_ops;
+ ccp2->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+ ret = omap3isp_video_init(&ccp2->video_in, "CCP2");
+ if (ret < 0)
+ goto error_video;
+
+ /* Connect the video node to the ccp2 subdev. */
+ ret = media_entity_create_link(&ccp2->video_in.video.entity, 0,
+ &ccp2->subdev.entity, CCP2_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap3isp_video_cleanup(&ccp2->video_in);
+error_video:
+ media_entity_cleanup(&ccp2->subdev.entity);
+ return ret;
+}
+
+/*
+ * omap3isp_ccp2_init - CCP2 initialization.
+ * @isp : Pointer to ISP device
+ * return negative error code or zero on success
+ */
+int omap3isp_ccp2_init(struct isp_device *isp)
+{
+ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+ int ret;
+
+ init_waitqueue_head(&ccp2->wait);
+
+ /*
+ * On the OMAP34xx the CSI1 receiver is operated in the CSIb IO
+ * complex, which is powered by vdds_csib power rail. Hence the
+ * request for the regulator.
+ *
+ * On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
+ * the CSI2c or CSI2a receivers. The PHY then needs to be explicitly
+ * configured.
+ *
+ * TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
+ */
+ if (isp->revision == ISP_REVISION_2_0) {
+ ccp2->vdds_csib = devm_regulator_get(isp->dev, "vdds_csib");
+ if (IS_ERR(ccp2->vdds_csib)) {
+ dev_dbg(isp->dev,
+ "Could not get regulator vdds_csib\n");
+ ccp2->vdds_csib = NULL;
+ }
+ } else if (isp->revision == ISP_REVISION_15_0) {
+ ccp2->phy = &isp->isp_csiphy1;
+ }
+
+ ret = ccp2_init_entities(ccp2);
+ if (ret < 0)
+ return ret;
+
+ ccp2_reset(ccp2);
+ return 0;
+}
+
+/*
+ * omap3isp_ccp2_cleanup - CCP2 un-initialization
+ * @isp : Pointer to ISP device
+ */
+void omap3isp_ccp2_cleanup(struct isp_device *isp)
+{
+ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+
+ omap3isp_video_cleanup(&ccp2->video_in);
+ media_entity_cleanup(&ccp2->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/ispccp2.h b/drivers/media/platform/omap3isp/ispccp2.h
new file mode 100644
index 00000000000..76d65f4576e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccp2.h
@@ -0,0 +1,98 @@
+/*
+ * ispccp2.h
+ *
+ * TI OMAP3 ISP - CCP2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CCP2_H
+#define OMAP3_ISP_CCP2_H
+
+#include <linux/videodev2.h>
+
+struct isp_device;
+struct isp_csiphy;
+
+/* Sink and source ccp2 pads */
+#define CCP2_PAD_SINK 0
+#define CCP2_PAD_SOURCE 1
+#define CCP2_PADS_NUM 2
+
+/* CCP2 input media entity */
+enum ccp2_input_entity {
+ CCP2_INPUT_NONE,
+ CCP2_INPUT_SENSOR,
+ CCP2_INPUT_MEMORY,
+};
+
+/* CCP2 output media entity */
+enum ccp2_output_entity {
+ CCP2_OUTPUT_NONE,
+ CCP2_OUTPUT_CCDC,
+ CCP2_OUTPUT_MEMORY,
+};
+
+
+/* Logical channel configuration */
+struct isp_interface_lcx_config {
+ int crc;
+ u32 data_start;
+ u32 data_size;
+ u32 format;
+};
+
+/* Memory channel configuration */
+struct isp_interface_mem_config {
+ u32 dst_port;
+ u32 vsize_count;
+ u32 hsize_count;
+ u32 src_ofst;
+ u32 dst_ofst;
+};
+
+/* CCP2 device */
+struct isp_ccp2_device {
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt formats[CCP2_PADS_NUM];
+ struct media_pad pads[CCP2_PADS_NUM];
+
+ enum ccp2_input_entity input;
+ enum ccp2_output_entity output;
+ struct isp_interface_lcx_config if_cfg;
+ struct isp_interface_mem_config mem_cfg;
+ struct isp_video video_in;
+ struct isp_csiphy *phy;
+ struct regulator *vdds_csib;
+ enum isp_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+/* Function declarations */
+int omap3isp_ccp2_init(struct isp_device *isp);
+void omap3isp_ccp2_cleanup(struct isp_device *isp);
+int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+ struct v4l2_device *vdev);
+void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2);
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2);
+
+#endif /* OMAP3_ISP_CCP2_H */
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
new file mode 100644
index 00000000000..5a2e47e58b8
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -0,0 +1,1329 @@
+/*
+ * ispcsi2.c
+ *
+ * TI OMAP3 ISP - CSI2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/mm.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispcsi2.h"
+
+/*
+ * csi2_if_enable - Enable CSI2 Receiver interface.
+ * @enable: enable flag
+ *
+ */
+static void csi2_if_enable(struct isp_device *isp,
+ struct isp_csi2_device *csi2, u8 enable)
+{
+ struct isp_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
+
+ isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_CTRL, ISPCSI2_CTRL_IF_EN,
+ enable ? ISPCSI2_CTRL_IF_EN : 0);
+
+ currctrl->if_enable = enable;
+}
+
+/*
+ * csi2_recv_config - CSI2 receiver module configuration.
+ * @currctrl: isp_csi2_ctrl_cfg structure
+ *
+ */
+static void csi2_recv_config(struct isp_device *isp,
+ struct isp_csi2_device *csi2,
+ struct isp_csi2_ctrl_cfg *currctrl)
+{
+ u32 reg;
+
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTRL);
+
+ if (currctrl->frame_mode)
+ reg |= ISPCSI2_CTRL_FRAME;
+ else
+ reg &= ~ISPCSI2_CTRL_FRAME;
+
+ if (currctrl->vp_clk_enable)
+ reg |= ISPCSI2_CTRL_VP_CLK_EN;
+ else
+ reg &= ~ISPCSI2_CTRL_VP_CLK_EN;
+
+ if (currctrl->vp_only_enable)
+ reg |= ISPCSI2_CTRL_VP_ONLY_EN;
+ else
+ reg &= ~ISPCSI2_CTRL_VP_ONLY_EN;
+
+ reg &= ~ISPCSI2_CTRL_VP_OUT_CTRL_MASK;
+ reg |= currctrl->vp_out_ctrl << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT;
+
+ if (currctrl->ecc_enable)
+ reg |= ISPCSI2_CTRL_ECC_EN;
+ else
+ reg &= ~ISPCSI2_CTRL_ECC_EN;
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTRL);
+}
+
+static const unsigned int csi2_input_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_YUYV8_2X8,
+};
+
+/* Setting the format on the CSI2 requires a mapping function that takes
+ * the following inputs:
+ * - 3 different formats (at this time)
+ * - 2 destinations (mem, vp+mem) (vp only handled separately)
+ * - 2 decompression options (on, off)
+ * - 2 ISP revisions (certain formats must be handled differently on OMAP3630)
+ * Output should be CSI2 frame format code
+ * Array indices as follows: [format][dest][decompr][is_3630]
+ * Not all combinations are valid. 0 means invalid.
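+ * For example, __csi2_fmt_map[1][1][1][0] - a RAW10 DPCM8 input sent to
+ * both memory and the video port, with DPCM decompression, on a non-3630
+ * ISP - maps to CSI2_PIX_FMT_RAW8_DPCM10_VP.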
+ */
+static const u16 __csi2_fmt_map[3][2][2][2] = {
+ /* RAW10 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW10_EXP16, CSI2_PIX_FMT_RAW10_EXP16 },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW10_EXP16_VP,
+ CSI2_PIX_FMT_RAW10_EXP16_VP },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ },
+ /* RAW10 DPCM8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW8, CSI2_USERDEF_8BIT_DATA1 },
+ /* DPCM decompression */
+ { CSI2_PIX_FMT_RAW8_DPCM10_EXP16,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10 },
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW8_VP,
+ CSI2_PIX_FMT_RAW8_VP },
+ /* DPCM decompression */
+ { CSI2_PIX_FMT_RAW8_DPCM10_VP,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP },
+ },
+ },
+ /* YUYV8 2X8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_YUV422_8BIT,
+ CSI2_PIX_FMT_YUV422_8BIT },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_YUV422_8BIT_VP,
+ CSI2_PIX_FMT_YUV422_8BIT_VP },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ },
+};
+
+/*
+ * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
+ * @csi2: ISP CSI2 device
+ *
+ * Returns CSI2 physical format id
+ */
+static u16 csi2_ctx_map_format(struct isp_csi2_device *csi2)
+{
+ const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
+ int fmtidx, destidx, is_3630;
+
+ switch (fmt->code) {
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ fmtidx = 0;
+ break;
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+ fmtidx = 1;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ fmtidx = 2;
+ break;
+ default:
+ WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
+ fmt->code);
+ return 0;
+ }
+
+ if (!(csi2->output & CSI2_OUTPUT_CCDC) &&
+ !(csi2->output & CSI2_OUTPUT_MEMORY)) {
+ /* Neither output enabled is a valid combination */
+ return CSI2_PIX_FMT_OTHERS;
+ }
+
+ /* If we need to skip frames at the beginning of the stream disable the
+ * video port to avoid sending the skipped frames to the CCDC.
+ */
+ destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_CCDC);
+ is_3630 = csi2->isp->revision == ISP_REVISION_15_0;
+
+ return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress][is_3630];
+}
+
+/*
+ * csi2_set_outaddr - Set memory address to save output image
+ * @csi2: Pointer to ISP CSI2a device.
+ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void csi2_set_outaddr(struct isp_csi2_device *csi2, u32 addr)
+{
+ struct isp_device *isp = csi2->isp;
+ struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[0];
+
+ ctx->ping_addr = addr;
+ ctx->pong_addr = addr;
+ isp_reg_writel(isp, ctx->ping_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
+ isp_reg_writel(isp, ctx->pong_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
+}
+
+/*
+ * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
+ * be enabled by CSI2.
+ * @format_id: mapped format id
+ *
+ */
+static inline int is_usr_def_mapping(u32 format_id)
+{
+ return (format_id & 0x40) ? 1 : 0;
+}
+
+/*
+ * csi2_ctx_enable - Enable specified CSI2 context
+ * @ctxnum: Context number, valid values are 0 to 7.
+ * @enable: enable
+ *
+ */
+static void csi2_ctx_enable(struct isp_device *isp,
+ struct isp_csi2_device *csi2, u8 ctxnum, u8 enable)
+{
+ struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
+ unsigned int skip = 0;
+ u32 reg;
+
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
+
+ if (enable) {
+ if (csi2->frame_skip)
+ skip = csi2->frame_skip;
+ else if (csi2->output & CSI2_OUTPUT_MEMORY)
+ skip = 1;
+
+ reg &= ~ISPCSI2_CTX_CTRL1_COUNT_MASK;
+ reg |= ISPCSI2_CTX_CTRL1_COUNT_UNLOCK
+ | (skip << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
+ | ISPCSI2_CTX_CTRL1_CTX_EN;
+ } else {
+ reg &= ~ISPCSI2_CTX_CTRL1_CTX_EN;
+ }
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
+ ctx->enabled = enable;
+}
+
+/*
+ * csi2_ctx_config - CSI2 context configuration.
+ * @ctx: context configuration
+ *
+ */
+static void csi2_ctx_config(struct isp_device *isp,
+ struct isp_csi2_device *csi2,
+ struct isp_csi2_ctx_cfg *ctx)
+{
+ u32 reg;
+
+ /* Set up CSI2_CTx_CTRL1 */
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
+
+ if (ctx->eof_enabled)
+ reg |= ISPCSI2_CTX_CTRL1_EOF_EN;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL1_EOF_EN;
+
+ if (ctx->eol_enabled)
+ reg |= ISPCSI2_CTX_CTRL1_EOL_EN;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL1_EOL_EN;
+
+ if (ctx->checksum_enabled)
+ reg |= ISPCSI2_CTX_CTRL1_CS_EN;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL1_CS_EN;
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
+
+ /* Set up CSI2_CTx_CTRL2 */
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
+
+ reg &= ~(ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK);
+ reg |= ctx->virtual_id << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
+
+ reg &= ~(ISPCSI2_CTX_CTRL2_FORMAT_MASK);
+ reg |= ctx->format_id << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT;
+
+ if (ctx->dpcm_decompress) {
+ if (ctx->dpcm_predictor)
+ reg |= ISPCSI2_CTX_CTRL2_DPCM_PRED;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL2_DPCM_PRED;
+ }
+
+ if (is_usr_def_mapping(ctx->format_id)) {
+ reg &= ~ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK;
+ reg |= 2 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
+ }
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
+
+ /* Set up CSI2_CTx_CTRL3 */
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
+ reg &= ~(ISPCSI2_CTX_CTRL3_ALPHA_MASK);
+ reg |= (ctx->alpha << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT);
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
+
+ /* Set up CSI2_CTx_DAT_OFST */
+ reg = isp_reg_readl(isp, csi2->regs1,
+ ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
+ reg &= ~ISPCSI2_CTX_DAT_OFST_OFST_MASK;
+ reg |= ctx->data_offset << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT;
+ isp_reg_writel(isp, reg, csi2->regs1,
+ ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
+
+ isp_reg_writel(isp, ctx->ping_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
+
+ isp_reg_writel(isp, ctx->pong_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
+}
+
+/*
+ * csi2_timing_config - CSI2 timing configuration.
+ * @timing: csi2_timing_cfg structure
+ */
+static void csi2_timing_config(struct isp_device *isp,
+ struct isp_csi2_device *csi2,
+ struct isp_csi2_timing_cfg *timing)
+{
+ u32 reg;
+
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_TIMING);
+
+ if (timing->force_rx_mode)
+ reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
+ else
+ reg &= ~ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
+
+ if (timing->stop_state_16x)
+ reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
+ else
+ reg &= ~ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
+
+ if (timing->stop_state_4x)
+ reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
+ else
+ reg &= ~ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
+
+ reg &= ~ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(timing->ionum);
+ reg |= timing->stop_state_counter <<
+ ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(timing->ionum);
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_TIMING);
+}
+
+/*
+ * csi2_irq_ctx_set - Enables CSI2 Context IRQs.
+ * @enable: Enable/disable CSI2 Context interrupts
+ */
+static void csi2_irq_ctx_set(struct isp_device *isp,
+ struct isp_csi2_device *csi2, int enable)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ isp_reg_writel(isp, ISPCSI2_CTX_IRQSTATUS_FE_IRQ, csi2->regs1,
+ ISPCSI2_CTX_IRQSTATUS(i));
+ if (enable)
+ isp_reg_set(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
+ else
+ isp_reg_clr(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
+ }
+}
+
+/*
+ * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
+ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
+ */
+static void csi2_irq_complexio1_set(struct isp_device *isp,
+ struct isp_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT |
+ ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM5 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC5 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM4 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC4 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM3 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC3 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM2 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC2 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM1 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC1 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS1;
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
+ if (enable)
+ reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
+ else
+ reg = 0;
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
+}
+
+/*
+ * csi2_irq_status_set - Enables CSI2 Status IRQs.
+ * @enable: Enable/disable CSI2 Status interrupts
+ */
+static void csi2_irq_status_set(struct isp_device *isp,
+ struct isp_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
+ ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ |
+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ |
+ ISPCSI2_IRQSTATUS_CONTEXT(0);
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQSTATUS);
+ if (enable)
+ reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQENABLE);
+ else
+ reg = 0;
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQENABLE);
+}
+
+/*
+ * omap3isp_csi2_reset - Resets the CSI2 module.
+ *
+ * Must be called with the phy lock held.
+ *
+ * Returns 0 if successful, -ENODEV if the module isn't available, or -EBUSY
+ * if the PHY is in use or the reset doesn't complete in time.
+ */
+int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
+{
+ struct isp_device *isp = csi2->isp;
+ u8 soft_reset_retries = 0;
+ u32 reg;
+ int i;
+
+ if (!csi2->available)
+ return -ENODEV;
+
+ if (csi2->phy->phy_in_use)
+ return -EBUSY;
+
+ isp_reg_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+ ISPCSI2_SYSCONFIG_SOFT_RESET);
+
+ do {
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSSTATUS) &
+ ISPCSI2_SYSSTATUS_RESET_DONE;
+ if (reg == ISPCSI2_SYSSTATUS_RESET_DONE)
+ break;
+ soft_reset_retries++;
+ if (soft_reset_retries < 5)
+ udelay(100);
+ } while (soft_reset_retries < 5);
+
+ if (soft_reset_retries == 5) {
+ dev_err(isp->dev, "CSI2: Soft reset try count exceeded!\n");
+ return -EBUSY;
+ }
+
+ if (isp->revision == ISP_REVISION_15_0)
+ isp_reg_set(isp, csi2->regs1, ISPCSI2_PHY_CFG,
+ ISPCSI2_PHY_CFG_RESET_CTRL);
+
+ i = 100;
+ do {
+ reg = isp_reg_readl(isp, csi2->phy->phy_regs, ISPCSIPHY_REG1)
+ & ISPCSIPHY_REG1_RESET_DONE_CTRLCLK;
+ if (reg == ISPCSIPHY_REG1_RESET_DONE_CTRLCLK)
+ break;
+ udelay(100);
+ } while (--i > 0);
+
+ if (i == 0) {
+ dev_err(isp->dev,
+ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+ return -EBUSY;
+ }
+
+ if (isp->autoidle)
+ isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+ ISPCSI2_SYSCONFIG_AUTO_IDLE,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART |
+ ((isp->revision == ISP_REVISION_15_0) ?
+ ISPCSI2_SYSCONFIG_AUTO_IDLE : 0));
+ else
+ isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+ ISPCSI2_SYSCONFIG_AUTO_IDLE,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO);
+
+ return 0;
+}
+
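+/*
+ * csi2_configure - Configure the CSI2 receiver
+ * @csi2: Pointer to ISP CSI2 device
+ *
+ * Set up the receiver control, timing and context 0 registers from the
+ * sensor platform data and the active formats. Return -EBUSY if context 0
+ * or the interface is already enabled, zero otherwise.
+ */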
+static int csi2_configure(struct isp_csi2_device *csi2)
+{
+ const struct isp_v4l2_subdevs_group *pdata;
+ struct isp_device *isp = csi2->isp;
+ struct isp_csi2_timing_cfg *timing = &csi2->timing[0];
+ struct v4l2_subdev *sensor;
+ struct media_pad *pad;
+
+ /*
+	 * CSI2 fields that can be updated while the context or the
+	 * interface is enabled are not currently updated dynamically,
+	 * so do not allow reconfiguration if either has been enabled.
+ */
+ if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
+ return -EBUSY;
+
+ pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ pdata = sensor->host_priv;
+
+ csi2->frame_skip = 0;
+ v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
+
+ csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
+ csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE;
+ csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
+
+ timing->ionum = 1;
+ timing->force_rx_mode = 1;
+ timing->stop_state_16x = 1;
+ timing->stop_state_4x = 1;
+ timing->stop_state_counter = 0x1FF;
+
+ /*
+ * The CSI2 receiver can't do any format conversion except DPCM
+ * decompression, so every set_format call configures both pads
+	 * and enables DPCM decompression as a special case.
+ */
+ if (csi2->formats[CSI2_PAD_SINK].code !=
+ csi2->formats[CSI2_PAD_SOURCE].code)
+ csi2->dpcm_decompress = true;
+ else
+ csi2->dpcm_decompress = false;
+
+ csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);
+
+ if (csi2->video_out.bpl_padding == 0)
+ csi2->contexts[0].data_offset = 0;
+ else
+ csi2->contexts[0].data_offset = csi2->video_out.bpl_value;
+
+ /*
+ * Enable end of frame and end of line signals generation for
+ * context 0. These signals are generated from CSI2 receiver to
+ * qualify the last pixel of a frame and the last pixel of a line.
+ * Without enabling the signals CSI2 receiver writes data to memory
+ * beyond buffer size and/or data line offset is not handled correctly.
+ */
+ csi2->contexts[0].eof_enabled = 1;
+ csi2->contexts[0].eol_enabled = 1;
+
+ csi2_irq_complexio1_set(isp, csi2, 1);
+ csi2_irq_ctx_set(isp, csi2, 1);
+ csi2_irq_status_set(isp, csi2, 1);
+
+ /* Set configuration (timings, format and links) */
+ csi2_timing_config(isp, csi2, timing);
+ csi2_recv_config(isp, csi2, &csi2->ctrl);
+ csi2_ctx_config(isp, csi2, &csi2->contexts[0]);
+
+ return 0;
+}
+
+/*
+ * csi2_print_status - Prints CSI2 debug information.
+ */
+#define CSI2_PRINT_REGISTER(isp, regs, name)\
+ dev_dbg(isp->dev, "###CSI2 " #name "=0x%08x\n", \
+ isp_reg_readl(isp, regs, ISPCSI2_##name))
+
+static void csi2_print_status(struct isp_csi2_device *csi2)
+{
+ struct isp_device *isp = csi2->isp;
+
+ if (!csi2->available)
+ return;
+
+ dev_dbg(isp->dev, "-------------CSI2 Register dump-------------\n");
+
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSCONFIG);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSSTATUS);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQENABLE);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQSTATUS);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTRL);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_H);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, GNQ);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_CFG);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQSTATUS);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, SHORT_PACKET);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQENABLE);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_P);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, TIMING);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL1(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL2(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_OFST(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PING_ADDR(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PONG_ADDR(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQENABLE(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQSTATUS(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL3(0));
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * csi2_isr_buffer - Handle buffer completion at end-of-frame
+ * when writing to memory.
+ */
+static void csi2_isr_buffer(struct isp_csi2_device *csi2)
+{
+ struct isp_device *isp = csi2->isp;
+ struct isp_buffer *buffer;
+
+ csi2_ctx_enable(isp, csi2, 0, 0);
+
+ buffer = omap3isp_video_buffer_next(&csi2->video_out);
+
+ /*
+ * Let video queue operation restart engine if there is an underrun
+ * condition.
+ */
+ if (buffer == NULL)
+ return;
+
+ csi2_set_outaddr(csi2, buffer->dma);
+ csi2_ctx_enable(isp, csi2, 0, 1);
+}
+
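+/*
+ * csi2_isr_ctx - Handle a CSI2 context interrupt
+ * @csi2: Pointer to ISP CSI2 device
+ * @ctx: Context configuration
+ *
+ * Acknowledge the context interrupts and, on a frame end interrupt, either
+ * count down the frames left to skip or hand the completed buffer over to
+ * the video queue.
+ */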
+static void csi2_isr_ctx(struct isp_csi2_device *csi2,
+ struct isp_csi2_ctx_cfg *ctx)
+{
+ struct isp_device *isp = csi2->isp;
+ unsigned int n = ctx->ctxnum;
+ u32 status;
+
+ status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
+ isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
+
+ if (!(status & ISPCSI2_CTX_IRQSTATUS_FE_IRQ))
+ return;
+
+ /* Skip interrupts until we reach the frame skip count. The CSI2 will be
+ * automatically disabled, as the frame skip count has been programmed
+ * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
+ *
+ * It would have been nice to rely on the FRAME_NUMBER interrupt instead
+ * but it turned out that the interrupt is only generated when the CSI2
+ * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
+ * correctly and reaches 0 when data is forwarded to the video port only
+ * but no interrupt arrives). Maybe a CSI2 hardware bug.
+ */
+ if (csi2->frame_skip) {
+ csi2->frame_skip--;
+ if (csi2->frame_skip == 0) {
+ ctx->format_id = csi2_ctx_map_format(csi2);
+ csi2_ctx_config(isp, csi2, ctx);
+ csi2_ctx_enable(isp, csi2, n, 1);
+ }
+ return;
+ }
+
+ if (csi2->output & CSI2_OUTPUT_MEMORY)
+ csi2_isr_buffer(csi2);
+}
+
+/*
+ * omap3isp_csi2_isr - CSI2 interrupt handling.
+ */
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ u32 csi2_irqstatus, cpxio1_irqstatus;
+ struct isp_device *isp = csi2->isp;
+
+ if (!csi2->available)
+ return;
+
+ csi2_irqstatus = isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQSTATUS);
+ isp_reg_writel(isp, csi2_irqstatus, csi2->regs1, ISPCSI2_IRQSTATUS);
+
+ /* Failure Cases */
+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ) {
+ cpxio1_irqstatus = isp_reg_readl(isp, csi2->regs1,
+ ISPCSI2_PHY_IRQSTATUS);
+ isp_reg_writel(isp, cpxio1_irqstatus,
+ csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
+ dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ "
+ "%x\n", cpxio1_irqstatus);
+ pipe->error = true;
+ }
+
+ if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
+ dev_dbg(isp->dev, "CSI2 Err:"
+ " OCP:%d,"
+ " Short_pack:%d,"
+ " ECC:%d,"
+ " CPXIO2:%d,"
+ " FIFO_OVF:%d,"
+ "\n",
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ? 1 : 0);
+ pipe->error = true;
+ }
+
+ if (omap3isp_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
+ return;
+
+ /* Successful cases */
+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0))
+ csi2_isr_ctx(csi2, &csi2->contexts[0]);
+
+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ)
+ dev_dbg(isp->dev, "CSI2: ECC correction done\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+/*
+ * csi2_queue - Queues the first buffer when using memory output
+ * @video: The video node
+ * @buffer: buffer to queue
+ */
+static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+ struct isp_device *isp = video->isp;
+ struct isp_csi2_device *csi2 = &isp->isp_csi2a;
+
+ csi2_set_outaddr(csi2, buffer->dma);
+
+ /*
+ * If streaming was enabled before there was a buffer queued
+ * or underrun happened in the ISR, the hardware was not enabled
+ * and DMA queue flag ISP_VIDEO_DMAQUEUE_UNDERRUN is still set.
+ * Enable it now.
+ */
+ if (csi2->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
+ /* Enable / disable context 0 and IRQs */
+ csi2_if_enable(isp, csi2, 1);
+ csi2_ctx_enable(isp, csi2, 0, 1);
+ isp_video_dmaqueue_flags_clr(&csi2->video_out);
+ }
+
+ return 0;
+}
+
+static const struct isp_video_operations csi2_ispvideo_ops = {
+ .queue = csi2_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
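+/*
+ * __csi2_get_format - helper function for getting csi2 format
+ * @csi2 : Pointer to ISP CSI2 device
+ * @fh : V4L2 subdev file handle
+ * @pad : pad number
+ * @which : wanted subdev format
+ * return format structure or NULL on error
+ */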
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &csi2->formats[pad];
+}
+
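+/*
+ * csi2_try_format - Handle try format by pad subdev method
+ * @csi2 : Pointer to ISP CSI2 device
+ * @fh : V4L2 subdev file handle
+ * @pad : pad num
+ * @fmt : pointer to v4l2 mbus format structure
+ * @which : wanted subdev format
+ */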
+static void
+csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ enum v4l2_mbus_pixelcode pixelcode;
+ struct v4l2_mbus_framefmt *format;
+ const struct isp_format_info *info;
+ unsigned int i;
+
+ switch (pad) {
+ case CSI2_PAD_SINK:
+ /* Clamp the width and height to valid range (1-8191). */
+ for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
+ if (fmt->code == csi2_input_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(csi2_input_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+ break;
+
+ case CSI2_PAD_SOURCE:
+ /* Source format same as sink format, except for DPCM
+ * compression.
+ */
+ pixelcode = fmt->code;
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ /*
+		 * Only allow DPCM decompression, and check that the
+		 * pattern is preserved.
+ */
+ info = omap3isp_video_format_info(fmt->code);
+ if (info->uncompressed == pixelcode)
+ fmt->code = pixelcode;
+ break;
+ }
+
+ /* RGB, non-interlaced */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ const struct isp_format_info *info;
+
+ if (code->pad == CSI2_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(csi2_input_fmts))
+ return -EINVAL;
+
+ code->code = csi2_input_fmts[code->index];
+ } else {
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+ switch (code->index) {
+ case 0:
+ /* Passthrough sink pad code */
+ code->code = format->code;
+ break;
+ case 1:
+ /* Uncompressed code */
+ info = omap3isp_video_format_info(format->code);
+ if (info->uncompressed == format->code)
+ return -EINVAL;
+
+ code->code = info->uncompressed;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
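+/*
+ * csi2_enum_frame_size - Handle frame size enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fse : pointer to v4l2_subdev_frame_size_enum structure
+ * return -EINVAL or zero on success
+ */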
+static int csi2_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ csi2_try_format(csi2, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CSI2_PAD_SINK) {
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * csi2_init_formats - Initialize formats on all pads
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CSI2_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ csi2_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @enable: ISP pipeline stream state
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = csi2->isp;
+ struct isp_video *video_out = &csi2->video_out;
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (omap3isp_csiphy_acquire(csi2->phy) < 0)
+ return -ENODEV;
+ if (csi2->output & CSI2_OUTPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
+ csi2_configure(csi2);
+ csi2_print_status(csi2);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISP_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (csi2->output & CSI2_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED))
+ break;
+ /* Enable context 0 and IRQs */
+ atomic_set(&csi2->stopping, 0);
+ csi2_ctx_enable(isp, csi2, 0, 1);
+ csi2_if_enable(isp, csi2, 1);
+ isp_video_dmaqueue_flags_clr(video_out);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ if (csi2->state == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap3isp_module_sync_idle(&sd->entity, &csi2->wait,
+ &csi2->stopping))
+ dev_dbg(isp->dev, "%s: module stop timeout.\n",
+ sd->name);
+ csi2_ctx_enable(isp, csi2, 0, 0);
+ csi2_if_enable(isp, csi2, 0);
+ csi2_irq_ctx_set(isp, csi2, 0);
+ omap3isp_csiphy_release(csi2->phy);
+ isp_video_dmaqueue_flags_clr(video_out);
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
+ break;
+ }
+
+ csi2->state = enable;
+ return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .enum_mbus_code = csi2_enum_mbus_code,
+ .enum_frame_size = csi2_enum_frame_size,
+ .get_fmt = csi2_get_format,
+ .set_fmt = csi2_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .open = csi2_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags : Link flags
+ * return -EINVAL or -EBUSY on error, or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct isp_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
+
+ /*
+ * The ISP core doesn't support pipelines with multiple video outputs.
+	 * Revisit this when it is implemented, and return -EBUSY for now.
+ */
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case CSI2_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_MEMORY)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_MEMORY;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_MEMORY;
+ }
+ break;
+
+ case CSI2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_CCDC)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_CCDC;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_CCDC;
+ }
+ break;
+
+ default:
+ /* Link from camera to CSI2 is fixed... */
+ return -EINVAL;
+ }
+
+ ctrl->vp_only_enable =
+ (csi2->output & CSI2_OUTPUT_MEMORY) ? false : true;
+ ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_CCDC);
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
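+/*
+ * omap3isp_csi2_unregister_entities - Unregister media entities: subdev
+ * @csi2: Pointer to ISP CSI2 device
+ */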
+void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
+{
+ v4l2_device_unregister_subdev(&csi2->subdev);
+ omap3isp_video_unregister(&csi2->video_out);
+}
+
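+/*
+ * omap3isp_csi2_register_entities - Register the subdev media entity
+ * @csi2: Pointer to ISP CSI2 device
+ * @vdev: Pointer to v4l device
+ * return negative error code or zero on success
+ */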
+int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&csi2->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CSI2 initialisation and cleanup
+ */
+
+/*
+ * csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to csi2 structure.
+ * return -ENOMEM or zero on success
+ */
+static int csi2_init_entities(struct isp_csi2_device *csi2)
+{
+ struct v4l2_subdev *sd = &csi2->subdev;
+ struct media_pad *pads = csi2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ v4l2_subdev_init(sd, &csi2_ops);
+ sd->internal_ops = &csi2_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP CSI2a", sizeof(sd->name));
+
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, csi2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+
+ me->ops = &csi2_media_ops;
+ ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ csi2_init_formats(sd, NULL);
+
+ /* Video device node */
+ csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ csi2->video_out.ops = &csi2_ispvideo_ops;
+ csi2->video_out.bpl_alignment = 32;
+ csi2->video_out.bpl_zero_padding = 1;
+ csi2->video_out.bpl_max = 0x1ffe0;
+ csi2->video_out.isp = csi2->isp;
+ csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+ ret = omap3isp_video_init(&csi2->video_out, "CSI2a");
+ if (ret < 0)
+ goto error_video;
+
+ /* Connect the CSI2 subdev to the video node. */
+ ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
+ &csi2->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap3isp_video_cleanup(&csi2->video_out);
+error_video:
+ media_entity_cleanup(&csi2->subdev.entity);
+ return ret;
+}
+
+/*
+ * omap3isp_csi2_init - Routine for module driver init
+ */
+int omap3isp_csi2_init(struct isp_device *isp)
+{
+ struct isp_csi2_device *csi2a = &isp->isp_csi2a;
+ struct isp_csi2_device *csi2c = &isp->isp_csi2c;
+ int ret;
+
+ csi2a->isp = isp;
+ csi2a->available = 1;
+ csi2a->regs1 = OMAP3_ISP_IOMEM_CSI2A_REGS1;
+ csi2a->regs2 = OMAP3_ISP_IOMEM_CSI2A_REGS2;
+ csi2a->phy = &isp->isp_csiphy2;
+ csi2a->state = ISP_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2a->wait);
+
+ ret = csi2_init_entities(csi2a);
+ if (ret < 0)
+ return ret;
+
+ if (isp->revision == ISP_REVISION_15_0) {
+ csi2c->isp = isp;
+ csi2c->available = 1;
+ csi2c->regs1 = OMAP3_ISP_IOMEM_CSI2C_REGS1;
+ csi2c->regs2 = OMAP3_ISP_IOMEM_CSI2C_REGS2;
+ csi2c->phy = &isp->isp_csiphy1;
+ csi2c->state = ISP_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2c->wait);
+ }
+
+ return 0;
+}
+
+/*
+ * omap3isp_csi2_cleanup - Routine for module driver cleanup
+ */
+void omap3isp_csi2_cleanup(struct isp_device *isp)
+{
+ struct isp_csi2_device *csi2a = &isp->isp_csi2a;
+
+ omap3isp_video_cleanup(&csi2a->video_out);
+ media_entity_cleanup(&csi2a->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.h b/drivers/media/platform/omap3isp/ispcsi2.h
new file mode 100644
index 00000000000..c57729b7e86
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsi2.h
@@ -0,0 +1,165 @@
+/*
+ * ispcsi2.h
+ *
+ * TI OMAP3 ISP - CSI2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CSI2_H
+#define OMAP3_ISP_CSI2_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+struct isp_csiphy;
+
+/* This is not an exhaustive list */
+enum isp_csi2_pix_formats {
+ CSI2_PIX_FMT_OTHERS = 0,
+ CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
+ CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
+ CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
+ CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
+ CSI2_PIX_FMT_RAW8 = 0x2a,
+ CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
+ CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
+ CSI2_PIX_FMT_RAW8_VP = 0x12a,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
+ CSI2_USERDEF_8BIT_DATA1 = 0x40,
+};
+
+enum isp_csi2_irqevents {
+ OCP_ERR_IRQ = 0x4000,
+ SHORT_PACKET_IRQ = 0x2000,
+ ECC_CORRECTION_IRQ = 0x1000,
+ ECC_NO_CORRECTION_IRQ = 0x800,
+ COMPLEXIO2_ERR_IRQ = 0x400,
+ COMPLEXIO1_ERR_IRQ = 0x200,
+ FIFO_OVF_IRQ = 0x100,
+ CONTEXT7 = 0x80,
+ CONTEXT6 = 0x40,
+ CONTEXT5 = 0x20,
+ CONTEXT4 = 0x10,
+ CONTEXT3 = 0x8,
+ CONTEXT2 = 0x4,
+ CONTEXT1 = 0x2,
+ CONTEXT0 = 0x1,
+};
+
+enum isp_csi2_ctx_irqevents {
+ CTX_ECC_CORRECTION = 0x100,
+ CTX_LINE_NUMBER = 0x80,
+ CTX_FRAME_NUMBER = 0x40,
+ CTX_CS = 0x20,
+ CTX_LE = 0x8,
+ CTX_LS = 0x4,
+ CTX_FE = 0x2,
+ CTX_FS = 0x1,
+};
+
+enum isp_csi2_frame_mode {
+ ISP_CSI2_FRAME_IMMEDIATE,
+ ISP_CSI2_FRAME_AFTERFEC,
+};
+
+#define ISP_CSI2_MAX_CTX_NUM 7
+
+struct isp_csi2_ctx_cfg {
+ u8 ctxnum; /* context number 0 - 7 */
+ u8 dpcm_decompress;
+
+ /* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
+ u8 virtual_id;
+ u16 format_id; /* as in CSI2_CTx_CTRL2[9:0] */
+ u8 dpcm_predictor; /* 1: simple, 0: advanced */
+
+ /* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
+ u16 alpha;
+ u16 data_offset;
+ u32 ping_addr;
+ u32 pong_addr;
+ u8 eof_enabled;
+ u8 eol_enabled;
+ u8 checksum_enabled;
+ u8 enabled;
+};
+
+struct isp_csi2_timing_cfg {
+ u8 ionum; /* IO1 or IO2 as in CSI2_TIMING */
+ unsigned force_rx_mode:1;
+ unsigned stop_state_16x:1;
+ unsigned stop_state_4x:1;
+ u16 stop_state_counter;
+};
+
+struct isp_csi2_ctrl_cfg {
+ bool vp_clk_enable;
+ bool vp_only_enable;
+ u8 vp_out_ctrl;
+ enum isp_csi2_frame_mode frame_mode;
+ bool ecc_enable;
+ bool if_enable;
+};
+
+#define CSI2_PAD_SINK 0
+#define CSI2_PAD_SOURCE 1
+#define CSI2_PADS_NUM 2
+
+#define CSI2_OUTPUT_CCDC (1 << 0)
+#define CSI2_OUTPUT_MEMORY (1 << 1)
+
+struct isp_csi2_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+ struct isp_video video_out;
+ struct isp_device *isp;
+
+ u8 available; /* Is the IP present on the silicon? */
+
+ /* mem resources - enums as defined in enum isp_mem_resources */
+ u8 regs1;
+ u8 regs2;
+
+ u32 output; /* output to CCDC, memory or both? */
+ bool dpcm_decompress;
+ unsigned int frame_skip;
+
+ struct isp_csiphy *phy;
+ struct isp_csi2_ctx_cfg contexts[ISP_CSI2_MAX_CTX_NUM + 1];
+ struct isp_csi2_timing_cfg timing[2];
+ struct isp_csi2_ctrl_cfg ctrl;
+ enum isp_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2);
+int omap3isp_csi2_reset(struct isp_csi2_device *csi2);
+int omap3isp_csi2_init(struct isp_device *isp);
+void omap3isp_csi2_cleanup(struct isp_device *isp);
+void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2);
+int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+ struct v4l2_device *vdev);
+#endif /* OMAP3_ISP_CSI2_H */
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
new file mode 100644
index 00000000000..c09de32f986
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -0,0 +1,353 @@
+/*
+ * ispcsiphy.c
+ *
+ * TI OMAP3 ISP - CSI PHY module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispcsiphy.h"
+
+static void csiphy_routing_cfg_3630(struct isp_csiphy *phy,
+ enum isp_interface_type iface,
+ bool ccp2_strobe)
+{
+ u32 reg = isp_reg_readl(
+ phy->isp, OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+ u32 shift, mode;
+
+ switch (iface) {
+ default:
+ /* Should not happen in practice, but let's keep the compiler happy. */
+ case ISP_INTERFACE_CCP2B_PHY1:
+ reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2C_PHY1:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ case ISP_INTERFACE_CCP2B_PHY2:
+ reg |= OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2A_PHY2:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ }
+
+ /* Select data/clock or data/strobe mode for CCP2 */
+ if (iface == ISP_INTERFACE_CCP2B_PHY1 ||
+ iface == ISP_INTERFACE_CCP2B_PHY2) {
+ if (ccp2_strobe)
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
+ else
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK;
+ }
+
+ reg &= ~(OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK << shift);
+ reg |= mode << shift;
+
+ isp_reg_writel(phy->isp, reg,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+}
+
+static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ u32 csirxfe = OMAP343X_CONTROL_CSIRXFE_PWRDNZ
+ | OMAP343X_CONTROL_CSIRXFE_RESET;
+
+ /* Only the CCP2B on PHY1 is configurable. */
+ if (iface != ISP_INTERFACE_CCP2B_PHY1)
+ return;
+
+ if (!on) {
+ isp_reg_writel(phy->isp, 0,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+ return;
+ }
+
+ if (ccp2_strobe)
+ csirxfe |= OMAP343X_CONTROL_CSIRXFE_SELFORM;
+
+ isp_reg_writel(phy->isp, csirxfe,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+}
+
+/*
+ * Configure OMAP 3 CSI PHY routing.
+ * @phy: relevant phy device
+ * @iface: ISP_INTERFACE_*
+ * @on: power on or off
+ * @ccp2_strobe: false: data/clock, true: data/strobe
+ *
+ * Note that the underlying routing configuration registers are part of the
+ * control (SCM) register space and part of the CORE power domain on both 3430
+ * and 3630, so they will not hold their contents in off-mode. This isn't an
+ * issue since the MPU power domain is forced on whilst the ISP is in use.
+ */
+static void csiphy_routing_cfg(struct isp_csiphy *phy,
+ enum isp_interface_type iface, bool on,
+ bool ccp2_strobe)
+{
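+ /*
+ * Pick the handler for whichever SCM region is mapped: the 3630
+ * camera PHY control register (only written when powering on) or
+ * the 3430 CSIRXFE register.
+ */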
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL]
+ && on)
+ return csiphy_routing_cfg_3630(phy, iface, ccp2_strobe);
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE])
+ return csiphy_routing_cfg_3430(phy, iface, on, ccp2_strobe);
+}
+
+/*
+ * csiphy_power_autoswitch_enable
+ * @enable: Sets or clears the autoswitch function enable flag.
+ */
+static void csiphy_power_autoswitch_enable(struct isp_csiphy *phy, bool enable)
+{
+ isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
+ ISPCSI2_PHY_CFG_PWR_AUTO,
+ enable ? ISPCSI2_PHY_CFG_PWR_AUTO : 0);
+}
+
+/*
+ * csiphy_set_power
+ * @power: Power state to be set.
+ *
+ * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
+ */
+static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
+{
+ u32 reg;
+ u8 retry_count;
+
+ isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
+ ISPCSI2_PHY_CFG_PWR_CMD_MASK, power);
+
+ retry_count = 0;
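+ /*
+ * Poll until the PWR_STATUS field reflects the requested command.
+ * The status field sits two bits below the command field in the
+ * register, hence the comparison against power >> 2.
+ */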
+ do {
+ udelay(50);
+ reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG) &
+ ISPCSI2_PHY_CFG_PWR_STATUS_MASK;
+
+ if (reg != power >> 2)
+ retry_count++;
+
+ } while ((reg != power >> 2) && (retry_count < 100));
+
+ if (retry_count == 100) {
+ dev_err(phy->isp->dev, "CSI2 CIO set power failed!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
+
+static int omap3isp_csiphy_config(struct isp_csiphy *phy)
+{
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+ struct isp_csiphy_lanes_cfg *lanes;
+ int csi2_ddrclk_khz;
+ unsigned int used_lanes = 0;
+ unsigned int i;
+ u32 reg;
+
+ if (subdevs->interface == ISP_INTERFACE_CCP2B_PHY1
+ || subdevs->interface == ISP_INTERFACE_CCP2B_PHY2)
+ lanes = &subdevs->bus.ccp2.lanecfg;
+ else
+ lanes = &subdevs->bus.csi2.lanecfg;
+
+ /* Clock and data lanes verification */
+ for (i = 0; i < phy->num_data_lanes; i++) {
+ if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3)
+ return -EINVAL;
+
+ if (used_lanes & (1 << lanes->data[i].pos))
+ return -EINVAL;
+
+ used_lanes |= 1 << lanes->data[i].pos;
+ }
+
+ if (lanes->clk.pol > 1 || lanes->clk.pos > 3)
+ return -EINVAL;
+
+ if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
+ return -EINVAL;
+
+ /*
+ * The PHY configuration is lost in off mode, that's not an
+ * issue since the MPU power domain is forced on whilst the
+ * ISP is in use.
+ */
+ csiphy_routing_cfg(phy, subdevs->interface, true,
+ subdevs->bus.ccp2.phy_layer);
+
+ /* DPHY timing configuration */
+ /* CSI-2 is DDR and we only count used lanes. */
+ csi2_ddrclk_khz = pipe->external_rate / 1000
+ / (2 * hweight32(used_lanes)) * pipe->external_width;
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
+ ISPCSIPHY_REG0_THS_SETTLE_MASK);
+ /* THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1. */
+ reg |= (DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1)
+ << ISPCSIPHY_REG0_THS_TERM_SHIFT;
+ /* THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3. */
+ reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3)
+ << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
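+ /*
+ * Worked example with a hypothetical 200000 kHz DDR clock:
+ * THS_TERM = ceil(25 * 200000 / 2000000) - 1 = 2 and
+ * THS_SETTLE = ceil(90 * 200000 / 1000000) + 3 = 21.
+ */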
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG1);
+
+ reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
+ ISPCSIPHY_REG1_TCLK_MISS_MASK |
+ ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
+ reg |= TCLK_TERM << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
+ reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
+ reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
+
+ /* DPHY lane configuration */
+ reg = isp_reg_readl(csi2->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+ for (i = 0; i < phy->num_data_lanes; i++) {
+ reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
+ ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
+ reg |= (lanes->data[i].pol <<
+ ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
+ reg |= (lanes->data[i].pos <<
+ ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ }
+
+ reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
+ ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
+ reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
+ reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+ return 0;
+}
+
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
+{
+ int rval;
+
+ if (phy->vdd == NULL) {
+ dev_err(phy->isp->dev, "Power regulator for CSI PHY not "
+ "available\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&phy->mutex);
+
+ rval = regulator_enable(phy->vdd);
+ if (rval < 0)
+ goto done;
+
+ rval = omap3isp_csi2_reset(phy->csi2);
+ if (rval < 0)
+ goto done;
+
+ rval = omap3isp_csiphy_config(phy);
+ if (rval < 0)
+ goto done;
+
+ rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
+ if (rval) {
+ regulator_disable(phy->vdd);
+ goto done;
+ }
+
+ csiphy_power_autoswitch_enable(phy, true);
+ phy->phy_in_use = 1;
+
+done:
+ mutex_unlock(&phy->mutex);
+ return rval;
+}
+
+void omap3isp_csiphy_release(struct isp_csiphy *phy)
+{
+ mutex_lock(&phy->mutex);
+ if (phy->phy_in_use) {
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs =
+ pipe->external->host_priv;
+
+ csiphy_routing_cfg(phy, subdevs->interface, false,
+ subdevs->bus.ccp2.phy_layer);
+ csiphy_power_autoswitch_enable(phy, false);
+ csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
+ regulator_disable(phy->vdd);
+ phy->phy_in_use = 0;
+ }
+ mutex_unlock(&phy->mutex);
+}
+
+/*
+ * omap3isp_csiphy_init - Initialize the CSI PHY frontends
+ */
+int omap3isp_csiphy_init(struct isp_device *isp)
+{
+ struct isp_csiphy *phy1 = &isp->isp_csiphy1;
+ struct isp_csiphy *phy2 = &isp->isp_csiphy2;
+
+ phy2->isp = isp;
+ phy2->csi2 = &isp->isp_csi2a;
+ phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
+ phy2->cfg_regs = OMAP3_ISP_IOMEM_CSI2A_REGS1;
+ phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2;
+ mutex_init(&phy2->mutex);
+
+ if (isp->revision == ISP_REVISION_15_0) {
+ phy1->isp = isp;
+ phy1->csi2 = &isp->isp_csi2c;
+ phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES;
+ phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1;
+ phy1->phy_regs = OMAP3_ISP_IOMEM_CSIPHY1;
+ mutex_init(&phy1->mutex);
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
new file mode 100644
index 00000000000..14551fd7769
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -0,0 +1,53 @@
+/*
+ * ispcsiphy.h
+ *
+ * TI OMAP3 ISP - CSI PHY module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CSI_PHY_H
+#define OMAP3_ISP_CSI_PHY_H
+
+#include <media/omap3isp.h>
+
+struct isp_csi2_device;
+struct regulator;
+
+struct isp_csiphy {
+ struct isp_device *isp;
+ struct mutex mutex; /* serialize csiphy configuration */
+ u8 phy_in_use;
+ struct isp_csi2_device *csi2;
+ struct regulator *vdd;
+
+ /* mem resources - enums as defined in enum isp_mem_resources */
+ unsigned int cfg_regs;
+ unsigned int phy_regs;
+
+ u8 num_data_lanes; /* number of CSI2 Data Lanes supported */
+};
+
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy);
+void omap3isp_csiphy_release(struct isp_csiphy *phy);
+int omap3isp_csiphy_init(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_CSI_PHY_H */
diff --git a/drivers/media/platform/omap3isp/isph3a.h b/drivers/media/platform/omap3isp/isph3a.h
new file mode 100644
index 00000000000..fb09fd4ca75
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a.h
@@ -0,0 +1,117 @@
+/*
+ * isph3a.h
+ *
+ * TI OMAP3 ISP - H3A AF module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_H3A_H
+#define OMAP3_ISP_H3A_H
+
+#include <linux/omap3isp.h>
+
+/*
+ * ----------
+ * -H3A AEWB-
+ * ----------
+ */
+
+#define AEWB_PACKET_SIZE 16
+#define AEWB_SATURATION_LIMIT 0x3ff
+
+/* Flags for changed registers */
+#define PCR_CHNG (1 << 0)
+#define AEWWIN1_CHNG (1 << 1)
+#define AEWINSTART_CHNG (1 << 2)
+#define AEWINBLK_CHNG (1 << 3)
+#define AEWSUBWIN_CHNG (1 << 4)
+#define PRV_WBDGAIN_CHNG (1 << 5)
+#define PRV_WBGAIN_CHNG (1 << 6)
+
+/* ISPH3A REGISTERS bits */
+#define ISPH3A_PCR_AF_EN (1 << 0)
+#define ISPH3A_PCR_AF_ALAW_EN (1 << 1)
+#define ISPH3A_PCR_AF_MED_EN (1 << 2)
+#define ISPH3A_PCR_AF_BUSY (1 << 15)
+#define ISPH3A_PCR_AEW_EN (1 << 16)
+#define ISPH3A_PCR_AEW_ALAW_EN (1 << 17)
+#define ISPH3A_PCR_AEW_BUSY (1 << 18)
+#define ISPH3A_PCR_AEW_MASK (ISPH3A_PCR_AEW_ALAW_EN | \
+ ISPH3A_PCR_AEW_AVE2LMT_MASK)
+
+/*
+ * --------
+ * -H3A AF-
+ * --------
+ */
+
+/* Peripheral Revision */
+#define AFPID 0x0
+
+#define AFCOEF_OFFSET 0x00000004 /* COEF base address */
+
+/* PCR fields */
+#define AF_BUSYAF (1 << 15)
+#define AF_FVMODE (1 << 14)
+#define AF_RGBPOS (0x7 << 11)
+#define AF_MED_TH (0xFF << 3)
+#define AF_MED_EN (1 << 2)
+#define AF_ALAW_EN (1 << 1)
+#define AF_EN (1 << 0)
+#define AF_PCR_MASK (AF_FVMODE | AF_RGBPOS | AF_MED_TH | \
+ AF_MED_EN | AF_ALAW_EN)
+
+/* AFPAX1 fields */
+#define AF_PAXW (0x7F << 16)
+#define AF_PAXH 0x7F
+
+/* AFPAX2 fields */
+#define AF_AFINCV (0xF << 13)
+#define AF_PAXVC (0x7F << 6)
+#define AF_PAXHC 0x3F
+
+/* AFPAXSTART fields */
+#define AF_PAXSH (0xFFF << 16)
+#define AF_PAXSV 0xFFF
+
+/* COEFFICIENT MASK */
+#define AF_COEF_MASK0 0xFFF
+#define AF_COEF_MASK1 (0xFFF << 16)
+
+/* BIT SHIFTS */
+#define AF_RGBPOS_SHIFT 11
+#define AF_MED_TH_SHIFT 3
+#define AF_PAXW_SHIFT 16
+#define AF_LINE_INCR_SHIFT 13
+#define AF_VT_COUNT_SHIFT 6
+#define AF_HZ_START_SHIFT 16
+#define AF_COEF_SHIFT 16
+
+/* Init and cleanup functions */
+int omap3isp_h3a_aewb_init(struct isp_device *isp);
+int omap3isp_h3a_af_init(struct isp_device *isp);
+
+void omap3isp_h3a_aewb_cleanup(struct isp_device *isp);
+void omap3isp_h3a_af_cleanup(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_H3A_H */
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
new file mode 100644
index 00000000000..d6811ce263e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -0,0 +1,352 @@
+/*
+ * isph3a.c
+ *
+ * TI OMAP3 ISP - H3A module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+#include "isph3a.h"
+#include "ispstat.h"
+
+/*
+ * h3a_aewb_setup_regs - Helper function to update h3a hardware registers.
+ */
+static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
+{
+ struct omap3isp_h3a_aewb_config *conf = priv;
+ u32 pcr;
+ u32 win1;
+ u32 start;
+ u32 blk;
+ u32 subwin;
+
+ if (aewb->state == ISPSTAT_DISABLED)
+ return;
+
+ isp_reg_writel(aewb->isp, aewb->active_buf->dma_addr,
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
+
+ if (!aewb->update)
+ return;
+
+ /* Converting config metadata into reg values */
+ pcr = conf->saturation_limit << ISPH3A_PCR_AEW_AVE2LMT_SHIFT;
+ pcr |= !!conf->alaw_enable << ISPH3A_PCR_AEW_ALAW_EN_SHIFT;
+
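+ /* Sizes are programmed as (size / 2) - 1, counts as count - 1. */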
+ win1 = ((conf->win_height >> 1) - 1) << ISPH3A_AEWWIN1_WINH_SHIFT;
+ win1 |= ((conf->win_width >> 1) - 1) << ISPH3A_AEWWIN1_WINW_SHIFT;
+ win1 |= (conf->ver_win_count - 1) << ISPH3A_AEWWIN1_WINVC_SHIFT;
+ win1 |= (conf->hor_win_count - 1) << ISPH3A_AEWWIN1_WINHC_SHIFT;
+
+ start = conf->hor_win_start << ISPH3A_AEWINSTART_WINSH_SHIFT;
+ start |= conf->ver_win_start << ISPH3A_AEWINSTART_WINSV_SHIFT;
+
+ blk = conf->blk_ver_win_start << ISPH3A_AEWINBLK_WINSV_SHIFT;
+ blk |= ((conf->blk_win_height >> 1) - 1) << ISPH3A_AEWINBLK_WINH_SHIFT;
+
+ subwin = ((conf->subsample_ver_inc >> 1) - 1) <<
+ ISPH3A_AEWSUBWIN_AEWINCV_SHIFT;
+ subwin |= ((conf->subsample_hor_inc >> 1) - 1) <<
+ ISPH3A_AEWSUBWIN_AEWINCH_SHIFT;
+
+ isp_reg_writel(aewb->isp, win1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWWIN1);
+ isp_reg_writel(aewb->isp, start, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AEWINSTART);
+ isp_reg_writel(aewb->isp, blk, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINBLK);
+ isp_reg_writel(aewb->isp, subwin, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AEWSUBWIN);
+ isp_reg_clr_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AEW_MASK, pcr);
+
+ aewb->update = 0;
+ aewb->config_counter += aewb->inc_config;
+ aewb->inc_config = 0;
+ aewb->buf_size = conf->buf_size;
+}
+
+static void h3a_aewb_enable(struct ispstat *aewb, int enable)
+{
+ if (enable) {
+ isp_reg_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AEW_EN);
+ omap3isp_subclk_enable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
+ } else {
+ isp_reg_clr(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AEW_EN);
+ omap3isp_subclk_disable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
+ }
+}
+
+static int h3a_aewb_busy(struct ispstat *aewb)
+{
+ return isp_reg_readl(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
+ & ISPH3A_PCR_BUSYAEAWB;
+}
+
+static u32 h3a_aewb_get_buf_size(struct omap3isp_h3a_aewb_config *conf)
+{
+ /* Number of configured windows + extra row for black data */
+ u32 win_count = (conf->ver_win_count + 1) * conf->hor_win_count;
+
+ /*
+ * One packet of unsaturated block counts is appended for every 8
+ * windows, plus one extra packet for the trailing (win_count % 8)
+ * windows when win_count is not divisible by 8.
+ */
+ win_count += (win_count + 7) / 8;
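+ /*
+ * Example: hor_win_count = 3 and ver_win_count = 2 give
+ * win_count = (2 + 1) * 3 = 9, plus (9 + 7) / 8 = 2 extra packets,
+ * for a total of 11 * AEWB_PACKET_SIZE = 176 bytes.
+ */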
+
+ return win_count * AEWB_PACKET_SIZE;
+}
+
+static int h3a_aewb_validate_params(struct ispstat *aewb, void *new_conf)
+{
+ struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
+ u32 buf_size;
+
+ if (unlikely(user_cfg->saturation_limit >
+ OMAP3ISP_AEWB_MAX_SATURATION_LIM))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
+ user_cfg->win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
+ user_cfg->win_height & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->win_width < OMAP3ISP_AEWB_MIN_WIN_W ||
+ user_cfg->win_width > OMAP3ISP_AEWB_MAX_WIN_W ||
+ user_cfg->win_width & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->ver_win_count < OMAP3ISP_AEWB_MIN_WINVC ||
+ user_cfg->ver_win_count > OMAP3ISP_AEWB_MAX_WINVC))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->hor_win_count < OMAP3ISP_AEWB_MIN_WINHC ||
+ user_cfg->hor_win_count > OMAP3ISP_AEWB_MAX_WINHC))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->hor_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->blk_ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->blk_win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
+ user_cfg->blk_win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
+ user_cfg->blk_win_height & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->subsample_ver_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
+ user_cfg->subsample_ver_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
+ user_cfg->subsample_ver_inc & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->subsample_hor_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
+ user_cfg->subsample_hor_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
+ user_cfg->subsample_hor_inc & 0x01))
+ return -EINVAL;
+
+ buf_size = h3a_aewb_get_buf_size(user_cfg);
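+ /*
+ * Grow a too-small user request to fit this configuration, and clamp
+ * an oversized request to the maximum supported buffer size.
+ */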
+ if (buf_size > user_cfg->buf_size)
+ user_cfg->buf_size = buf_size;
+ else if (user_cfg->buf_size > OMAP3ISP_AEWB_MAX_BUF_SIZE)
+ user_cfg->buf_size = OMAP3ISP_AEWB_MAX_BUF_SIZE;
+
+ return 0;
+}
+
+/*
+ * h3a_aewb_set_params - Helper function to check and store user-given params.
+ * @new_conf: Pointer to AE and AWB parameters struct.
+ *
+ * As most of these are busy-lock registers, the driver must wait until
+ * AEW_BUSY = 0 and program them from the ISR.
+ */
+static void h3a_aewb_set_params(struct ispstat *aewb, void *new_conf)
+{
+ struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
+ struct omap3isp_h3a_aewb_config *cur_cfg = aewb->priv;
+ int update = 0;
+
+ if (cur_cfg->saturation_limit != user_cfg->saturation_limit) {
+ cur_cfg->saturation_limit = user_cfg->saturation_limit;
+ update = 1;
+ }
+ if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
+ cur_cfg->alaw_enable = user_cfg->alaw_enable;
+ update = 1;
+ }
+ if (cur_cfg->win_height != user_cfg->win_height) {
+ cur_cfg->win_height = user_cfg->win_height;
+ update = 1;
+ }
+ if (cur_cfg->win_width != user_cfg->win_width) {
+ cur_cfg->win_width = user_cfg->win_width;
+ update = 1;
+ }
+ if (cur_cfg->ver_win_count != user_cfg->ver_win_count) {
+ cur_cfg->ver_win_count = user_cfg->ver_win_count;
+ update = 1;
+ }
+ if (cur_cfg->hor_win_count != user_cfg->hor_win_count) {
+ cur_cfg->hor_win_count = user_cfg->hor_win_count;
+ update = 1;
+ }
+ if (cur_cfg->ver_win_start != user_cfg->ver_win_start) {
+ cur_cfg->ver_win_start = user_cfg->ver_win_start;
+ update = 1;
+ }
+ if (cur_cfg->hor_win_start != user_cfg->hor_win_start) {
+ cur_cfg->hor_win_start = user_cfg->hor_win_start;
+ update = 1;
+ }
+ if (cur_cfg->blk_ver_win_start != user_cfg->blk_ver_win_start) {
+ cur_cfg->blk_ver_win_start = user_cfg->blk_ver_win_start;
+ update = 1;
+ }
+ if (cur_cfg->blk_win_height != user_cfg->blk_win_height) {
+ cur_cfg->blk_win_height = user_cfg->blk_win_height;
+ update = 1;
+ }
+ if (cur_cfg->subsample_ver_inc != user_cfg->subsample_ver_inc) {
+ cur_cfg->subsample_ver_inc = user_cfg->subsample_ver_inc;
+ update = 1;
+ }
+ if (cur_cfg->subsample_hor_inc != user_cfg->subsample_hor_inc) {
+ cur_cfg->subsample_hor_inc = user_cfg->subsample_hor_inc;
+ update = 1;
+ }
+
+ if (update || !aewb->configured) {
+ aewb->inc_config++;
+ aewb->update = 1;
+ cur_cfg->buf_size = h3a_aewb_get_buf_size(cur_cfg);
+ }
+}
+
+static long h3a_aewb_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_AEWB_CFG:
+ return omap3isp_stat_config(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ:
+ return omap3isp_stat_request_statistics(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_EN: {
+ unsigned long *en = arg;
+ return omap3isp_stat_enable(stat, !!*en);
+ }
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static const struct ispstat_ops h3a_aewb_ops = {
+ .validate_params = h3a_aewb_validate_params,
+ .set_params = h3a_aewb_set_params,
+ .setup_regs = h3a_aewb_setup_regs,
+ .enable = h3a_aewb_enable,
+ .busy = h3a_aewb_busy,
+};
+
+static const struct v4l2_subdev_core_ops h3a_aewb_subdev_core_ops = {
+ .ioctl = h3a_aewb_ioctl,
+ .subscribe_event = omap3isp_stat_subscribe_event,
+ .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops h3a_aewb_subdev_video_ops = {
+ .s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops h3a_aewb_subdev_ops = {
+ .core = &h3a_aewb_subdev_core_ops,
+ .video = &h3a_aewb_subdev_video_ops,
+};
+
+/*
+ * omap3isp_h3a_aewb_init - Module Initialisation.
+ */
+int omap3isp_h3a_aewb_init(struct isp_device *isp)
+{
+ struct ispstat *aewb = &isp->isp_aewb;
+ struct omap3isp_h3a_aewb_config *aewb_cfg;
+ struct omap3isp_h3a_aewb_config *aewb_recover_cfg;
+
+ aewb_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_cfg), GFP_KERNEL);
+ if (!aewb_cfg)
+ return -ENOMEM;
+
+ aewb->ops = &h3a_aewb_ops;
+ aewb->priv = aewb_cfg;
+ aewb->dma_ch = -1;
+ aewb->event_type = V4L2_EVENT_OMAP3ISP_AEWB;
+ aewb->isp = isp;
+
+ /* Set recover state configuration */
+ aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
+ GFP_KERNEL);
+ if (!aewb_recover_cfg) {
+ dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
+ "recover configuration.\n");
+ return -ENOMEM;
+ }
+
+ aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
+ aewb_recover_cfg->win_height = OMAP3ISP_AEWB_MIN_WIN_H;
+ aewb_recover_cfg->win_width = OMAP3ISP_AEWB_MIN_WIN_W;
+ aewb_recover_cfg->ver_win_count = OMAP3ISP_AEWB_MIN_WINVC;
+ aewb_recover_cfg->hor_win_count = OMAP3ISP_AEWB_MIN_WINHC;
+ aewb_recover_cfg->blk_ver_win_start = aewb_recover_cfg->ver_win_start +
+ aewb_recover_cfg->win_height * aewb_recover_cfg->ver_win_count;
+ aewb_recover_cfg->blk_win_height = OMAP3ISP_AEWB_MIN_WIN_H;
+ aewb_recover_cfg->subsample_ver_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
+ aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
+
+ if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
+ dev_err(aewb->isp->dev, "AEWB: recover configuration is "
+ "invalid.\n");
+ return -EINVAL;
+ }
+
+ aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
+ aewb->recover_priv = aewb_recover_cfg;
+
+ return omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
+}
+
+/*
+ * omap3isp_h3a_aewb_cleanup - Module exit.
+ */
+void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
+{
+ omap3isp_stat_cleanup(&isp->isp_aewb);
+}
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
new file mode 100644
index 00000000000..6fc960cd30f
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -0,0 +1,407 @@
+/*
+ * isph3a_af.c
+ *
+ * TI OMAP3 ISP - H3A AF module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+/* Linux specific include files */
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "isp.h"
+#include "isph3a.h"
+#include "ispstat.h"
+
+#define IS_OUT_OF_BOUNDS(value, min, max) \
+ (((value) < (min)) || ((value) > (max)))
+
+static void h3a_af_setup_regs(struct ispstat *af, void *priv)
+{
+ struct omap3isp_h3a_af_config *conf = priv;
+ u32 pcr;
+ u32 pax1;
+ u32 pax2;
+ u32 paxstart;
+ u32 coef;
+ u32 base_coef_set0;
+ u32 base_coef_set1;
+ int index;
+
+ if (af->state == ISPSTAT_DISABLED)
+ return;
+
+ isp_reg_writel(af->isp, af->active_buf->dma_addr, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AFBUFST);
+
+ if (!af->update)
+ return;
+
+ /* Configure Hardware Registers */
+ pax1 = ((conf->paxel.width >> 1) - 1) << AF_PAXW_SHIFT;
+ /* Set height in AFPAX1 */
+ pax1 |= (conf->paxel.height >> 1) - 1;
+ isp_reg_writel(af->isp, pax1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1);
+
+ /* Configure AFPAX2 Register */
+ /* Set Line Increment in AFPAX2 Register */
+ pax2 = ((conf->paxel.line_inc >> 1) - 1) << AF_LINE_INCR_SHIFT;
+ /* Set Vertical Count */
+ pax2 |= (conf->paxel.v_cnt - 1) << AF_VT_COUNT_SHIFT;
+ /* Set Horizontal Count */
+ pax2 |= (conf->paxel.h_cnt - 1);
+ isp_reg_writel(af->isp, pax2, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2);
+
+ /* Configure PAXSTART Register */
+ /* Configure Horizontal Start */
+ paxstart = conf->paxel.h_start << AF_HZ_START_SHIFT;
+ /* Configure Vertical Start */
+ paxstart |= conf->paxel.v_start;
+ isp_reg_writel(af->isp, paxstart, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AFPAXSTART);
+
+ /* Set IIRSH Register */
+ isp_reg_writel(af->isp, conf->iir.h_start,
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH);
+
+ base_coef_set0 = ISPH3A_AFCOEF010;
+ base_coef_set1 = ISPH3A_AFCOEF110;
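+ /*
+ * Each AFCOEF register packs two IIR coefficients: the even-indexed
+ * one in the low half-word and the odd-indexed one shifted up by
+ * AF_COEF_SHIFT.
+ */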
+ for (index = 0; index <= 8; index += 2) {
+ /* Set IIR Filter0 Coefficients */
+ coef = 0;
+ coef |= conf->iir.coeff_set0[index];
+ coef |= conf->iir.coeff_set0[index + 1] <<
+ AF_COEF_SHIFT;
+ isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
+ base_coef_set0);
+ base_coef_set0 += AFCOEF_OFFSET;
+
+ /* Set IIR Filter1 Coefficients */
+ coef = 0;
+ coef |= conf->iir.coeff_set1[index];
+ coef |= conf->iir.coeff_set1[index + 1] <<
+ AF_COEF_SHIFT;
+ isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
+ base_coef_set1);
+ base_coef_set1 += AFCOEF_OFFSET;
+ }
+ /* set AFCOEF0010 Register */
+ isp_reg_writel(af->isp, conf->iir.coeff_set0[10],
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010);
+ /* set AFCOEF1010 Register */
+ isp_reg_writel(af->isp, conf->iir.coeff_set1[10],
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010);
+
+ /* PCR Register */
+ /* Set RGB Position */
+ pcr = conf->rgb_pos << AF_RGBPOS_SHIFT;
+ /* Set Accumulator Mode */
+ if (conf->fvmode == OMAP3ISP_AF_MODE_PEAK)
+ pcr |= AF_FVMODE;
+ /* Set A-law */
+ if (conf->alaw_enable)
+ pcr |= AF_ALAW_EN;
+ /* HMF Configurations */
+ if (conf->hmf.enable) {
+ /* Enable HMF */
+ pcr |= AF_MED_EN;
+ /* Set Median Threshold */
+ pcr |= conf->hmf.threshold << AF_MED_TH_SHIFT;
+ }
+ /* Set PCR Register */
+ isp_reg_clr_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ AF_PCR_MASK, pcr);
+
+ af->update = 0;
+ af->config_counter += af->inc_config;
+ af->inc_config = 0;
+ af->buf_size = conf->buf_size;
+}
+
+static void h3a_af_enable(struct ispstat *af, int enable)
+{
+ if (enable) {
+ isp_reg_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AF_EN);
+ omap3isp_subclk_enable(af->isp, OMAP3_ISP_SUBCLK_AF);
+ } else {
+ isp_reg_clr(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AF_EN);
+ omap3isp_subclk_disable(af->isp, OMAP3_ISP_SUBCLK_AF);
+ }
+}
+
+static int h3a_af_busy(struct ispstat *af)
+{
+ return isp_reg_readl(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
+ & ISPH3A_PCR_BUSYAF;
+}
+
+static u32 h3a_af_get_buf_size(struct omap3isp_h3a_af_config *conf)
+{
+ return conf->paxel.h_cnt * conf->paxel.v_cnt * OMAP3ISP_AF_PAXEL_SIZE;
+}
+
+/* Validate the paxel and IIR filter parameters. */
+static int h3a_af_validate_params(struct ispstat *af, void *new_conf)
+{
+ struct omap3isp_h3a_af_config *user_cfg = new_conf;
+ struct omap3isp_h3a_af_paxel *paxel_cfg = &user_cfg->paxel;
+ struct omap3isp_h3a_af_iir *iir_cfg = &user_cfg->iir;
+ int index;
+ u32 buf_size;
+
+ /* Check horizontal Count */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->h_cnt,
+ OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN,
+ OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MAX))
+ return -EINVAL;
+
+ /* Check Vertical Count */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->v_cnt,
+ OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN,
+ OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MAX))
+ return -EINVAL;
+
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->height, OMAP3ISP_AF_PAXEL_HEIGHT_MIN,
+ OMAP3ISP_AF_PAXEL_HEIGHT_MAX) ||
+ paxel_cfg->height % 2)
+ return -EINVAL;
+
+ /* Check width */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->width, OMAP3ISP_AF_PAXEL_WIDTH_MIN,
+ OMAP3ISP_AF_PAXEL_WIDTH_MAX) ||
+ paxel_cfg->width % 2)
+ return -EINVAL;
+
+ /* Check Line Increment */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->line_inc,
+ OMAP3ISP_AF_PAXEL_INCREMENT_MIN,
+ OMAP3ISP_AF_PAXEL_INCREMENT_MAX) ||
+ paxel_cfg->line_inc % 2)
+ return -EINVAL;
+
+ /* Check Horizontal Start */
+ if ((paxel_cfg->h_start < iir_cfg->h_start) ||
+ IS_OUT_OF_BOUNDS(paxel_cfg->h_start,
+ OMAP3ISP_AF_PAXEL_HZSTART_MIN,
+ OMAP3ISP_AF_PAXEL_HZSTART_MAX))
+ return -EINVAL;
+
+ /* Check IIR */
+ for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
+ if ((iir_cfg->coeff_set0[index]) > OMAP3ISP_AF_COEF_MAX)
+ return -EINVAL;
+
+ if ((iir_cfg->coeff_set1[index]) > OMAP3ISP_AF_COEF_MAX)
+ return -EINVAL;
+ }
+
+ if (IS_OUT_OF_BOUNDS(iir_cfg->h_start, OMAP3ISP_AF_IIRSH_MIN,
+ OMAP3ISP_AF_IIRSH_MAX))
+ return -EINVAL;
+
+ /* Hack: If paxel size is 12, the 10th AF window may be corrupted */
+ if ((paxel_cfg->h_cnt * paxel_cfg->v_cnt > 9) &&
+ (paxel_cfg->width * paxel_cfg->height == 12))
+ return -EINVAL;
+
+ buf_size = h3a_af_get_buf_size(user_cfg);
+ if (buf_size > user_cfg->buf_size)
+ /* User buf_size request wasn't enough */
+ user_cfg->buf_size = buf_size;
+ else if (user_cfg->buf_size > OMAP3ISP_AF_MAX_BUF_SIZE)
+ user_cfg->buf_size = OMAP3ISP_AF_MAX_BUF_SIZE;
+
+ return 0;
+}
+
+/* Update local parameters */
+static void h3a_af_set_params(struct ispstat *af, void *new_conf)
+{
+ struct omap3isp_h3a_af_config *user_cfg = new_conf;
+ struct omap3isp_h3a_af_config *cur_cfg = af->priv;
+ int update = 0;
+ int index;
+
+ /* alaw */
+ if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
+ update = 1;
+ goto out;
+ }
+
+ /* hmf */
+ if (cur_cfg->hmf.enable != user_cfg->hmf.enable) {
+ update = 1;
+ goto out;
+ }
+ if (cur_cfg->hmf.threshold != user_cfg->hmf.threshold) {
+ update = 1;
+ goto out;
+ }
+
+ /* rgbpos */
+ if (cur_cfg->rgb_pos != user_cfg->rgb_pos) {
+ update = 1;
+ goto out;
+ }
+
+ /* iir */
+ if (cur_cfg->iir.h_start != user_cfg->iir.h_start) {
+ update = 1;
+ goto out;
+ }
+ for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
+ if (cur_cfg->iir.coeff_set0[index] !=
+ user_cfg->iir.coeff_set0[index]) {
+ update = 1;
+ goto out;
+ }
+ if (cur_cfg->iir.coeff_set1[index] !=
+ user_cfg->iir.coeff_set1[index]) {
+ update = 1;
+ goto out;
+ }
+ }
+
+ /* paxel */
+ if ((cur_cfg->paxel.width != user_cfg->paxel.width) ||
+ (cur_cfg->paxel.height != user_cfg->paxel.height) ||
+ (cur_cfg->paxel.h_start != user_cfg->paxel.h_start) ||
+ (cur_cfg->paxel.v_start != user_cfg->paxel.v_start) ||
+ (cur_cfg->paxel.h_cnt != user_cfg->paxel.h_cnt) ||
+ (cur_cfg->paxel.v_cnt != user_cfg->paxel.v_cnt) ||
+ (cur_cfg->paxel.line_inc != user_cfg->paxel.line_inc)) {
+ update = 1;
+ goto out;
+ }
+
+ /* af_mode */
+ if (cur_cfg->fvmode != user_cfg->fvmode)
+ update = 1;
+
+out:
+ if (update || !af->configured) {
+ memcpy(cur_cfg, user_cfg, sizeof(*cur_cfg));
+ af->inc_config++;
+ af->update = 1;
+ /*
+ * The user might have requested a bigger buffer than necessary
+ * for this configuration. To return the right amount of data
+ * when a buffer is requested, calculate the size here instead
+ * of sticking with user_cfg->buf_size.
+ */
+ cur_cfg->buf_size = h3a_af_get_buf_size(cur_cfg);
+ }
+}
+
+static long h3a_af_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_AF_CFG:
+ return omap3isp_stat_config(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ:
+ return omap3isp_stat_request_statistics(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_EN: {
+ int *en = arg;
+ return omap3isp_stat_enable(stat, !!*en);
+ }
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static const struct ispstat_ops h3a_af_ops = {
+ .validate_params = h3a_af_validate_params,
+ .set_params = h3a_af_set_params,
+ .setup_regs = h3a_af_setup_regs,
+ .enable = h3a_af_enable,
+ .busy = h3a_af_busy,
+};
+
+static const struct v4l2_subdev_core_ops h3a_af_subdev_core_ops = {
+ .ioctl = h3a_af_ioctl,
+ .subscribe_event = omap3isp_stat_subscribe_event,
+ .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops h3a_af_subdev_video_ops = {
+ .s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops h3a_af_subdev_ops = {
+ .core = &h3a_af_subdev_core_ops,
+ .video = &h3a_af_subdev_video_ops,
+};
+
+/*
+ * omap3isp_h3a_af_init - Module Initialisation.
+ */
+int omap3isp_h3a_af_init(struct isp_device *isp)
+{
+ struct ispstat *af = &isp->isp_af;
+ struct omap3isp_h3a_af_config *af_cfg;
+ struct omap3isp_h3a_af_config *af_recover_cfg;
+
+ af_cfg = devm_kzalloc(isp->dev, sizeof(*af_cfg), GFP_KERNEL);
+ if (af_cfg == NULL)
+ return -ENOMEM;
+
+ af->ops = &h3a_af_ops;
+ af->priv = af_cfg;
+ af->dma_ch = -1;
+ af->event_type = V4L2_EVENT_OMAP3ISP_AF;
+ af->isp = isp;
+
+ /* Set recover state configuration */
+ af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
+ GFP_KERNEL);
+ if (!af_recover_cfg) {
+ dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
+ "configuration.\n");
+ return -ENOMEM;
+ }
+
+ af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
+ af_recover_cfg->paxel.width = OMAP3ISP_AF_PAXEL_WIDTH_MIN;
+ af_recover_cfg->paxel.height = OMAP3ISP_AF_PAXEL_HEIGHT_MIN;
+ af_recover_cfg->paxel.h_cnt = OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN;
+ af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
+ af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;
+ if (h3a_af_validate_params(af, af_recover_cfg)) {
+ dev_err(af->isp->dev, "AF: recover configuration is "
+ "invalid.\n");
+ return -EINVAL;
+ }
+
+ af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
+ af->recover_priv = af_recover_cfg;
+
+ return omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
+}
+
+void omap3isp_h3a_af_cleanup(struct isp_device *isp)
+{
+ omap3isp_stat_cleanup(&isp->isp_af);
+}
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
new file mode 100644