author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-06 01:02:30 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-06 01:02:30 +0000
commit	76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree	f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /drivers/media/platform
parent	Initial commit. (diff)
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/platform')
-rw-r--r--drivers/media/platform/Kconfig643
-rw-r--r--drivers/media/platform/Makefile98
-rw-r--r--drivers/media/platform/am437x/Kconfig12
-rw-r--r--drivers/media/platform/am437x/Makefile3
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c2776
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h281
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe_regs.h140
-rw-r--r--drivers/media/platform/atmel/Kconfig20
-rw-r--r--drivers/media/platform/atmel/Makefile2
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h263
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c2309
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c1354
-rw-r--r--drivers/media/platform/atmel/atmel-isi.h138
-rw-r--r--drivers/media/platform/cadence/Kconfig36
-rw-r--r--drivers/media/platform/cadence/Makefile4
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c499
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c564
-rw-r--r--drivers/media/platform/cec-gpio/Makefile1
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c281
-rw-r--r--drivers/media/platform/coda/Makefile6
-rw-r--r--drivers/media/platform/coda/coda-bit.c2331
-rw-r--r--drivers/media/platform/coda/coda-common.c2873
-rw-r--r--drivers/media/platform/coda/coda-gdi.c150
-rw-r--r--drivers/media/platform/coda/coda-h264.c432
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c253
-rw-r--r--drivers/media/platform/coda/coda.h321
-rw-r--r--drivers/media/platform/coda/coda_regs.h466
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c362
-rw-r--r--drivers/media/platform/coda/imx-vdoa.h58
-rw-r--r--drivers/media/platform/coda/trace.h163
-rw-r--r--drivers/media/platform/cros-ec-cec/Makefile1
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c347
-rw-r--r--drivers/media/platform/davinci/Kconfig91
-rw-r--r--drivers/media/platform/davinci/Makefile16
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h89
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c944
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc_regs.h306
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c889
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc_regs.h149
-rw-r--r--drivers/media/platform/davinci/isif.c1130
-rw-r--r--drivers/media/platform/davinci/isif_regs.h265
-rw-r--r--drivers/media/platform/davinci/vpbe.c871
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c1534
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c1596
-rw-r--r--drivers/media/platform/davinci/vpbe_osd_regs.h360
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c694
-rw-r--r--drivers/media/platform/davinci/vpbe_venc_regs.h173
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c1945
-rw-r--r--drivers/media/platform/davinci/vpif.c556
-rw-r--r--drivers/media/platform/davinci/vpif.h688
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c1832
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h116
-rw-r--r--drivers/media/platform/davinci/vpif_display.c1449
-rw-r--r--drivers/media/platform/davinci/vpif_display.h125
-rw-r--r--drivers/media/platform/davinci/vpss.c544
-rw-r--r--drivers/media/platform/exynos-gsc/Makefile3
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c1368
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h519
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c803
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.c430
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.h172
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig81
-rw-r--r--drivers/media/platform/exynos4-is/Makefile18
-rw-r--r--drivers/media/platform/exynos4-is/common.c52
-rw-r--r--drivers/media/platform/exynos4-is/common.h16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c1919
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c1261
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h725
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-command.h137
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.c272
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.h248
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c162
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.h15
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.c896
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.h1025
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.c233
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.h164
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-sensor.c34
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-sensor.h56
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c1009
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.h345
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c661
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.h44
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c789
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.h193
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.c349
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.h156
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c1692
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.h225
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c761
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.c842
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.h338
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c1584
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h211
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c1043
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.h26
-rw-r--r--drivers/media/platform/fsl-viu.c1618
-rw-r--r--drivers/media/platform/m2m-deinterlace.c1076
-rw-r--r--drivers/media/platform/marvell-ccic/Kconfig27
-rw-r--r--drivers/media/platform/marvell-ccic/Makefile5
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c661
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c1909
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h383
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c537
-rw-r--r--drivers/media/platform/meson/Makefile1
-rw-r--r--drivers/media/platform/meson/ao-cec.c744
-rw-r--r--drivers/media/platform/mtk-jpeg/Makefile2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c1291
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h139
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c417
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h91
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c160
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h25
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h58
-rw-r--r--drivers/media/platform/mtk-mdp/Makefile10
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c157
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.h72
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c300
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.h260
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h126
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c1269
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h22
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_regs.c156
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_regs.h31
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c145
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h41
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile29
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c1511
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h90
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c408
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c203
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h28
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h388
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c1355
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h58
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c428
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c139
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h26
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c53
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h26
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c120
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h89
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c508
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c633
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c1026
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_base.h56
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c122
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h103
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h103
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c169
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h96
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c679
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c484
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_base.h62
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.c113
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.h163
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_ipi_msg.h210
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.c236
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.h61
-rw-r--r--drivers/media/platform/mtk-vpu/Makefile3
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c966
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.h194
-rw-r--r--drivers/media/platform/mx2_emmaprp.c991
-rw-r--r--drivers/media/platform/omap/Kconfig18
-rw-r--r--drivers/media/platform/omap/Makefile8
-rw-r--r--drivers/media/platform/omap/omap_vout.c2223
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c421
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.h40
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h226
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c357
-rw-r--r--drivers/media/platform/omap/omap_voutlib.h39
-rw-r--r--drivers/media/platform/omap3isp/Makefile12
-rw-r--r--drivers/media/platform/omap3isp/cfa_coef_table.h51
-rw-r--r--drivers/media/platform/omap3isp/gamma_table.h80
-rw-r--r--drivers/media/platform/omap3isp/isp.c2423
-rw-r--r--drivers/media/platform/omap3isp/isp.h363
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c2741
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.h177
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c1177
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.h88
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c1318
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.h155
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c358
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h46
-rw-r--r--drivers/media/platform/omap3isp/isph3a.h107
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c343
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c398
-rw-r--r--drivers/media/platform/omap3isp/isphist.c540
-rw-r--r--drivers/media/platform/omap3isp/isphist.h30
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c2355
-rw-r--r--drivers/media/platform/omap3isp/isppreview.h164
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h1521
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c1796
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.h139
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c1083
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h159
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c1500
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h210
-rw-r--r--drivers/media/platform/omap3isp/luma_enhance_table.h32
-rw-r--r--drivers/media/platform/omap3isp/noise_filter_table.h20
-rw-r--r--drivers/media/platform/omap3isp/omap3isp.h138
-rw-r--r--drivers/media/platform/pxa_camera.c2596
-rw-r--r--drivers/media/platform/qcom/camss/Makefile15
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c1392
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h77
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c177
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c257
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c767
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h92
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c1373
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.h78
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c1019
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c1141
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c2342
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h186
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c959
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.h62
-rw-r--r--drivers/media/platform/qcom/camss/camss.c1028
-rw-r--r--drivers/media/platform/qcom/camss/camss.h115
-rw-r--r--drivers/media/platform/qcom/venus/Makefile13
-rw-r--r--drivers/media/platform/qcom/venus/core.c507
-rw-r--r--drivers/media/platform/qcom/venus/core.h353
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c98
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h22
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c1236
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h65
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c522
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h185
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c1250
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h304
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h1120
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c780
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.h283
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c279
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.h110
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c1628
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.h23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h123
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c1263
-rw-r--r--drivers/media/platform/qcom/venus/vdec.h23
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c160
-rw-r--r--drivers/media/platform/qcom/venus/venc.c1370
-rw-r--r--drivers/media/platform/qcom/venus/venc.h23
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c329
-rw-r--r--drivers/media/platform/rcar-fcp.c192
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig25
-rw-r--r--drivers/media/platform/rcar-vin/Makefile5
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c1277
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c1108
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c1347
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c1033
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h270
-rw-r--r--drivers/media/platform/rcar_drif.c1499
-rw-r--r--drivers/media/platform/rcar_fdp1.c2453
-rw-r--r--drivers/media/platform/rcar_jpu.c1768
-rw-r--r--drivers/media/platform/renesas-ceu.c1753
-rw-r--r--drivers/media/platform/rockchip/rga/Makefile3
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c157
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c424
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h442
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c992
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h123
-rw-r--r--drivers/media/platform/s3c-camif/Makefile5
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c1661
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c652
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h391
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c606
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.h269
-rw-r--r--drivers/media/platform/s5p-cec/Makefile2
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cec.h37
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c209
-rw-r--r--drivers/media/platform/s5p-cec/regs-cec.h96
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c312
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.h80
-rw-r--r--drivers/media/platform/s5p-g2d/Makefile3
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-hw.c117
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-regs.h122
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c783
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h89
-rw-r--r--drivers/media/platform/s5p-jpeg/Makefile2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c3229
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h272
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c489
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h60
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c324
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h47
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c309
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h60
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h649
-rw-r--r--drivers/media/platform/s5p-mfc/Makefile7
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v10.h87
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h411
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v7.h60
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v8.h126
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h462
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c1677
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c29
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h35
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c167
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c173
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h787
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c486
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h33
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_debug.h54
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c1196
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.h24
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c2713
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.h24
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_intr.c91
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_intr.h26
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h30
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c127
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h342
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c1640
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h85
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c2537
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h60
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c125
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.h24
-rw-r--r--drivers/media/platform/sh_veu.c1208
-rw-r--r--drivers/media/platform/sh_vou.c1380
-rw-r--r--drivers/media/platform/soc_camera/Kconfig26
-rw-r--r--drivers/media/platform/soc_camera/Makefile9
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c1810
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c2154
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c188
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c533
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c426
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.h47
-rw-r--r--drivers/media/platform/sti/bdisp/Makefile3
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-debug.c687
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-filter.h42
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-hw.c1118
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-reg.h235
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c1435
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp.h214
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Kconfig27
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Makefile9
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c262
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h61
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1206
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h285
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c264
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h19
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c235
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h17
-rw-r--r--drivers/media/platform/sti/cec/Makefile1
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c399
-rw-r--r--drivers/media/platform/sti/delta/Makefile6
-rw-r--r--drivers/media/platform/sti/delta/delta-cfg.h64
-rw-r--r--drivers/media/platform/sti/delta/delta-debug.c72
-rw-r--r--drivers/media/platform/sti/delta/delta-debug.h18
-rw-r--r--drivers/media/platform/sti/delta/delta-ipc.c594
-rw-r--r--drivers/media/platform/sti/delta/delta-ipc.h76
-rw-r--r--drivers/media/platform/sti/delta/delta-mem.c51
-rw-r--r--drivers/media/platform/sti/delta/delta-mem.h14
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg-dec.c455
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg-fw.h225
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg-hdr.c149
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg.h35
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c1979
-rw-r--r--drivers/media/platform/sti/delta/delta.h566
-rw-r--r--drivers/media/platform/sti/hva/Makefile3
-rw-r--r--drivers/media/platform/sti/hva/hva-debugfs.c422
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c1061
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c587
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.h45
-rw-r--r--drivers/media/platform/sti/hva/hva-mem.c62
-rw-r--r--drivers/media/platform/sti/hva/hva-mem.h34
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c1474
-rw-r--r--drivers/media/platform/sti/hva/hva.h409
-rw-r--r--drivers/media/platform/stm32/Makefile2
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c357
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c1904
-rw-r--r--drivers/media/platform/tegra-cec/Makefile1
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c499
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.h127
-rw-r--r--drivers/media/platform/ti-vpe/Makefile16
-rw-r--r--drivers/media/platform/ti-vpe/cal.c1934
-rw-r--r--drivers/media/platform/ti-vpe/cal_regs.h479
-rw-r--r--drivers/media/platform/ti-vpe/csc.c204
-rw-r--r--drivers/media/platform/ti-vpe/csc.h68
-rw-r--r--drivers/media/platform/ti-vpe/sc.c311
-rw-r--r--drivers/media/platform/ti-vpe/sc.h211
-rw-r--r--drivers/media/platform/ti-vpe/sc_coeff.h1342
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c1171
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h286
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h641
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2633
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h309
-rw-r--r--drivers/media/platform/via-camera.c1483
-rw-r--r--drivers/media/platform/via-camera.h94
-rw-r--r--drivers/media/platform/vicodec/Kconfig13
-rw-r--r--drivers/media/platform/vicodec/Makefile4
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.c803
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.h129
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c1507
-rw-r--r--drivers/media/platform/video-mux.c425
-rw-r--r--drivers/media/platform/vim2m.c1121
-rw-r--r--drivers/media/platform/vimc/Kconfig15
-rw-r--r--drivers/media/platform/vimc/Makefile11
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c546
-rw-r--r--drivers/media/platform/vimc/vimc-common.c442
-rw-r--r--drivers/media/platform/vimc/vimc-common.h223
-rw-r--r--drivers/media/platform/vimc/vimc-core.c402
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c585
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c437
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c423
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c188
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.h38
-rw-r--r--drivers/media/platform/vivid/Kconfig41
-rw-r--r--drivers/media/platform/vivid/Makefile11
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c287
-rw-r--r--drivers/media/platform/vivid/vivid-cec.h20
-rw-r--r--drivers/media/platform/vivid/vivid-core.c1541
-rw-r--r--drivers/media/platform/vivid/vivid-core.h554
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c1741
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.h22
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c927
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.h14
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c298
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.h14
-rw-r--r--drivers/media/platform/vivid/vivid-osd.c389
-rw-r--r--drivers/media/platform/vivid/vivid-osd.h15
-rw-r--r--drivers/media/platform/vivid/vivid-radio-common.c177
-rw-r--r--drivers/media/platform/vivid/vivid-radio-common.h28
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.c278
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.h19
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.c128
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.h17
-rw-r--r--drivers/media/platform/vivid/vivid-rds-gen.c157
-rw-r--r--drivers/media/platform/vivid/vivid-rds-gen.h42
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c556
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.h24
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c361
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.h28
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-gen.c311
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-gen.h21
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c242
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.h22
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c1865
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.h59
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c869
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.h40
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c1174
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.h44
-rw-r--r--drivers/media/platform/vsp1/Makefile10
-rw-r--r--drivers/media/platform/vsp1/vsp1.h123
-rw-r--r--drivers/media/platform/vsp1/vsp1_brx.c452
-rw-r--r--drivers/media/platform/vsp1/vsp1_brx.h44
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.c284
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.h45
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c1141
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h77
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c948
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h76
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c927
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c691
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h191
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgo.c222
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgo.h41
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgt.c214
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgt.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.c591
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.h77
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c175
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.h34
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c154
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.h33
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c240
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.h42
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c388
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h175
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h851
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c381
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c283
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h88
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c388
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c418
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.h37
-rw-r--r--drivers/media/platform/vsp1/vsp1_uif.c264
-rw-r--r--drivers/media/platform/vsp1/vsp1_uif.h32
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c1353
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h61
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c562
-rw-r--r--drivers/media/platform/xilinx/Kconfig24
-rw-r--r--drivers/media/platform/xilinx/Makefile5
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c769
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h107
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c933
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c323
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h238
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c664
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h49
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c380
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h42
499 files changed, 238257 insertions, 0 deletions
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
new file mode 100644
index 000000000..54fe90acb
--- /dev/null
+++ b/drivers/media/platform/Kconfig
@@ -0,0 +1,643 @@
+#
+# Platform drivers
+# Most drivers here are currently for webcam support
+
+menuconfig V4L_PLATFORM_DRIVERS
+ bool "V4L platform devices"
+ depends on MEDIA_CAMERA_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable support for platform-specific V4L drivers.
+
+if V4L_PLATFORM_DRIVERS
+
+source "drivers/media/platform/marvell-ccic/Kconfig"
+
+config VIDEO_VIA_CAMERA
+ tristate "VIAFB camera controller support"
+ depends on FB_VIA
+ select VIDEOBUF_DMA_SG
+ select VIDEO_OV7670
+ help
+ Driver support for the integrated camera controller in VIA
+ Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
+ with ov7670 sensors.
+
+#
+# Platform multimedia device configuration
+#
+source "drivers/media/platform/cadence/Kconfig"
+
+source "drivers/media/platform/davinci/Kconfig"
+
+source "drivers/media/platform/omap/Kconfig"
+
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on MEDIA_CAMERA_SUPPORT
+ depends on VIDEO_DEV && I2C
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
+config VIDEO_VIU
+ tristate "Freescale VIU Video Driver"
+ depends on VIDEO_V4L2 && (PPC_MPC512x || COMPILE_TEST) && I2C
+ select VIDEOBUF_DMA_CONTIG
+ default y
+ ---help---
+ Support for Freescale VIU video driver. This device captures
+ video data, or overlays video on DIU frame buffer.
+
+ Say Y here if you want to enable VIU device on MPC5121e Rev2+.
+ In doubt, say N.
+
+config VIDEO_MUX
+ tristate "Video Multiplexer"
+ select MULTIPLEXER
+ depends on VIDEO_V4L2 && OF && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+ select REGMAP
+ help
+ This driver provides support for N:1 video bus multiplexers.
+
+config VIDEO_OMAP3
+ tristate "OMAP 3 Camera support"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on (ARCH_OMAP3 && OMAP_IOMMU) || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ select ARM_DMA_USE_IOMMU if OMAP_IOMMU
+ select VIDEOBUF2_DMA_CONTIG
+ select MFD_SYSCON
+ select V4L2_FWNODE
+ ---help---
+ Driver for an OMAP 3 camera controller.
+
+config VIDEO_OMAP3_DEBUG
+ bool "OMAP 3 Camera debug messages"
+ depends on VIDEO_OMAP3
+ ---help---
+ Enable debug messages on OMAP 3 camera controller driver.
+
+config VIDEO_PXA27x
+ tristate "PXA27x Quick Capture Interface driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on PXA27x || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select SG_SPLIT
+ select V4L2_FWNODE
+ ---help---
+ This is a v4l2 driver for the PXA27x Quick Capture Interface.
+
+config VIDEO_QCOM_CAMSS
+ tristate "Qualcomm V4L2 Camera Subsystem driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select V4L2_FWNODE
+
+config VIDEO_S3C_CAMIF
+ tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on PM
+ depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
+ host interface (CAMIF).
+
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
+
+config VIDEO_STM32_DCMI
+ tristate "STM32 Digital Camera Memory Interface (DCMI) support"
+ depends on VIDEO_V4L2 && OF
+ depends on ARCH_STM32 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ ---help---
+ This module makes the STM32 Digital Camera Memory Interface (DCMI)
+ available as a v4l2 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stm32-dcmi.
+
+config VIDEO_RENESAS_CEU
+ tristate "Renesas Capture Engine Unit (CEU) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_SHMOBILE || ARCH_R7S72100 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ ---help---
+ This is a v4l2 driver for the Renesas CEU interface.
+
+source "drivers/media/platform/soc_camera/Kconfig"
+source "drivers/media/platform/exynos4-is/Kconfig"
+source "drivers/media/platform/am437x/Kconfig"
+source "drivers/media/platform/xilinx/Kconfig"
+source "drivers/media/platform/rcar-vin/Kconfig"
+source "drivers/media/platform/atmel/Kconfig"
+
+config VIDEO_TI_CAL
+ tristate "TI CAL (Camera Adaptation Layer) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on SOC_DRA7XX || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ default n
+ ---help---
+ Support for the TI CAL (Camera Adaptation Layer) block
+ found on the DRA72X SoC.
+ In the TI Technical Reference Manual this module is referred to
+ as the Camera Interface Subsystem (CAMSS).
+
+endif # V4L_PLATFORM_DRIVERS
+
+menuconfig V4L_MEM2MEM_DRIVERS
+ bool "Memory-to-memory multimedia devices"
+ depends on VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable selecting drivers for V4L devices that
+ use system memory for both source and destination buffers, as opposed
+ to capture and output drivers, which use memory buffers for just
+ one of those.
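As a concrete illustration of the mem2mem model described above, the sketch below shows how a userspace application would configure both queues of such a device. It is only a hedged example: the device node, resolutions and pixel formats are illustrative assumptions, not taken from any specific driver in this series.

/* Minimal sketch: configure both queues of a V4L2 mem2mem device.
 * The device path and formats are illustrative assumptions. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int m2m_setup(void)
{
	int fd = open("/dev/video0", O_RDWR);
	struct v4l2_format out = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
	struct v4l2_format cap = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

	if (fd < 0)
		return -1;

	/* OUTPUT queue: source buffers that the application feeds in. */
	out.fmt.pix.width = 640;
	out.fmt.pix.height = 480;
	out.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	ioctl(fd, VIDIOC_S_FMT, &out);

	/* CAPTURE queue: processed buffers handed back to the application. */
	cap.fmt.pix.width = 320;
	cap.fmt.pix.height = 240;
	cap.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	ioctl(fd, VIDIOC_S_FMT, &cap);

	/* Buffer allocation (VIDIOC_REQBUFS) and streaming on both queues
	 * would follow; capture-only or output-only drivers use just one. */
	return fd;
}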
+
+if V4L_MEM2MEM_DRIVERS
+
+config VIDEO_CODA
+ tristate "Chips&Media Coda multi-standard codec IP"
+ depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_MXC || COMPILE_TEST)
+ select SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ select GENERIC_ALLOCATOR
+ ---help---
+ Coda is a range of video codec IPs that supports
+ H.264, MPEG-4, and other video formats.
+
+config VIDEO_IMX_VDOA
+ def_tristate VIDEO_CODA if SOC_IMX6Q || COMPILE_TEST
+
+config VIDEO_MEDIATEK_JPEG
+ tristate "Mediatek JPEG Codec driver"
+ depends on MTK_IOMMU_V1 || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ The Mediatek JPEG codec driver provides hardware capability to
+ decode the JPEG format.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-jpeg.
+
+config VIDEO_MEDIATEK_VPU
+ tristate "Mediatek Video Processor Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ ---help---
+ This driver handles downloading the VPU firmware and
+ communicating with the VPU. It supports the hardware video
+ codec embedded in Mediatek's MT8173 SoCs and is able
+ to handle video decoding/encoding in a range of formats.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-vpu.
+
+config VIDEO_MEDIATEK_MDP
+ tristate "Mediatek MDP driver"
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_MEDIATEK_VPU
+ default n
+ ---help---
+ This is a v4l2 driver for the MDP block present in Mediatek
+ MT8173 SoCs. The driver supports scaling and color space conversion.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-mdp.
+
+config VIDEO_MEDIATEK_VCODEC
+ tristate "Mediatek Video Codec driver"
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_MEDIATEK_VPU
+ default n
+ ---help---
+ The Mediatek video codec driver provides hardware capability to
+ encode and decode a range of video formats.
+ This driver relies on the VPU driver to communicate with the VPU.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-vcodec.
+
+config VIDEO_MEM2MEM_DEINTERLACE
+ tristate "Deinterlace support"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Generic deinterlacing V4L2 driver.
+
+config VIDEO_SAMSUNG_S5P_G2D
+ tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
+ 2d graphics accelerator.
+
+config VIDEO_SAMSUNG_S5P_JPEG
+ tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a v4l2 driver for Samsung S5P, EXYNOS3250
+ and EXYNOS4 JPEG codec
+
+config VIDEO_SAMSUNG_S5P_MFC
+ tristate "Samsung S5P MFC Video Codec"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ help
+ MFC 5.1 and 6.x driver for V4L2
+
+config VIDEO_MX2_EMMAPRP
+ tristate "MX2 eMMa-PrP support"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on SOC_IMX27 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ MX2X chips have a PrP that can be used to process buffers from
+ memory to memory. Operations include resizing and format
+ conversion.
+
+config VIDEO_SAMSUNG_EXYNOS_GSC
+ tristate "Samsung Exynos G-Scaler driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for Samsung EXYNOS5 SoC G-Scaler.
+
+config VIDEO_STI_BDISP
+ tristate "STMicroelectronics BDISP 2D blitter driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_STI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This v4l2 mem2mem driver is a 2D blitter for STMicroelectronics SoC.
+
+config VIDEO_STI_HVA
+ tristate "STMicroelectronics HVA multi-format video encoder V4L2 driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_STI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This V4L2 driver enables the HVA (Hardware Video Accelerator) multi-format
+ video encoder of STMicroelectronics SoCs, allowing hardware encoding of
+ raw uncompressed formats into various compressed video bitstream formats.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-hva.
+
+config VIDEO_STI_HVA_DEBUGFS
+ bool "Export STMicroelectronics HVA internals in debugfs"
+ depends on VIDEO_STI_HVA
+ depends on DEBUG_FS
+ help
+ Select this to see information about the internal state and the last
+ operation of STMicroelectronics HVA multi-format video encoder in
+ debugfs.
+
+ Choose N unless you know you need this.
+
+config VIDEO_STI_DELTA
+ tristate "STMicroelectronics DELTA multi-format video decoder V4L2 driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_STI || COMPILE_TEST
+ help
+ This V4L2 driver enables the DELTA multi-format video decoder
+ of the STMicroelectronics STiH4xx SoC series, allowing hardware
+ decoding of various compressed video bitstream formats into
+ raw uncompressed formats.
+
+ Use this option to see the decoders available for such
+ hardware.
+
+ Please note that the driver will only be built if
+ at least one of the DELTA decoders below is selected.
+
+if VIDEO_STI_DELTA
+
+config VIDEO_STI_DELTA_MJPEG
+ bool "STMicroelectronics DELTA MJPEG support"
+ default y
+ help
+ Enables DELTA MJPEG hardware support.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-delta.
+
+config VIDEO_STI_DELTA_DRIVER
+ tristate
+ depends on VIDEO_STI_DELTA
+ depends on VIDEO_STI_DELTA_MJPEG
+ default VIDEO_STI_DELTA_MJPEG
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select RPMSG
+
+endif # VIDEO_STI_DELTA
+
+config VIDEO_SH_VEU
+ tristate "SuperH VEU mem2mem video processing driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Video Engine Unit (VEU) on SuperH and
+ SH-Mobile SoCs.
+
+config VIDEO_RENESAS_FDP1
+ tristate "Renesas Fine Display Processor"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a V4L2 driver for the Renesas Fine Display Processor
+ providing colour space conversion, and de-interlacing features.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_fdp1.
+
+config VIDEO_RENESAS_JPU
+ tristate "Renesas JPEG Processing Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a V4L2 driver for the Renesas JPEG Processing Unit.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_jpu.
+
+config VIDEO_RENESAS_FCP
+ tristate "Renesas Frame Compression Processor"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on OF
+ ---help---
+ This is a driver for the Renesas Frame Compression Processor (FCP).
+ The FCP is a companion module of video processing modules in the
+ Renesas R-Car Gen3 SoCs. It handles memory access for the codec,
+ VSP and FDP modules.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar-fcp.
+
+config VIDEO_RENESAS_VSP1
+ tristate "Renesas VSP1 Video Processing Engine"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ ---help---
+ This is a V4L2 driver for the Renesas VSP1 video processing engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vsp1.
+
+config VIDEO_ROCKCHIP_RGA
+ tristate "Rockchip Raster 2d Graphic Acceleration Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a v4l2 driver for Rockchip SOC RGA 2d graphics accelerator.
+ Rockchip RGA is a separate 2D raster graphic acceleration unit.
+ It accelerates 2D graphics operations, such as point/line drawing,
+ image scaling, rotation, BitBLT, alpha blending and image blur/sharpness.
+
+ To compile this driver as a module choose m here.
+
+config VIDEO_TI_VPE
+ tristate "TI VPE (Video Processing Engine) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on SOC_DRA7XX || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_TI_VPDMA
+ select VIDEO_TI_SC
+ select VIDEO_TI_CSC
+ default n
+ ---help---
+ Support for the TI VPE (Video Processing Engine) block
+ found on the DRA7XX SoC.
+
+config VIDEO_TI_VPE_DEBUG
+ bool "VPE debug messages"
+ depends on VIDEO_TI_VPE
+ ---help---
+ Enable debug messages on VPE driver.
+
+config VIDEO_QCOM_VENUS
+ tristate "Qualcomm Venus V4L2 encoder/decoder driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM if ARCH_QCOM
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a V4L2 driver for Qualcomm Venus video accelerator
+ hardware. It accelerates encoding and decoding operations
+ on various Qualcomm SoCs.
+ To compile this driver as a module choose m here.
+
+endif # V4L_MEM2MEM_DRIVERS
+
+# TI VIDEO PORT Helper Modules
+# These will be selected by VPE and VIP
+config VIDEO_TI_VPDMA
+ tristate
+
+config VIDEO_TI_SC
+ tristate
+
+config VIDEO_TI_CSC
+ tristate
+
+menuconfig V4L_TEST_DRIVERS
+ bool "Media test drivers"
+ depends on MEDIA_CAMERA_SUPPORT
+
+if V4L_TEST_DRIVERS
+
+source "drivers/media/platform/vimc/Kconfig"
+
+source "drivers/media/platform/vivid/Kconfig"
+
+config VIDEO_VIM2M
+ tristate "Virtual Memory-to-Memory Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a virtual test device for the memory-to-memory driver
+ framework.
+
+source "drivers/media/platform/vicodec/Kconfig"
+
+endif #V4L_TEST_DRIVERS
+
+menuconfig DVB_PLATFORM_DRIVERS
+ bool "DVB platform devices"
+ depends on MEDIA_DIGITAL_TV_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable support for platform-specific Digital TV drivers.
+
+if DVB_PLATFORM_DRIVERS
+source "drivers/media/platform/sti/c8sectpfe/Kconfig"
+endif #DVB_PLATFORM_DRIVERS
+
+menuconfig CEC_PLATFORM_DRIVERS
+ bool "CEC platform devices"
+ depends on MEDIA_CEC_SUPPORT
+
+if CEC_PLATFORM_DRIVERS
+
+config VIDEO_CROS_EC_CEC
+ tristate "ChromeOS EC CEC driver"
+ depends on MFD_CROS_EC
+ select CEC_CORE
+ select CEC_NOTIFIER
+ select CHROME_PLATFORMS
+ select CROS_EC_PROTO
+ ---help---
+ If you say yes here you will get support for the
+ ChromeOS Embedded Controller's CEC.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config VIDEO_MESON_AO_CEC
+ tristate "Amlogic Meson AO CEC driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_GPIO
+ tristate "Generic GPIO-based CEC driver"
+ depends on PREEMPT || COMPILE_TEST
+ select CEC_CORE
+ select CEC_PIN
+ select GPIOLIB
+ ---help---
+ This is a generic GPIO-based CEC driver.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config VIDEO_SAMSUNG_S5P_CEC
+ tristate "Samsung S5P CEC driver"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for Samsung S5P HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config VIDEO_STI_HDMI_CEC
+ tristate "STMicroelectronics STiH4xx HDMI CEC driver"
+ depends on ARCH_STI || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for STIH4xx HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config VIDEO_STM32_HDMI_CEC
+ tristate "STMicroelectronics STM32 HDMI CEC driver"
+ depends on ARCH_STM32 || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ select CEC_CORE
+ ---help---
+ This is a driver for the STM32 HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config VIDEO_TEGRA_HDMI_CEC
+ tristate "Tegra HDMI CEC driver"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for the Tegra HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+endif #CEC_PLATFORM_DRIVERS
+
+menuconfig SDR_PLATFORM_DRIVERS
+ bool "SDR platform devices"
+ depends on MEDIA_SDR_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable support for platform-specific SDR Drivers.
+
+if SDR_PLATFORM_DRIVERS
+
+config VIDEO_RCAR_DRIF
+ tristate "Renesas Digitial Radio Interface (DRIF)"
+ depends on VIDEO_V4L2
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_VMALLOC
+ ---help---
+ Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is a Digital
+ Radio Interface that interfaces with an RF front-end chip. It is a
+ receiver of digital data which uses DMA to transfer received data to
+ a configured location for an application to use.
+
+ To compile this driver as a module, choose M here; the module
+ will be called rcar_drif.
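To show what "a configured location for an application to use" amounts to in practice, here is a hedged userspace sketch of the V4L2 SDR capture setup; the device node /dev/swradio0 and the CU8 sample format are assumptions for illustration only.

/* Hedged sketch of V4L2 SDR capture setup; not specific to rcar_drif. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int sdr_setup(void)
{
	int fd = open("/dev/swradio0", O_RDWR);
	struct v4l2_format f = { .type = V4L2_BUF_TYPE_SDR_CAPTURE };

	if (fd < 0)
		return -1;

	/* Request unsigned 8-bit complex (I/Q) samples. */
	f.fmt.sdr.pixelformat = V4L2_SDR_FMT_CU8;
	ioctl(fd, VIDIOC_S_FMT, &f);

	/* The driver then DMAs received samples into buffers requested via
	 * VIDIOC_REQBUFS, which the application dequeues and processes. */
	return fd;
}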
+
+endif # SDR_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
new file mode 100644
index 000000000..41322ab65
--- /dev/null
+++ b/drivers/media/platform/Makefile
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the video capture/playback device drivers.
+#
+
+obj-$(CONFIG_VIDEO_CADENCE) += cadence/
+obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
+
+obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
+obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
+
+obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
+
+obj-$(CONFIG_VIDEO_VIMC) += vimc/
+obj-$(CONFIG_VIDEO_VIVID) += vivid/
+obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
+
+obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
+
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-vpe/
+
+obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
+obj-$(CONFIG_VIDEO_CODA) += coda/
+
+obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
+
+obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+
+obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
+
+obj-$(CONFIG_VIDEO_MUX) += video-mux.o
+
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
+obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS) += exynos4-is/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
+obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc/
+
+obj-$(CONFIG_VIDEO_STI_BDISP) += sti/bdisp/
+obj-$(CONFIG_VIDEO_STI_HVA) += sti/hva/
+obj-$(CONFIG_DVB_C8SECTPFE) += sti/c8sectpfe/
+obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += sti/cec/
+
+obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
+
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra-cec/
+
+obj-y += stm32/
+
+obj-y += davinci/
+
+obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
+
+obj-$(CONFIG_SOC_CAMERA) += soc_camera/
+
+obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
+obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
+obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
+obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
+obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
+obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
+
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip/rga/
+
+obj-y += omap/
+
+obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
+
+obj-$(CONFIG_VIDEO_XILINX) += xilinx/
+
+obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
+
+obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
+obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel/
+
+obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
+
+obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/
+
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
+
+obj-y += meson/
+
+obj-y += cros-ec-cec/
diff --git a/drivers/media/platform/am437x/Kconfig b/drivers/media/platform/am437x/Kconfig
new file mode 100644
index 000000000..f4ce1176e
--- /dev/null
+++ b/drivers/media/platform/am437x/Kconfig
@@ -0,0 +1,12 @@
+config VIDEO_AM437X_VPFE
+ tristate "TI AM437x VPFE video capture driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on SOC_AM43XX || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ Support for AM437x Video Processing Front End based Video
+ Capture Driver.
+
+ To compile this driver as a module, choose M here. The module
+ will be called am437x-vpfe.
diff --git a/drivers/media/platform/am437x/Makefile b/drivers/media/platform/am437x/Makefile
new file mode 100644
index 000000000..d11fff16f
--- /dev/null
+++ b/drivers/media/platform/am437x/Makefile
@@ -0,0 +1,3 @@
+# Makefile for AM437x VPFE driver
+
+obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x-vpfe.o
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
new file mode 100644
index 000000000..809320dec
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -0,0 +1,2776 @@
+/*
+ * TI VPFE capture Driver
+ *
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#include "am437x-vpfe.h"
+
+#define VPFE_MODULE_NAME "vpfe"
+#define VPFE_VERSION "0.1.0"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-8");
+
+#define vpfe_dbg(level, dev, fmt, arg...) \
+ v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
+#define vpfe_info(dev, fmt, arg...) \
+ v4l2_info(&dev->v4l2_dev, fmt, ##arg)
+#define vpfe_err(dev, fmt, arg...) \
+ v4l2_err(&dev->v4l2_dev, fmt, ##arg)
+
+/* standard information */
+struct vpfe_standard {
+ v4l2_std_id std_id;
+ unsigned int width;
+ unsigned int height;
+ struct v4l2_fract pixelaspect;
+ int frame_format;
+};
+
+static const struct vpfe_standard vpfe_standards[] = {
+ {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
+ {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
+};
+
+struct bus_format {
+ unsigned int width;
+ unsigned int bpp;
+};
+
+/*
+ * struct vpfe_fmt - VPFE media bus format information
+ * @name: V4L2 format description
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @code: V4L2 media bus format code
+ * @l: bus width/bytes per pixel used when the bus is 10 bits wide
+ * @s: bus width/bytes per pixel used when the bus is 8 bits wide
+ * @supported: indicates whether the format is supported by the subdev
+ * @index: format index
+ */
+struct vpfe_fmt {
+ const char *name;
+ u32 fourcc;
+ u32 code;
+ struct bus_format l;
+ struct bus_format s;
+ bool supported;
+ u32 index;
+};
+
+static struct vpfe_fmt formats[] = {
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "RAW8 BGGR",
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 GBRG",
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 GRBG",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 RGGB",
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RGB565 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ },
+};
+
+static int
+__vpfe_get_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp);
+
+static struct vpfe_fmt *find_format_by_code(unsigned int code)
+{
+ struct vpfe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->code == code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
+{
+ struct vpfe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static void
+mbus_to_pix(struct vpfe_device *vpfe,
+ const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix, unsigned int *bpp)
+{
+ struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
+ unsigned int bus_width = sdinfo->vpfe_param.bus_width;
+ struct vpfe_fmt *fmt;
+
+ fmt = find_format_by_code(mbus->code);
+ if (WARN_ON(fmt == NULL)) {
+ pr_err("Invalid mbus code set\n");
+ *bpp = 1;
+ return;
+ }
+
+ memset(pix, 0, sizeof(*pix));
+ v4l2_fill_pix_format(pix, mbus);
+ pix->pixelformat = fmt->fourcc;
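+	/* Bytes per pixel depend on the bus width used by the subdev */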
+ *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
+
+	/* pitch should be 32-byte aligned */
+ pix->bytesperline = ALIGN(pix->width * *bpp, 32);
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
+
+static void pix_to_mbus(struct vpfe_device *vpfe,
+ struct v4l2_pix_format *pix_fmt,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct vpfe_fmt *fmt;
+
+ fmt = find_format_by_pix(pix_fmt->pixelformat);
+ if (!fmt) {
+ /* default to first entry */
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ pix_fmt->pixelformat);
+ fmt = &formats[0];
+ }
+
+ memset(mbus_fmt, 0, sizeof(*mbus_fmt));
+ v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
+}
+
+/* Print Four-character-code (FOURCC) */
+static char *print_fourcc(u32 fmt)
+{
+ static char code[5];
+
+ code[0] = (unsigned char)(fmt & 0xff);
+ code[1] = (unsigned char)((fmt >> 8) & 0xff);
+ code[2] = (unsigned char)((fmt >> 16) & 0xff);
+ code[3] = (unsigned char)((fmt >> 24) & 0xff);
+ code[4] = '\0';
+
+ return code;
+}
+
+static int
+cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
+{
+ return lhs->type == rhs->type &&
+ lhs->fmt.pix.width == rhs->fmt.pix.width &&
+ lhs->fmt.pix.height == rhs->fmt.pix.height &&
+ lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
+ lhs->fmt.pix.field == rhs->fmt.pix.field &&
+ lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
+ lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
+ lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
+ lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
+}
+
+static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
+{
+ return ioread32(ccdc->ccdc_cfg.base_addr + offset);
+}
+
+static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
+{
+ iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
+}
+
+static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
+{
+ return container_of(ccdc, struct vpfe_device, ccdc);
+}
+
+static inline
+struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct vpfe_cap_buffer, vb);
+}
+
+static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
+{
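+	/* A non-zero flag enables the CCDC (PCR = 1), zero disables it */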
+ vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
+}
+
+static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
+{
+ unsigned int cfg;
+
+ if (!flag) {
+ cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
+ cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
+ } else {
+ cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
+ }
+
+ vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
+}
+
+static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt,
+ int bpp)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int val, mid_img;
+
+ /*
+	 * ppc - per pixel count. Indicates how many pixels per cell are
+	 * output to SDRAM. For example, for YCbCr it is one Y and one C,
+	 * so 2; for raw capture it is 1.
+ */
+ horz_start = image_win->left * bpp;
+ horz_nr_pixels = (image_win->width * bpp) - 1;
+ vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
+ horz_nr_pixels, VPFE_HORZ_INFO);
+
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+		/* Skip the first line since it doesn't contain any data */
+ vert_start += 1;
+ /* configure VDINT0 */
+ val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
+ } else {
+		/* Skip the first line since it doesn't contain any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /*
+ * configure VDINT0 and VDINT1. VDINT1 will be at half
+ * of image height
+ */
+ mid_img = vert_start + (image_win->height / 2);
+ val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
+ (mid_img & VPFE_VDINT_VDINT1_MASK);
+ }
+
+ vpfe_reg_write(ccdc, val, VPFE_VDINT);
+
+ vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
+ vert_start, VPFE_VERT_START);
+ vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
+}
+
+static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
+
+ vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
+ vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
+ vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
+ vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
+ vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
+ vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
+ vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_SYNMODE));
+ vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
+ vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
+ vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_VERT_START));
+ vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_VERT_LINES));
+}
+
+static int
+vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_config_params_raw *ccdcparam)
+{
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
+ u8 max_gamma, max_data;
+
+ if (!ccdcparam->alaw.enable)
+ return 0;
+
+ max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
+ max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
+
+ if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
+ ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
+ max_gamma > max_data) {
+ vpfe_dbg(1, vpfe, "Invalid data line select\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_config_params_raw *raw_params)
+{
+ struct vpfe_ccdc_config_params_raw *config_params =
+ &ccdc->ccdc_cfg.bayer.config_params;
+
+ *config_params = *raw_params;
+}
+
+/*
+ * vpfe_ccdc_restore_defaults()
+ * This function will write defaults to all CCDC registers
+ */
+static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
+{
+ int i;
+
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+
+ /* set all registers to default value */
+ for (i = 4; i <= 0x94; i += 4)
+ vpfe_reg_write(ccdc, 0, i);
+
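+	/* Disable culling and set the default A-law gamma width */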
+ vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
+ vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
+}
+
+static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
+{
+ int dma_cntl, i, pcr;
+
+	/* If the CCDC module is still busy, wait for it to be done */
+ for (i = 0; i < 10; i++) {
+ usleep_range(5000, 6000);
+ pcr = vpfe_reg_read(ccdc, VPFE_PCR);
+ if (!pcr)
+ break;
+
+		/* make sure it is disabled */
+ vpfe_pcr_enable(ccdc, 0);
+ }
+
+	/* Disable CCDC by resetting all registers to default POR values */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+	/*
+	 * If the DMA_CNTL overflow bit is set, clear it. It appears to
+	 * take a while (~20 ms) for this to become quiescent.
+	 */
+ for (i = 0; i < 10; i++) {
+ dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
+ if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
+ break;
+
+ /* Clear the overflow bit */
+ vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
+ usleep_range(5000, 6000);
+ }
+
+	/* Disable the module at the CONFIG level */
+ vpfe_config_enable(ccdc, 0);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_ccdc_config_params_raw raw_params;
+ int x;
+
+ if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
+ return -EINVAL;
+
+ x = copy_from_user(&raw_params, params, sizeof(raw_params));
+ if (x) {
+ vpfe_dbg(1, vpfe,
+ "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
+ x);
+ return -EFAULT;
+ }
+
+ if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
+ vpfe_ccdc_update_raw_params(ccdc, &raw_params);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * vpfe_ccdc_config_ycbcr()
+ * This function will configure CCDC for YCbCr video capture
+ */
+static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
+ u32 syn_mode;
+
+ vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
+ /*
+	 * First restore the CCDC registers to their default values.
+	 * This is important since we rely on default values being set
+	 * in a lot of registers that we don't touch.
+ */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /*
+ * configure pixel format, frame format, configure video frame
+ * format, enable output to SDRAM, enable internal timing generator
+ * and 8bit pack mode
+ */
+ syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
+ VPFE_SYN_MODE_INPMOD_SHIFT) |
+ ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
+ VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
+ VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);
+
+ /* setup BT.656 sync mode */
+ if (params->bt656_enable) {
+ vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);
+
+ /*
+ * configure the FID, VD, HD pin polarity,
+ * fld,hd pol positive, vd negative, 8-bit data
+ */
+ syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= VPFE_SYN_MODE_10BITS;
+ else
+ syn_mode |= VPFE_SYN_MODE_8BITS;
+ } else {
+ /* y/c external sync mode */
+ syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
+ VPFE_FID_POL_SHIFT) |
+ ((params->hd_pol & VPFE_HD_POL_MASK) <<
+ VPFE_HD_POL_SHIFT) |
+ ((params->vd_pol & VPFE_VD_POL_MASK) <<
+ VPFE_VD_POL_SHIFT));
+ }
+ vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
+
+ /* configure video window */
+ vpfe_ccdc_setwin(ccdc, &params->win,
+ params->frm_fmt, params->bytesperpixel);
+
+ /*
+ * configure the order of y cb cr in SDRAM, and disable latch
+ * internal register on vsync
+ */
+ if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ vpfe_reg_write(ccdc,
+ (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
+ VPFE_LATCH_ON_VSYNC_DISABLE |
+ VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
+ else
+ vpfe_reg_write(ccdc,
+ (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
+ VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
+
+ /*
+	 * Configure the horizontal line offset. This must lie on a
+	 * 32-byte boundary, so the 5 LSBs are kept clear.
+ */
+ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
+
+ /* configure the memory line offset */
+ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+ /* two fields are interleaved in memory */
+ vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
+ VPFE_SDOFST);
+}
+
+static void
+vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_black_clamp *bclamp)
+{
+ u32 val;
+
+ if (!bclamp->enable) {
+ /* configure DCSub */
+ val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
+ vpfe_reg_write(ccdc, val, VPFE_DCSUB);
+ vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
+ return;
+ }
+ /*
+	 * Configure the gain, the start pixel, the number of lines to be
+	 * averaged, the number of pixels/line to be averaged, and enable
+	 * black clamping.
+ */
+ val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
+ ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
+ VPFE_BLK_ST_PXL_SHIFT) |
+ ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
+ VPFE_BLK_SAMPLE_LINE_SHIFT) |
+ ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
+ VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
+ vpfe_reg_write(ccdc, val, VPFE_CLAMP);
+	/* If black clamping is enabled then set DCSub to 0 */
+ vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
+}
+
+static void
+vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_black_compensation *bcomp)
+{
+ u32 val;
+
+ val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
+ ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_GB_COMP_SHIFT) |
+ ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_GR_COMP_SHIFT) |
+ ((bcomp->r & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_R_COMP_SHIFT));
+ vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
+}
+
+/*
+ * vpfe_ccdc_config_raw()
+ * This function will configure CCDC for Raw capture mode
+ */
+static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_ccdc_config_params_raw *config_params =
+ &ccdc->ccdc_cfg.bayer.config_params;
+ struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
+ unsigned int syn_mode;
+ unsigned int val;
+
+ vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
+
+ /* Reset CCDC */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /* Disable latching function registers on VSYNC */
+ vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
+
+ /*
+	 * Configure the vertical sync polarity (SYN_MODE.VDPOL), the
+	 * horizontal sync polarity (SYN_MODE.HDPOL), the field id polarity
+	 * (SYN_MODE.FLDPOL), the frame format (progressive or interlaced),
+	 * the data size (SYN_MODE.DATSIZ) and the pixel format (input mode),
+	 * and enable output to SDRAM and the internal timing generator.
+ */
+ syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
+ ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
+ ((params->fid_pol & VPFE_FID_POL_MASK) <<
+ VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
+ VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
+ ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
+ VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
+ VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
+ VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);
+
+ /* Enable and configure aLaw register if needed */
+ if (config_params->alaw.enable) {
+ val = ((config_params->alaw.gamma_wd &
+ VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
+ vpfe_reg_write(ccdc, val, VPFE_ALAW);
+ vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
+ }
+
+ /* Configure video window */
+ vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
+ params->bytesperpixel);
+
+ /* Configure Black Clamp */
+ vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);
+
+ /* Configure Black level compensation */
+ vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);
+
+ /* If data size is 8 bit then pack the data */
+ if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ syn_mode |= VPFE_DATA_PACK_ENABLE;
+
+ /*
+ * Configure Horizontal offset register. If pack 8 is enabled then
+ * 1 pixel will take 1 byte
+ */
+ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
+
+ vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
+ params->bytesperline, params->bytesperline);
+
+ /* Set value for SDOFST */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_enable) {
+ /* For interlace inverse mode */
+ vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
+ VPFE_SDOFST);
+ } else {
+ /* For interlace non inverse mode */
+ vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
+ VPFE_SDOFST);
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
+ VPFE_SDOFST);
+ }
+
+ vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
+
+ vpfe_reg_dump(ccdc);
+}
+
+static inline int
+vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
+ enum ccdc_buftype buf_type)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc->ccdc_cfg.bayer.buf_type = buf_type;
+ else
+ ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;
+
+ return 0;
+}
+
+static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.buf_type;
+
+ return ccdc->ccdc_cfg.ycbcr.buf_type;
+}
+
+static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+
+ vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
+ ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
+
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ /*
+ * Need to clear it in case it was left on
+ * after the last capture.
+ */
+ ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_SBGGR8:
+ ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
+ break;
+
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_RGB565X:
+ break;
+
+ case V4L2_PIX_FMT_SBGGR16:
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_YUYV:
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ break;
+
+ case V4L2_PIX_FMT_UYVY:
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
+{
+ u32 pixfmt;
+
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ } else {
+ if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+
+ return pixfmt;
+}
+
+static int
+vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *win, unsigned int bpp)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc->ccdc_cfg.bayer.win = *win;
+ ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
+ ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
+ } else {
+ ccdc->ccdc_cfg.ycbcr.win = *win;
+ ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
+ ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
+ }
+
+ return 0;
+}
+
+static inline void
+vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *win)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ *win = ccdc->ccdc_cfg.bayer.win;
+ else
+ *win = ccdc->ccdc_cfg.ycbcr.win;
+}
+
+static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.bytesperline;
+
+ return ccdc->ccdc_cfg.ycbcr.bytesperline;
+}
+
+static inline int
+vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
+ enum ccdc_frmfmt frm_fmt)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+
+ return 0;
+}
+
+static inline enum ccdc_frmfmt
+vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.frm_fmt;
+
+ return ccdc->ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
+{
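+	/* The field id is reported in bit 15 of SYN_MODE */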
+ return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
+}
+
+static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
+{
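+	/* The SDRAM address must be 32-byte aligned; mask off the 5 LSBs */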
+ vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
+}
+
+static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
+ struct vpfe_hw_if_param *params)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+
+ ccdc->ccdc_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_YCBCR_SYNC_16:
+ case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
+ ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+ ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+ break;
+
+ case VPFE_RAW_BAYER:
+ ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
+ ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;
+ if (params->bus_width == 10)
+ ccdc->ccdc_cfg.bayer.config_params.data_sz =
+ VPFE_CCDC_DATA_10BITS;
+ else
+ ccdc->ccdc_cfg.bayer.config_params.data_sz =
+ VPFE_CCDC_DATA_8BITS;
+ vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
+ params->bus_width);
+ vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
+ ccdc->ccdc_cfg.bayer.config_params.data_sz);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
+{
+ unsigned int vpfe_int_status;
+
+ vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
+
+ switch (vdint) {
+ /* VD0 interrupt */
+ case VPFE_VDINT0:
+ vpfe_int_status &= ~VPFE_VDINT0;
+ vpfe_int_status |= VPFE_VDINT0;
+ break;
+
+ /* VD1 interrupt */
+ case VPFE_VDINT1:
+ vpfe_int_status &= ~VPFE_VDINT1;
+ vpfe_int_status |= VPFE_VDINT1;
+ break;
+
+ /* VD2 interrupt */
+ case VPFE_VDINT2:
+ vpfe_int_status &= ~VPFE_VDINT2;
+ vpfe_int_status |= VPFE_VDINT2;
+ break;
+
+ /* Clear all interrupts */
+ default:
+ vpfe_int_status &= ~(VPFE_VDINT0 |
+ VPFE_VDINT1 |
+ VPFE_VDINT2);
+ vpfe_int_status |= (VPFE_VDINT0 |
+ VPFE_VDINT1 |
+ VPFE_VDINT2);
+ break;
+ }
+ /* Clear specific VDINT from the status register */
+ vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);
+
+ vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
+
+ /* Acknowledge that we are done with all interrupts */
+ vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
+}
+
+static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
+{
+ ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;
+
+ ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
+ ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
+ ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;
+
+ ccdc->ccdc_cfg.ycbcr.win.left = 0;
+ ccdc->ccdc_cfg.ycbcr.win.top = 0;
+ ccdc->ccdc_cfg.ycbcr.win.width = 720;
+ ccdc->ccdc_cfg.ycbcr.win.height = 576;
+ ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;
+
+ ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
+
+ ccdc->ccdc_cfg.bayer.win.left = 0;
+ ccdc->ccdc_cfg.bayer.win.top = 0;
+ ccdc->ccdc_cfg.bayer.win.width = 800;
+ ccdc->ccdc_cfg.bayer.win.height = 600;
+ ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
+ ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
+ VPFE_CCDC_GAMMA_BITS_09_0;
+}
+
+/*
+ * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
+ */
+static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
+ struct v4l2_format *f)
+{
+ struct v4l2_rect image_win;
+ enum ccdc_buftype buf_type;
+ enum ccdc_frmfmt frm_fmt;
+
+ memset(f, 0, sizeof(*f));
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
+ f->fmt.pix.width = image_win.width;
+ f->fmt.pix.height = image_win.height;
+ f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
+ f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
+ frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+
+ if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+ } else {
+ vpfe_err(vpfe, "Invalid buf_type\n");
+ return -EINVAL;
+ }
+ } else {
+ vpfe_err(vpfe, "Invalid frm_fmt\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
+{
+ enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ int ret = 0;
+
+ vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
+
+ vpfe_dbg(1, vpfe, "pixelformat: %s\n",
+ print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
+
+ if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
+ vpfe->fmt.fmt.pix.pixelformat) < 0) {
+ vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
+ return -EINVAL;
+ }
+
+ /* configure the image window */
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
+
+ switch (vpfe->fmt.fmt.pix.field) {
+ case V4L2_FIELD_INTERLACED:
+		/* frm_fmt already defaults to interlaced; just set the buffer type */
+ ret = vpfe_ccdc_set_buftype(
+ &vpfe->ccdc,
+ CCDC_BUFTYPE_FLD_INTERLEAVED);
+ break;
+
+ case V4L2_FIELD_NONE:
+ frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+
+ case V4L2_FIELD_SEQ_TB:
+ ret = vpfe_ccdc_set_buftype(
+ &vpfe->ccdc,
+ CCDC_BUFTYPE_FLD_SEPARATED);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
+}
+
+/*
+ * vpfe_config_image_format()
+ * For a given standard, this function sets up the default pix format
+ * and crop values in the vpfe device and ccdc. It first starts with
+ * default values from the standard table, then checks whether the sub
+ * device supports get_fmt and overrides the values based on that. The
+ * crop values are set to match the scan resolution, starting at 0,0.
+ * Finally it calls vpfe_config_ccdc_image_format() to set the values
+ * in the ccdc.
+ */
+static int vpfe_config_image_format(struct vpfe_device *vpfe,
+ v4l2_std_id std_id)
+{
+ struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
+ if (vpfe_standards[i].std_id & std_id) {
+ vpfe->std_info.active_pixels =
+ vpfe_standards[i].width;
+ vpfe->std_info.active_lines =
+ vpfe_standards[i].height;
+ vpfe->std_info.frame_format =
+ vpfe_standards[i].frame_format;
+ vpfe->std_index = i;
+
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vpfe_standards)) {
+ vpfe_err(vpfe, "standard not supported\n");
+ return -EINVAL;
+ }
+
+ vpfe->crop.top = vpfe->crop.left = 0;
+ vpfe->crop.width = vpfe->std_info.active_pixels;
+ vpfe->crop.height = vpfe->std_info.active_lines;
+ pix->width = vpfe->crop.width;
+ pix->height = vpfe->crop.height;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+
+ /* first field and frame format based on standard frame format */
+ if (vpfe->std_info.frame_format)
+ pix->field = V4L2_FIELD_INTERLACED;
+ else
+ pix->field = V4L2_FIELD_NONE;
+
+ ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
+ if (ret)
+ return ret;
+
+ /* Update the crop window based on found values */
+ vpfe->crop.width = pix->width;
+ vpfe->crop.height = pix->height;
+
+ return vpfe_config_ccdc_image_format(vpfe);
+}
+
+static int vpfe_initialize_device(struct vpfe_device *vpfe)
+{
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ sdinfo = &vpfe->cfg->sub_devs[0];
+ sdinfo->sd = vpfe->sd[0];
+ vpfe->current_input = 0;
+ vpfe->std_index = 0;
+ /* Configure the default format information */
+ ret = vpfe_config_image_format(vpfe,
+ vpfe_standards[vpfe->std_index].std_id);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(vpfe->pdev);
+
+ vpfe_config_enable(&vpfe->ccdc, 1);
+
+ vpfe_ccdc_restore_defaults(&vpfe->ccdc);
+
+ /* Clear all VPFE interrupts */
+ vpfe_clear_intr(&vpfe->ccdc, -1);
+
+ return ret;
+}
+
+/*
+ * vpfe_release : This function is based on the vb2_fop_release
+ * helper function.
+ * It has been augmented to handle module power management by
+ * disabling/enabling the h/w module fcntl clock when necessary.
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&vpfe->lock);
+
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ /* the release helper will cleanup any on-going streaming */
+ ret = _vb2_fop_release(file, NULL);
+
+ /*
+	 * If this was the last open file, then de-initialize the
+	 * hw module.
+ */
+ if (fh_singular)
+ vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
+
+ mutex_unlock(&vpfe->lock);
+
+ return ret;
+}
+
+/*
+ * vpfe_open : This function is based on the v4l2_fh_open helper function.
+ * It has been augmented to handle module power management by
+ * disabling/enabling the h/w module fcntl clock when necessary.
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vpfe->lock);
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ vpfe_err(vpfe, "v4l2_fh_open failed\n");
+ goto unlock;
+ }
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ if (vpfe_initialize_device(vpfe)) {
+ v4l2_fh_release(file);
+ ret = -ENODEV;
+ }
+
+unlock:
+ mutex_unlock(&vpfe->lock);
+ return ret;
+}
+
+/**
+ * vpfe_schedule_next_buffer: set next buffer address for capture
+ * @vpfe : ptr to vpfe device
+ *
+ * This function gets the next buffer from the dma queue and sets the
+ * buffer address in the vpfe register for capture. The buffer is
+ * marked active.
+ *
+ * Assumes the caller is already holding vpfe->dma_queue_lock.
+ */
+static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+{
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&vpfe->next_frm->list);
+
+ vpfe_set_sdr_addr(&vpfe->ccdc,
+ vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
+}
+
+static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
+{
+ unsigned long addr;
+
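+	/* The bottom field starts field_off bytes into the current buffer */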
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
+ vpfe->field_off;
+
+ vpfe_set_sdr_addr(&vpfe->ccdc, addr);
+}
+
+/*
+ * vpfe_process_buffer_complete: process a completed buffer
+ * @vpfe : ptr to vpfe device
+ *
+ * This function timestamps the buffer, marks it as DONE, wakes up any
+ * process waiting on the QUEUE and sets the next buffer as the current
+ * one.
+ */
+static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
+{
+ vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+ vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
+ vpfe->cur_frm->vb.sequence = vpfe->sequence++;
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ vpfe->cur_frm = vpfe->next_frm;
+}
+
+/*
+ * vpfe_isr : ISR handler for vpfe capture (VINT0)
+ * @irq: irq number
+ * @dev: dev_id ptr
+ *
+ * It changes the status of the captured buffer, takes the next buffer
+ * from the queue and sets its address in the VPFE registers.
+ */
+static irqreturn_t vpfe_isr(int irq, void *dev)
+{
+ struct vpfe_device *vpfe = (struct vpfe_device *)dev;
+ enum v4l2_field field;
+ int intr_status;
+ int fid;
+
+ intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
+
+ if (intr_status & VPFE_VDINT0) {
+ field = vpfe->fmt.fmt.pix.field;
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+ goto next_intr;
+ }
+
+		/*
+		 * Interlaced or TB capture: check which field we are
+		 * in from the hardware.
+		 */
+ fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+ /* switch the software maintained field id */
+ vpfe->field ^= 1;
+ if (fid == vpfe->field) {
+			/* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the
+ * current frame and move on
+ */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+ /*
+ * based on whether the two fields are stored
+ * interleave or separately in memory,
+ * reconfigure the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe);
+
+ goto next_intr;
+ }
+ /*
+			 * If one field is just being captured, configure
+			 * the next frame: get the next frame from the empty
+			 * queue, and if no frame is available hold on to
+			 * the current buffer.
+ */
+ spin_lock(&vpfe->dma_queue_lock);
+ if (!list_empty(&vpfe->dma_queue) &&
+ vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ spin_unlock(&vpfe->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+			 * Out of sync. Recover from any hardware
+			 * out-of-sync. May lose one frame.
+ */
+ vpfe->field = fid;
+ }
+ }
+
+next_intr:
+ if (intr_status & VPFE_VDINT1) {
+ spin_lock(&vpfe->dma_queue_lock);
+ if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
+ !list_empty(&vpfe->dma_queue) &&
+ vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ spin_unlock(&vpfe->dma_queue_lock);
+ }
+
+ vpfe_clear_intr(&vpfe->ccdc, intr_status);
+
+ return IRQ_HANDLED;
+}
+
+static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
+{
+ unsigned int intr = VPFE_VDINT0;
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ intr |= VPFE_VDINT1;
+
+ vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
+}
+
+static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
+{
+ unsigned int intr = VPFE_VDINT0;
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ intr |= VPFE_VDINT1;
+
+ vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
+}
+
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_querycap\n");
+
+ strlcpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", vpfe->v4l2_dev.name);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+/* get the format set at output pad of the adjacent subdev */
+static int __vpfe_get_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp)
+{
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_subdev_format fmt;
+ int ret;
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+
+ ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ if (!ret) {
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ } else {
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
+ sdinfo->grp_id,
+ pad, get_fmt,
+ NULL, &fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+		v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+		mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ }
+
+ format->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe,
+ "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
+ __func__, format->fmt.pix.width, format->fmt.pix.height,
+ print_fourcc(format->fmt.pix.pixelformat),
+ format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+
+ return 0;
+}
+
+/* set the format at output pad of the adjacent subdev */
+static int __vpfe_set_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp)
+{
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_subdev_format fmt;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+
+ pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
+
+ ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+
+ format->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe,
+ "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
+ __func__, format->fmt.pix.width, format->fmt.pix.height,
+ print_fourcc(format->fmt.pix.pixelformat),
+ format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+
+ return 0;
+}
+
+static int vpfe_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
+
+ *fmt = vpfe->fmt;
+
+ return 0;
+}
+
+static int vpfe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_fmt *fmt = NULL;
+ unsigned int k;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
+ f->index);
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+	if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ if (formats[k].index == f->index) {
+ fmt = &formats[k];
+ break;
+ }
+ }
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ f->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
+ f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);
+
+ return 0;
+}
+
+static int vpfe_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ unsigned int bpp;
+
+ vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
+
+ return __vpfe_get_format(vpfe, fmt, &bpp);
+}
+
+static int vpfe_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_format format;
+ unsigned int bpp;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = __vpfe_get_format(vpfe, &format, &bpp);
+ if (ret)
+ return ret;
+
+ if (!cmp_v4l2_format(fmt, &format)) {
+ /* Sensor format is different from the requested format
+ * so we need to change it
+ */
+ ret = __vpfe_set_format(vpfe, fmt, &bpp);
+ if (ret)
+ return ret;
+ } else /* Just make sure all of the fields are consistent */
+ *fmt = format;
+
+ /* First detach any IRQ if currently attached */
+ vpfe_detach_irq(vpfe);
+ vpfe->fmt = *fmt;
+ vpfe->bpp = bpp;
+
+ /* Update the crop window based on found values */
+ vpfe->crop.width = fmt->fmt.pix.width;
+ vpfe->crop.height = fmt->fmt.pix.height;
+
+ /* set image capture parameters in the ccdc */
+ return vpfe_config_ccdc_image_format(vpfe);
+}
+
+static int vpfe_enum_size(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_subdev_frame_size_enum fse;
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_mbus_framefmt mbus;
+ struct v4l2_pix_format pix;
+ struct vpfe_fmt *fmt;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
+
+ /* check for valid format */
+ fmt = find_format_by_pix(fsize->pixel_format);
+ if (!fmt) {
+		vpfe_dbg(3, vpfe, "Invalid pixel format: %x\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ memset(&pix, 0x0, sizeof(pix));
+ /* Construct pix from parameter and use default for the rest */
+ pix.pixelformat = fsize->pixel_format;
+ pix.width = 640;
+ pix.height = 480;
+ pix.colorspace = V4L2_COLORSPACE_SRGB;
+ pix.field = V4L2_FIELD_NONE;
+ pix_to_mbus(vpfe, &pix, &mbus);
+
+ memset(&fse, 0x0, sizeof(fse));
+ fse.index = fsize->index;
+ fse.pad = 0;
+ fse.code = mbus.code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return -EINVAL;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
+ fsize->index, print_fourcc(fsize->pixel_format),
+ fsize->discrete.width, fsize->discrete.height);
+
+ return 0;
+}
+
+/*
+ * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
+ * given app input index
+ */
+static int
+vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
+ int *subdev_index,
+ int *subdev_input_index,
+ int app_input_index)
+{
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ if (app_input_index < (j + 1)) {
+ *subdev_index = i;
+ *subdev_input_index = app_input_index - j;
+ return 0;
+ }
+ j++;
+ }
+ return -EINVAL;
+}
+
+/*
+ * vpfe_get_app_input_index - Get app input index for a given subdev input
+ * index. The driver stores the input index of the current sub device and
+ * translates it when the application requests the current input.
+ */
+static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
+ int *app_input_index)
+{
+ struct vpfe_config *cfg = vpfe->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ struct i2c_client *client;
+ struct i2c_client *curr_client;
+ int i, j = 0;
+
+ curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ sdinfo = &cfg->sub_devs[i];
+ client = v4l2_get_subdevdata(sdinfo->sd);
+ if (client->addr == curr_client->addr &&
+ client->adapter->nr == curr_client->adapter->nr) {
+ if (vpfe->current_input >= 1)
+ return -1;
+ *app_input_index = j + vpfe->current_input;
+ return 0;
+ }
+ j++;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int subdev, index;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
+
+ if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
+ inp->index) < 0) {
+ vpfe_dbg(1, vpfe,
+ "input information not found for the subdev\n");
+ return -EINVAL;
+ }
+ sdinfo = &vpfe->cfg->sub_devs[subdev];
+ *inp = sdinfo->inputs[index];
+
+ return 0;
+}
+
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_g_input\n");
+
+ return vpfe_get_app_input_index(vpfe, index);
+}
+
+/* Assumes caller is holding vpfe_dev->lock */
+static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
+{
+ int subdev_index = 0, inp_index = 0;
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_route *route;
+ u32 input, output;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+ ret = vpfe_get_subdev_input_index(vpfe,
+ &subdev_index,
+ &inp_index,
+ index);
+ if (ret < 0) {
+ vpfe_err(vpfe, "invalid input index: %d\n", index);
+ goto get_out;
+ }
+
+ sdinfo = &vpfe->cfg->sub_devs[subdev_index];
+ sdinfo->sd = vpfe->sd[subdev_index];
+ route = &sdinfo->routes[inp_index];
+ if (route && sdinfo->can_route) {
+ input = route->input;
+ output = route->output;
+ if (sdinfo->sd) {
+ ret = v4l2_subdev_call(sdinfo->sd, video,
+ s_routing, input, output, 0);
+ if (ret) {
+ vpfe_err(vpfe, "s_routing failed\n");
+ ret = -EINVAL;
+ goto get_out;
+ }
+ }
+ }
+
+ vpfe->current_subdev = sdinfo;
+ if (sdinfo->sd)
+ vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
+ vpfe->current_input = index;
+ vpfe->std_index = 0;
+
+ /* set the bus/interface parameter for the sub device in ccdc */
+ ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
+ if (ret)
+ return ret;
+
+ /* set the default image parameters in the device */
+ return vpfe_config_image_format(vpfe,
+ vpfe_standards[vpfe->std_index].std_id);
+
+get_out:
+ return ret;
+}
+
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe,
+ "vpfe_s_input: index: %d\n", index);
+
+ return vpfe_set_input(vpfe, index);
+}
+
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+
+ vpfe_dbg(2, vpfe, "vpfe_querystd\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
+ /* Call querystd function of decoder device */
+ return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
+ video, querystd, std_id);
+}
+
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_s_std\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
+ /* if trying to set the same std then nothing to do */
+ if (vpfe_standards[vpfe->std_index].std_id == std_id)
+ return 0;
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
+ video, s_std, std_id);
+ if (ret < 0) {
+ vpfe_err(vpfe, "Failed to set standard\n");
+ return ret;
+ }
+ ret = vpfe_config_image_format(vpfe, std_id);
+
+ return ret;
+}
+
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+
+ vpfe_dbg(2, vpfe, "vpfe_g_std\n");
+
+ sdinfo = vpfe->current_subdev;
+	if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
+ *std_id = vpfe_standards[vpfe->std_index].std_id;
+
+ return 0;
+}
+
+/*
+ * vpfe_calculate_offsets : This function calculates the buffer offset
+ * for the top and bottom fields
+ */
+static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
+{
+ struct v4l2_rect image_win;
+
+ vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
+
+ vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
+ vpfe->field_off = image_win.height * image_win.width;
+}
+
+/*
+ * vpfe_queue_setup - Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains number of distinct video planes needed to hold a frame
+ * @sizes[]: contains the size (in bytes) of each plane.
+ * @alloc_devs: ptr to the array of allocation devices
+ *
+ * This callback function is called when reqbufs() is called to adjust
+ * the buffer count and buffer size
+ */
+static int vpfe_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+ unsigned size = vpfe->fmt.fmt.pix.sizeimage;
+
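+	/* Ensure the queue ends up with at least 3 buffers in total */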
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ vpfe_dbg(1, vpfe,
+ "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);
+
+ /* Calculate field offset */
+ vpfe_calculate_offsets(vpfe);
+
+ return 0;
+}
+
+/*
+ * vpfe_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the callback function for buffer prepare when vb2_qbuf()
+ * is called. It sets the plane payload, checks that it fits in the
+ * allocated plane and records the buffer field.
+ */
+static int vpfe_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
+
+ vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
+
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vbuf->field = vpfe->fmt.fmt.pix.field;
+
+ return 0;
+}
+
+/*
+ * vpfe_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ */
+static void vpfe_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
+ unsigned long flags = 0;
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ list_add_tail(&buf->list, &vpfe->dma_queue);
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
+/*
+ * vpfe_start_streaming : Starts the DMA engine for streaming
+ * @vq: ptr to vb2_queue
+ * @count: number of buffers
+ */
+static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+ struct vpfe_cap_buffer *buf, *tmp;
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long flags;
+ unsigned long addr;
+ int ret;
+
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+
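+	/* Reset the field and frame sequence counters for this run */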
+ vpfe->field = 0;
+ vpfe->sequence = 0;
+
+ sdinfo = vpfe->current_subdev;
+
+ vpfe_attach_irq(vpfe);
+
+ if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ vpfe_ccdc_config_raw(&vpfe->ccdc);
+ else
+ vpfe_ccdc_config_ycbcr(&vpfe->ccdc);
+
+ /* Get the next frame from the buffer queue */
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ vpfe->cur_frm = vpfe->next_frm;
+ /* Remove buffer from the buffer queue */
+ list_del(&vpfe->cur_frm->list);
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+
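+	/* Point the CCDC at the first buffer and enable capture */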
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
+
+ vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
+
+ vpfe_pcr_enable(&vpfe->ccdc, 1);
+
+ ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
+ if (ret < 0) {
+		vpfe_err(vpfe, "Error starting the stream on the subdev\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+}
+
+/*
+ * vpfe_stop_streaming : Stop the DMA engine
+ * @vq: ptr to vb2_queue
+ *
+ * This callback stops the DMA engine and any remaining buffers
+ * in the DMA queue are released.
+ */
+static void vpfe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long flags;
+ int ret;
+
+ vpfe_pcr_enable(&vpfe->ccdc, 0);
+
+ vpfe_detach_irq(vpfe);
+
+ sdinfo = vpfe->current_subdev;
+ ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
+
+ /* release all active buffers */
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ if (vpfe->cur_frm == vpfe->next_frm) {
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ } else {
+ if (vpfe->cur_frm != NULL)
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ if (vpfe->next_frm != NULL)
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&vpfe->dma_queue)) {
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&vpfe->next_frm->list);
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
+static int vpfe_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *crop)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_cropcap\n");
+
+ if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
+ return -EINVAL;
+
+ memset(crop, 0, sizeof(struct v4l2_cropcap));
+
+ crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ crop->defrect.width = vpfe_standards[vpfe->std_index].width;
+ crop->bounds.width = crop->defrect.width;
+ crop->defrect.height = vpfe_standards[vpfe->std_index].height;
+ crop->bounds.height = crop->defrect.height;
+ crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;
+
+ return 0;
+}
+
+static int
+vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vpfe->crop.width;
+ s->r.height = vpfe->crop.height;
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ s->r = vpfe->crop;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int
+vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_rect cr = vpfe->crop;
+ struct v4l2_rect r = s->r;
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
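+	/* Align the requested rectangle and clamp it inside the current crop */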
+ v4l_bound_align_image(&r.width, 0, cr.width, 0,
+ &r.height, 0, cr.height, 0, 0);
+
+ r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
+ r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);
+
+ if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
+ return -ERANGE;
+
+ s->r = vpfe->crop = r;
+
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
+ vpfe->fmt.fmt.pix.width = r.width;
+ vpfe->fmt.fmt.pix.height = r.height;
+ vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
+ vpfe->fmt.fmt.pix.height;
+
+ vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
+ r.left, r.top, r.width, r.height, cr.width, cr.height);
+
+ return 0;
+}
+
+static long vpfe_ioctl_default(struct file *file, void *priv,
+ bool valid_prio, unsigned int cmd, void *param)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
+
+ if (!valid_prio) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ switch (cmd) {
+ case VIDIOC_AM437X_CCDC_CFG:
+ ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
+ if (ret) {
+ vpfe_dbg(2, vpfe,
+ "Error setting parameters in CCDC\n");
+ return ret;
+ }
+ ret = vpfe_get_ccdc_image_format(vpfe,
+ &vpfe->fmt);
+ if (ret < 0) {
+ vpfe_dbg(2, vpfe,
+ "Invalid image format at CCDC\n");
+ return ret;
+ }
+ break;
+
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct vb2_ops vpfe_video_qops = {
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .queue_setup = vpfe_queue_setup,
+ .buf_prepare = vpfe_buffer_prepare,
+ .buf_queue = vpfe_buffer_queue,
+ .start_streaming = vpfe_start_streaming,
+ .stop_streaming = vpfe_stop_streaming,
+};
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+ .vidioc_querycap = vpfe_querycap,
+ .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
+ .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
+ .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
+
+ .vidioc_enum_framesizes = vpfe_enum_size,
+
+ .vidioc_enum_input = vpfe_enum_input,
+ .vidioc_g_input = vpfe_g_input,
+ .vidioc_s_input = vpfe_s_input,
+
+ .vidioc_querystd = vpfe_querystd,
+ .vidioc_s_std = vpfe_s_std,
+ .vidioc_g_std = vpfe_g_std,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_cropcap = vpfe_cropcap,
+ .vidioc_g_selection = vpfe_g_selection,
+ .vidioc_s_selection = vpfe_s_selection,
+
+ .vidioc_default = vpfe_ioctl_default,
+};
+
+static int
+vpfe_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
+ struct vpfe_device, v4l2_dev);
+ struct v4l2_subdev_mbus_code_enum mbus_code;
+ struct vpfe_subdev_info *sdinfo;
+ bool found = false;
+ int i, j;
+
+ vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
+
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ if (vpfe->cfg->asd[i]->match.fwnode ==
+ asd[i].match.fwnode) {
+ sdinfo = &vpfe->cfg->sub_devs[i];
+ vpfe->sd[i] = subdev;
+ vpfe->sd[i]->grp_id = sdinfo->grp_id;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
+ return -EINVAL;
+ }
+
+ vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
+
+ /* setup the supported formats & indexes */
+ for (j = 0, i = 0; ; ++j) {
+ struct vpfe_fmt *fmt;
+ int ret;
+
+ memset(&mbus_code, 0, sizeof(mbus_code));
+ mbus_code.index = j;
+ mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code);
+ if (ret)
+ break;
+
+ fmt = find_format_by_code(mbus_code.code);
+ if (!fmt)
+ continue;
+
+ fmt->supported = true;
+ fmt->index = i++;
+ }
+
+ return 0;
+}
+
+static int vpfe_probe_complete(struct vpfe_device *vpfe)
+{
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ int err;
+
+ spin_lock_init(&vpfe->dma_queue_lock);
+ mutex_init(&vpfe->lock);
+
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ /* set first sub device as current one */
+ vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
+ vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;
+
+ err = vpfe_set_input(vpfe, 0);
+ if (err)
+ goto probe_out;
+
+ /* Initialize videobuf2 queue as per the buffer type */
+ q = &vpfe->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = vpfe;
+ q->ops = &vpfe_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &vpfe->lock;
+ q->min_buffers_needed = 1;
+ q->dev = vpfe->pdev;
+
+ err = vb2_queue_init(q);
+ if (err) {
+ vpfe_err(vpfe, "vb2_queue_init() failed\n");
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&vpfe->dma_queue);
+
+ vdev = &vpfe->video_dev;
+ strlcpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &vpfe_fops;
+ vdev->ioctl_ops = &vpfe_ioctl_ops;
+ vdev->v4l2_dev = &vpfe->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &vpfe->lock;
+ video_set_drvdata(vdev, vpfe);
+ err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
+ if (err) {
+ vpfe_err(vpfe,
+ "Unable to register video device.\n");
+ goto probe_out;
+ }
+
+ return 0;
+
+probe_out:
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+ return err;
+}
+
+static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
+ struct vpfe_device, v4l2_dev);
+
+ return vpfe_probe_complete(vpfe);
+}
+
+static const struct v4l2_async_notifier_operations vpfe_async_ops = {
+ .bound = vpfe_async_bound,
+ .complete = vpfe_async_complete,
+};
+
+static struct vpfe_config *
+vpfe_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *endpoint = NULL;
+ struct v4l2_fwnode_endpoint bus_cfg;
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_config *pdata;
+ unsigned int flags;
+ unsigned int i;
+ int err;
+
+ dev_dbg(&pdev->dev, "vpfe_get_pdata\n");
+
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return pdev->dev.platform_data;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ for (i = 0; ; i++) {
+ struct device_node *rem;
+
+ endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
+ endpoint);
+ if (!endpoint)
+ break;
+
+ sdinfo = &pdata->sub_devs[i];
+ sdinfo->grp_id = 0;
+
+ /* we only support camera */
+ sdinfo->inputs[0].index = i;
+ strcpy(sdinfo->inputs[0].name, "Camera");
+ sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
+ sdinfo->inputs[0].std = V4L2_STD_ALL;
+ sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;
+
+ sdinfo->can_route = 0;
+ sdinfo->routes = NULL;
+
+ of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
+ &sdinfo->vpfe_param.if_type);
+ if (sdinfo->vpfe_param.if_type < 0 ||
+ sdinfo->vpfe_param.if_type > 4) {
+ sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
+ }
+
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &bus_cfg);
+ if (err) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ goto done;
+ }
+
+ sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;
+
+ if (sdinfo->vpfe_param.bus_width < 8 ||
+ sdinfo->vpfe_param.bus_width > 16) {
+ dev_err(&pdev->dev, "Invalid bus width.\n");
+ goto done;
+ }
+
+ flags = bus_cfg.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ sdinfo->vpfe_param.hdpol = 1;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ sdinfo->vpfe_param.vdpol = 1;
+
+ rem = of_graph_get_remote_port_parent(endpoint);
+ if (!rem) {
+ dev_err(&pdev->dev, "Remote device at %pOF not found\n",
+ endpoint);
+ goto done;
+ }
+
+ pdata->asd[i] = devm_kzalloc(&pdev->dev,
+ sizeof(struct v4l2_async_subdev),
+ GFP_KERNEL);
+ if (!pdata->asd[i]) {
+ of_node_put(rem);
+ pdata = NULL;
+ goto done;
+ }
+
+ pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ pdata->asd[i]->match.fwnode = of_fwnode_handle(rem);
+ of_node_put(rem);
+ }
+
+ of_node_put(endpoint);
+ return pdata;
+
+done:
+ of_node_put(endpoint);
+ return NULL;
+}
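+
+/*
+ * The endpoints parsed above are expected to look roughly like the device
+ * tree fragment below; the property names follow the code above, while the
+ * labels and values are examples only:
+ *
+ *	port {
+ *		vpfe0_ep: endpoint {
+ *			remote-endpoint = <&sensor_ep>;
+ *			ti,am437x-vpfe-interface = <0>;
+ *			bus-width = <8>;
+ *			hsync-active = <0>;
+ *			vsync-active = <0>;
+ *		};
+ *	};
+ */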
+
+/*
+ * vpfe_probe : This function registers the device with the V4L2
+ * driver and initializes the fields of each device object.
+ */
+static int vpfe_probe(struct platform_device *pdev)
+{
+ struct vpfe_config *vpfe_cfg = vpfe_get_pdata(pdev);
+ struct vpfe_device *vpfe;
+ struct vpfe_ccdc *ccdc;
+ struct resource *res;
+ int ret;
+
+ if (!vpfe_cfg) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
+ if (!vpfe)
+ return -ENOMEM;
+
+ vpfe->pdev = &pdev->dev;
+ vpfe->cfg = vpfe_cfg;
+ ccdc = &vpfe->ccdc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ccdc->ccdc_cfg.base_addr))
+ return PTR_ERR(ccdc->ccdc_cfg.base_addr);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return -ENODEV;
+ }
+ vpfe->irq = ret;
+
+ ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
+ "vpfe_capture0", vpfe);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request interrupt\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
+ if (ret) {
+ vpfe_err(vpfe,
+ "Unable to register v4l2 device.\n");
+ return ret;
+ }
+
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpfe);
+ /* Enabling module functional clock */
+ pm_runtime_enable(&pdev->dev);
+
+ /* for now just enable it here instead of waiting for the open */
+ pm_runtime_get_sync(&pdev->dev);
+
+ vpfe_ccdc_config_defaults(ccdc);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ vpfe->sd = devm_kcalloc(&pdev->dev,
+ ARRAY_SIZE(vpfe->cfg->asd),
+ sizeof(struct v4l2_subdev *),
+ GFP_KERNEL);
+ if (!vpfe->sd) {
+ ret = -ENOMEM;
+ goto probe_out_v4l2_unregister;
+ }
+
+ vpfe->notifier.subdevs = vpfe->cfg->asd;
+ vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
+ vpfe->notifier.ops = &vpfe_async_ops;
+ ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
+ &vpfe->notifier);
+ if (ret) {
+ vpfe_err(vpfe, "Error registering async notifier\n");
+ ret = -EINVAL;
+ goto probe_out_v4l2_unregister;
+ }
+
+ return 0;
+
+probe_out_v4l2_unregister:
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+ return ret;
+}
+
+/*
+ * vpfe_remove : Unregister the device from the V4L2 driver
+ */
+static int vpfe_remove(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+
+ vpfe_dbg(2, vpfe, "vpfe_remove\n");
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_notifier_unregister(&vpfe->notifier);
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+ video_unregister_device(&vpfe->video_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static void vpfe_save_context(struct vpfe_ccdc *ccdc)
+{
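+	/*
+	 * Each register is cached at its word offset: ccdc_ctx[] is indexed
+	 * by (register offset >> 2), matching vpfe_restore_context() below.
+	 */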
+ ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
+ ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
+ ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
+ ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
+ ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
+ ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
+ ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
+ ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
+ ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
+ ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
+ ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
+ ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
+ ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
+ ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HD_VD_WID);
+ ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
+ VPFE_PIX_LINES);
+ ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HORZ_INFO);
+ ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
+ VPFE_VERT_START);
+ ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
+ VPFE_VERT_LINES);
+ ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HSIZE_OFF);
+}
+
+static int vpfe_suspend(struct device *dev)
+{
+ struct vpfe_device *vpfe = dev_get_drvdata(dev);
+ struct vpfe_ccdc *ccdc = &vpfe->ccdc;
+
+ /* if streaming has not started we don't care */
+ if (!vb2_start_streaming_called(&vpfe->buffer_queue))
+ return 0;
+
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
+
+ /* Save VPFE context */
+ vpfe_save_context(ccdc);
+
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+ vpfe_config_enable(ccdc, 0);
+
+ /* Disable both master and slave clock */
+ pm_runtime_put_sync(dev);
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
+{
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
+ VPFE_HD_VD_WID);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
+ VPFE_PIX_LINES);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
+ VPFE_HORZ_INFO);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
+ VPFE_VERT_START);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
+ VPFE_VERT_LINES);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
+ VPFE_HSIZE_OFF);
+}
+
+static int vpfe_resume(struct device *dev)
+{
+ struct vpfe_device *vpfe = dev_get_drvdata(dev);
+ struct vpfe_ccdc *ccdc = &vpfe->ccdc;
+
+ /* if streaming has not started we don't care */
+ if (!vb2_start_streaming_called(&vpfe->buffer_queue))
+ return 0;
+
+ /* Enable both master and slave clock */
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
+
+ /* Restore VPFE context */
+ vpfe_restore_context(ccdc);
+
+ vpfe_config_enable(ccdc, 0);
+ pm_runtime_put_sync(dev);
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);
+
+static const struct of_device_id vpfe_of_match[] = {
+ { .compatible = "ti,am437x-vpfe", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, vpfe_of_match);
+
+static struct platform_driver vpfe_driver = {
+ .probe = vpfe_probe,
+ .remove = vpfe_remove,
+ .driver = {
+ .name = VPFE_MODULE_NAME,
+ .pm = &vpfe_pm_ops,
+ .of_match_table = of_match_ptr(vpfe_of_match),
+ },
+};
+
+module_platform_driver(vpfe_driver);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("TI AM437x VPFE driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPFE_VERSION);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
new file mode 100644
index 000000000..17d7aa426
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef AM437X_VPFE_H
+#define AM437X_VPFE_H
+
+#include <linux/am437x-vpfe.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "am437x-vpfe_regs.h"
+
+enum vpfe_pin_pol {
+ VPFE_PINPOL_POSITIVE = 0,
+ VPFE_PINPOL_NEGATIVE,
+};
+
+enum vpfe_hw_if_type {
+ /* Raw Bayer */
+ VPFE_RAW_BAYER = 0,
+ /* BT656 - 8 bit */
+ VPFE_BT656,
+ /* BT656 - 10 bit */
+ VPFE_BT656_10BIT,
+ /* YCbCr - 8 bit with external sync */
+ VPFE_YCBCR_SYNC_8,
+ /* YCbCr - 16 bit with external sync */
+ VPFE_YCBCR_SYNC_16,
+};
+
+/* interface description */
+struct vpfe_hw_if_param {
+ enum vpfe_hw_if_type if_type;
+ enum vpfe_pin_pol hdpol;
+ enum vpfe_pin_pol vdpol;
+ unsigned int bus_width;
+};
+
+#define VPFE_MAX_SUBDEV 1
+#define VPFE_MAX_INPUTS 1
+
+struct vpfe_pixel_format {
+ struct v4l2_fmtdesc fmtdesc;
+ /* bytes per pixel */
+ int bpp;
+};
+
+struct vpfe_std_info {
+ int active_pixels;
+ int active_lines;
+ /* current frame format */
+ int frame_format;
+};
+
+struct vpfe_route {
+ u32 input;
+ u32 output;
+};
+
+struct vpfe_subdev_info {
+ /* Sub device group id */
+ int grp_id;
+ /* inputs available at the sub device */
+ struct v4l2_input inputs[VPFE_MAX_INPUTS];
+ /* Sub dev routing information for each input */
+ struct vpfe_route *routes;
+ /* check if sub dev supports routing */
+ int can_route;
+ /* ccdc bus/interface configuration */
+ struct vpfe_hw_if_param vpfe_param;
+ struct v4l2_subdev *sd;
+};
+
+struct vpfe_config {
+ /* information about each subdev */
+ struct vpfe_subdev_info sub_devs[VPFE_MAX_SUBDEV];
+ /* Flat array, arranged in groups */
+ struct v4l2_async_subdev *asd[VPFE_MAX_SUBDEV];
+};
+
+struct vpfe_cap_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+enum ccdc_pixfmt {
+ CCDC_PIXFMT_RAW = 0,
+ CCDC_PIXFMT_YCBCR_16BIT,
+ CCDC_PIXFMT_YCBCR_8BIT,
+};
+
+enum ccdc_frmfmt {
+ CCDC_FRMFMT_PROGRESSIVE = 0,
+ CCDC_FRMFMT_INTERLACED,
+};
+
+/* PIXEL ORDER IN MEMORY from LSB to MSB */
+/* only applicable for 8-bit input mode */
+enum ccdc_pixorder {
+ CCDC_PIXORDER_YCBYCR,
+ CCDC_PIXORDER_CBYCRY,
+};
+
+enum ccdc_buftype {
+ CCDC_BUFTYPE_FLD_INTERLEAVED,
+ CCDC_BUFTYPE_FLD_SEPARATED
+};
+
+
+/* returns the highest bit used for the gamma */
+static inline u8 ccdc_gamma_width_max_bit(enum vpfe_ccdc_gamma_width width)
+{
+ return 15 - width;
+}
+
+/* returns the highest bit used for this data size */
+static inline u8 ccdc_data_size_max_bit(enum vpfe_ccdc_data_size sz)
+{
+ return sz == VPFE_CCDC_DATA_8BITS ? 7 : 15 - sz;
+}
+
+/* Structure for CCDC configuration parameters for raw capture mode */
+struct ccdc_params_raw {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ struct v4l2_rect win;
+	/* Bytes per pixel for the current format */
+ unsigned int bytesperpixel;
+	/*
+	 * Bytes per line for the current format
+	 * (aligned to 32 bytes), used for HORZ_INFO
+	 */
+ unsigned int bytesperline;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+ /*
+	 * Enable storing the image in inverse
+	 * order in memory (bottom to top)
+ */
+ unsigned char image_invert_enable;
+ /* configurable parameters */
+ struct vpfe_ccdc_config_params_raw config_params;
+};
+
+struct ccdc_params_ycbcr {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ struct v4l2_rect win;
+	/* Bytes per pixel for the current format */
+ unsigned int bytesperpixel;
+	/*
+	 * Bytes per line for the current format
+	 * (aligned to 32 bytes), used for HORZ_INFO
+	 */
+ unsigned int bytesperline;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* enable BT.656 embedded sync mode */
+ int bt656_enable;
+ /* cb:y:cr:y or y:cb:y:cr in memory */
+ enum ccdc_pixorder pix_order;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+};
+
+/*
+ * CCDC operational configuration
+ */
+struct ccdc_config {
+ /* CCDC interface type */
+ enum vpfe_hw_if_type if_type;
+ /* Raw Bayer configuration */
+ struct ccdc_params_raw bayer;
+ /* YCbCr configuration */
+ struct ccdc_params_ycbcr ycbcr;
+ /* ccdc base address */
+ void __iomem *base_addr;
+};
+
+struct vpfe_ccdc {
+ struct ccdc_config ccdc_cfg;
+ u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)];
+};
+
+struct vpfe_device {
+ /* V4l2 specific parameters */
+ /* Identifies video device for this channel */
+ struct video_device video_dev;
+ /* sub devices */
+ struct v4l2_subdev **sd;
+ /* vpfe cfg */
+ struct vpfe_config *cfg;
+ /* V4l2 device */
+ struct v4l2_device v4l2_dev;
+ /* parent device */
+ struct device *pdev;
+ /* subdevice async Notifier */
+ struct v4l2_async_notifier notifier;
+ /* Indicates id of the field which is being displayed */
+ unsigned field;
+ unsigned sequence;
+ /* current interface type */
+ struct vpfe_hw_if_param vpfe_if_params;
+ /* ptr to currently selected sub device */
+ struct vpfe_subdev_info *current_subdev;
+ /* current input at the sub device */
+ int current_input;
+ /* Keeps track of the information about the standard */
+ struct vpfe_std_info std_info;
+ /* std index into std table */
+ int std_index;
+	/* IRQ used when the CCDC outputs to SDRAM */
+ unsigned int irq;
+	/* Pointer to the current v4l2_buffer */
+	struct vpfe_cap_buffer *cur_frm;
+	/* Pointer to the next v4l2_buffer */
+ struct vpfe_cap_buffer *next_frm;
+ /* Used to store pixel format */
+ struct v4l2_format fmt;
+ /* Used to store current bytes per pixel based on current format */
+ unsigned int bpp;
+ /*
+	 * Used, when the IMP is chained, to store the crop window,
+	 * which is different from the image window
+ */
+ struct v4l2_rect crop;
+ /* Buffer queue used in video-buf */
+ struct vb2_queue buffer_queue;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ /* IRQ lock for DMA queue */
+ spinlock_t dma_queue_lock;
+ /* lock used to access this structure */
+ struct mutex lock;
+ /*
+ * offset where second field starts from the starting of the
+	 * Offset of the second field from the start of the buffer,
+	 * for field-separated YCbCr formats
+ u32 field_off;
+ struct vpfe_ccdc ccdc;
+};
+
+#endif /* AM437X_VPFE_H */
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
new file mode 100644
index 000000000..4a0ed2972
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -0,0 +1,140 @@
+/*
+ * TI AM437x Image Sensor Interface Registers
+ *
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef AM437X_VPFE_REGS_H
+#define AM437X_VPFE_REGS_H
+
+/* VPFE module register offset */
+#define VPFE_REVISION 0x0
+#define VPFE_PCR 0x4
+#define VPFE_SYNMODE 0x8
+#define VPFE_HD_VD_WID 0xc
+#define VPFE_PIX_LINES 0x10
+#define VPFE_HORZ_INFO 0x14
+#define VPFE_VERT_START 0x18
+#define VPFE_VERT_LINES 0x1c
+#define VPFE_CULLING 0x20
+#define VPFE_HSIZE_OFF 0x24
+#define VPFE_SDOFST 0x28
+#define VPFE_SDR_ADDR 0x2c
+#define VPFE_CLAMP 0x30
+#define VPFE_DCSUB 0x34
+#define VPFE_COLPTN 0x38
+#define VPFE_BLKCMP 0x3c
+#define VPFE_VDINT 0x48
+#define VPFE_ALAW 0x4c
+#define VPFE_REC656IF 0x50
+#define VPFE_CCDCFG 0x54
+#define VPFE_DMA_CNTL 0x98
+#define VPFE_SYSCONFIG 0x104
+#define VPFE_CONFIG 0x108
+#define VPFE_IRQ_EOI 0x110
+#define VPFE_IRQ_STS_RAW 0x114
+#define VPFE_IRQ_STS 0x118
+#define VPFE_IRQ_EN_SET 0x11c
+#define VPFE_IRQ_EN_CLR 0x120
+#define VPFE_REG_END 0x124
+
+/* Define bit fields within selected registers */
+#define VPFE_FID_POL_MASK 1
+#define VPFE_FID_POL_SHIFT 4
+#define VPFE_HD_POL_MASK 1
+#define VPFE_HD_POL_SHIFT 3
+#define VPFE_VD_POL_MASK 1
+#define VPFE_VD_POL_SHIFT 2
+#define VPFE_HSIZE_OFF_MASK 0xffffffe0
+#define VPFE_32BYTE_ALIGN_VAL 31
+#define VPFE_FRM_FMT_MASK 0x1
+#define VPFE_FRM_FMT_SHIFT 7
+#define VPFE_DATA_SZ_MASK 7
+#define VPFE_DATA_SZ_SHIFT 8
+#define VPFE_PIX_FMT_MASK 3
+#define VPFE_PIX_FMT_SHIFT 12
+#define VPFE_VP2SDR_DISABLE 0xfffbffff
+#define VPFE_WEN_ENABLE (1 << 17)
+#define VPFE_SDR2RSZ_DISABLE 0xfff7ffff
+#define VPFE_VDHDEN_ENABLE (1 << 16)
+#define VPFE_LPF_ENABLE (1 << 14)
+#define VPFE_ALAW_ENABLE (1 << 3)
+#define VPFE_ALAW_GAMMA_WD_MASK 7
+#define VPFE_BLK_CLAMP_ENABLE (1 << 31)
+#define VPFE_BLK_SGAIN_MASK 0x1f
+#define VPFE_BLK_ST_PXL_MASK 0x7fff
+#define VPFE_BLK_ST_PXL_SHIFT 10
+#define VPFE_BLK_SAMPLE_LN_MASK 7
+#define VPFE_BLK_SAMPLE_LN_SHIFT 28
+#define VPFE_BLK_SAMPLE_LINE_MASK 7
+#define VPFE_BLK_SAMPLE_LINE_SHIFT 25
+#define VPFE_BLK_DC_SUB_MASK 0x03fff
+#define VPFE_BLK_COMP_MASK 0xff
+#define VPFE_BLK_COMP_GB_COMP_SHIFT 8
+#define VPFE_BLK_COMP_GR_COMP_SHIFT 16
+#define VPFE_BLK_COMP_R_COMP_SHIFT 24
+#define VPFE_LATCH_ON_VSYNC_DISABLE (1 << 15)
+#define VPFE_DATA_PACK_ENABLE (1 << 11)
+#define VPFE_HORZ_INFO_SPH_SHIFT 16
+#define VPFE_VERT_START_SLV0_SHIFT 16
+#define VPFE_VDINT_VDINT0_SHIFT 16
+#define VPFE_VDINT_VDINT1_MASK 0xffff
+#define VPFE_PPC_RAW 1
+#define VPFE_DCSUB_DEFAULT_VAL 0
+#define VPFE_CLAMP_DEFAULT_VAL 0
+#define VPFE_COLPTN_VAL 0xbb11bb11
+#define VPFE_TWO_BYTES_PER_PIXEL 2
+#define VPFE_INTERLACED_IMAGE_INVERT 0x4b6d
+#define VPFE_INTERLACED_NO_IMAGE_INVERT 0x0249
+#define VPFE_PROGRESSIVE_IMAGE_INVERT 0x4000
+#define VPFE_PROGRESSIVE_NO_IMAGE_INVERT 0
+#define VPFE_INTERLACED_HEIGHT_SHIFT 1
+#define VPFE_SYN_MODE_INPMOD_SHIFT 12
+#define VPFE_SYN_MODE_INPMOD_MASK 3
+#define VPFE_SYN_MODE_8BITS (7 << 8)
+#define VPFE_SYN_MODE_10BITS (6 << 8)
+#define VPFE_SYN_MODE_11BITS (5 << 8)
+#define VPFE_SYN_MODE_12BITS (4 << 8)
+#define VPFE_SYN_MODE_13BITS (3 << 8)
+#define VPFE_SYN_MODE_14BITS (2 << 8)
+#define VPFE_SYN_MODE_15BITS (1 << 8)
+#define VPFE_SYN_MODE_16BITS (0 << 8)
+#define VPFE_SYN_FLDMODE_MASK 1
+#define VPFE_SYN_FLDMODE_SHIFT 7
+#define VPFE_REC656IF_BT656_EN 3
+#define VPFE_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
+#define VPFE_CCDCFG_Y8POS_SHIFT 11
+#define VPFE_CCDCFG_BW656_10BIT (1 << 5)
+#define VPFE_SDOFST_FIELD_INTERLEAVED 0x249
+#define VPFE_NO_CULLING 0xffff00ff
+#define VPFE_VDINT0 (1 << 0)
+#define VPFE_VDINT1 (1 << 1)
+#define VPFE_VDINT2 (1 << 2)
+#define VPFE_DMA_CNTL_OVERFLOW (1 << 31)
+
+#define VPFE_CONFIG_PCLK_INV_SHIFT 0
+#define VPFE_CONFIG_PCLK_INV_MASK 1
+#define VPFE_CONFIG_PCLK_INV_NOT_INV 0
+#define VPFE_CONFIG_PCLK_INV_INV 1
+#define VPFE_CONFIG_EN_SHIFT 1
+#define VPFE_CONFIG_EN_MASK 2
+#define VPFE_CONFIG_EN_DISABLE 0
+#define VPFE_CONFIG_EN_ENABLE 1
+#define VPFE_CONFIG_ST_SHIFT 2
+#define VPFE_CONFIG_ST_MASK 4
+#define VPFE_CONFIG_ST_OCP_ACTIVE 0
+#define VPFE_CONFIG_ST_OCP_STANDBY 1
+
+#endif /* AM437X_VPFE_REGS_H */
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
new file mode 100644
index 000000000..a211ef20f
--- /dev/null
+++ b/drivers/media/platform/atmel/Kconfig
@@ -0,0 +1,20 @@
+config VIDEO_ATMEL_ISC
+ tristate "ATMEL Image Sensor Controller (ISC) support"
+ depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API
+ depends on ARCH_AT91 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ select V4L2_FWNODE
+ help
+ This module makes the ATMEL Image Sensor Controller available
+ as a v4l2 device.
+
+config VIDEO_ATMEL_ISI
+ tristate "ATMEL Image Sensor Interface (ISI) support"
+ depends on VIDEO_V4L2 && OF
+ depends on ARCH_AT91 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ ---help---
+ This module makes the ATMEL Image Sensor Interface available
+ as a v4l2 device.
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
new file mode 100644
index 000000000..27000d099
--- /dev/null
+++ b/drivers/media/platform/atmel/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
+obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
new file mode 100644
index 000000000..2aadc1923
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ATMEL_ISC_REGS_H
+#define __ATMEL_ISC_REGS_H
+
+#include <linux/bitops.h>
+
+/* ISC Control Enable Register 0 */
+#define ISC_CTRLEN 0x00000000
+
+/* ISC Control Disable Register 0 */
+#define ISC_CTRLDIS 0x00000004
+
+/* ISC Control Status Register 0 */
+#define ISC_CTRLSR 0x00000008
+
+#define ISC_CTRL_CAPTURE BIT(0)
+#define ISC_CTRL_UPPRO BIT(1)
+#define ISC_CTRL_HISREQ BIT(2)
+#define ISC_CTRL_HISCLR BIT(3)
+
+/* ISC Parallel Front End Configuration 0 Register */
+#define ISC_PFE_CFG0 0x0000000c
+
+#define ISC_PFE_CFG0_HPOL_LOW BIT(0)
+#define ISC_PFE_CFG0_VPOL_LOW BIT(1)
+#define ISC_PFE_CFG0_PPOL_LOW BIT(2)
+
+#define ISC_PFE_CFG0_MODE_PROGRESSIVE (0x0 << 4)
+#define ISC_PFE_CFG0_MODE_MASK GENMASK(6, 4)
+
+#define ISC_PFE_CFG0_BPS_EIGHT (0x4 << 28)
+#define ISC_PFG_CFG0_BPS_NINE (0x3 << 28)
+#define ISC_PFG_CFG0_BPS_TEN (0x2 << 28)
+#define ISC_PFG_CFG0_BPS_ELEVEN (0x1 << 28)
+#define ISC_PFG_CFG0_BPS_TWELVE (0x0 << 28)
+#define ISC_PFE_CFG0_BPS_MASK GENMASK(30, 28)
+
+/* ISC Clock Enable Register */
+#define ISC_CLKEN 0x00000018
+
+/* ISC Clock Disable Register */
+#define ISC_CLKDIS 0x0000001c
+
+/* ISC Clock Status Register */
+#define ISC_CLKSR 0x00000020
+#define ISC_CLKSR_SIP BIT(31)
+
+#define ISC_CLK(n) BIT(n)
+
+/* ISC Clock Configuration Register */
+#define ISC_CLKCFG 0x00000024
+#define ISC_CLKCFG_DIV_SHIFT(n) ((n)*16)
+#define ISC_CLKCFG_DIV_MASK(n) GENMASK(((n)*16 + 7), (n)*16)
+#define ISC_CLKCFG_SEL_SHIFT(n) ((n)*16 + 8)
+#define ISC_CLKCFG_SEL_MASK(n) GENMASK(((n)*17 + 8), ((n)*16 + 8))
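+/*
+ * With the shift/mask definitions above, clock index 0 uses DIV bits 7:0
+ * and SEL bit 8, while clock index 1 uses DIV bits 23:16 and SEL bits 25:24.
+ */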
+
+/* ISC Interrupt Enable Register */
+#define ISC_INTEN 0x00000028
+
+/* ISC Interrupt Disable Register */
+#define ISC_INTDIS 0x0000002c
+
+/* ISC Interrupt Mask Register */
+#define ISC_INTMASK 0x00000030
+
+/* ISC Interrupt Status Register */
+#define ISC_INTSR 0x00000034
+
+#define ISC_INT_DDONE BIT(8)
+#define ISC_INT_HISDONE BIT(12)
+
+/* ISC White Balance Control Register */
+#define ISC_WB_CTRL 0x00000058
+
+/* ISC White Balance Configuration Register */
+#define ISC_WB_CFG 0x0000005c
+
+/* ISC White Balance Offset for R, GR Register */
+#define ISC_WB_O_RGR 0x00000060
+
+/* ISC White Balance Offset for B, GB Register */
+#define ISC_WB_O_BGR 0x00000064
+
+/* ISC White Balance Gain for R, GR Register */
+#define ISC_WB_G_RGR 0x00000068
+
+/* ISC White Balance Gain for B, GB Register */
+#define ISC_WB_G_BGR 0x0000006c
+
+/* ISC Color Filter Array Control Register */
+#define ISC_CFA_CTRL 0x00000070
+
+/* ISC Color Filter Array Configuration Register */
+#define ISC_CFA_CFG 0x00000074
+#define ISC_CFA_CFG_EITPOL BIT(4)
+
+#define ISC_BAY_CFG_GRGR 0x0
+#define ISC_BAY_CFG_RGRG 0x1
+#define ISC_BAY_CFG_GBGB 0x2
+#define ISC_BAY_CFG_BGBG 0x3
+
+/* ISC Color Correction Control Register */
+#define ISC_CC_CTRL 0x00000078
+
+/* ISC Color Correction RR RG Register */
+#define ISC_CC_RR_RG 0x0000007c
+
+/* ISC Color Correction RB OR Register */
+#define ISC_CC_RB_OR 0x00000080
+
+/* ISC Color Correction GR GG Register */
+#define ISC_CC_GR_GG 0x00000084
+
+/* ISC Color Correction GB OG Register */
+#define ISC_CC_GB_OG 0x00000088
+
+/* ISC Color Correction BR BG Register */
+#define ISC_CC_BR_BG 0x0000008c
+
+/* ISC Color Correction BB OB Register */
+#define ISC_CC_BB_OB 0x00000090
+
+/* ISC Gamma Correction Control Register */
+#define ISC_GAM_CTRL 0x00000094
+
+/* ISC_Gamma Correction Blue Entry Register */
+#define ISC_GAM_BENTRY 0x00000098
+
+/* ISC_Gamma Correction Green Entry Register */
+#define ISC_GAM_GENTRY 0x00000198
+
+/* ISC_Gamma Correction Red Entry Register */
+#define ISC_GAM_RENTRY 0x00000298
+
+/* Color Space Conversion Control Register */
+#define ISC_CSC_CTRL 0x00000398
+
+/* Color Space Conversion YR YG Register */
+#define ISC_CSC_YR_YG 0x0000039c
+
+/* Color Space Conversion YB OY Register */
+#define ISC_CSC_YB_OY 0x000003a0
+
+/* Color Space Conversion CBR CBG Register */
+#define ISC_CSC_CBR_CBG 0x000003a4
+
+/* Color Space Conversion CBB OCB Register */
+#define ISC_CSC_CBB_OCB 0x000003a8
+
+/* Color Space Conversion CRR CRG Register */
+#define ISC_CSC_CRR_CRG 0x000003ac
+
+/* Color Space Conversion CRB OCR Register */
+#define ISC_CSC_CRB_OCR 0x000003b0
+
+/* Contrast And Brightness Control Register */
+#define ISC_CBC_CTRL 0x000003b4
+
+/* Contrast And Brightness Configuration Register */
+#define ISC_CBC_CFG 0x000003b8
+
+/* Brightness Register */
+#define ISC_CBC_BRIGHT 0x000003bc
+#define ISC_CBC_BRIGHT_MASK GENMASK(10, 0)
+
+/* Contrast Register */
+#define ISC_CBC_CONTRAST 0x000003c0
+#define ISC_CBC_CONTRAST_MASK GENMASK(11, 0)
+
+/* Subsampling 4:4:4 to 4:2:2 Control Register */
+#define ISC_SUB422_CTRL 0x000003c4
+
+/* Subsampling 4:2:2 to 4:2:0 Control Register */
+#define ISC_SUB420_CTRL 0x000003cc
+
+/* Rounding, Limiting and Packing Configuration Register */
+#define ISC_RLP_CFG 0x000003d0
+
+#define ISC_RLP_CFG_MODE_DAT8 0x0
+#define ISC_RLP_CFG_MODE_DAT9 0x1
+#define ISC_RLP_CFG_MODE_DAT10 0x2
+#define ISC_RLP_CFG_MODE_DAT11 0x3
+#define ISC_RLP_CFG_MODE_DAT12 0x4
+#define ISC_RLP_CFG_MODE_DATY8 0x5
+#define ISC_RLP_CFG_MODE_DATY10 0x6
+#define ISC_RLP_CFG_MODE_ARGB444 0x7
+#define ISC_RLP_CFG_MODE_ARGB555 0x8
+#define ISC_RLP_CFG_MODE_RGB565 0x9
+#define ISC_RLP_CFG_MODE_ARGB32 0xa
+#define ISC_RLP_CFG_MODE_YYCC 0xb
+#define ISC_RLP_CFG_MODE_YYCC_LIMITED 0xc
+#define ISC_RLP_CFG_MODE_MASK GENMASK(3, 0)
+
+/* Histogram Control Register */
+#define ISC_HIS_CTRL 0x000003d4
+
+#define ISC_HIS_CTRL_EN BIT(0)
+#define ISC_HIS_CTRL_DIS 0x0
+
+/* Histogram Configuration Register */
+#define ISC_HIS_CFG 0x000003d8
+
+#define ISC_HIS_CFG_MODE_GR 0x0
+#define ISC_HIS_CFG_MODE_R 0x1
+#define ISC_HIS_CFG_MODE_GB 0x2
+#define ISC_HIS_CFG_MODE_B 0x3
+#define ISC_HIS_CFG_MODE_Y 0x4
+#define ISC_HIS_CFG_MODE_RAW 0x5
+#define ISC_HIS_CFG_MODE_YCCIR656 0x6
+
+#define ISC_HIS_CFG_BAYSEL_SHIFT 4
+
+#define ISC_HIS_CFG_RAR BIT(8)
+
+/* DMA Configuration Register */
+#define ISC_DCFG 0x000003e0
+#define ISC_DCFG_IMODE_PACKED8 0x0
+#define ISC_DCFG_IMODE_PACKED16 0x1
+#define ISC_DCFG_IMODE_PACKED32 0x2
+#define ISC_DCFG_IMODE_YC422SP 0x3
+#define ISC_DCFG_IMODE_YC422P 0x4
+#define ISC_DCFG_IMODE_YC420SP 0x5
+#define ISC_DCFG_IMODE_YC420P 0x6
+#define ISC_DCFG_IMODE_MASK GENMASK(2, 0)
+
+#define ISC_DCFG_YMBSIZE_SINGLE (0x0 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS4 (0x1 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS8 (0x2 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS16 (0x3 << 4)
+#define ISC_DCFG_YMBSIZE_MASK GENMASK(5, 4)
+
+#define ISC_DCFG_CMBSIZE_SINGLE (0x0 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS4 (0x1 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS8 (0x2 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS16 (0x3 << 8)
+#define ISC_DCFG_CMBSIZE_MASK GENMASK(9, 8)
+
+/* DMA Control Register */
+#define ISC_DCTRL 0x000003e4
+
+#define ISC_DCTRL_DVIEW_PACKED (0x0 << 1)
+#define ISC_DCTRL_DVIEW_SEMIPLANAR (0x1 << 1)
+#define ISC_DCTRL_DVIEW_PLANAR (0x2 << 1)
+#define ISC_DCTRL_DVIEW_MASK GENMASK(2, 1)
+
+#define ISC_DCTRL_IE_IS (0x0 << 4)
+
+/* DMA Descriptor Address Register */
+#define ISC_DNDA 0x000003e8
+
+/* DMA Address 0 Register */
+#define ISC_DAD0 0x000003ec
+
+/* DMA Address 1 Register */
+#define ISC_DAD1 0x000003f4
+
+/* DMA Address 2 Register */
+#define ISC_DAD2 0x000003fc
+
+/* Histogram Entry */
+#define ISC_HIS_ENTRY 0x00000410
+
+#endif
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
new file mode 100644
index 000000000..1fd078257
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -0,0 +1,2309 @@
+/*
+ * Atmel Image Sensor Controller (ISC) driver
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Songjun Wu <songjun.wu@microchip.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA
+ *
+ * ISC video pipeline integrates the following submodules:
+ * PFE: Parallel Front End to sample the camera sensor input stream
+ * WB: Programmable white balance in the Bayer domain
+ * CFA: Color filter array interpolation module
+ * CC: Programmable color correction
+ * GAM: Gamma correction
+ * CSC: Programmable color space conversion
+ * CBC: Contrast and Brightness control
+ * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
+ * RLP: This module performs rounding, range limiting
+ * and packing of the incoming data
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+
+#define ATMEL_ISC_NAME "atmel_isc"
+
+#define ISC_MAX_SUPPORT_WIDTH 2592
+#define ISC_MAX_SUPPORT_HEIGHT 1944
+
+#define ISC_CLK_MAX_DIV 255
+
+enum isc_clk_id {
+ ISC_ISPCK = 0,
+ ISC_MCK = 1,
+};
+
+struct isc_clk {
+ struct clk_hw hw;
+ struct clk *clk;
+ struct regmap *regmap;
+ spinlock_t lock;
+ u8 id;
+ u8 parent_id;
+ u32 div;
+ struct device *dev;
+};
+
+#define to_isc_clk(hw) container_of(hw, struct isc_clk, hw)
+
+struct isc_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct isc_subdev_entity {
+ struct v4l2_subdev *sd;
+ struct v4l2_async_subdev *asd;
+ struct v4l2_async_notifier notifier;
+
+ u32 pfe_cfg0;
+
+ struct list_head list;
+};
+
+/* Indicate the format is generated by the sensor */
+#define FMT_FLAG_FROM_SENSOR BIT(0)
+/* Indicate the format is produced by ISC itself */
+#define FMT_FLAG_FROM_CONTROLLER BIT(1)
+/* Indicate a Raw Bayer format */
+#define FMT_FLAG_RAW_FORMAT BIT(2)
+
+#define FMT_FLAG_RAW_FROM_SENSOR (FMT_FLAG_FROM_SENSOR | \
+ FMT_FLAG_RAW_FORMAT)
+
+/*
+ * struct isc_format - ISC media bus format information
+ * @fourcc: Fourcc code for this format
+ * @mbus_code: V4L2 media bus format code.
+ * @flags:		Whether the format comes from the sensor and/or can be
+ *			produced by the controller (FMT_FLAG_* bits)
+ * @bpp:		Bits per pixel (when stored in memory)
+ * @sd_support: Subdev supports this format
+ * @isc_support: ISC can convert raw format to this format
+ */
+
+struct isc_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u32 flags;
+ u8 bpp;
+
+ bool sd_support;
+ bool isc_support;
+};
+
+/* Pipeline bitmap */
+#define WB_ENABLE BIT(0)
+#define CFA_ENABLE BIT(1)
+#define CC_ENABLE BIT(2)
+#define GAM_ENABLE BIT(3)
+#define GAM_BENABLE BIT(4)
+#define GAM_GENABLE BIT(5)
+#define GAM_RENABLE BIT(6)
+#define CSC_ENABLE BIT(7)
+#define CBC_ENABLE BIT(8)
+#define SUB422_ENABLE BIT(9)
+#define SUB420_ENABLE BIT(10)
+
+#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
+
+struct fmt_config {
+ u32 fourcc;
+
+ u32 pfe_cfg0_bps;
+ u32 cfa_baycfg;
+ u32 rlp_cfg_mode;
+ u32 dcfg_imode;
+ u32 dctrl_dview;
+
+ u32 bits_pipeline;
+};
+
+#define HIST_ENTRIES 512
+#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
+
+enum {
+ HIST_INIT = 0,
+ HIST_ENABLED,
+ HIST_DISABLED,
+};
+
+struct isc_ctrls {
+ struct v4l2_ctrl_handler handler;
+
+ u32 brightness;
+ u32 contrast;
+ u8 gamma_index;
+ u8 awb;
+
+ u32 r_gain;
+ u32 b_gain;
+
+ u32 hist_entry[HIST_ENTRIES];
+ u32 hist_count[HIST_BAYER];
+ u8 hist_id;
+ u8 hist_stat;
+};
+
+#define ISC_PIPE_LINE_NODE_NUM 11
+
+struct isc_device {
+ struct regmap *regmap;
+ struct clk *hclock;
+ struct clk *ispck;
+ struct isc_clk isc_clks[2];
+
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video_dev;
+
+ struct vb2_queue vb2_vidq;
+ spinlock_t dma_queue_lock;
+ struct list_head dma_queue;
+ struct isc_buffer *cur_frm;
+ unsigned int sequence;
+ bool stop;
+ struct completion comp;
+
+ struct v4l2_format fmt;
+ struct isc_format **user_formats;
+ unsigned int num_user_formats;
+ const struct isc_format *current_fmt;
+ const struct isc_format *raw_fmt;
+
+ struct isc_ctrls ctrls;
+ struct work_struct awb_work;
+
+ struct mutex lock;
+
+ struct regmap_field *pipeline[ISC_PIPE_LINE_NODE_NUM];
+
+ struct isc_subdev_entity *current_subdev;
+ struct list_head subdev_entities;
+};
+
+static struct isc_format formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .mbus_code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .mbus_code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 16,
+ },
+};
+
+static struct fmt_config fmt_configs_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC420P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB420_ENABLE | SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC422P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED32,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
+};
+
+#define GAMMA_MAX 2
+#define GAMMA_ENTRIES 64
+
+/* Gamma correction tables (gamma 1/1.8, 1/2 and 1/2.2) */
+static const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
+ /* 0 --> gamma 1/1.8 */
+ { 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A,
+ 0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012,
+ 0x1940012, 0x1A60012, 0x1B80011, 0x1C90010, 0x1DA0010, 0x1EA000F,
+ 0x1FA000F, 0x209000F, 0x218000F, 0x227000E, 0x235000E, 0x243000E,
+ 0x251000E, 0x25F000D, 0x26C000D, 0x279000D, 0x286000D, 0x293000C,
+ 0x2A0000C, 0x2AC000C, 0x2B8000C, 0x2C4000C, 0x2D0000B, 0x2DC000B,
+ 0x2E7000B, 0x2F3000B, 0x2FE000B, 0x309000B, 0x314000B, 0x31F000A,
+ 0x32A000A, 0x334000B, 0x33F000A, 0x349000A, 0x354000A, 0x35E000A,
+ 0x368000A, 0x372000A, 0x37C000A, 0x386000A, 0x3900009, 0x399000A,
+ 0x3A30009, 0x3AD0009, 0x3B60009, 0x3BF000A, 0x3C90009, 0x3D20009,
+ 0x3DB0009, 0x3E40009, 0x3ED0009, 0x3F60009 },
+
+ /* 1 --> gamma 1/2 */
+ { 0x7F, 0x800034, 0xB50028, 0xDE0021, 0x100001E, 0x11E001B,
+ 0x1390019, 0x1520017, 0x16A0015, 0x1800014, 0x1940014, 0x1A80013,
+ 0x1BB0012, 0x1CD0011, 0x1DF0010, 0x1EF0010, 0x200000F, 0x20F000F,
+ 0x21F000E, 0x22D000F, 0x23C000E, 0x24A000E, 0x258000D, 0x265000D,
+ 0x273000C, 0x27F000D, 0x28C000C, 0x299000C, 0x2A5000C, 0x2B1000B,
+ 0x2BC000C, 0x2C8000B, 0x2D3000C, 0x2DF000B, 0x2EA000A, 0x2F5000A,
+ 0x2FF000B, 0x30A000A, 0x314000B, 0x31F000A, 0x329000A, 0x333000A,
+ 0x33D0009, 0x3470009, 0x350000A, 0x35A0009, 0x363000A, 0x36D0009,
+ 0x3760009, 0x37F0009, 0x3880009, 0x3910009, 0x39A0009, 0x3A30009,
+ 0x3AC0008, 0x3B40009, 0x3BD0008, 0x3C60008, 0x3CE0008, 0x3D60009,
+ 0x3DF0008, 0x3E70008, 0x3EF0008, 0x3F70008 },
+
+ /* 2 --> gamma 1/2.2 */
+ { 0x99, 0x9B0038, 0xD4002A, 0xFF0023, 0x122001F, 0x141001B,
+ 0x15D0019, 0x1760017, 0x18E0015, 0x1A30015, 0x1B80013, 0x1CC0012,
+ 0x1DE0011, 0x1F00010, 0x2010010, 0x2110010, 0x221000F, 0x230000F,
+ 0x23F000E, 0x24D000E, 0x25B000D, 0x269000C, 0x276000C, 0x283000C,
+ 0x28F000C, 0x29B000C, 0x2A7000C, 0x2B3000B, 0x2BF000B, 0x2CA000B,
+ 0x2D5000B, 0x2E0000A, 0x2EB000A, 0x2F5000A, 0x2FF000A, 0x30A000A,
+ 0x3140009, 0x31E0009, 0x327000A, 0x3310009, 0x33A0009, 0x3440009,
+ 0x34D0009, 0x3560009, 0x35F0009, 0x3680008, 0x3710008, 0x3790009,
+ 0x3820008, 0x38A0008, 0x3930008, 0x39B0008, 0x3A30008, 0x3AB0008,
+ 0x3B30008, 0x3BB0008, 0x3C30008, 0x3CB0007, 0x3D20008, 0x3DA0007,
+ 0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 },
+};
+
+static unsigned int sensor_preferred = 1;
+module_param(sensor_preferred, uint, 0644);
+MODULE_PARM_DESC(sensor_preferred,
+ "Sensor is preferred to output the specified format (1-on 0-off), default 1");
+
+static int isc_wait_clk_stable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long timeout = jiffies + usecs_to_jiffies(1000);
+ unsigned int status;
+
+ while (time_before(jiffies, timeout)) {
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (!(status & ISC_CLKSR_SIP))
+ return 0;
+
+ usleep_range(10, 250);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int isc_clk_prepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
+ return isc_wait_clk_stable(hw);
+}
+
+static void isc_clk_unprepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ isc_wait_clk_stable(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+}
+
+static int isc_clk_enable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long flags;
+ unsigned int status;
+
+ dev_dbg(isc_clk->dev, "ISC CLK: %s, div = %d, parent id = %d\n",
+ __func__, isc_clk->div, isc_clk->parent_id);
+
+ spin_lock_irqsave(&isc_clk->lock, flags);
+ regmap_update_bits(regmap, ISC_CLKCFG,
+ ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
+ (isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
+ (isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
+
+ regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
+
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (status & ISC_CLK(id))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static void isc_clk_disable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isc_clk->lock, flags);
+ regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
+}
+
+static int isc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 status;
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
+ regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+
+ return status & ISC_CLK(isc_clk->id) ? 1 : 0;
+}
+
+static unsigned long
+isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
+}
+
+static int isc_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ long best_rate = -EINVAL;
+ int best_diff = -1;
+ unsigned int i, div;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ continue;
+
+ for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
+ unsigned long rate;
+ int diff;
+
+ rate = DIV_ROUND_CLOSEST(parent_rate, div);
+ diff = abs(req->rate - rate);
+
+ if (best_diff < 0 || best_diff > diff) {
+ best_rate = rate;
+ best_diff = diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ if (!best_diff || rate < req->rate)
+ break;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ dev_dbg(isc_clk->dev,
+ "ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+ __func__, best_rate,
+ __clk_get_name((req->best_parent_hw)->clk),
+ req->best_parent_rate);
+
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ isc_clk->parent_id = index;
+
+ return 0;
+}
+
+static u8 isc_clk_get_parent(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return isc_clk->parent_id;
+}
+
+static int isc_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 div;
+
+ if (!rate)
+ return -EINVAL;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div > (ISC_CLK_MAX_DIV + 1) || !div)
+ return -EINVAL;
+
+ isc_clk->div = div - 1;
+
+ return 0;
+}
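+
+/*
+ * Worked example for the divider math above (values are illustrative only):
+ * with a 100 MHz parent and a requested rate of 25 MHz, DIV_ROUND_CLOSEST()
+ * yields div = 4, the stored field becomes div - 1 = 3, and
+ * isc_clk_recalc_rate() then reports parent_rate / (div + 1) = 25 MHz.
+ */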
+
+static const struct clk_ops isc_clk_ops = {
+ .prepare = isc_clk_prepare,
+ .unprepare = isc_clk_unprepare,
+ .enable = isc_clk_enable,
+ .disable = isc_clk_disable,
+ .is_enabled = isc_clk_is_enabled,
+ .recalc_rate = isc_clk_recalc_rate,
+ .determine_rate = isc_clk_determine_rate,
+ .set_parent = isc_clk_set_parent,
+ .get_parent = isc_clk_get_parent,
+ .set_rate = isc_clk_set_rate,
+};
+
+static int isc_clk_register(struct isc_device *isc, unsigned int id)
+{
+ struct regmap *regmap = isc->regmap;
+ struct device_node *np = isc->dev->of_node;
+ struct isc_clk *isc_clk;
+ struct clk_init_data init;
+ const char *clk_name = np->name;
+ const char *parent_names[3];
+ int num_parents;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents < 1 || num_parents > 3)
+ return -EINVAL;
+
+ if (num_parents > 2 && id == ISC_ISPCK)
+ num_parents = 2;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
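+	/*
+	 * Only the master clock (ISC_MCK) takes its name from the device
+	 * tree and is exported as a clock provider below; the internal ISP
+	 * clock always uses the fixed "isc-ispck" name.
+	 */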
+ if (id == ISC_MCK)
+ of_property_read_string(np, "clock-output-names", &clk_name);
+ else
+ clk_name = "isc-ispck";
+
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.name = clk_name;
+ init.ops = &isc_clk_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ isc_clk = &isc->isc_clks[id];
+ isc_clk->hw.init = &init;
+ isc_clk->regmap = regmap;
+ isc_clk->id = id;
+ isc_clk->dev = isc->dev;
+ spin_lock_init(&isc_clk->lock);
+
+ isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
+ if (IS_ERR(isc_clk->clk)) {
+		dev_err(isc->dev, "%s: failed to register clock\n", clk_name);
+ return PTR_ERR(isc_clk->clk);
+ } else if (id == ISC_MCK)
+ of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
+
+ return 0;
+}
+
+static int isc_clk_init(struct isc_device *isc)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
+ isc->isc_clks[i].clk = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ ret = isc_clk_register(isc, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void isc_clk_cleanup(struct isc_device *isc)
+{
+ unsigned int i;
+
+ of_clk_del_provider(isc->dev->of_node);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ struct isc_clk *isc_clk = &isc->isc_clks[i];
+
+ if (!IS_ERR(isc_clk->clk))
+ clk_unregister(isc_clk->clk);
+ }
+}
+
+static int isc_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ unsigned int size = isc->fmt.fmt.pix.sizeimage;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int isc_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = isc->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(&isc->v4l2_dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ vbuf->field = isc->fmt.fmt.pix.field;
+
+ return 0;
+}
+
+static inline bool sensor_is_preferred(const struct isc_format *isc_fmt)
+{
+ return (sensor_preferred && isc_fmt->sd_support) ||
+ !isc_fmt->isc_support;
+}
+
+static struct fmt_config *get_fmt_config(u32 fourcc)
+{
+ struct fmt_config *config;
+ int i;
+
+ config = &fmt_configs_list[0];
+ for (i = 0; i < ARRAY_SIZE(fmt_configs_list); i++) {
+ if (config->fourcc == fourcc)
+ return config;
+
+ config++;
+ }
+ return NULL;
+}
+
+static void isc_start_dma(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ struct v4l2_pix_format *pixfmt = &isc->fmt.fmt.pix;
+ u32 sizeimage = pixfmt->sizeimage;
+ struct fmt_config *config = get_fmt_config(isc->current_fmt->fourcc);
+ u32 dctrl_dview;
+ dma_addr_t addr0;
+
+ addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
+ regmap_write(regmap, ISC_DAD0, addr0);
+
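+	/*
+	 * For the planar YUV formats the chroma planes live in the same
+	 * contiguous buffer: YUV420 puts Cb at 2/3 and Cr at 5/6 of
+	 * sizeimage, while YUV422P puts Cb at 1/2 and Cr at 3/4.
+	 */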
+ switch (pixfmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ regmap_write(regmap, ISC_DAD1, addr0 + (sizeimage * 2) / 3);
+ regmap_write(regmap, ISC_DAD2, addr0 + (sizeimage * 5) / 6);
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ regmap_write(regmap, ISC_DAD1, addr0 + sizeimage / 2);
+ regmap_write(regmap, ISC_DAD2, addr0 + (sizeimage * 3) / 4);
+ break;
+ default:
+ break;
+ }
+
+ if (sensor_is_preferred(isc->current_fmt))
+ dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ else
+ dctrl_dview = config->dctrl_dview;
+
+ regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
+}
+
+static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
+ u32 val, bay_cfg;
+ const u32 *gamma;
+ unsigned int i;
+
+ /* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
+ for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+ val = pipeline & BIT(i) ? 1 : 0;
+ regmap_field_write(isc->pipeline[i], val);
+ }
+
+ if (!pipeline)
+ return;
+
+ bay_cfg = config->cfa_baycfg;
+
+ regmap_write(regmap, ISC_WB_CFG, bay_cfg);
+ regmap_write(regmap, ISC_WB_O_RGR, 0x0);
+ regmap_write(regmap, ISC_WB_O_BGR, 0x0);
+ regmap_write(regmap, ISC_WB_G_RGR, ctrls->r_gain | (0x1 << 25));
+ regmap_write(regmap, ISC_WB_G_BGR, ctrls->b_gain | (0x1 << 25));
+
+ regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL);
+
+ gamma = &isc_gamma_table[ctrls->gamma_index][0];
+ regmap_bulk_write(regmap, ISC_GAM_BENTRY, gamma, GAMMA_ENTRIES);
+ regmap_bulk_write(regmap, ISC_GAM_GENTRY, gamma, GAMMA_ENTRIES);
+ regmap_bulk_write(regmap, ISC_GAM_RENTRY, gamma, GAMMA_ENTRIES);
+
+ /* Convert RGB to YUV */
+ regmap_write(regmap, ISC_CSC_YR_YG, 0x42 | (0x81 << 16));
+ regmap_write(regmap, ISC_CSC_YB_OY, 0x19 | (0x10 << 16));
+ regmap_write(regmap, ISC_CSC_CBR_CBG, 0xFDA | (0xFB6 << 16));
+ regmap_write(regmap, ISC_CSC_CBB_OCB, 0x70 | (0x80 << 16));
+ regmap_write(regmap, ISC_CSC_CRR_CRG, 0x70 | (0xFA2 << 16));
+ regmap_write(regmap, ISC_CSC_CRB_OCR, 0xFEE | (0x80 << 16));
+
+ regmap_write(regmap, ISC_CBC_BRIGHT, ctrls->brightness);
+ regmap_write(regmap, ISC_CBC_CONTRAST, ctrls->contrast);
+}
+
+static int isc_update_profile(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 sr;
+ int counter = 100;
+
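+	/*
+	 * Request a profile update and poll ISC_CTRLSR until the hardware
+	 * clears the UPPRO bit, for at most 100 iterations (~100-200 ms).
+	 */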
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO);
+
+ regmap_read(regmap, ISC_CTRLSR, &sr);
+ while ((sr & ISC_CTRL_UPPRO) && counter--) {
+ usleep_range(1000, 2000);
+ regmap_read(regmap, ISC_CTRLSR, &sr);
+ }
+
+ if (counter < 0) {
+		v4l2_warn(&isc->v4l2_dev, "Timeout to update profile\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void isc_set_histogram(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
+
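+	/*
+	 * With auto white balance enabled, start histogram acquisition on
+	 * the red channel and enable the HISDONE interrupt; with it
+	 * disabled, stop the histogram unit and mask the interrupt.
+	 */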
+ if (ctrls->awb && (ctrls->hist_stat != HIST_ENABLED)) {
+ regmap_write(regmap, ISC_HIS_CFG,
+ ISC_HIS_CFG_MODE_R |
+ (config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
+ ISC_HIS_CFG_RAR);
+ regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
+ regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
+ ctrls->hist_id = ISC_HIS_CFG_MODE_R;
+ isc_update_profile(isc);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
+
+ ctrls->hist_stat = HIST_ENABLED;
+ } else if (!ctrls->awb && (ctrls->hist_stat != HIST_DISABLED)) {
+ regmap_write(regmap, ISC_INTDIS, ISC_INT_HISDONE);
+ regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_DIS);
+
+ ctrls->hist_stat = HIST_DISABLED;
+ }
+}
+
+static inline void isc_get_param(const struct isc_format *fmt,
+ u32 *rlp_mode, u32 *dcfg)
+{
+ struct fmt_config *config = get_fmt_config(fmt->fourcc);
+
+ *dcfg = ISC_DCFG_YMBSIZE_BEATS8;
+
+ switch (fmt->fourcc) {
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ *rlp_mode = config->rlp_cfg_mode;
+ *dcfg |= config->dcfg_imode;
+ break;
+ default:
+ *rlp_mode = ISC_RLP_CFG_MODE_DAT8;
+ *dcfg |= ISC_DCFG_IMODE_PACKED8;
+ break;
+ }
+}
+
+static int isc_configure(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ const struct isc_format *current_fmt = isc->current_fmt;
+ struct fmt_config *curfmt_config = get_fmt_config(current_fmt->fourcc);
+ struct fmt_config *rawfmt_config = get_fmt_config(isc->raw_fmt->fourcc);
+ struct isc_subdev_entity *subdev = isc->current_subdev;
+ u32 pfe_cfg0, rlp_mode, dcfg, mask, pipeline;
+
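+	/*
+	 * If the sensor already provides the requested format, bypass the
+	 * internal processing pipeline; otherwise configure the pipeline to
+	 * convert the sensor's raw Bayer data into the requested format.
+	 */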
+ if (sensor_is_preferred(current_fmt)) {
+ pfe_cfg0 = curfmt_config->pfe_cfg0_bps;
+ pipeline = 0x0;
+ isc_get_param(current_fmt, &rlp_mode, &dcfg);
+ isc->ctrls.hist_stat = HIST_INIT;
+ } else {
+ pfe_cfg0 = rawfmt_config->pfe_cfg0_bps;
+ pipeline = curfmt_config->bits_pipeline;
+ rlp_mode = curfmt_config->rlp_cfg_mode;
+ dcfg = curfmt_config->dcfg_imode |
+ ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
+ }
+
+ pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
+ mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
+ ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
+ ISC_PFE_CFG0_MODE_MASK;
+
+ regmap_update_bits(regmap, ISC_PFE_CFG0, mask, pfe_cfg0);
+
+ regmap_update_bits(regmap, ISC_RLP_CFG, ISC_RLP_CFG_MODE_MASK,
+ rlp_mode);
+
+ regmap_write(regmap, ISC_DCFG, dcfg);
+
+ /* Set the pipeline */
+ isc_set_pipeline(isc, pipeline);
+
+ if (pipeline)
+ isc_set_histogram(isc);
+
+ /* Update profile */
+ return isc_update_profile(isc);
+}
+
+static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ struct regmap *regmap = isc->regmap;
+ struct isc_buffer *buf;
+ unsigned long flags;
+ int ret;
+
+ /* Enable stream on the sub device */
+ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ v4l2_err(&isc->v4l2_dev, "stream on failed in subdev\n");
+ goto err_start_stream;
+ }
+
+ pm_runtime_get_sync(isc->dev);
+
+ ret = isc_configure(isc);
+ if (unlikely(ret))
+ goto err_configure;
+
+ /* Enable DMA interrupt */
+ regmap_write(regmap, ISC_INTEN, ISC_INT_DDONE);
+
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+
+ isc->sequence = 0;
+ isc->stop = false;
+ reinit_completion(&isc->comp);
+
+ isc->cur_frm = list_first_entry(&isc->dma_queue,
+ struct isc_buffer, list);
+ list_del(&isc->cur_frm->list);
+
+ isc_start_dma(isc);
+
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+ return 0;
+
+err_configure:
+ pm_runtime_put_sync(isc->dev);
+
+ v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+
+err_start_stream:
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ list_for_each_entry(buf, &isc->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+ return ret;
+}
+
+static void isc_stop_streaming(struct vb2_queue *vq)
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ unsigned long flags;
+ struct isc_buffer *buf;
+ int ret;
+
+ isc->stop = true;
+
+ /* Wait until the end of the current frame */
+ if (isc->cur_frm && !wait_for_completion_timeout(&isc->comp, 5 * HZ))
+ v4l2_err(&isc->v4l2_dev,
+			"Timeout waiting for the end of the capture\n");
+
+ /* Disable DMA interrupt */
+ regmap_write(isc->regmap, ISC_INTDIS, ISC_INT_DDONE);
+
+ pm_runtime_put_sync(isc->dev);
+
+ /* Disable stream on the sub device */
+ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ v4l2_err(&isc->v4l2_dev, "stream off failed in subdev\n");
+
+ /* Release all active buffers */
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ if (unlikely(isc->cur_frm)) {
+ vb2_buffer_done(&isc->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ isc->cur_frm = NULL;
+ }
+ list_for_each_entry(buf, &isc->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static void isc_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct isc_buffer *buf = container_of(vbuf, struct isc_buffer, vb);
+ struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ if (!isc->cur_frm && list_empty(&isc->dma_queue) &&
+ vb2_is_streaming(vb->vb2_queue)) {
+ isc->cur_frm = buf;
+ isc_start_dma(isc);
+ } else
+ list_add_tail(&buf->list, &isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static const struct vb2_ops isc_vb2_ops = {
+ .queue_setup = isc_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = isc_buffer_prepare,
+ .start_streaming = isc_start_streaming,
+ .stop_streaming = isc_stop_streaming,
+ .buf_queue = isc_buffer_queue,
+};
+
+static int isc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ strcpy(cap->driver, ATMEL_ISC_NAME);
+ strcpy(cap->card, "Atmel Image Sensor Controller");
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", isc->v4l2_dev.name);
+
+ return 0;
+}
+
+static int isc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+ u32 index = f->index;
+
+ if (index >= isc->num_user_formats)
+ return -EINVAL;
+
+ f->pixelformat = isc->user_formats[index]->fourcc;
+
+ return 0;
+}
+
+static int isc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ *fmt = isc->fmt;
+
+ return 0;
+}
+
+static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = isc->num_user_formats;
+ struct isc_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = isc->user_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
+ struct isc_format **current_fmt, u32 *code)
+{
+ struct isc_format *isc_fmt;
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ u32 mbus_code;
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ isc_fmt = find_format_by_fourcc(isc, pixfmt->pixelformat);
+ if (!isc_fmt) {
+ v4l2_warn(&isc->v4l2_dev, "Format 0x%x not found\n",
+ pixfmt->pixelformat);
+ isc_fmt = isc->user_formats[isc->num_user_formats - 1];
+ pixfmt->pixelformat = isc_fmt->fourcc;
+ }
+
+ /* Limit to Atmel ISC hardware capabilities */
+ if (pixfmt->width > ISC_MAX_SUPPORT_WIDTH)
+ pixfmt->width = ISC_MAX_SUPPORT_WIDTH;
+ if (pixfmt->height > ISC_MAX_SUPPORT_HEIGHT)
+ pixfmt->height = ISC_MAX_SUPPORT_HEIGHT;
+
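+	/*
+	 * If the sensor can output the requested format directly, negotiate
+	 * that media bus code with it; otherwise ask for the sensor's raw
+	 * Bayer code and let the ISC pipeline do the conversion.
+	 */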
+ if (sensor_is_preferred(isc_fmt))
+ mbus_code = isc_fmt->mbus_code;
+ else
+ mbus_code = isc->raw_fmt->mbus_code;
+
+ v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
+ &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pixfmt, &format.format);
+
+ pixfmt->field = V4L2_FIELD_NONE;
+ pixfmt->bytesperline = (pixfmt->width * isc_fmt->bpp) >> 3;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ if (current_fmt)
+ *current_fmt = isc_fmt;
+
+ if (code)
+ *code = mbus_code;
+
+ return 0;
+}
+
+static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct isc_format *current_fmt;
+ u32 mbus_code;
+ int ret;
+
+ ret = isc_try_fmt(isc, f, &current_fmt, &mbus_code);
+ if (ret)
+ return ret;
+
+ v4l2_fill_mbus_format(&format.format, &f->fmt.pix, mbus_code);
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ isc->fmt = *f;
+ isc->current_fmt = current_fmt;
+
+ return 0;
+}
+
+static int isc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ if (vb2_is_streaming(&isc->vb2_vidq))
+ return -EBUSY;
+
+ return isc_set_fmt(isc, f);
+}
+
+static int isc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ return isc_try_fmt(isc, f, NULL, NULL);
+}
+
+static int isc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = 0;
+ strcpy(inp->name, "Camera");
+
+ return 0;
+}
+
+static int isc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int isc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int isc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), isc->current_subdev->sd, a);
+}
+
+static int isc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), isc->current_subdev->sd, a);
+}
+
+static int isc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct isc_device *isc = video_drvdata(file);
+ const struct isc_format *isc_fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isc_fmt = find_format_by_fourcc(isc, fsize->pixel_format);
+ if (!isc_fmt)
+ return -EINVAL;
+
+ if (sensor_is_preferred(isc_fmt))
+ fse.code = isc_fmt->mbus_code;
+ else
+ fse.code = isc->raw_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int isc_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct isc_device *isc = video_drvdata(file);
+ const struct isc_format *isc_fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isc_fmt = find_format_by_fourcc(isc, fival->pixel_format);
+ if (!isc_fmt)
+ return -EINVAL;
+
+ if (sensor_is_preferred(isc_fmt))
+ fie.code = isc_fmt->mbus_code;
+ else
+ fie.code = isc->raw_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
+ enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isc_ioctl_ops = {
+ .vidioc_querycap = isc_querycap,
+ .vidioc_enum_fmt_vid_cap = isc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = isc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = isc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = isc_try_fmt_vid_cap,
+
+ .vidioc_enum_input = isc_enum_input,
+ .vidioc_g_input = isc_g_input,
+ .vidioc_s_input = isc_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_parm = isc_g_parm,
+ .vidioc_s_parm = isc_s_parm,
+ .vidioc_enum_framesizes = isc_enum_framesizes,
+ .vidioc_enum_frameintervals = isc_enum_frameintervals,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int isc_open(struct file *file)
+{
+ struct isc_device *isc = video_drvdata(file);
+ struct v4l2_subdev *sd = isc->current_subdev->sd;
+ int ret;
+
+ if (mutex_lock_interruptible(&isc->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ v4l2_fh_release(file);
+ goto unlock;
+ }
+
+ ret = isc_set_fmt(isc, &isc->fmt);
+ if (ret) {
+ v4l2_subdev_call(sd, core, s_power, 0);
+ v4l2_fh_release(file);
+ }
+
+unlock:
+ mutex_unlock(&isc->lock);
+ return ret;
+}
+
+static int isc_release(struct file *file)
+{
+ struct isc_device *isc = video_drvdata(file);
+ struct v4l2_subdev *sd = isc->current_subdev->sd;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&isc->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&isc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations isc_fops = {
+ .owner = THIS_MODULE,
+ .open = isc_open,
+ .release = isc_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+static irqreturn_t isc_interrupt(int irq, void *dev_id)
+{
+ struct isc_device *isc = (struct isc_device *)dev_id;
+ struct regmap *regmap = isc->regmap;
+ u32 isc_intsr, isc_intmask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ regmap_read(regmap, ISC_INTSR, &isc_intsr);
+ regmap_read(regmap, ISC_INTMASK, &isc_intmask);
+
+ pending = isc_intsr & isc_intmask;
+
+ if (likely(pending & ISC_INT_DDONE)) {
+ spin_lock(&isc->dma_queue_lock);
+ if (isc->cur_frm) {
+ struct vb2_v4l2_buffer *vbuf = &isc->cur_frm->vb;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vbuf->sequence = isc->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ isc->cur_frm = NULL;
+ }
+
+ if (!list_empty(&isc->dma_queue) && !isc->stop) {
+ isc->cur_frm = list_first_entry(&isc->dma_queue,
+ struct isc_buffer, list);
+ list_del(&isc->cur_frm->list);
+
+ isc_start_dma(isc);
+ }
+
+ if (isc->stop)
+ complete(&isc->comp);
+
+ ret = IRQ_HANDLED;
+ spin_unlock(&isc->dma_queue_lock);
+ }
+
+ if (pending & ISC_INT_HISDONE) {
+ schedule_work(&isc->awb_work);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void isc_hist_count(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 *hist_count = &ctrls->hist_count[ctrls->hist_id];
+ u32 *hist_entry = &ctrls->hist_entry[0];
+ u32 i;
+
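+	/*
+	 * Read every histogram bin and accumulate a weighted sum
+	 * (bin index * bin count) for the currently selected Bayer channel.
+	 */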
+ regmap_bulk_read(regmap, ISC_HIS_ENTRY, hist_entry, HIST_ENTRIES);
+
+ *hist_count = 0;
+ for (i = 0; i < HIST_ENTRIES; i++)
+ *hist_count += i * (*hist_entry++);
+}
+
+static void isc_wb_update(struct isc_ctrls *ctrls)
+{
+ u32 *hist_count = &ctrls->hist_count[0];
+ u64 g_count = (u64)hist_count[ISC_HIS_CFG_MODE_GB] << 9;
+ u32 hist_r = hist_count[ISC_HIS_CFG_MODE_R];
+ u32 hist_b = hist_count[ISC_HIS_CFG_MODE_B];
+
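+	/*
+	 * Compute the red and blue gains from the green/red and green/blue
+	 * histogram ratios; shifting the green count left by 9 keeps the
+	 * quotient in the gain's fixed-point format (1 << 9 is unity gain).
+	 */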
+ if (hist_r)
+ ctrls->r_gain = div_u64(g_count, hist_r);
+
+ if (hist_b)
+ ctrls->b_gain = div_u64(g_count, hist_b);
+}
+
+static void isc_awb_work(struct work_struct *w)
+{
+ struct isc_device *isc =
+ container_of(w, struct isc_device, awb_work);
+ struct regmap *regmap = isc->regmap;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 hist_id = ctrls->hist_id;
+ u32 baysel;
+
+ if (ctrls->hist_stat != HIST_ENABLED)
+ return;
+
+ isc_hist_count(isc);
+
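+	/*
+	 * Step the histogram through the Bayer channels, starting from the
+	 * red one; once the blue channel has been measured, recompute the
+	 * white balance gains and start over from red.
+	 */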
+ if (hist_id != ISC_HIS_CFG_MODE_B) {
+ hist_id++;
+ } else {
+ isc_wb_update(ctrls);
+ hist_id = ISC_HIS_CFG_MODE_R;
+ }
+
+ ctrls->hist_id = hist_id;
+ baysel = config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+
+ pm_runtime_get_sync(isc->dev);
+
+ regmap_write(regmap, ISC_HIS_CFG, hist_id | baysel | ISC_HIS_CFG_RAR);
+ isc_update_profile(isc);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
+
+ pm_runtime_put_sync(isc->dev);
+}
+
+static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isc_device *isc = container_of(ctrl->handler,
+ struct isc_device, ctrls.handler);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrls->brightness = ctrl->val & ISC_CBC_BRIGHT_MASK;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrls->contrast = ctrl->val & ISC_CBC_CONTRAST_MASK;
+ break;
+ case V4L2_CID_GAMMA:
+ ctrls->gamma_index = ctrl->val;
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrls->awb = ctrl->val;
+ if (ctrls->hist_stat != HIST_ENABLED) {
+ ctrls->r_gain = 0x1 << 9;
+ ctrls->b_gain = 0x1 << 9;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops isc_ctrl_ops = {
+ .s_ctrl = isc_s_ctrl,
+};
+
+static int isc_ctrl_init(struct isc_device *isc)
+{
+ const struct v4l2_ctrl_ops *ops = &isc_ctrl_ops;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ ctrls->hist_stat = HIST_INIT;
+
+ ret = v4l2_ctrl_handler_init(hdl, 4);
+ if (ret < 0)
+ return ret;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 2);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+
+ v4l2_ctrl_handler_setup(hdl);
+
+ return 0;
+}
+
+
+static int isc_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ struct isc_subdev_entity *subdev_entity =
+ container_of(notifier, struct isc_subdev_entity, notifier);
+
+ if (video_is_registered(&isc->video_dev)) {
+ v4l2_err(&isc->v4l2_dev, "only supports one sub-device.\n");
+ return -EBUSY;
+ }
+
+ subdev_entity->sd = subdev;
+
+ return 0;
+}
+
+static void isc_async_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ cancel_work_sync(&isc->awb_work);
+ video_unregister_device(&isc->video_dev);
+ v4l2_ctrl_handler_free(&isc->ctrls.handler);
+}
+
+static struct isc_format *find_format_by_code(unsigned int code, int *index)
+{
+ struct isc_format *fmt = &formats_list[0];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats_list); i++) {
+ if (fmt->mbus_code == code) {
+ *index = i;
+ return fmt;
+ }
+
+ fmt++;
+ }
+
+ return NULL;
+}
+
+static int isc_formats_init(struct isc_device *isc)
+{
+ struct isc_format *fmt;
+ struct v4l2_subdev *subdev = isc->current_subdev->sd;
+ unsigned int num_fmts, i, j;
+ u32 list_size = ARRAY_SIZE(formats_list);
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
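+	/*
+	 * Enumerate the sensor's media bus codes, mark the matching entries
+	 * of formats_list as supported by the subdevice and remember the raw
+	 * Bayer format used by the internal pipeline.
+	 */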
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ mbus_code.index++;
+
+ fmt = find_format_by_code(mbus_code.code, &i);
+ if ((!fmt) || (!(fmt->flags & FMT_FLAG_FROM_SENSOR)))
+ continue;
+
+ fmt->sd_support = true;
+
+ if (fmt->flags & FMT_FLAG_RAW_FORMAT)
+ isc->raw_fmt = fmt;
+ }
+
+ fmt = &formats_list[0];
+ for (i = 0; i < list_size; i++) {
+ if (fmt->flags & FMT_FLAG_FROM_CONTROLLER)
+ fmt->isc_support = true;
+
+ fmt++;
+ }
+
+ fmt = &formats_list[0];
+ num_fmts = 0;
+ for (i = 0; i < list_size; i++) {
+ if (fmt->isc_support || fmt->sd_support)
+ num_fmts++;
+
+ fmt++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ isc->num_user_formats = num_fmts;
+ isc->user_formats = devm_kcalloc(isc->dev,
+ num_fmts, sizeof(*isc->user_formats),
+ GFP_KERNEL);
+ if (!isc->user_formats)
+ return -ENOMEM;
+
+ fmt = &formats_list[0];
+ for (i = 0, j = 0; i < list_size; i++) {
+ if (fmt->isc_support || fmt->sd_support)
+ isc->user_formats[j++] = fmt;
+
+ fmt++;
+ }
+
+ return 0;
+}
+
+static int isc_set_default_fmt(struct isc_device *isc)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = isc->user_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = isc_try_fmt(isc, &f, NULL, NULL);
+ if (ret)
+ return ret;
+
+ isc->current_fmt = isc->user_formats[0];
+ isc->fmt = f;
+
+ return 0;
+}
+
+static int isc_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ struct video_device *vdev = &isc->video_dev;
+ struct vb2_queue *q = &isc->vb2_vidq;
+ int ret;
+
+ INIT_WORK(&isc->awb_work, isc_awb_work);
+
+ ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ isc->current_subdev = container_of(notifier,
+ struct isc_subdev_entity, notifier);
+ mutex_init(&isc->lock);
+ init_completion(&isc->comp);
+
+ /* Initialize videobuf2 queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = isc;
+ q->buf_struct_size = sizeof(struct isc_buffer);
+ q->ops = &isc_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &isc->lock;
+ q->min_buffers_needed = 1;
+ q->dev = isc->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "vb2_queue_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Init video dma queues */
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_lock_init(&isc->dma_queue_lock);
+
+ ret = isc_formats_init(isc);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "Init format failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = isc_set_default_fmt(isc);
+ if (ret) {
+ v4l2_err(&isc->v4l2_dev, "Could not set default format\n");
+ return ret;
+ }
+
+ ret = isc_ctrl_init(isc);
+ if (ret) {
+		v4l2_err(&isc->v4l2_dev, "Init isc controls failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Register video device */
+ strlcpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &isc_fops;
+ vdev->ioctl_ops = &isc_ioctl_ops;
+ vdev->v4l2_dev = &isc->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &isc->lock;
+ vdev->ctrl_handler = &isc->ctrls.handler;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ video_set_drvdata(vdev, isc);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations isc_async_ops = {
+ .bound = isc_async_bound,
+ .unbind = isc_async_unbind,
+ .complete = isc_async_complete,
+};
+
+static void isc_subdev_cleanup(struct isc_device *isc)
+{
+ struct isc_subdev_entity *subdev_entity;
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list)
+ v4l2_async_notifier_unregister(&subdev_entity->notifier);
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+}
+
+static int isc_pipeline_init(struct isc_device *isc)
+{
+ struct device *dev = isc->dev;
+ struct regmap *regmap = isc->regmap;
+ struct regmap_field *regs;
+ unsigned int i;
+
+ /* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
+ const struct reg_field regfields[ISC_PIPE_LINE_NODE_NUM] = {
+ REG_FIELD(ISC_WB_CTRL, 0, 0),
+ REG_FIELD(ISC_CFA_CTRL, 0, 0),
+ REG_FIELD(ISC_CC_CTRL, 0, 0),
+ REG_FIELD(ISC_GAM_CTRL, 0, 0),
+ REG_FIELD(ISC_GAM_CTRL, 1, 1),
+ REG_FIELD(ISC_GAM_CTRL, 2, 2),
+ REG_FIELD(ISC_GAM_CTRL, 3, 3),
+ REG_FIELD(ISC_CSC_CTRL, 0, 0),
+ REG_FIELD(ISC_CBC_CTRL, 0, 0),
+ REG_FIELD(ISC_SUB422_CTRL, 0, 0),
+ REG_FIELD(ISC_SUB420_CTRL, 0, 0),
+ };
+
+ for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+ regs = devm_regmap_field_alloc(dev, regmap, regfields[i]);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ isc->pipeline[i] = regs;
+ }
+
+ return 0;
+}
+
+static int isc_parse_dt(struct device *dev, struct isc_device *isc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *epn = NULL, *rem;
+ struct v4l2_fwnode_endpoint v4l2_epn;
+ struct isc_subdev_entity *subdev_entity;
+ unsigned int flags;
+ int ret;
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+
+ while (1) {
+ epn = of_graph_get_next_endpoint(np, epn);
+ if (!epn)
+ return 0;
+
+ rem = of_graph_get_remote_port_parent(epn);
+ if (!rem) {
+ dev_notice(dev, "Remote device at %pOF not found\n",
+ epn);
+ continue;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
+ &v4l2_epn);
+ if (ret) {
+ of_node_put(rem);
+ ret = -EINVAL;
+ dev_err(dev, "Could not parse the endpoint\n");
+ break;
+ }
+
+ subdev_entity = devm_kzalloc(dev,
+ sizeof(*subdev_entity), GFP_KERNEL);
+ if (!subdev_entity) {
+ of_node_put(rem);
+ ret = -ENOMEM;
+ break;
+ }
+
+ /* asd will be freed by the subsystem once it's added to the
+ * notifier list
+ */
+ subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
+ GFP_KERNEL);
+ if (!subdev_entity->asd) {
+ of_node_put(rem);
+ ret = -ENOMEM;
+ break;
+ }
+
+ flags = v4l2_epn.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+
+ subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ subdev_entity->asd->match.fwnode =
+ of_fwnode_handle(rem);
+ list_add_tail(&subdev_entity->list, &isc->subdev_entities);
+ }
+
+ of_node_put(epn);
+ return ret;
+}
+
+/* regmap configuration */
+#define ATMEL_ISC_REG_MAX 0xbfc
+static const struct regmap_config isc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ATMEL_ISC_REG_MAX,
+};
+
+static int atmel_isc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct isc_device *isc;
+ struct resource *res;
+ void __iomem *io_base;
+ struct isc_subdev_entity *subdev_entity;
+ int irq;
+ int ret;
+
+ isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
+ if (!isc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, isc);
+ isc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ isc->regmap = devm_regmap_init_mmio(dev, io_base, &isc_regmap_config);
+ if (IS_ERR(isc->regmap)) {
+ ret = PTR_ERR(isc->regmap);
+ dev_err(dev, "failed to init register map: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(dev, "failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, irq, isc_interrupt, 0,
+ ATMEL_ISC_NAME, isc);
+ if (ret < 0) {
+ dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
+ irq, ret);
+ return ret;
+ }
+
+ ret = isc_pipeline_init(isc);
+ if (ret)
+ return ret;
+
+ isc->hclock = devm_clk_get(dev, "hclock");
+ if (IS_ERR(isc->hclock)) {
+ ret = PTR_ERR(isc->hclock);
+ dev_err(dev, "failed to get hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = isc_clk_init(isc);
+ if (ret) {
+ dev_err(dev, "failed to init isc clock: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
+	/* ispck should be greater than or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto unprepare_clk;
+ }
+
+ ret = v4l2_device_register(dev, &isc->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "unable to register v4l2 device.\n");
+ goto unprepare_clk;
+ }
+
+ ret = isc_parse_dt(dev, isc);
+ if (ret) {
+		dev_err(dev, "failed to parse device tree\n");
+ goto unregister_v4l2_device;
+ }
+
+ if (list_empty(&isc->subdev_entities)) {
+ dev_err(dev, "no subdev found\n");
+ ret = -ENODEV;
+ goto unregister_v4l2_device;
+ }
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ subdev_entity->notifier.subdevs = &subdev_entity->asd;
+ subdev_entity->notifier.num_subdevs = 1;
+ subdev_entity->notifier.ops = &isc_async_ops;
+
+ ret = v4l2_async_notifier_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
+ if (ret) {
+			dev_err(dev, "failed to register async notifier\n");
+ kfree(subdev_entity->asd);
+ goto cleanup_subdev;
+ }
+
+ if (video_is_registered(&isc->video_dev))
+ break;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_request_idle(dev);
+
+ return 0;
+
+cleanup_subdev:
+ isc_subdev_cleanup(isc);
+
+unregister_v4l2_device:
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
+ isc_clk_cleanup(isc);
+
+ return ret;
+}
+
+static int atmel_isc_remove(struct platform_device *pdev)
+{
+ struct isc_device *isc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ isc_subdev_cleanup(isc);
+
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+ isc_clk_cleanup(isc);
+
+ return 0;
+}
+
+static int __maybe_unused isc_runtime_suspend(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ return 0;
+}
+
+static int __maybe_unused isc_runtime_resume(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(isc->ispck);
+}
+
+static const struct dev_pm_ops atmel_isc_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL)
+};
+
+static const struct of_device_id atmel_isc_of_match[] = {
+ { .compatible = "atmel,sama5d2-isc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atmel_isc_of_match);
+
+static struct platform_driver atmel_isc_driver = {
+ .probe = atmel_isc_probe,
+ .remove = atmel_isc_remove,
+ .driver = {
+ .name = ATMEL_ISC_NAME,
+ .pm = &atmel_isc_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_isc_of_match),
+ },
+};
+
+module_platform_driver(atmel_isc_driver);
+
+MODULE_AUTHOR("Songjun Wu <songjun.wu@microchip.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
new file mode 100644
index 000000000..1a0e5233a
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (c) 2011 Atmel Corporation
+ * Josh Wu, <josh.wu@atmel.com>
+ *
+ * Based on previous work by Lars Haring, <lars.haring@atmel.com>
+ * and Sedji Gaouaou
+ * Based on the bttv driver for Bt848 with respective copyright holders
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-image-sizes.h>
+
+#include "atmel-isi.h"
+
+#define MAX_SUPPORT_WIDTH 2048U
+#define MAX_SUPPORT_HEIGHT 2048U
+#define MIN_FRAME_RATE 15
+#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
+
+/* Frame buffer descriptor */
+struct fbd {
+ /* Physical address of the frame buffer */
+ u32 fb_address;
+	/* DMA Control Register (only in HISI2) */
+ u32 dma_ctrl;
+ /* Physical address of the next fbd */
+ u32 next_fbd_address;
+};
+
+static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
+{
+ fb_desc->dma_ctrl = ctrl;
+}
+
+struct isi_dma_desc {
+ struct list_head list;
+ struct fbd *p_fbd;
+ dma_addr_t fbd_phys;
+};
+
+/* Frame buffer data */
+struct frame_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct isi_dma_desc *p_dma_desc;
+ struct list_head list;
+};
+
+struct isi_graph_entity {
+ struct device_node *node;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+};
+
+/*
+ * struct isi_format - ISI media bus format information
+ * @fourcc: Fourcc code for this format
+ * @mbus_code: V4L2 media bus format code.
+ * @bpp: Bytes per pixel (when stored in memory)
+ * @swap: Byte swap configuration value
+ * @support: Indicates format supported by subdev
+ * @skip: Skip duplicate format supported by subdev
+ */
+struct isi_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u8 bpp;
+ u32 swap;
+};
+
+
+struct atmel_isi {
+ /* Protects the access of variables shared with the ISR */
+ spinlock_t irqlock;
+ struct device *dev;
+ void __iomem *regs;
+
+ int sequence;
+
+ /* Allocate descriptors for dma buffer use */
+ struct fbd *p_fb_descriptors;
+ dma_addr_t fb_descriptors_phys;
+ struct list_head dma_desc_head;
+ struct isi_dma_desc dma_desc[VIDEO_MAX_FRAME];
+ bool enable_preview_path;
+
+ struct completion complete;
+	/* ISI peripheral clock */
+ struct clk *pclk;
+ unsigned int irq;
+
+ struct isi_platform_data pdata;
+ u16 width_flags; /* max 12 bits */
+
+ struct list_head video_buffer_list;
+ struct frame_buffer *active;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct v4l2_async_notifier notifier;
+ struct isi_graph_entity entity;
+ struct v4l2_format fmt;
+
+ const struct isi_format **user_formats;
+ unsigned int num_user_formats;
+ const struct isi_format *current_fmt;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+};
+
+#define notifier_to_isi(n) container_of(n, struct atmel_isi, notifier)
+
+static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
+{
+ writel(val, isi->regs + reg);
+}
+static u32 isi_readl(struct atmel_isi *isi, u32 reg)
+{
+ return readl(isi->regs + reg);
+}
+
+static void configure_geometry(struct atmel_isi *isi)
+{
+ u32 cfg2, psize;
+ u32 fourcc = isi->current_fmt->fourcc;
+
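+	/*
+	 * The RGB output formats are produced by the ISI preview path;
+	 * everything else goes through the codec path.
+	 */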
+ isi->enable_preview_path = fourcc == V4L2_PIX_FMT_RGB565 ||
+ fourcc == V4L2_PIX_FMT_RGB32;
+
+	/* Set cfg2 according to the sensor's output format */
+ cfg2 = isi->current_fmt->swap;
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ /* Set width */
+ cfg2 |= ((isi->fmt.fmt.pix.width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
+ ISI_CFG2_IM_HSIZE_MASK;
+ /* Set height */
+ cfg2 |= ((isi->fmt.fmt.pix.height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
+ & ISI_CFG2_IM_VSIZE_MASK;
+ isi_writel(isi, ISI_CFG2, cfg2);
+
+	/* No downsampling, preview size equals sensor output size */
+ psize = ((isi->fmt.fmt.pix.width - 1) << ISI_PSIZE_PREV_HSIZE_OFFSET) &
+ ISI_PSIZE_PREV_HSIZE_MASK;
+ psize |= ((isi->fmt.fmt.pix.height - 1) << ISI_PSIZE_PREV_VSIZE_OFFSET) &
+ ISI_PSIZE_PREV_VSIZE_MASK;
+ isi_writel(isi, ISI_PSIZE, psize);
+ isi_writel(isi, ISI_PDECF, ISI_PDECF_NO_SAMPLING);
+}
+
+static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
+{
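+	/*
+	 * Complete the buffer that just finished and, if more buffers are
+	 * queued, reload the DMA channel with the next one.
+	 */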
+ if (isi->active) {
+ struct vb2_v4l2_buffer *vbuf = &isi->active->vb;
+ struct frame_buffer *buf = isi->active;
+
+ list_del_init(&buf->list);
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vbuf->sequence = isi->sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ if (list_empty(&isi->video_buffer_list)) {
+ isi->active = NULL;
+ } else {
+ /* start next dma frame. */
+ isi->active = list_entry(isi->video_buffer_list.next,
+ struct frame_buffer, list);
+ if (!isi->enable_preview_path) {
+ isi_writel(isi, ISI_DMA_C_DSCR,
+ (u32)isi->active->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ } else {
+ isi_writel(isi, ISI_DMA_P_DSCR,
+ (u32)isi->active->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_P_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* ISI interrupt service routine */
+static irqreturn_t isi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_isi *isi = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&isi->irqlock);
+
+ status = isi_readl(isi, ISI_STATUS);
+ mask = isi_readl(isi, ISI_INTMASK);
+ pending = status & mask;
+
+ if (pending & ISI_CTRL_SRST) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
+ ret = IRQ_HANDLED;
+ } else if (pending & ISI_CTRL_DIS) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
+ ret = IRQ_HANDLED;
+ } else {
+ if (likely(pending & ISI_SR_CXFR_DONE) ||
+ likely(pending & ISI_SR_PXFR_DONE))
+ ret = atmel_isi_handle_streaming(isi);
+ }
+
+ spin_unlock(&isi->irqlock);
+ return ret;
+}
+
+#define WAIT_ISI_RESET 1
+#define WAIT_ISI_DISABLE 0
+static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
+{
+ unsigned long timeout;
+ /*
+ * The reset or disable will only succeed if we have a
+ * pixel clock from the camera.
+ */
+ init_completion(&isi->complete);
+
+ if (wait_reset) {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
+ } else {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ }
+
+ timeout = wait_for_completion_timeout(&isi->complete,
+ msecs_to_jiffies(500));
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct atmel_isi *isi = vb2_get_drv_priv(vq);
+ unsigned long size;
+
+ size = isi->fmt.fmt.pix.sizeimage;
+
+ /* Make sure the image size is large enough. */
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ isi->active = NULL;
+
+ dev_dbg(isi->dev, "%s, count=%d, size=%ld\n", __func__,
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+
+ buf->p_dma_desc = NULL;
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+ struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+ struct isi_dma_desc *desc;
+
+ size = isi->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(isi->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
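+	/*
+	 * If the buffer has no DMA descriptor yet, take a free one from the
+	 * pool and point it at the buffer's DMA address.
+	 */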
+ if (!buf->p_dma_desc) {
+ if (list_empty(&isi->dma_desc_head)) {
+ dev_err(isi->dev, "Not enough dma descriptors.\n");
+ return -EINVAL;
+ } else {
+ /* Get an available descriptor */
+ desc = list_entry(isi->dma_desc_head.next,
+ struct isi_dma_desc, list);
+			/* Remove the descriptor from the free list, it is now in use */
+ list_del_init(&desc->list);
+
+ /* Initialize the dma descriptor */
+ desc->p_fbd->fb_address =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ desc->p_fbd->next_fbd_address = 0;
+ set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
+
+ buf->p_dma_desc = desc;
+ }
+ }
+ return 0;
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+
+	/* The descriptor is available again, so return it to the free list */
+ if (buf->p_dma_desc)
+ list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
+}
+
+static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
+{
+ u32 ctrl, cfg1;
+
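+	/*
+	 * Program the DMA descriptor on either the codec or the preview
+	 * channel, then enable the ISI with linked-list descriptors and the
+	 * configured frame rate.
+	 */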
+ cfg1 = isi_readl(isi, ISI_CFG1);
+ /* Enable irq: cxfr for the codec path, pxfr for the preview path */
+ isi_writel(isi, ISI_INTEN,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Check if already in a frame */
+ if (!isi->enable_preview_path) {
+ if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
+			dev_err(isi->dev, "Already handling a frame.\n");
+ return;
+ }
+
+ isi_writel(isi, ISI_DMA_C_DSCR,
+ (u32)buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ } else {
+ isi_writel(isi, ISI_DMA_P_DSCR,
+ (u32)buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_P_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH);
+ }
+
+ cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK;
+ /* Enable linked list */
+ cfg1 |= isi->pdata.frate | ISI_CFG1_DISCR;
+
+ /* Enable ISI */
+ ctrl = ISI_CTRL_EN;
+
+ if (!isi->enable_preview_path)
+ ctrl |= ISI_CTRL_CDC;
+
+ isi_writel(isi, ISI_CTRL, ctrl);
+ isi_writel(isi, ISI_CFG1, cfg1);
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&isi->irqlock, flags);
+ list_add_tail(&buf->list, &isi->video_buffer_list);
+
+ if (!isi->active) {
+ isi->active = buf;
+ if (vb2_is_streaming(vb->vb2_queue))
+ start_dma(isi, buf);
+ }
+ spin_unlock_irqrestore(&isi->irqlock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct atmel_isi *isi = vb2_get_drv_priv(vq);
+ struct frame_buffer *buf, *node;
+ int ret;
+
+ pm_runtime_get_sync(isi->dev);
+
+ /* Enable stream on the sub device */
+ ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ dev_err(isi->dev, "stream on failed in subdev\n");
+ goto err_start_stream;
+ }
+
+ /* Reset ISI */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+ if (ret < 0) {
+ dev_err(isi->dev, "Reset ISI timed out\n");
+ goto err_reset;
+ }
+ /* Disable all interrupts */
+ isi_writel(isi, ISI_INTDIS, (u32)~0UL);
+
+ isi->sequence = 0;
+ configure_geometry(isi);
+
+ spin_lock_irq(&isi->irqlock);
+ /* Clear any pending interrupt */
+ isi_readl(isi, ISI_STATUS);
+
+ start_dma(isi, isi->active);
+ spin_unlock_irq(&isi->irqlock);
+
+ return 0;
+
+err_reset:
+ v4l2_subdev_call(isi->entity.subdev, video, s_stream, 0);
+
+err_start_stream:
+ pm_runtime_put(isi->dev);
+
+ spin_lock_irq(&isi->irqlock);
+ isi->active = NULL;
+ /* Release all active buffers */
+ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irq(&isi->irqlock);
+
+ return ret;
+}
+
+/* abort streaming and wait for last buffer */
+static void stop_streaming(struct vb2_queue *vq)
+{
+ struct atmel_isi *isi = vb2_get_drv_priv(vq);
+ struct frame_buffer *buf, *node;
+ int ret = 0;
+ unsigned long timeout;
+
+ /* Disable stream on the sub device */
+ ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ dev_err(isi->dev, "stream off failed in subdev\n");
+
+ spin_lock_irq(&isi->irqlock);
+ isi->active = NULL;
+ /* Release all active buffers */
+ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irq(&isi->irqlock);
+
+ if (!isi->enable_preview_path) {
+ timeout = jiffies + (FRAME_INTERVAL_MILLI_SEC * HZ) / 1000;
+ /* Wait until the end of the current frame. */
+ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
+ time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout))
+ dev_err(isi->dev,
+				"Timeout waiting for the codec request to finish\n");
+ }
+
+ /* Disable interrupts */
+ isi_writel(isi, ISI_INTDIS,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+	/* Disable the ISI and wait until the disable operation completes */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
+ if (ret < 0)
+ dev_err(isi->dev, "Disable ISI timed out\n");
+
+ pm_runtime_put(isi->dev);
+}
+
+static const struct vb2_ops isi_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int isi_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ *fmt = isi->fmt;
+
+ return 0;
+}
+
+static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = isi->num_user_formats;
+ const struct isi_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = isi->user_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
+ const struct isi_format **current_fmt)
+{
+ const struct isi_format *isi_fmt;
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ int ret;
+
+ isi_fmt = find_format_by_fourcc(isi, pixfmt->pixelformat);
+ if (!isi_fmt) {
+ isi_fmt = isi->user_formats[isi->num_user_formats - 1];
+ pixfmt->pixelformat = isi_fmt->fourcc;
+ }
+
+ /* Limit to Atmel ISI hardware capabilities */
+ pixfmt->width = clamp(pixfmt->width, 0U, MAX_SUPPORT_WIDTH);
+ pixfmt->height = clamp(pixfmt->height, 0U, MAX_SUPPORT_HEIGHT);
+
+ v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code);
+ ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt,
+ &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pixfmt, &format.format);
+
+ pixfmt->field = V4L2_FIELD_NONE;
+ pixfmt->bytesperline = pixfmt->width * isi_fmt->bpp;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ if (current_fmt)
+ *current_fmt = isi_fmt;
+
+ return 0;
+}
+
+static int isi_set_fmt(struct atmel_isi *isi, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct isi_format *current_fmt;
+ int ret;
+
+ ret = isi_try_fmt(isi, f, &current_fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_mbus_format(&format.format, &f->fmt.pix,
+ current_fmt->mbus_code);
+ ret = v4l2_subdev_call(isi->entity.subdev, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ isi->fmt = *f;
+ isi->current_fmt = current_fmt;
+
+ return 0;
+}
+
+static int isi_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ if (vb2_is_streaming(&isi->queue))
+ return -EBUSY;
+
+ return isi_set_fmt(isi, f);
+}
+
+static int isi_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ return isi_try_fmt(isi, f, NULL);
+}
+
+static int isi_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ if (f->index >= isi->num_user_formats)
+ return -EINVAL;
+
+ f->pixelformat = isi->user_formats[f->index]->fourcc;
+ return 0;
+}
+
+static int isi_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "atmel-isi", sizeof(cap->driver));
+ strlcpy(cap->card, "Atmel Image Sensor Interface", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:isi", sizeof(cap->bus_info));
+ return 0;
+}
+
+static int isi_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, "Camera", sizeof(i->name));
+ return 0;
+}
+
+static int isi_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int isi_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int isi_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), isi->entity.subdev, a);
+}
+
+static int isi_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), isi->entity.subdev, a);
+}
+
+static int isi_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ const struct isi_format *isi_fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isi_fmt = find_format_by_fourcc(isi, fsize->pixel_format);
+ if (!isi_fmt)
+ return -EINVAL;
+
+ fse.code = isi_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int isi_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ const struct isi_format *isi_fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isi_fmt = find_format_by_fourcc(isi, fival->pixel_format);
+ if (!isi_fmt)
+ return -EINVAL;
+
+ fie.code = isi_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isi->entity.subdev, pad,
+ enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static void isi_camera_set_bus_param(struct atmel_isi *isi)
+{
+ u32 cfg1 = 0;
+
+ /* set bus param for ISI */
+ if (isi->pdata.hsync_act_low)
+ cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
+ if (isi->pdata.vsync_act_low)
+ cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
+ if (isi->pdata.pclk_act_falling)
+ cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
+ if (isi->pdata.has_emb_sync)
+ cfg1 |= ISI_CFG1_EMB_SYNC;
+ if (isi->pdata.full_mode)
+ cfg1 |= ISI_CFG1_FULL_MODE;
+
+ cfg1 |= ISI_CFG1_THMASK_BEATS_16;
+
+	/* Enable runtime PM and the peripheral clock before operating on the ISI registers */
+ pm_runtime_get_sync(isi->dev);
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CFG1, cfg1);
+
+ pm_runtime_put(isi->dev);
+}
+
+/* -----------------------------------------------------------------------*/
+static int atmel_isi_parse_dt(struct atmel_isi *isi,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct v4l2_fwnode_endpoint ep;
+ int err;
+
+ /* Default settings for ISI */
+ isi->pdata.full_mode = 1;
+ isi->pdata.frate = ISI_CFG1_FRATE_CAPTURE_ALL;
+
+ np = of_graph_get_next_endpoint(np, NULL);
+ if (!np) {
+ dev_err(&pdev->dev, "Could not find the endpoint\n");
+ return -EINVAL;
+ }
+
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+ of_node_put(np);
+ if (err) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ return err;
+ }
+
+ switch (ep.bus.parallel.bus_width) {
+ case 8:
+ isi->pdata.data_width_flags = ISI_DATAWIDTH_8;
+ break;
+ case 10:
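+		/* A 10-bit bus also supports 8-bit data, so advertise both widths */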
+ isi->pdata.data_width_flags =
+ ISI_DATAWIDTH_8 | ISI_DATAWIDTH_10;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported bus width: %d\n",
+ ep.bus.parallel.bus_width);
+ return -EINVAL;
+ }
+
+ if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ isi->pdata.hsync_act_low = true;
+ if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ isi->pdata.vsync_act_low = true;
+ if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ isi->pdata.pclk_act_falling = true;
+
+ if (ep.bus_type == V4L2_MBUS_BT656)
+ isi->pdata.has_emb_sync = true;
+
+ return 0;
+}
+
+static int isi_open(struct file *file)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ struct v4l2_subdev *sd = isi->entity.subdev;
+ int ret;
+
+ if (mutex_lock_interruptible(&isi->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto fh_rel;
+
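+	/* First open: power up the subdevice and restore the active format */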
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto fh_rel;
+
+ ret = isi_set_fmt(isi, &isi->fmt);
+ if (ret)
+ v4l2_subdev_call(sd, core, s_power, 0);
+fh_rel:
+ if (ret)
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&isi->lock);
+ return ret;
+}
+
+static int isi_release(struct file *file)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ struct v4l2_subdev *sd = isi->entity.subdev;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&isi->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&isi->lock);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops isi_ioctl_ops = {
+ .vidioc_querycap = isi_querycap,
+
+ .vidioc_try_fmt_vid_cap = isi_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = isi_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = isi_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = isi_enum_fmt_vid_cap,
+
+ .vidioc_enum_input = isi_enum_input,
+ .vidioc_g_input = isi_g_input,
+ .vidioc_s_input = isi_s_input,
+
+ .vidioc_g_parm = isi_g_parm,
+ .vidioc_s_parm = isi_s_parm,
+ .vidioc_enum_framesizes = isi_enum_framesizes,
+ .vidioc_enum_frameintervals = isi_enum_frameintervals,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations isi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = isi_open,
+ .release = isi_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+static int isi_set_default_fmt(struct atmel_isi *isi)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = isi->user_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = isi_try_fmt(isi, &f, NULL);
+ if (ret)
+ return ret;
+ isi->current_fmt = isi->user_formats[0];
+ isi->fmt = f;
+ return 0;
+}
+
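+/* ISI output formats, with the sensor bus code and YCC swap mode used to produce each one */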
+static const struct isi_format isi_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_DEFAULT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_3,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_3,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_DEFAULT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_1,
+ },
+};
+
+static int isi_formats_init(struct atmel_isi *isi)
+{
+ const struct isi_format *isi_fmts[ARRAY_SIZE(isi_formats)];
+ unsigned int num_fmts = 0, i, j;
+ struct v4l2_subdev *subdev = isi->entity.subdev;
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
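+	/* Enumerate the sensor's media bus codes and keep each ISI format that can be produced from one of them */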
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ for (i = 0; i < ARRAY_SIZE(isi_formats); i++) {
+ if (isi_formats[i].mbus_code != mbus_code.code)
+ continue;
+
+ /* Code supported, have we got this fourcc yet? */
+ for (j = 0; j < num_fmts; j++)
+ if (isi_fmts[j]->fourcc == isi_formats[i].fourcc)
+ /* Already available */
+ break;
+ if (j == num_fmts)
+ /* new */
+ isi_fmts[num_fmts++] = isi_formats + i;
+ }
+ mbus_code.index++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ isi->num_user_formats = num_fmts;
+ isi->user_formats = devm_kcalloc(isi->dev,
+ num_fmts, sizeof(struct isi_format *),
+ GFP_KERNEL);
+ if (!isi->user_formats)
+ return -ENOMEM;
+
+ memcpy(isi->user_formats, isi_fmts,
+ num_fmts * sizeof(struct isi_format *));
+ isi->current_fmt = isi->user_formats[0];
+
+ return 0;
+}
+
+static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct atmel_isi *isi = notifier_to_isi(notifier);
+ int ret;
+
+ isi->vdev->ctrl_handler = isi->entity.subdev->ctrl_handler;
+ ret = isi_formats_init(isi);
+ if (ret) {
+ dev_err(isi->dev, "No supported mediabus format found\n");
+ return ret;
+ }
+ isi_camera_set_bus_param(isi);
+
+ ret = isi_set_default_fmt(isi);
+ if (ret) {
+ dev_err(isi->dev, "Could not set default format\n");
+ return ret;
+ }
+
+ ret = video_register_device(isi->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(isi->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_dbg(isi->dev, "Device registered as %s\n",
+ video_device_node_name(isi->vdev));
+ return 0;
+}
+
+static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct atmel_isi *isi = notifier_to_isi(notifier);
+
+ dev_dbg(isi->dev, "Removing %s\n", video_device_node_name(isi->vdev));
+
+	/* Checks internally whether vdev has been initialized */
+ video_unregister_device(isi->vdev);
+}
+
+static int isi_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct atmel_isi *isi = notifier_to_isi(notifier);
+
+ dev_dbg(isi->dev, "subdev %s bound\n", subdev->name);
+
+ isi->entity.subdev = subdev;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations isi_graph_notify_ops = {
+ .bound = isi_graph_notify_bound,
+ .unbind = isi_graph_notify_unbind,
+ .complete = isi_graph_notify_complete,
+};
+
+static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
+{
+ struct device_node *ep = NULL;
+ struct device_node *remote;
+
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
+
+ /* Remote node to connect */
+ isi->entity.node = remote;
+ isi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ isi->entity.asd.match.fwnode = of_fwnode_handle(remote);
+ return 0;
+}
+
+static int isi_graph_init(struct atmel_isi *isi)
+{
+ struct v4l2_async_subdev **subdevs = NULL;
+ int ret;
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = isi_graph_parse(isi, isi->dev->of_node);
+ if (ret < 0) {
+ dev_err(isi->dev, "Graph parsing failed\n");
+ return ret;
+ }
+
+ /* Register the subdevices notifier. */
+ subdevs = devm_kzalloc(isi->dev, sizeof(*subdevs), GFP_KERNEL);
+ if (!subdevs) {
+ of_node_put(isi->entity.node);
+ return -ENOMEM;
+ }
+
+ subdevs[0] = &isi->entity.asd;
+
+ isi->notifier.subdevs = subdevs;
+ isi->notifier.num_subdevs = 1;
+ isi->notifier.ops = &isi_graph_notify_ops;
+
+ ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
+ if (ret < 0) {
+ dev_err(isi->dev, "Notifier registration failed\n");
+ of_node_put(isi->entity.node);
+ return ret;
+ }
+
+ return 0;
+}
+
+
+static int atmel_isi_probe(struct platform_device *pdev)
+{
+ int irq;
+ struct atmel_isi *isi;
+ struct vb2_queue *q;
+ struct resource *regs;
+ int ret, i;
+
+ isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
+ if (!isi)
+ return -ENOMEM;
+
+ isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
+ if (IS_ERR(isi->pclk))
+ return PTR_ERR(isi->pclk);
+
+ ret = atmel_isi_parse_dt(isi, pdev);
+ if (ret)
+ return ret;
+
+ isi->active = NULL;
+ isi->dev = &pdev->dev;
+ mutex_init(&isi->lock);
+ spin_lock_init(&isi->irqlock);
+ INIT_LIST_HEAD(&isi->video_buffer_list);
+ INIT_LIST_HEAD(&isi->dma_desc_head);
+
+ q = &isi->queue;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(&pdev->dev, &isi->v4l2_dev);
+ if (ret)
+ return ret;
+
+ isi->vdev = video_device_alloc();
+ if (!isi->vdev) {
+ ret = -ENOMEM;
+ goto err_vdev_alloc;
+ }
+
+ /* video node */
+ isi->vdev->fops = &isi_fops;
+ isi->vdev->v4l2_dev = &isi->v4l2_dev;
+ isi->vdev->queue = &isi->queue;
+ strlcpy(isi->vdev->name, KBUILD_MODNAME, sizeof(isi->vdev->name));
+ isi->vdev->release = video_device_release;
+ isi->vdev->ioctl_ops = &isi_ioctl_ops;
+ isi->vdev->lock = &isi->lock;
+ isi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(isi->vdev, isi);
+
+ /* buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &isi->lock;
+ q->drv_priv = isi;
+ q->buf_struct_size = sizeof(struct frame_buffer);
+ q->ops = &isi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->dev = &pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize VB2 queue\n");
+ goto err_vb2_queue;
+ }
+ isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fbd) * VIDEO_MAX_FRAME,
+ &isi->fb_descriptors_phys,
+ GFP_KERNEL);
+ if (!isi->p_fb_descriptors) {
+ dev_err(&pdev->dev, "Can't allocate descriptors!\n");
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
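+	/* Assign one DMA frame buffer descriptor per possible video buffer and add it to the descriptor list */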
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
+ isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
+ i * sizeof(struct fbd);
+ list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ isi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(isi->regs)) {
+ ret = PTR_ERR(isi->regs);
+ goto err_ioremap;
+ }
+
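+	/* Record the supported bus widths as a bitmask: bit 7 for 8-bit data, bit 9 for 10-bit data */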
+ if (isi->pdata.data_width_flags & ISI_DATAWIDTH_8)
+ isi->width_flags = 1 << 7;
+ if (isi->pdata.data_width_flags & ISI_DATAWIDTH_10)
+ isi->width_flags |= 1 << 9;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_req_irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, isi_interrupt, 0, "isi", isi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err_req_irq;
+ }
+ isi->irq = irq;
+
+ ret = isi_graph_init(isi);
+ if (ret < 0)
+ goto err_req_irq;
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+ platform_set_drvdata(pdev, isi);
+ return 0;
+
+err_req_irq:
+err_ioremap:
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * VIDEO_MAX_FRAME,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+err_dma_alloc:
+err_vb2_queue:
+ video_device_release(isi->vdev);
+err_vdev_alloc:
+ v4l2_device_unregister(&isi->v4l2_dev);
+
+ return ret;
+}
+
+static int atmel_isi_remove(struct platform_device *pdev)
+{
+ struct atmel_isi *isi = platform_get_drvdata(pdev);
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * VIDEO_MAX_FRAME,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+ pm_runtime_disable(&pdev->dev);
+ v4l2_async_notifier_unregister(&isi->notifier);
+ v4l2_device_unregister(&isi->v4l2_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atmel_isi_runtime_suspend(struct device *dev)
+{
+ struct atmel_isi *isi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isi->pclk);
+
+ return 0;
+}
+static int atmel_isi_runtime_resume(struct device *dev)
+{
+ struct atmel_isi *isi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(isi->pclk);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops atmel_isi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_isi_runtime_suspend,
+ atmel_isi_runtime_resume, NULL)
+};
+
+static const struct of_device_id atmel_isi_of_match[] = {
+ { .compatible = "atmel,at91sam9g45-isi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atmel_isi_of_match);
+
+static struct platform_driver atmel_isi_driver = {
+ .driver = {
+ .name = "atmel_isi",
+ .of_match_table = of_match_ptr(atmel_isi_of_match),
+ .pm = &atmel_isi_dev_pm_ops,
+ },
+ .probe = atmel_isi_probe,
+ .remove = atmel_isi_remove,
+};
+
+module_platform_driver(atmel_isi_driver);
+
+MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/atmel/atmel-isi.h b/drivers/media/platform/atmel/atmel-isi.h
new file mode 100644
index 000000000..0acb32a2b
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isi.h
@@ -0,0 +1,138 @@
+/*
+ * Register definitions for the Atmel Image Sensor Interface.
+ *
+ * Copyright (C) 2011 Atmel Corporation
+ * Josh Wu, <josh.wu@atmel.com>
+ *
+ * Based on previous work by Lars Haring, <lars.haring@atmel.com>
+ * and Sedji Gaouaou
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ATMEL_ISI_H__
+#define __ATMEL_ISI_H__
+
+#include <linux/types.h>
+
+/* ISI_V2 register offsets */
+#define ISI_CFG1 0x0000
+#define ISI_CFG2 0x0004
+#define ISI_PSIZE 0x0008
+#define ISI_PDECF 0x000c
+#define ISI_Y2R_SET0 0x0010
+#define ISI_Y2R_SET1 0x0014
+#define ISI_R2Y_SET0 0x0018
+#define ISI_R2Y_SET1 0x001C
+#define ISI_R2Y_SET2 0x0020
+#define ISI_CTRL 0x0024
+#define ISI_STATUS 0x0028
+#define ISI_INTEN 0x002C
+#define ISI_INTDIS 0x0030
+#define ISI_INTMASK 0x0034
+#define ISI_DMA_CHER 0x0038
+#define ISI_DMA_CHDR 0x003C
+#define ISI_DMA_CHSR 0x0040
+#define ISI_DMA_P_ADDR 0x0044
+#define ISI_DMA_P_CTRL 0x0048
+#define ISI_DMA_P_DSCR 0x004C
+#define ISI_DMA_C_ADDR 0x0050
+#define ISI_DMA_C_CTRL 0x0054
+#define ISI_DMA_C_DSCR 0x0058
+
+/* Bitfields in CFG1 */
+#define ISI_CFG1_HSYNC_POL_ACTIVE_LOW (1 << 2)
+#define ISI_CFG1_VSYNC_POL_ACTIVE_LOW (1 << 3)
+#define ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING (1 << 4)
+#define ISI_CFG1_EMB_SYNC (1 << 6)
+#define ISI_CFG1_CRC_SYNC (1 << 7)
+/* Constants for FRATE(ISI_V2) */
+#define ISI_CFG1_FRATE_CAPTURE_ALL (0 << 8)
+#define ISI_CFG1_FRATE_DIV_2 (1 << 8)
+#define ISI_CFG1_FRATE_DIV_3 (2 << 8)
+#define ISI_CFG1_FRATE_DIV_4 (3 << 8)
+#define ISI_CFG1_FRATE_DIV_5 (4 << 8)
+#define ISI_CFG1_FRATE_DIV_6 (5 << 8)
+#define ISI_CFG1_FRATE_DIV_7 (6 << 8)
+#define ISI_CFG1_FRATE_DIV_8 (7 << 8)
+#define ISI_CFG1_FRATE_DIV_MASK (7 << 8)
+#define ISI_CFG1_DISCR (1 << 11)
+#define ISI_CFG1_FULL_MODE (1 << 12)
+/* Definition for THMASK(ISI_V2) */
+#define ISI_CFG1_THMASK_BEATS_4 (0 << 13)
+#define ISI_CFG1_THMASK_BEATS_8 (1 << 13)
+#define ISI_CFG1_THMASK_BEATS_16 (2 << 13)
+
+/* Bitfields in CFG2 */
+#define ISI_CFG2_GRAYSCALE (1 << 13)
+#define ISI_CFG2_COL_SPACE_YCbCr (0 << 15)
+#define ISI_CFG2_COL_SPACE_RGB (1 << 15)
+/* Constants for YCC_SWAP(ISI_V2) */
+#define ISI_CFG2_YCC_SWAP_DEFAULT (0 << 28)
+#define ISI_CFG2_YCC_SWAP_MODE_1 (1 << 28)
+#define ISI_CFG2_YCC_SWAP_MODE_2 (2 << 28)
+#define ISI_CFG2_YCC_SWAP_MODE_3 (3 << 28)
+#define ISI_CFG2_YCC_SWAP_MODE_MASK (3 << 28)
+#define ISI_CFG2_IM_VSIZE_OFFSET 0
+#define ISI_CFG2_IM_HSIZE_OFFSET 16
+#define ISI_CFG2_IM_VSIZE_MASK (0x7FF << ISI_CFG2_IM_VSIZE_OFFSET)
+#define ISI_CFG2_IM_HSIZE_MASK (0x7FF << ISI_CFG2_IM_HSIZE_OFFSET)
+
+/* Bitfields in PSIZE */
+#define ISI_PSIZE_PREV_VSIZE_OFFSET 0
+#define ISI_PSIZE_PREV_HSIZE_OFFSET 16
+#define ISI_PSIZE_PREV_VSIZE_MASK (0x3FF << ISI_PSIZE_PREV_VSIZE_OFFSET)
+#define ISI_PSIZE_PREV_HSIZE_MASK (0x3FF << ISI_PSIZE_PREV_HSIZE_OFFSET)
+
+/* Bitfields in PDECF */
+#define ISI_PDECF_DEC_FACTOR_MASK (0xFF << 0)
+#define ISI_PDECF_NO_SAMPLING (16)
+
+/* Bitfields in CTRL */
+/* Also used in SR(ISI_V2) */
+#define ISI_CTRL_EN (1 << 0)
+#define ISI_CTRL_CDC (1 << 8)
+/* Also used in SR/IER/IDR/IMR(ISI_V2) */
+#define ISI_CTRL_DIS (1 << 1)
+#define ISI_CTRL_SRST (1 << 2)
+
+/* Bitfields in SR */
+#define ISI_SR_SIP (1 << 19)
+/* Also used in SR/IER/IDR/IMR */
+#define ISI_SR_VSYNC (1 << 10)
+#define ISI_SR_PXFR_DONE (1 << 16)
+#define ISI_SR_CXFR_DONE (1 << 17)
+#define ISI_SR_P_OVR (1 << 24)
+#define ISI_SR_C_OVR (1 << 25)
+#define ISI_SR_CRC_ERR (1 << 26)
+#define ISI_SR_FR_OVR (1 << 27)
+
+/* Bitfields in DMA_C_CTRL & in DMA_P_CTRL */
+#define ISI_DMA_CTRL_FETCH (1 << 0)
+#define ISI_DMA_CTRL_WB (1 << 1)
+#define ISI_DMA_CTRL_IEN (1 << 2)
+#define ISI_DMA_CTRL_DONE (1 << 3)
+
+/* Bitfields in DMA_CHSR/CHER/CHDR */
+#define ISI_DMA_CHSR_P_CH (1 << 0)
+#define ISI_DMA_CHSR_C_CH (1 << 1)
+
+/* Definition for isi_platform_data */
+#define ISI_DATAWIDTH_8 0x01
+#define ISI_DATAWIDTH_10 0x02
+
+struct v4l2_async_subdev;
+
+struct isi_platform_data {
+ u8 has_emb_sync;
+ u8 hsync_act_low;
+ u8 vsync_act_low;
+ u8 pclk_act_falling;
+ u8 full_mode;
+ u32 data_width_flags;
+	/* Used for ISI_CFG1 */
+ u32 frate;
+};
+
+#endif /* __ATMEL_ISI_H__ */
diff --git a/drivers/media/platform/cadence/Kconfig b/drivers/media/platform/cadence/Kconfig
new file mode 100644
index 000000000..cf6124da3
--- /dev/null
+++ b/drivers/media/platform/cadence/Kconfig
@@ -0,0 +1,36 @@
+config VIDEO_CADENCE
+ bool "Cadence Video Devices"
+ help
+ If you have a media device designed by Cadence, say Y.
+
+ Note that this option doesn't include new drivers in the kernel:
+ saying N will just cause Kconfig to skip all the questions about
+ Cadence media devices.
+
+if VIDEO_CADENCE
+
+config VIDEO_CADENCE_CSI2RX
+ tristate "Cadence MIPI-CSI2 RX Controller"
+ depends on VIDEO_V4L2
+ depends on MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ Support for the Cadence MIPI CSI2 Receiver controller.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cdns-csi2rx.
+
+config VIDEO_CADENCE_CSI2TX
+ tristate "Cadence MIPI-CSI2 TX Controller"
+ depends on VIDEO_V4L2
+ depends on MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ Support for the Cadence MIPI CSI2 Transceiver controller.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cdns-csi2tx.
+
+endif
diff --git a/drivers/media/platform/cadence/Makefile b/drivers/media/platform/cadence/Makefile
new file mode 100644
index 000000000..be59a8728
--- /dev/null
+++ b/drivers/media/platform/cadence/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_CADENCE_CSI2RX) += cdns-csi2rx.o
+obj-$(CONFIG_VIDEO_CADENCE_CSI2TX) += cdns-csi2tx.o
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
new file mode 100644
index 000000000..6f64703d2
--- /dev/null
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Cadence MIPI-CSI2 RX Controller v1.3
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define CSI2RX_DEVICE_CFG_REG 0x000
+
+#define CSI2RX_SOFT_RESET_REG 0x004
+#define CSI2RX_SOFT_RESET_PROTOCOL BIT(1)
+#define CSI2RX_SOFT_RESET_FRONT BIT(0)
+
+#define CSI2RX_STATIC_CFG_REG 0x008
+#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane) ((plane) << (16 + (llane) * 4))
+#define CSI2RX_STATIC_CFG_LANES_MASK GENMASK(11, 8)
+
+#define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100)
+
+#define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000)
+#define CSI2RX_STREAM_CTRL_START BIT(0)
+
+#define CSI2RX_STREAM_DATA_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x008)
+#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT BIT(31)
+#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16)
+
+#define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c)
+#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF (1 << 8)
+
+#define CSI2RX_LANES_MAX 4
+#define CSI2RX_STREAMS_MAX 4
+
+enum csi2rx_pads {
+ CSI2RX_PAD_SINK,
+ CSI2RX_PAD_SOURCE_STREAM0,
+ CSI2RX_PAD_SOURCE_STREAM1,
+ CSI2RX_PAD_SOURCE_STREAM2,
+ CSI2RX_PAD_SOURCE_STREAM3,
+ CSI2RX_PAD_MAX,
+};
+
+struct csi2rx_priv {
+ struct device *dev;
+ unsigned int count;
+
+ /*
+ * Used to prevent race conditions between multiple,
+ * concurrent calls to start and stop.
+ */
+ struct mutex lock;
+
+ void __iomem *base;
+ struct clk *sys_clk;
+ struct clk *p_clk;
+ struct clk *pixel_clk[CSI2RX_STREAMS_MAX];
+ struct phy *dphy;
+
+ u8 lanes[CSI2RX_LANES_MAX];
+ u8 num_lanes;
+ u8 max_lanes;
+ u8 max_streams;
+ bool has_internal_dphy;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_async_notifier notifier;
+ struct media_pad pads[CSI2RX_PAD_MAX];
+
+ /* Remote source */
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *source_subdev;
+ int source_pad;
+};
+
+static inline
+struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct csi2rx_priv, subdev);
+}
+
+static void csi2rx_reset(struct csi2rx_priv *csi2rx)
+{
+ writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
+ csi2rx->base + CSI2RX_SOFT_RESET_REG);
+
+ udelay(10);
+
+ writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
+}
+
+static int csi2rx_start(struct csi2rx_priv *csi2rx)
+{
+ unsigned int i;
+ unsigned long lanes_used = 0;
+ u32 reg;
+ int ret;
+
+ ret = clk_prepare_enable(csi2rx->p_clk);
+ if (ret)
+ return ret;
+
+ csi2rx_reset(csi2rx);
+
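+	/* Program the active lane count (bits 11:8) and the logical-to-physical lane mapping */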
+ reg = csi2rx->num_lanes << 8;
+ for (i = 0; i < csi2rx->num_lanes; i++) {
+ reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
+ set_bit(csi2rx->lanes[i], &lanes_used);
+ }
+
+ /*
+	 * Even the unused lanes need to be mapped. To avoid mapping the
+	 * same physical lane twice, keep track of the lanes used in the
+	 * previous loop, and only map unused physical lanes to the rest
+	 * of our logical lanes.
+ */
+ for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
+ unsigned int idx = find_first_zero_bit(&lanes_used,
+ csi2rx->max_lanes);
+ set_bit(idx, &lanes_used);
+ reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
+ }
+
+ writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);
+
+ ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
+ if (ret)
+ goto err_disable_pclk;
+
+ /*
+ * Create a static mapping between the CSI virtual channels
+ * and the output stream.
+ *
+	 * This should be enhanced, but v4l2 lacks support for
+	 * changing that mapping dynamically.
+ *
+ * We also cannot enable and disable independent streams here,
+ * hence the reference counting.
+ */
+ for (i = 0; i < csi2rx->max_streams; i++) {
+ ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
+ if (ret)
+ goto err_disable_pixclk;
+
+ writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
+ csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
+
+ writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT |
+ CSI2RX_STREAM_DATA_CFG_VC_SELECT(i),
+ csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));
+
+ writel(CSI2RX_STREAM_CTRL_START,
+ csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+ }
+
+ ret = clk_prepare_enable(csi2rx->sys_clk);
+ if (ret)
+ goto err_disable_pixclk;
+
+ clk_disable_unprepare(csi2rx->p_clk);
+
+ return 0;
+
+err_disable_pixclk:
+ for (; i > 0; i--)
+ clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
+
+err_disable_pclk:
+ clk_disable_unprepare(csi2rx->p_clk);
+
+ return ret;
+}
+
+static void csi2rx_stop(struct csi2rx_priv *csi2rx)
+{
+ unsigned int i;
+
+ clk_prepare_enable(csi2rx->p_clk);
+ clk_disable_unprepare(csi2rx->sys_clk);
+
+ for (i = 0; i < csi2rx->max_streams; i++) {
+ writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+
+ clk_disable_unprepare(csi2rx->pixel_clk[i]);
+ }
+
+ clk_disable_unprepare(csi2rx->p_clk);
+
+ if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
+ dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");
+}
+
+static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+ int ret = 0;
+
+ mutex_lock(&csi2rx->lock);
+
+ if (enable) {
+ /*
+		 * If we're not the first user, there's no need to
+ * enable the whole controller.
+ */
+ if (!csi2rx->count) {
+ ret = csi2rx_start(csi2rx);
+ if (ret)
+ goto out;
+ }
+
+ csi2rx->count++;
+ } else {
+ csi2rx->count--;
+
+ /*
+ * Let the last user turn off the lights.
+ */
+ if (!csi2rx->count)
+ csi2rx_stop(csi2rx);
+ }
+
+out:
+ mutex_unlock(&csi2rx->lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
+ .s_stream = csi2rx_s_stream,
+};
+
+static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
+ .video = &csi2rx_video_ops,
+};
+
+static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *s_subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct v4l2_subdev *subdev = notifier->sd;
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+
+ csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
+ s_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (csi2rx->source_pad < 0) {
+ dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
+ s_subdev->name);
+ return csi2rx->source_pad;
+ }
+
+ csi2rx->source_subdev = s_subdev;
+
+ dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
+ csi2rx->source_pad);
+
+ return media_create_pad_link(&csi2rx->source_subdev->entity,
+ csi2rx->source_pad,
+ &csi2rx->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
+ .bound = csi2rx_async_bound,
+};
+
+static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ unsigned char i;
+ u32 dev_cfg;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csi2rx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csi2rx->base))
+ return PTR_ERR(csi2rx->base);
+
+ csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(csi2rx->sys_clk)) {
+ dev_err(&pdev->dev, "Couldn't get sys clock\n");
+ return PTR_ERR(csi2rx->sys_clk);
+ }
+
+ csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
+ if (IS_ERR(csi2rx->p_clk)) {
+ dev_err(&pdev->dev, "Couldn't get P clock\n");
+ return PTR_ERR(csi2rx->p_clk);
+ }
+
+ csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
+ if (IS_ERR(csi2rx->dphy)) {
+ dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
+ return PTR_ERR(csi2rx->dphy);
+ }
+
+ /*
+	 * FIXME: Once we have external D-PHY support, this check
+	 * will need to be removed.
+ */
+ if (csi2rx->dphy) {
+ dev_err(&pdev->dev, "External D-PHY not supported yet\n");
+ return -EINVAL;
+ }
+
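+	/*
+	 * The DEVICE_CFG register describes how the controller was configured:
+	 * number of lanes in bits 2:0, number of streams in bits 6:4 and an
+	 * internal D-PHY indicator in bit 3.
+	 */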
+ clk_prepare_enable(csi2rx->p_clk);
+ dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
+ clk_disable_unprepare(csi2rx->p_clk);
+
+ csi2rx->max_lanes = dev_cfg & 7;
+ if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
+ dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
+ csi2rx->max_lanes);
+ return -EINVAL;
+ }
+
+ csi2rx->max_streams = (dev_cfg >> 4) & 7;
+ if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
+ dev_err(&pdev->dev, "Invalid number of streams: %u\n",
+ csi2rx->max_streams);
+ return -EINVAL;
+ }
+
+ csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;
+
+ /*
+	 * FIXME: Once we have internal D-PHY support, this check
+	 * will need to be removed.
+ */
+ if (csi2rx->has_internal_dphy) {
+ dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < csi2rx->max_streams; i++) {
+ char clk_name[16];
+
+ snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i);
+ csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(csi2rx->pixel_clk[i])) {
+ dev_err(&pdev->dev, "Couldn't get clock %s\n", clk_name);
+ return PTR_ERR(csi2rx->pixel_clk[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
+{
+ struct v4l2_fwnode_endpoint v4l2_ep;
+ struct fwnode_handle *fwh;
+ struct device_node *ep;
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
+ fwh = of_fwnode_handle(ep);
+ ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
+ if (ret) {
+ dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
+ of_node_put(ep);
+ return ret;
+ }
+
+ if (v4l2_ep.bus_type != V4L2_MBUS_CSI2) {
+ dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
+ v4l2_ep.bus_type);
+ of_node_put(ep);
+ return -EINVAL;
+ }
+
+ memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
+ sizeof(csi2rx->lanes));
+ csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ if (csi2rx->num_lanes > csi2rx->max_lanes) {
+ dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
+ csi2rx->num_lanes);
+ of_node_put(ep);
+ return -EINVAL;
+ }
+
+ csi2rx->asd.match.fwnode = fwnode_graph_get_remote_port_parent(fwh);
+ csi2rx->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ of_node_put(ep);
+
+ csi2rx->notifier.subdevs = devm_kzalloc(csi2rx->dev,
+ sizeof(*csi2rx->notifier.subdevs),
+ GFP_KERNEL);
+ if (!csi2rx->notifier.subdevs)
+ return -ENOMEM;
+
+ csi2rx->notifier.subdevs[0] = &csi2rx->asd;
+ csi2rx->notifier.num_subdevs = 1;
+ csi2rx->notifier.ops = &csi2rx_notifier_ops;
+
+ return v4l2_async_subdev_notifier_register(&csi2rx->subdev,
+ &csi2rx->notifier);
+}
+
+static int csi2rx_probe(struct platform_device *pdev)
+{
+ struct csi2rx_priv *csi2rx;
+ unsigned int i;
+ int ret;
+
+ csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL);
+ if (!csi2rx)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, csi2rx);
+ csi2rx->dev = &pdev->dev;
+ mutex_init(&csi2rx->lock);
+
+ ret = csi2rx_get_resources(csi2rx, pdev);
+ if (ret)
+ goto err_free_priv;
+
+ ret = csi2rx_parse_dt(csi2rx);
+ if (ret)
+ goto err_free_priv;
+
+ csi2rx->subdev.owner = THIS_MODULE;
+ csi2rx->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
+ v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
+ snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+
+ /* Create our media pads */
+ csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
+ csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
+ csi2rx->pads);
+ if (ret)
+ goto err_free_priv;
+
+ ret = v4l2_async_register_subdev(&csi2rx->subdev);
+ if (ret < 0)
+ goto err_free_priv;
+
+ dev_info(&pdev->dev,
+ "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
+ csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
+ csi2rx->has_internal_dphy ? "internal" : "no");
+
+ return 0;
+
+err_free_priv:
+ kfree(csi2rx);
+ return ret;
+}
+
+static int csi2rx_remove(struct platform_device *pdev)
+{
+ struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+
+ v4l2_async_unregister_subdev(&csi2rx->subdev);
+ kfree(csi2rx);
+
+ return 0;
+}
+
+static const struct of_device_id csi2rx_of_table[] = {
+ { .compatible = "cdns,csi2rx" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, csi2rx_of_table);
+
+static struct platform_driver csi2rx_driver = {
+ .probe = csi2rx_probe,
+ .remove = csi2rx_remove,
+
+ .driver = {
+ .name = "cdns-csi2rx",
+ .of_match_table = csi2rx_of_table,
+ },
+};
+module_platform_driver(csi2rx_driver);
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_DESCRIPTION("Cadence CSI2-RX controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
new file mode 100644
index 000000000..40d0de690
--- /dev/null
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Cadence MIPI-CSI2 TX Controller
+ *
+ * Copyright (C) 2017-2018 Cadence Design Systems Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define CSI2TX_DEVICE_CONFIG_REG 0x00
+#define CSI2TX_DEVICE_CONFIG_STREAMS_MASK GENMASK(6, 4)
+#define CSI2TX_DEVICE_CONFIG_HAS_DPHY BIT(3)
+#define CSI2TX_DEVICE_CONFIG_LANES_MASK GENMASK(2, 0)
+
+#define CSI2TX_CONFIG_REG 0x20
+#define CSI2TX_CONFIG_CFG_REQ BIT(2)
+#define CSI2TX_CONFIG_SRST_REQ BIT(1)
+
+#define CSI2TX_DPHY_CFG_REG 0x28
+#define CSI2TX_DPHY_CFG_CLK_RESET BIT(16)
+#define CSI2TX_DPHY_CFG_LANE_RESET(n) BIT((n) + 12)
+#define CSI2TX_DPHY_CFG_MODE_MASK GENMASK(9, 8)
+#define CSI2TX_DPHY_CFG_MODE_LPDT (2 << 8)
+#define CSI2TX_DPHY_CFG_MODE_HS (1 << 8)
+#define CSI2TX_DPHY_CFG_MODE_ULPS (0 << 8)
+#define CSI2TX_DPHY_CFG_CLK_ENABLE BIT(4)
+#define CSI2TX_DPHY_CFG_LANE_ENABLE(n) BIT(n)
+
+#define CSI2TX_DPHY_CLK_WAKEUP_REG 0x2c
+#define CSI2TX_DPHY_CLK_WAKEUP_ULPS_CYCLES(n) ((n) & 0xffff)
+
+#define CSI2TX_DT_CFG_REG(n) (0x80 + (n) * 8)
+#define CSI2TX_DT_CFG_DT(n) (((n) & 0x3f) << 2)
+
+#define CSI2TX_DT_FORMAT_REG(n) (0x84 + (n) * 8)
+#define CSI2TX_DT_FORMAT_BYTES_PER_LINE(n) (((n) & 0xffff) << 16)
+#define CSI2TX_DT_FORMAT_MAX_LINE_NUM(n) ((n) & 0xffff)
+
+#define CSI2TX_STREAM_IF_CFG_REG(n) (0x100 + (n) * 4)
+#define CSI2TX_STREAM_IF_CFG_FILL_LEVEL(n) ((n) & 0x1f)
+
+#define CSI2TX_LANES_MAX 4
+#define CSI2TX_STREAMS_MAX 4
+
+enum csi2tx_pads {
+ CSI2TX_PAD_SOURCE,
+ CSI2TX_PAD_SINK_STREAM0,
+ CSI2TX_PAD_SINK_STREAM1,
+ CSI2TX_PAD_SINK_STREAM2,
+ CSI2TX_PAD_SINK_STREAM3,
+ CSI2TX_PAD_MAX,
+};
+
+struct csi2tx_fmt {
+ u32 mbus;
+ u32 dt;
+ u32 bpp;
+};
+
+struct csi2tx_priv {
+ struct device *dev;
+ unsigned int count;
+
+ /*
+ * Used to prevent race conditions between multiple,
+ * concurrent calls to start and stop.
+ */
+ struct mutex lock;
+
+ void __iomem *base;
+
+ struct clk *esc_clk;
+ struct clk *p_clk;
+ struct clk *pixel_clk[CSI2TX_STREAMS_MAX];
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2TX_PAD_MAX];
+ struct v4l2_mbus_framefmt pad_fmts[CSI2TX_PAD_MAX];
+
+ bool has_internal_dphy;
+ u8 lanes[CSI2TX_LANES_MAX];
+ unsigned int num_lanes;
+ unsigned int max_lanes;
+ unsigned int max_streams;
+};
+
+static const struct csi2tx_fmt csi2tx_formats[] = {
+ {
+ .mbus = MEDIA_BUS_FMT_UYVY8_1X16,
+ .bpp = 2,
+ .dt = 0x1e,
+ },
+ {
+ .mbus = MEDIA_BUS_FMT_RGB888_1X24,
+ .bpp = 3,
+ .dt = 0x24,
+ },
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = 1280,
+ .height = 720,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+static inline
+struct csi2tx_priv *v4l2_subdev_to_csi2tx(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct csi2tx_priv, subdev);
+}
+
+static const struct csi2tx_fmt *csi2tx_get_fmt_from_mbus(u32 mbus)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(csi2tx_formats); i++)
+ if (csi2tx_formats[i].mbus == mbus)
+ return &csi2tx_formats[i];
+
+ return NULL;
+}
+
+static int csi2tx_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index >= ARRAY_SIZE(csi2tx_formats))
+ return -EINVAL;
+
+ code->code = csi2tx_formats[code->index].mbus;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__csi2tx_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csi2tx_priv *csi2tx = v4l2_subdev_to_csi2tx(subdev);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(subdev, cfg,
+ fmt->pad);
+
+ return &csi2tx->pad_fmts[fmt->pad];
+}
+
+static int csi2tx_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct v4l2_mbus_framefmt *format;
+
+ /* Multiplexed pad? */
+ if (fmt->pad == CSI2TX_PAD_SOURCE)
+ return -EINVAL;
+
+ format = __csi2tx_get_pad_format(subdev, cfg, fmt);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int csi2tx_set_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct v4l2_mbus_framefmt *src_format = &fmt->format;
+ struct v4l2_mbus_framefmt *dst_format;
+
+ /* Multiplexed pad? */
+ if (fmt->pad == CSI2TX_PAD_SOURCE)
+ return -EINVAL;
+
+ if (!csi2tx_get_fmt_from_mbus(fmt->format.code))
+ src_format = &fmt_default;
+
+ dst_format = __csi2tx_get_pad_format(subdev, cfg, fmt);
+ if (!dst_format)
+ return -EINVAL;
+
+ *dst_format = *src_format;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops csi2tx_pad_ops = {
+ .enum_mbus_code = csi2tx_enum_mbus_code,
+ .get_fmt = csi2tx_get_pad_format,
+ .set_fmt = csi2tx_set_pad_format,
+};
+
+static void csi2tx_reset(struct csi2tx_priv *csi2tx)
+{
+ writel(CSI2TX_CONFIG_SRST_REQ, csi2tx->base + CSI2TX_CONFIG_REG);
+
+ udelay(10);
+}
+
+static int csi2tx_start(struct csi2tx_priv *csi2tx)
+{
+ struct media_entity *entity = &csi2tx->subdev.entity;
+ struct media_link *link;
+ unsigned int i;
+ u32 reg;
+
+ csi2tx_reset(csi2tx);
+
+ writel(CSI2TX_CONFIG_CFG_REQ, csi2tx->base + CSI2TX_CONFIG_REG);
+
+ udelay(10);
+
+ /* Configure our PPI interface with the D-PHY */
+ writel(CSI2TX_DPHY_CLK_WAKEUP_ULPS_CYCLES(32),
+ csi2tx->base + CSI2TX_DPHY_CLK_WAKEUP_REG);
+
+ /* Put our lanes (clock and data) out of reset */
+ reg = CSI2TX_DPHY_CFG_CLK_RESET | CSI2TX_DPHY_CFG_MODE_LPDT;
+ for (i = 0; i < csi2tx->num_lanes; i++)
+ reg |= CSI2TX_DPHY_CFG_LANE_RESET(csi2tx->lanes[i]);
+ writel(reg, csi2tx->base + CSI2TX_DPHY_CFG_REG);
+
+ udelay(10);
+
+ /* Enable our (clock and data) lanes */
+ reg |= CSI2TX_DPHY_CFG_CLK_ENABLE;
+ for (i = 0; i < csi2tx->num_lanes; i++)
+ reg |= CSI2TX_DPHY_CFG_LANE_ENABLE(csi2tx->lanes[i]);
+ writel(reg, csi2tx->base + CSI2TX_DPHY_CFG_REG);
+
+ udelay(10);
+
+ /* Switch to HS mode */
+ reg &= ~CSI2TX_DPHY_CFG_MODE_MASK;
+ writel(reg | CSI2TX_DPHY_CFG_MODE_HS,
+ csi2tx->base + CSI2TX_DPHY_CFG_REG);
+
+ udelay(10);
+
+ /*
+ * Create a static mapping between the CSI virtual channels
+ * and the input streams.
+ *
+	 * This should be enhanced, but v4l2 lacks support for
+	 * changing that mapping dynamically at the moment.
+ *
+ * We're protected from the userspace setting up links at the
+ * same time by the upper layer having called
+ * media_pipeline_start().
+ */
+ list_for_each_entry(link, &entity->links, list) {
+ struct v4l2_mbus_framefmt *mfmt;
+ const struct csi2tx_fmt *fmt;
+ unsigned int stream;
+ int pad_idx = -1;
+
+ /* Only consider our enabled input pads */
+ for (i = CSI2TX_PAD_SINK_STREAM0; i < CSI2TX_PAD_MAX; i++) {
+ struct media_pad *pad = &csi2tx->pads[i];
+
+ if ((pad == link->sink) &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ pad_idx = i;
+ break;
+ }
+ }
+
+ if (pad_idx < 0)
+ continue;
+
+ mfmt = &csi2tx->pad_fmts[pad_idx];
+ fmt = csi2tx_get_fmt_from_mbus(mfmt->code);
+ if (!fmt)
+ continue;
+
+ stream = pad_idx - CSI2TX_PAD_SINK_STREAM0;
+
+ /*
+ * We use the stream ID there, but it's wrong.
+ *
+ * A stream could very well send a data type that is
+ * not equal to its stream ID. We need to find a
+ * proper way to address it.
+ */
+ writel(CSI2TX_DT_CFG_DT(fmt->dt),
+ csi2tx->base + CSI2TX_DT_CFG_REG(stream));
+
+ writel(CSI2TX_DT_FORMAT_BYTES_PER_LINE(mfmt->width * fmt->bpp) |
+ CSI2TX_DT_FORMAT_MAX_LINE_NUM(mfmt->height + 1),
+ csi2tx->base + CSI2TX_DT_FORMAT_REG(stream));
+
+ /*
+ * TODO: This needs to be calculated based on the
+ * output CSI2 clock rate.
+ */
+ writel(CSI2TX_STREAM_IF_CFG_FILL_LEVEL(4),
+ csi2tx->base + CSI2TX_STREAM_IF_CFG_REG(stream));
+ }
+
+ /* Disable the configuration mode */
+ writel(0, csi2tx->base + CSI2TX_CONFIG_REG);
+
+ return 0;
+}
+
+static void csi2tx_stop(struct csi2tx_priv *csi2tx)
+{
+ writel(CSI2TX_CONFIG_CFG_REQ | CSI2TX_CONFIG_SRST_REQ,
+ csi2tx->base + CSI2TX_CONFIG_REG);
+}
+
+static int csi2tx_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct csi2tx_priv *csi2tx = v4l2_subdev_to_csi2tx(subdev);
+ int ret = 0;
+
+ mutex_lock(&csi2tx->lock);
+
+ if (enable) {
+ /*
+		 * If we're not the first user, there's no need to
+ * enable the whole controller.
+ */
+ if (!csi2tx->count) {
+ ret = csi2tx_start(csi2tx);
+ if (ret)
+ goto out;
+ }
+
+ csi2tx->count++;
+ } else {
+ csi2tx->count--;
+
+ /*
+ * Let the last user turn off the lights.
+ */
+ if (!csi2tx->count)
+ csi2tx_stop(csi2tx);
+ }
+
+out:
+ mutex_unlock(&csi2tx->lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops csi2tx_video_ops = {
+ .s_stream = csi2tx_s_stream,
+};
+
+static const struct v4l2_subdev_ops csi2tx_subdev_ops = {
+ .pad = &csi2tx_pad_ops,
+ .video = &csi2tx_video_ops,
+};
+
+static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ unsigned int i;
+ u32 dev_cfg;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csi2tx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csi2tx->base))
+ return PTR_ERR(csi2tx->base);
+
+ csi2tx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
+ if (IS_ERR(csi2tx->p_clk)) {
+ dev_err(&pdev->dev, "Couldn't get p_clk\n");
+ return PTR_ERR(csi2tx->p_clk);
+ }
+
+ csi2tx->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
+ if (IS_ERR(csi2tx->esc_clk)) {
+ dev_err(&pdev->dev, "Couldn't get the esc_clk\n");
+ return PTR_ERR(csi2tx->esc_clk);
+ }
+
+ clk_prepare_enable(csi2tx->p_clk);
+ dev_cfg = readl(csi2tx->base + CSI2TX_DEVICE_CONFIG_REG);
+ clk_disable_unprepare(csi2tx->p_clk);
+
+ csi2tx->max_lanes = dev_cfg & CSI2TX_DEVICE_CONFIG_LANES_MASK;
+ if (csi2tx->max_lanes > CSI2TX_LANES_MAX) {
+ dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
+ csi2tx->max_lanes);
+ return -EINVAL;
+ }
+
+ csi2tx->max_streams = (dev_cfg & CSI2TX_DEVICE_CONFIG_STREAMS_MASK) >> 4;
+ if (csi2tx->max_streams > CSI2TX_STREAMS_MAX) {
+ dev_err(&pdev->dev, "Invalid number of streams: %u\n",
+ csi2tx->max_streams);
+ return -EINVAL;
+ }
+
+ csi2tx->has_internal_dphy = !!(dev_cfg & CSI2TX_DEVICE_CONFIG_HAS_DPHY);
+
+ for (i = 0; i < csi2tx->max_streams; i++) {
+ char clk_name[16];
+
+ snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i);
+ csi2tx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(csi2tx->pixel_clk[i])) {
+ dev_err(&pdev->dev, "Couldn't get clock %s\n",
+ clk_name);
+ return PTR_ERR(csi2tx->pixel_clk[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int csi2tx_check_lanes(struct csi2tx_priv *csi2tx)
+{
+ struct v4l2_fwnode_endpoint v4l2_ep;
+ struct device_node *ep;
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(csi2tx->dev->of_node, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep);
+ if (ret) {
+ dev_err(csi2tx->dev, "Could not parse v4l2 endpoint\n");
+ goto out;
+ }
+
+ if (v4l2_ep.bus_type != V4L2_MBUS_CSI2) {
+ dev_err(csi2tx->dev, "Unsupported media bus type: 0x%x\n",
+ v4l2_ep.bus_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ csi2tx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ if (csi2tx->num_lanes > csi2tx->max_lanes) {
+ dev_err(csi2tx->dev,
+ "Current configuration uses more lanes than supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(csi2tx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
+ sizeof(csi2tx->lanes));
+
+out:
+ of_node_put(ep);
+ return ret;
+}
+
+static int csi2tx_probe(struct platform_device *pdev)
+{
+ struct csi2tx_priv *csi2tx;
+ unsigned int i;
+ int ret;
+
+ csi2tx = kzalloc(sizeof(*csi2tx), GFP_KERNEL);
+ if (!csi2tx)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, csi2tx);
+ mutex_init(&csi2tx->lock);
+ csi2tx->dev = &pdev->dev;
+
+ ret = csi2tx_get_resources(csi2tx, pdev);
+ if (ret)
+ goto err_free_priv;
+
+ v4l2_subdev_init(&csi2tx->subdev, &csi2tx_subdev_ops);
+ csi2tx->subdev.owner = THIS_MODULE;
+ csi2tx->subdev.dev = &pdev->dev;
+ csi2tx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(csi2tx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+
+ ret = csi2tx_check_lanes(csi2tx);
+ if (ret)
+ goto err_free_priv;
+
+ /* Create our media pads */
+ csi2tx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi2tx->pads[CSI2TX_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ for (i = CSI2TX_PAD_SINK_STREAM0; i < CSI2TX_PAD_MAX; i++)
+ csi2tx->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ /*
+ * Only the input pads are considered to have a format at the
+ * moment. The CSI link can multiplex various streams with
+ * different formats, and we can't expose this in v4l2 right
+ * now.
+ */
+ for (i = CSI2TX_PAD_SINK_STREAM0; i < CSI2TX_PAD_MAX; i++)
+ csi2tx->pad_fmts[i] = fmt_default;
+
+ ret = media_entity_pads_init(&csi2tx->subdev.entity, CSI2TX_PAD_MAX,
+ csi2tx->pads);
+ if (ret)
+ goto err_free_priv;
+
+ ret = v4l2_async_register_subdev(&csi2tx->subdev);
+ if (ret < 0)
+ goto err_free_priv;
+
+ dev_info(&pdev->dev,
+ "Probed CSI2TX with %u/%u lanes, %u streams, %s D-PHY\n",
+ csi2tx->num_lanes, csi2tx->max_lanes, csi2tx->max_streams,
+ csi2tx->has_internal_dphy ? "internal" : "no");
+
+ return 0;
+
+err_free_priv:
+ kfree(csi2tx);
+ return ret;
+}
+
+static int csi2tx_remove(struct platform_device *pdev)
+{
+ struct csi2tx_priv *csi2tx = platform_get_drvdata(pdev);
+
+ v4l2_async_unregister_subdev(&csi2tx->subdev);
+ kfree(csi2tx);
+
+ return 0;
+}
+
+static const struct of_device_id csi2tx_of_table[] = {
+ { .compatible = "cdns,csi2tx" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, csi2tx_of_table);
+
+static struct platform_driver csi2tx_driver = {
+ .probe = csi2tx_probe,
+ .remove = csi2tx_remove,
+
+ .driver = {
+ .name = "cdns-csi2tx",
+ .of_match_table = csi2tx_of_table,
+ },
+};
+module_platform_driver(csi2tx_driver);
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_DESCRIPTION("Cadence CSI2-TX controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/cec-gpio/Makefile b/drivers/media/platform/cec-gpio/Makefile
new file mode 100644
index 000000000..e82b258af
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CEC_GPIO) += cec-gpio.o
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
new file mode 100644
index 000000000..d2861749d
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <media/cec-pin.h>
+
+struct cec_gpio {
+ struct cec_adapter *adap;
+ struct device *dev;
+
+ struct gpio_desc *cec_gpio;
+ int cec_irq;
+ bool cec_is_low;
+ bool cec_have_irq;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+ bool hpd_is_high;
+ ktime_t hpd_ts;
+
+ struct gpio_desc *v5_gpio;
+ int v5_irq;
+ bool v5_is_high;
+ ktime_t v5_ts;
+};
+
+static bool cec_gpio_read(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return false;
+ return gpiod_get_value(cec->cec_gpio);
+}
+
+static void cec_gpio_high(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->cec_is_low)
+ return;
+ cec->cec_is_low = false;
+ gpiod_set_value(cec->cec_gpio, 1);
+}
+
+static void cec_gpio_low(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return;
+ if (WARN_ON_ONCE(cec->cec_have_irq))
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+ cec->cec_is_low = true;
+ gpiod_set_value(cec->cec_gpio, 0);
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cec_5v_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+ bool is_high = gpiod_get_value(cec->v5_gpio);
+
+ if (is_high == cec->v5_is_high)
+ return IRQ_HANDLED;
+ cec->v5_ts = ktime_get();
+ cec->v5_is_high = is_high;
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cec_5v_gpio_irq_handler_thread(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_queue_pin_5v_event(cec->adap, cec->v5_is_high, cec->v5_ts);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+ bool is_high = gpiod_get_value(cec->hpd_gpio);
+
+ if (is_high == cec->hpd_is_high)
+ return IRQ_HANDLED;
+ cec->hpd_ts = ktime_get();
+ cec->hpd_is_high = is_high;
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_pin_changed(cec->adap, gpiod_get_value(cec->cec_gpio));
+ return IRQ_HANDLED;
+}
+
+static bool cec_gpio_enable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ return true;
+
+ if (request_irq(cec->cec_irq, cec_gpio_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ adap->name, cec))
+ return false;
+ cec->cec_have_irq = true;
+ return true;
+}
+
+static void cec_gpio_disable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+}
+
+static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ seq_printf(file, "mode: %s\n", cec->cec_is_low ? "low-drive" : "read");
+ if (cec->cec_have_irq)
+ seq_printf(file, "using irq: %d\n", cec->cec_irq);
+ if (cec->hpd_gpio)
+ seq_printf(file, "hpd: %s\n",
+ cec->hpd_is_high ? "high" : "low");
+ if (cec->v5_gpio)
+ seq_printf(file, "5V: %s\n",
+ cec->v5_is_high ? "high" : "low");
+}
+
+static int cec_gpio_read_hpd(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->hpd_gpio)
+ return -ENOTTY;
+ return gpiod_get_value(cec->hpd_gpio);
+}
+
+static int cec_gpio_read_5v(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->v5_gpio)
+ return -ENOTTY;
+ return gpiod_get_value(cec->v5_gpio);
+}
+
+static void cec_gpio_free(struct cec_adapter *adap)
+{
+ cec_gpio_disable_irq(adap);
+}
+
+static const struct cec_pin_ops cec_gpio_pin_ops = {
+ .read = cec_gpio_read,
+ .low = cec_gpio_low,
+ .high = cec_gpio_high,
+ .enable_irq = cec_gpio_enable_irq,
+ .disable_irq = cec_gpio_disable_irq,
+ .status = cec_gpio_status,
+ .free = cec_gpio_free,
+ .read_hpd = cec_gpio_read_hpd,
+ .read_5v = cec_gpio_read_5v,
+};
+
+static int cec_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cec_gpio *cec;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
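+	/* The CEC line is open drain: it idles high and is actively driven low to transmit */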
+ cec->cec_gpio = devm_gpiod_get(dev, "cec", GPIOD_OUT_HIGH_OPEN_DRAIN);
+ if (IS_ERR(cec->cec_gpio))
+ return PTR_ERR(cec->cec_gpio);
+ cec->cec_irq = gpiod_to_irq(cec->cec_gpio);
+
+ cec->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(cec->hpd_gpio))
+ return PTR_ERR(cec->hpd_gpio);
+
+ cec->v5_gpio = devm_gpiod_get_optional(dev, "v5", GPIOD_IN);
+ if (IS_ERR(cec->v5_gpio))
+ return PTR_ERR(cec->v5_gpio);
+
+ cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
+ cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
+ CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ if (IS_ERR(cec->adap))
+ return PTR_ERR(cec->adap);
+
+ if (cec->hpd_gpio) {
+ cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
+ ret = devm_request_threaded_irq(dev, cec->hpd_irq,
+ cec_hpd_gpio_irq_handler,
+ cec_hpd_gpio_irq_handler_thread,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "hpd-gpio", cec);
+ if (ret)
+ return ret;
+ }
+
+ if (cec->v5_gpio) {
+ cec->v5_irq = gpiod_to_irq(cec->v5_gpio);
+ ret = devm_request_threaded_irq(dev, cec->v5_irq,
+ cec_5v_gpio_irq_handler,
+ cec_5v_gpio_irq_handler_thread,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "v5-gpio", cec);
+ if (ret)
+ return ret;
+ }
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cec);
+ return 0;
+}
+
+static int cec_gpio_remove(struct platform_device *pdev)
+{
+ struct cec_gpio *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ return 0;
+}
+
+static const struct of_device_id cec_gpio_match[] = {
+ {
+ .compatible = "cec-gpio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cec_gpio_match);
+
+static struct platform_driver cec_gpio_pdrv = {
+ .probe = cec_gpio_probe,
+ .remove = cec_gpio_remove,
+ .driver = {
+ .name = "cec-gpio",
+ .of_match_table = cec_gpio_match,
+ },
+};
+
+module_platform_driver(cec_gpio_pdrv);
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CEC GPIO driver");
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
new file mode 100644
index 000000000..858284328
--- /dev/null
+++ b/drivers/media/platform/coda/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -I$(src)
+
+coda-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
+
+obj-$(CONFIG_VIDEO_CODA) += coda.o
+obj-$(CONFIG_VIDEO_IMX_VDOA) += imx-vdoa.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
new file mode 100644
index 000000000..c3eaddced
--- /dev/null
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -0,0 +1,2331 @@
+/*
+ * Coda multi-standard codec IP - BIT processor functions
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ * Copyright (C) 2012-2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "coda.h"
+#include "imx-vdoa.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#define CODA_PARA_BUF_SIZE (10 * 1024)
+#define CODA7_PS_BUF_SIZE 0x28000
+#define CODA9_PS_SAVE_SIZE (512 * 1024)
+
+#define CODA_DEFAULT_GAMMA 4096
+#define CODA9_DEFAULT_GAMMA 24576 /* 0.75 * 32768 */
+
+static void coda_free_bitstream_buffer(struct coda_ctx *ctx);
+
+static inline int coda_is_initialized(struct coda_dev *dev)
+{
+ return coda_read(dev, CODA_REG_BIT_CUR_PC) != 0;
+}
+
+static inline unsigned long coda_isbusy(struct coda_dev *dev)
+{
+ return coda_read(dev, CODA_REG_BIT_BUSY);
+}
+
+static int coda_wait_timeout(struct coda_dev *dev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (coda_isbusy(dev)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static void coda_command_async(struct coda_ctx *ctx, int cmd)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ if (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541 ||
+ dev->devtype->product == CODA_960) {
+ /* Restore context related registers to CODA */
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ coda_write(dev, ctx->frame_mem_ctrl,
+ CODA_REG_BIT_FRAME_MEM_CTRL);
+ coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
+ coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
+ }
+
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+
+ coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
+ coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
+ coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
+
+ trace_coda_bit_run(ctx, cmd);
+
+ coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
+}
+
+static int coda_command_sync(struct coda_ctx *ctx, int cmd)
+{
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ coda_command_async(ctx, cmd);
+ ret = coda_wait_timeout(dev);
+ trace_coda_bit_done(ctx);
+
+ return ret;
+}
+
+int coda_hw_reset(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ unsigned long timeout;
+ unsigned int idx;
+ int ret;
+
+ if (!dev->rstc)
+ return -ENOENT;
+
+ idx = coda_read(dev, CODA_REG_BIT_RUN_INDEX);
+
+ if (dev->devtype->product == CODA_960) {
+ timeout = jiffies + msecs_to_jiffies(100);
+ coda_write(dev, 0x11, CODA9_GDI_BUS_CTRL);
+ while (coda_read(dev, CODA9_GDI_BUS_STATUS) != 0x77) {
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+ cpu_relax();
+ }
+ }
+
+ ret = reset_control_reset(dev->rstc);
+ if (ret < 0)
+ return ret;
+
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, 0x00, CODA9_GDI_BUS_CTRL);
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+ coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
+ ret = coda_wait_timeout(dev);
+ coda_write(dev, idx, CODA_REG_BIT_RUN_INDEX);
+
+ return ret;
+}
+
+static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr;
+
+ rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ kfifo->out = (kfifo->in & ~kfifo->mask) |
+ (rd_ptr - ctx->bitstream.paddr);
+ if (kfifo->out > kfifo->in)
+ kfifo->out -= kfifo->mask + 1;
+}
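+
+/*
+ * Illustrative example for coda_kfifo_sync_from_device() above (editorial
+ * note, not from the original driver): assume a 64 KiB bitstream ring, so
+ * kfifo->mask is 0xffff, kfifo->in is 0x10100 and the device read pointer
+ * maps to ring offset 0xff00. Then out = 0x10000 | 0xff00 = 0x1ff00, which
+ * is larger than in, so mask + 1 is subtracted and out becomes 0xff00: the
+ * read index correctly trails the write index by 0x200 bytes.
+ */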
+
+static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr, wr_ptr;
+
+ rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
+ coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
+static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr;
+
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
+static int coda_bitstream_pad(struct coda_ctx *ctx, u32 size)
+{
+ unsigned char *buf;
+ u32 n;
+
+ if (size < 6)
+ size = 6;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ coda_h264_filler_nal(size, buf);
+ n = kfifo_in(&ctx->bitstream_fifo, buf, size);
+ kfree(buf);
+
+ return (n < size) ? -ENOSPC : 0;
+}
+
+static int coda_bitstream_queue(struct coda_ctx *ctx,
+ struct vb2_v4l2_buffer *src_buf)
+{
+ u32 src_size = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ u32 n;
+
+ n = kfifo_in(&ctx->bitstream_fifo,
+ vb2_plane_vaddr(&src_buf->vb2_buf, 0), src_size);
+ if (n < src_size)
+ return -ENOSPC;
+
+ src_buf->sequence = ctx->qsequence++;
+
+ return 0;
+}
+
+static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
+ struct vb2_v4l2_buffer *src_buf)
+{
+ unsigned long payload = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ int ret;
+
+ if (coda_get_bitstream_payload(ctx) + payload + 512 >=
+ ctx->bitstream.size)
+ return false;
+
+ if (vb2_plane_vaddr(&src_buf->vb2_buf, 0) == NULL) {
+ v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
+ return true;
+ }
+
+ /* Add zero padding before the first H.264 buffer, if it is too small */
+ if (ctx->qsequence == 0 && payload < 512 &&
+ ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
+ coda_bitstream_pad(ctx, 512 - payload);
+
+ ret = coda_bitstream_queue(ctx, src_buf);
+ if (ret < 0) {
+ v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
+ return false;
+ }
+	/* Sync write pointer to device */
+ if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
+ coda_kfifo_sync_to_device_write(ctx);
+
+ ctx->hold = false;
+
+ return true;
+}
+
+void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct coda_buffer_meta *meta;
+ unsigned long flags;
+ u32 start;
+
+ if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG)
+ return;
+
+ while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
+ /*
+ * Only queue two JPEGs into the bitstream buffer to keep
+ * latency low. We need at least one complete buffer and the
+ * header of another buffer (for prescan) in the bitstream.
+ */
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
+ ctx->num_metas > 1)
+ break;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+
+		/* Drop frames that do not start/end with SOI/EOI markers */
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
+ !coda_jpeg_check_buffer(ctx, &src_buf->vb2_buf)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "dropping invalid JPEG frame %d\n",
+ ctx->qsequence);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (buffer_list) {
+ struct v4l2_m2m_buffer *m2m_buf;
+
+ m2m_buf = container_of(src_buf,
+ struct v4l2_m2m_buffer,
+ vb);
+ list_add_tail(&m2m_buf->list, buffer_list);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ }
+ continue;
+ }
+
+ /* Dump empty buffers */
+ if (!vb2_get_plane_payload(&src_buf->vb2_buf, 0)) {
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ continue;
+ }
+
+ /* Buffer start position */
+ start = ctx->bitstream_fifo.kfifo.in &
+ ctx->bitstream_fifo.kfifo.mask;
+
+ if (coda_bitstream_try_queue(ctx, src_buf)) {
+ /*
+ * Source buffer is queued in the bitstream ringbuffer;
+ * queue the timestamp and mark source buffer as done
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+
+ meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (meta) {
+ meta->sequence = src_buf->sequence;
+ meta->timecode = src_buf->timecode;
+ meta->timestamp = src_buf->vb2_buf.timestamp;
+ meta->start = start;
+ meta->end = ctx->bitstream_fifo.kfifo.in &
+ ctx->bitstream_fifo.kfifo.mask;
+ spin_lock_irqsave(&ctx->buffer_meta_lock,
+ flags);
+ list_add_tail(&meta->list,
+ &ctx->buffer_meta_list);
+ ctx->num_metas++;
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock,
+ flags);
+
+ trace_coda_bit_queue(ctx, src_buf, meta);
+ }
+
+ if (buffer_list) {
+ struct v4l2_m2m_buffer *m2m_buf;
+
+ m2m_buf = container_of(src_buf,
+ struct v4l2_m2m_buffer,
+ vb);
+ list_add_tail(&m2m_buf->list, buffer_list);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+void coda_bit_stream_end_flag(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ /* If this context is currently running, update the hardware flag */
+ if ((dev->devtype->product == CODA_960) &&
+ coda_isbusy(dev) &&
+ (ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX))) {
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ }
+}
+
+static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
+{
+ struct coda_dev *dev = ctx->dev;
+ u32 *p = ctx->parabuf.vaddr;
+
+ if (dev->devtype->product == CODA_DX6)
+ p[index] = value;
+ else
+ p[index ^ 1] = value;
+}
+
+static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
+ struct coda_aux_buf *buf, size_t size,
+ const char *name)
+{
+ return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry);
+}
+
+static void coda_free_framebuffers(struct coda_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
+ coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]);
+}
+
+static int coda_alloc_framebuffers(struct coda_ctx *ctx,
+ struct coda_q_data *q_data, u32 fourcc)
+{
+ struct coda_dev *dev = ctx->dev;
+ unsigned int ysize, ycbcr_size;
+ int ret;
+ int i;
+
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 ||
+ ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 ||
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4)
+ ysize = round_up(q_data->rect.width, 16) *
+ round_up(q_data->rect.height, 16);
+ else
+ ysize = round_up(q_data->rect.width, 8) * q_data->rect.height;
+
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ ycbcr_size = round_up(ysize, 4096) + ysize / 2;
+ else
+ ycbcr_size = ysize + ysize / 2;
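+
+	/*
+	 * Rough sizing example (editor's note, hypothetical numbers): a
+	 * 1920x1080 H.264 stream rounds up to 1920 * 1088 = 2088960 luma
+	 * bytes, so a linear 4:2:0 frame needs 2088960 * 3 / 2 = 3133440
+	 * bytes; the mvcol buffer added below costs another ysize / 4.
+	 */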
+
+ /* Allocate frame buffers */
+ for (i = 0; i < ctx->num_internal_frames; i++) {
+ size_t size = ycbcr_size;
+ char *name;
+
+ /* Add space for mvcol buffers */
+ if (dev->devtype->product != CODA_DX6 &&
+ (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
+ (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 && i == 0)))
+ size += ysize / 4;
+ name = kasprintf(GFP_KERNEL, "fb%d", i);
+ if (!name) {
+ coda_free_framebuffers(ctx);
+ return -ENOMEM;
+ }
+ ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i],
+ size, name);
+ kfree(name);
+ if (ret < 0) {
+ coda_free_framebuffers(ctx);
+ return ret;
+ }
+ }
+
+ /* Register frame buffers in the parameter buffer */
+ for (i = 0; i < ctx->num_internal_frames; i++) {
+ u32 y, cb, cr, mvcol;
+
+ /* Start addresses of Y, Cb, Cr planes */
+ y = ctx->internal_frames[i].paddr;
+ cb = y + ysize;
+ cr = y + ysize + ysize/4;
+ mvcol = y + ysize + ysize/4 + ysize/4;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP) {
+ cb = round_up(cb, 4096);
+ mvcol = cb + ysize/2;
+ cr = 0;
+ /* Packed 20-bit MSB of base addresses */
+ /* YYYYYCCC, CCyyyyyc, cccc.... */
+ y = (y & 0xfffff000) | cb >> 20;
+ cb = (cb & 0x000ff000) << 12;
+ }
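+		/*
+		 * Editorial illustration with hypothetical addresses: for
+		 * y = 0x48300000 and cb rounded up to 0x4837f000, the packed
+		 * words are y = 0x48300000 | (0x4837f000 >> 20) = 0x48300483
+		 * and cb = (0x4837f000 & 0x000ff000) << 12 = 0x7f000000, so
+		 * the top 20 bits of both base addresses survive the packing.
+		 */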
+ coda_parabuf_write(ctx, i * 3 + 0, y);
+ coda_parabuf_write(ctx, i * 3 + 1, cb);
+ coda_parabuf_write(ctx, i * 3 + 2, cr);
+
+ if (dev->devtype->product == CODA_DX6)
+ continue;
+
+ /* mvcol buffer for h.264 and mpeg4 */
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
+ coda_parabuf_write(ctx, 96 + i, mvcol);
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 && i == 0)
+ coda_parabuf_write(ctx, 97, mvcol);
+ }
+
+ return 0;
+}
+
+static void coda_free_context_buffers(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ coda_free_aux_buf(dev, &ctx->slicebuf);
+ coda_free_aux_buf(dev, &ctx->psbuf);
+ if (dev->devtype->product != CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+ coda_free_aux_buf(dev, &ctx->parabuf);
+}
+
+static int coda_alloc_context_buffers(struct coda_ctx *ctx,
+ struct coda_q_data *q_data)
+{
+ struct coda_dev *dev = ctx->dev;
+ size_t size;
+ int ret;
+
+ if (!ctx->parabuf.vaddr) {
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
+ CODA_PARA_BUF_SIZE, "parabuf");
+ if (ret < 0)
+ return ret;
+ }
+
+ if (dev->devtype->product == CODA_DX6)
+ return 0;
+
+ if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
+ /* worst case slice size */
+ size = (DIV_ROUND_UP(q_data->rect.width, 16) *
+ DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512;
+ ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
+ "slicebuf");
+ if (ret < 0)
+ goto err;
+ }
+
+ if (!ctx->psbuf.vaddr && (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541)) {
+ ret = coda_alloc_context_buf(ctx, &ctx->psbuf,
+ CODA7_PS_BUF_SIZE, "psbuf");
+ if (ret < 0)
+ goto err;
+ }
+
+ if (!ctx->workbuf.vaddr) {
+ size = dev->devtype->workbuf_size;
+ if (dev->devtype->product == CODA_960 &&
+ q_data->fourcc == V4L2_PIX_FMT_H264)
+ size += CODA9_PS_SAVE_SIZE;
+ ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size,
+ "workbuf");
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ coda_free_context_buffers(ctx);
+ return ret;
+}
+
+static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
+ int header_code, u8 *header, int *size)
+{
+ struct vb2_buffer *vb = &buf->vb2_buf;
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src;
+ struct v4l2_rect *r;
+ size_t bufsize;
+ int ret;
+ int i;
+
+ if (dev->devtype->product == CODA_960)
+ memset(vb2_plane_vaddr(vb, 0), 0, 64);
+
+ coda_write(dev, vb2_dma_contig_plane_dma_addr(vb, 0),
+ CODA_CMD_ENC_HEADER_BB_START);
+ bufsize = vb2_plane_size(vb, 0);
+ if (dev->devtype->product == CODA_960)
+ bufsize /= 1024;
+ coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
+ if (dev->devtype->product == CODA_960 &&
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 &&
+ header_code == CODA_HEADER_H264_SPS) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ r = &q_data_src->rect;
+
+ if (r->width % 16 || r->height % 16) {
+ u32 crop_right = round_up(r->width, 16) - r->width;
+ u32 crop_bottom = round_up(r->height, 16) - r->height;
+
+ coda_write(dev, crop_right,
+ CODA9_CMD_ENC_HEADER_FRAME_CROP_H);
+ coda_write(dev, crop_bottom,
+ CODA9_CMD_ENC_HEADER_FRAME_CROP_V);
+ header_code |= CODA9_HEADER_FRAME_CROP;
+ }
+ }
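+	/*
+	 * Example for the frame-crop setup above (editor's note): a
+	 * 1920x1080 source is coded as 1920x1088, giving crop_right = 0 and
+	 * crop_bottom = 8, so the firmware signals an 8-line bottom crop in
+	 * the SPS.
+	 */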
+ coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
+ ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
+ return ret;
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ for (i = 63; i > 0; i--)
+ if (((char *)vb2_plane_vaddr(vb, 0))[i] != 0)
+ break;
+ *size = i + 1;
+ } else {
+ *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
+ coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+ }
+ memcpy(header, vb2_plane_vaddr(vb, 0), *size);
+
+ return 0;
+}
+
+static phys_addr_t coda_iram_alloc(struct coda_iram_info *iram, size_t size)
+{
+ phys_addr_t ret;
+
+ size = round_up(size, 1024);
+ if (size > iram->remaining)
+ return 0;
+ iram->remaining -= size;
+
+ ret = iram->next_paddr;
+ iram->next_paddr += size;
+
+ return ret;
+}
+
+static void coda_setup_iram(struct coda_ctx *ctx)
+{
+ struct coda_iram_info *iram_info = &ctx->iram_info;
+ struct coda_dev *dev = ctx->dev;
+ int w64, w128;
+ int mb_width;
+ int dbk_bits;
+ int bit_bits;
+ int ip_bits;
+ int me_bits;
+
+ memset(iram_info, 0, sizeof(*iram_info));
+ iram_info->next_paddr = dev->iram.paddr;
+ iram_info->remaining = dev->iram.size;
+
+ if (!dev->iram.vaddr)
+ return;
+
+ switch (dev->devtype->product) {
+ case CODA_HX4:
+ dbk_bits = CODA7_USE_HOST_DBK_ENABLE;
+ bit_bits = CODA7_USE_HOST_BIT_ENABLE;
+ ip_bits = CODA7_USE_HOST_IP_ENABLE;
+ me_bits = CODA7_USE_HOST_ME_ENABLE;
+ break;
+ case CODA_7541:
+ dbk_bits = CODA7_USE_HOST_DBK_ENABLE | CODA7_USE_DBK_ENABLE;
+ bit_bits = CODA7_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
+ ip_bits = CODA7_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
+ me_bits = CODA7_USE_HOST_ME_ENABLE | CODA7_USE_ME_ENABLE;
+ break;
+ case CODA_960:
+ dbk_bits = CODA9_USE_HOST_DBK_ENABLE | CODA9_USE_DBK_ENABLE;
+ bit_bits = CODA9_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
+ ip_bits = CODA9_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
+ me_bits = 0;
+ break;
+ default: /* CODA_DX6 */
+ return;
+ }
+
+ if (ctx->inst_type == CODA_INST_ENCODER) {
+ struct coda_q_data *q_data_src;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ mb_width = DIV_ROUND_UP(q_data_src->rect.width, 16);
+ w128 = mb_width * 128;
+ w64 = mb_width * 64;
+
+ /* Prioritize in case IRAM is too small for everything */
+ if (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541) {
+ iram_info->search_ram_size = round_up(mb_width * 16 *
+ 36 + 2048, 1024);
+ iram_info->search_ram_paddr = coda_iram_alloc(iram_info,
+ iram_info->search_ram_size);
+ if (!iram_info->search_ram_paddr) {
+ pr_err("IRAM is smaller than the search ram size\n");
+ goto out;
+ }
+ iram_info->axi_sram_use |= me_bits;
+ }
+
+ /* Only H.264BP and H.263P3 are considered */
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
+ if (!iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+ iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_bit_use)
+ goto out;
+ iram_info->axi_sram_use |= bit_bits;
+
+ iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_ip_ac_dc_use)
+ goto out;
+ iram_info->axi_sram_use |= ip_bits;
+
+ /* OVL and BTP disabled for encoder */
+ } else if (ctx->inst_type == CODA_INST_DECODER) {
+ struct coda_q_data *q_data_dst;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
+ w128 = mb_width * 128;
+
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+ iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_bit_use)
+ goto out;
+ iram_info->axi_sram_use |= bit_bits;
+
+ iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_ip_ac_dc_use)
+ goto out;
+ iram_info->axi_sram_use |= ip_bits;
+
+ /* OVL and BTP unused as there is no VC1 support yet */
+ }
+
+out:
+ if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "IRAM smaller than needed\n");
+
+ if (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541) {
+ /* TODO - Enabling these causes picture errors on CODA7541 */
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ /* fw 1.4.50 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_IP_ENABLE);
+ } else {
+ /* fw 13.4.29 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_HOST_DBK_ENABLE |
+ CODA7_USE_IP_ENABLE |
+ CODA7_USE_DBK_ENABLE);
+ }
+ }
+}
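+
+/*
+ * Sizing example for coda_setup_iram() above (editorial note, not part of
+ * the original driver): encoding a 1920-pixel wide stream gives
+ * mb_width = 120, so the CODA7541 motion estimation search RAM is
+ * round_up(120 * 16 * 36 + 2048, 1024) = 71680 bytes and each deblocking
+ * buffer takes w64 = 120 * 64 = 7680 bytes of IRAM.
+ */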
+
+static u32 coda_supported_firmwares[] = {
+ CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
+ CODA_FIRMWARE_VERNUM(CODA_HX4, 1, 4, 50),
+ CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
+ CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
+ CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 9),
+ CODA_FIRMWARE_VERNUM(CODA_960, 2, 3, 10),
+ CODA_FIRMWARE_VERNUM(CODA_960, 3, 1, 1),
+};
+
+static bool coda_firmware_supported(u32 vernum)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coda_supported_firmwares); i++)
+ if (vernum == coda_supported_firmwares[i])
+ return true;
+ return false;
+}
+
+int coda_check_firmware(struct coda_dev *dev)
+{
+ u16 product, major, minor, release;
+ u32 data;
+ int ret;
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ coda_write(dev, 0, CODA_CMD_FIRMWARE_VERNUM);
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+ coda_write(dev, 0, CODA_REG_BIT_RUN_INDEX);
+ coda_write(dev, 0, CODA_REG_BIT_RUN_COD_STD);
+ coda_write(dev, CODA_COMMAND_FIRMWARE_GET, CODA_REG_BIT_RUN_COMMAND);
+ if (coda_wait_timeout(dev)) {
+ v4l2_err(&dev->v4l2_dev, "firmware get command error\n");
+ ret = -EIO;
+ goto err_run_cmd;
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ data = coda_read(dev, CODA9_CMD_FIRMWARE_CODE_REV);
+ v4l2_info(&dev->v4l2_dev, "Firmware code revision: %d\n",
+ data);
+ }
+
+ /* Check we are compatible with the loaded firmware */
+ data = coda_read(dev, CODA_CMD_FIRMWARE_VERNUM);
+ product = CODA_FIRMWARE_PRODUCT(data);
+ major = CODA_FIRMWARE_MAJOR(data);
+ minor = CODA_FIRMWARE_MINOR(data);
+ release = CODA_FIRMWARE_RELEASE(data);
+
+ clk_disable_unprepare(dev->clk_per);
+ clk_disable_unprepare(dev->clk_ahb);
+
+ if (product != dev->devtype->product) {
+ v4l2_err(&dev->v4l2_dev,
+ "Wrong firmware. Hw: %s, Fw: %s, Version: %u.%u.%u\n",
+ coda_product_name(dev->devtype->product),
+ coda_product_name(product), major, minor, release);
+ return -EINVAL;
+ }
+
+ v4l2_info(&dev->v4l2_dev, "Initialized %s.\n",
+ coda_product_name(product));
+
+ if (coda_firmware_supported(data)) {
+ v4l2_info(&dev->v4l2_dev, "Firmware version: %u.%u.%u\n",
+ major, minor, release);
+ } else {
+ v4l2_warn(&dev->v4l2_dev,
+ "Unsupported firmware version: %u.%u.%u\n",
+ major, minor, release);
+ }
+
+ return 0;
+
+err_run_cmd:
+ clk_disable_unprepare(dev->clk_ahb);
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
+ return ret;
+}
+
+static void coda9_set_frame_cache(struct coda_ctx *ctx, u32 fourcc)
+{
+ u32 cache_size, cache_config;
+
+ if (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) {
+ /* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
+ cache_size = 0x20262024;
+ cache_config = 2 << CODA9_CACHE_PAGEMERGE_OFFSET;
+ } else {
+ /* Luma 0x2 page, 4x4 cache, chroma 0x2 page, 4x3 cache size */
+ cache_size = 0x02440243;
+ cache_config = 1 << CODA9_CACHE_PAGEMERGE_OFFSET;
+ }
+ coda_write(ctx->dev, cache_size, CODA9_CMD_SET_FRAME_CACHE_SIZE);
+ if (fourcc == V4L2_PIX_FMT_NV12 || fourcc == V4L2_PIX_FMT_YUYV) {
+ cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
+ 16 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
+ 0 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
+ } else {
+ cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
+ 8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
+ 8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
+ }
+ coda_write(ctx->dev, cache_config, CODA9_CMD_SET_FRAME_CACHE_CONFIG);
+}
+
+/*
+ * Encoder context operations
+ */
+
+static int coda_encoder_reqbufs(struct coda_ctx *ctx,
+ struct v4l2_requestbuffers *rb)
+{
+ struct coda_q_data *q_data_src;
+ int ret;
+
+ if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return 0;
+
+ if (rb->count) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+ } else {
+ coda_free_context_buffers(ctx);
+ }
+
+ return 0;
+}
+
+static int coda_start_encoding(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
+ struct coda_q_data *q_data_src, *q_data_dst;
+ u32 bitstream_buf, bitstream_size;
+ struct vb2_v4l2_buffer *buf;
+ int gamma, ret, value;
+ u32 dst_fourcc;
+ int num_fb;
+ u32 stride;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_fourcc = q_data_dst->fourcc;
+
+ buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ bitstream_buf = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
+ bitstream_size = q_data_dst->sizeimage;
+
+ if (!coda_is_initialized(dev)) {
+ v4l2_err(v4l2_dev, "coda is not initialized.\n");
+ return -EFAULT;
+ }
+
+ if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
+ if (!ctx->params.jpeg_qmat_tab[0])
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+ if (!ctx->params.jpeg_qmat_tab[1])
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+ }
+
+ mutex_lock(&dev->coda_mutex);
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
+ CODADX6_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+ break;
+ case CODA_960:
+ coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
+ /* fallthrough */
+ case CODA_HX4:
+ case CODA_7541:
+ coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
+ CODA7_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+ break;
+ }
+
+ ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
+ CODA9_FRAME_TILED2LINEAR);
+ if (q_data_src->fourcc == V4L2_PIX_FMT_NV12)
+ ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
+ coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
+
+ if (dev->devtype->product == CODA_DX6) {
+ /* Configure the coda */
+ coda_write(dev, dev->iram.paddr,
+ CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR);
+ }
+
+ /* Could set rotation here if needed */
+ value = 0;
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ value = (q_data_src->rect.width & CODADX6_PICWIDTH_MASK)
+ << CODADX6_PICWIDTH_OFFSET;
+ value |= (q_data_src->rect.height & CODADX6_PICHEIGHT_MASK)
+ << CODA_PICHEIGHT_OFFSET;
+ break;
+ case CODA_HX4:
+ case CODA_7541:
+ if (dst_fourcc == V4L2_PIX_FMT_H264) {
+ value = (round_up(q_data_src->rect.width, 16) &
+ CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
+ value |= (round_up(q_data_src->rect.height, 16) &
+ CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
+ break;
+ }
+ /* fallthrough */
+ case CODA_960:
+ value = (q_data_src->rect.width & CODA7_PICWIDTH_MASK)
+ << CODA7_PICWIDTH_OFFSET;
+ value |= (q_data_src->rect.height & CODA7_PICHEIGHT_MASK)
+ << CODA_PICHEIGHT_OFFSET;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
+ if (dst_fourcc == V4L2_PIX_FMT_JPEG)
+ ctx->params.framerate = 0;
+ coda_write(dev, ctx->params.framerate,
+ CODA_CMD_ENC_SEQ_SRC_F_RATE);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_MPEG4:
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, CODA9_STD_MPEG4,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ else
+ coda_write(dev, CODA_STD_MPEG4,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_MP4_PARA);
+ break;
+ case V4L2_PIX_FMT_H264:
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, CODA9_STD_H264,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ else
+ coda_write(dev, CODA_STD_H264,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ value = ((ctx->params.h264_disable_deblocking_filter_idc &
+ CODA_264PARAM_DISABLEDEBLK_MASK) <<
+ CODA_264PARAM_DISABLEDEBLK_OFFSET) |
+ ((ctx->params.h264_slice_alpha_c0_offset_div2 &
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
+ ((ctx->params.h264_slice_beta_offset_div2 &
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_PARA);
+ coda_write(dev, ctx->params.jpeg_restart_interval,
+ CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_EN);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET);
+
+ coda_jpeg_write_tables(ctx);
+ break;
+ default:
+ v4l2_err(v4l2_dev,
+ "dst format (0x%08x) invalid.\n", dst_fourcc);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * slice mode and GOP size registers are used for thumb size/offset
+ * in JPEG mode
+ */
+ if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
+ switch (ctx->params.slice_mode) {
+ case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
+ value = 0;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
+ value = (ctx->params.slice_max_mb &
+ CODA_SLICING_SIZE_MASK)
+ << CODA_SLICING_SIZE_OFFSET;
+ value |= (1 & CODA_SLICING_UNIT_MASK)
+ << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
+ value = (ctx->params.slice_max_bits &
+ CODA_SLICING_SIZE_MASK)
+ << CODA_SLICING_SIZE_OFFSET;
+ value |= (0 & CODA_SLICING_UNIT_MASK)
+ << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
+ value = ctx->params.gop_size;
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
+ }
+
+ if (ctx->params.bitrate) {
+ /* Rate control enabled */
+ value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK)
+ << CODA_RATECONTROL_BITRATE_OFFSET;
+ value |= 1 & CODA_RATECONTROL_ENABLE_MASK;
+ value |= (ctx->params.vbv_delay &
+ CODA_RATECONTROL_INITIALDELAY_MASK)
+ << CODA_RATECONTROL_INITIALDELAY_OFFSET;
+ if (dev->devtype->product == CODA_960)
+ value |= BIT(31); /* disable autoskip */
+ } else {
+ value = 0;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
+
+ coda_write(dev, ctx->params.vbv_size, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
+ coda_write(dev, ctx->params.intra_refresh,
+ CODA_CMD_ENC_SEQ_INTRA_REFRESH);
+
+ coda_write(dev, bitstream_buf, CODA_CMD_ENC_SEQ_BB_START);
+ coda_write(dev, bitstream_size / 1024, CODA_CMD_ENC_SEQ_BB_SIZE);
+
+ value = 0;
+ if (dev->devtype->product == CODA_960)
+ gamma = CODA9_DEFAULT_GAMMA;
+ else
+ gamma = CODA_DEFAULT_GAMMA;
+ if (gamma > 0) {
+ coda_write(dev, (gamma & CODA_GAMMA_MASK) << CODA_GAMMA_OFFSET,
+ CODA_CMD_ENC_SEQ_RC_GAMMA);
+ }
+
+ if (ctx->params.h264_min_qp || ctx->params.h264_max_qp) {
+ coda_write(dev,
+ ctx->params.h264_min_qp << CODA_QPMIN_OFFSET |
+ ctx->params.h264_max_qp << CODA_QPMAX_OFFSET,
+ CODA_CMD_ENC_SEQ_RC_QP_MIN_MAX);
+ }
+ if (dev->devtype->product == CODA_960) {
+ if (ctx->params.h264_max_qp)
+ value |= 1 << CODA9_OPTION_RCQPMAX_OFFSET;
+ if (CODA_DEFAULT_GAMMA > 0)
+ value |= 1 << CODA9_OPTION_GAMMA_OFFSET;
+ } else {
+ if (CODA_DEFAULT_GAMMA > 0) {
+ if (dev->devtype->product == CODA_DX6)
+ value |= 1 << CODADX6_OPTION_GAMMA_OFFSET;
+ else
+ value |= 1 << CODA7_OPTION_GAMMA_OFFSET;
+ }
+ if (ctx->params.h264_min_qp)
+ value |= 1 << CODA7_OPTION_RCQPMIN_OFFSET;
+ if (ctx->params.h264_max_qp)
+ value |= 1 << CODA7_OPTION_RCQPMAX_OFFSET;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
+
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE);
+
+ coda_setup_iram(ctx);
+
+ if (dst_fourcc == V4L2_PIX_FMT_H264) {
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ value = FMO_SLICE_SAVE_BUF_SIZE << 7;
+ coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
+ break;
+ case CODA_HX4:
+ case CODA_7541:
+ coda_write(dev, ctx->iram_info.search_ram_paddr,
+ CODA7_CMD_ENC_SEQ_SEARCH_BASE);
+ coda_write(dev, ctx->iram_info.search_ram_size,
+ CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
+ break;
+ case CODA_960:
+ coda_write(dev, 0, CODA9_CMD_ENC_SEQ_ME_OPTION);
+ coda_write(dev, 0, CODA9_CMD_ENC_SEQ_INTRA_WEIGHT);
+ }
+ }
+
+ ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ goto out;
+ }
+
+ if (coda_read(dev, CODA_RET_ENC_SEQ_SUCCESS) == 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT failed\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ctx->initialized = 1;
+
+ if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
+ if (dev->devtype->product == CODA_960)
+ ctx->num_internal_frames = 4;
+ else
+ ctx->num_internal_frames = 2;
+ ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
+ goto out;
+ }
+ num_fb = 2;
+ stride = q_data_src->bytesperline;
+ } else {
+ ctx->num_internal_frames = 0;
+ num_fb = 0;
+ stride = 0;
+ }
+ coda_write(dev, num_fb, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, stride, CODA_CMD_SET_FRAME_BUF_STRIDE);
+
+ if (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541) {
+ coda_write(dev, q_data_src->bytesperline,
+ CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
+ }
+ if (dev->devtype->product != CODA_DX6) {
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, ctx->iram_info.buf_btp_use,
+ CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
+
+ coda9_set_frame_cache(ctx, q_data_src->fourcc);
+
+ /* FIXME */
+ coda_write(dev, ctx->internal_frames[2].paddr,
+ CODA9_CMD_SET_FRAME_SUBSAMP_A);
+ coda_write(dev, ctx->internal_frames[3].paddr,
+ CODA9_CMD_SET_FRAME_SUBSAMP_B);
+ }
+ }
+
+ ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+ goto out;
+ }
+
+ /* Save stream headers */
+ buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ /*
+ * Get SPS in the first frame and copy it to an
+ * intermediate buffer.
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_SPS,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0]);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * If visible width or height are not aligned to macroblock
+ * size, the crop_right and crop_bottom SPS fields must be set
+ * to the difference between visible and coded size. This is
+ * only supported by CODA960 firmware. All others do not allow
+ * writing frame cropping parameters, so we have to manually
+ * fix up the SPS RBSP (Sequence Parameter Set Raw Byte
+ * Sequence Payload) ourselves.
+ */
+ if (ctx->dev->devtype->product != CODA_960 &&
+ ((q_data_src->rect.width % 16) ||
+ (q_data_src->rect.height % 16))) {
+ ret = coda_h264_sps_fixup(ctx, q_data_src->rect.width,
+ q_data_src->rect.height,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0],
+ sizeof(ctx->vpu_header[0]));
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+ * Get PPS in the first frame and copy it to an
+ * intermediate buffer.
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_PPS,
+ &ctx->vpu_header[1][0],
+ &ctx->vpu_header_size[1]);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Length of H.264 headers is variable and thus it might not be
+		 * aligned for the coda to append the encoded frame. If that is
+		 * the case, a filler NAL must be added to header 2.
+ */
+ ctx->vpu_header_size[2] = coda_h264_padding(
+ (ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1]),
+ ctx->vpu_header[2]);
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ /*
+ * Get VOS in the first frame and copy it to an
+ * intermediate buffer
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOS,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0]);
+ if (ret < 0)
+ goto out;
+
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VIS,
+ &ctx->vpu_header[1][0],
+ &ctx->vpu_header_size[1]);
+ if (ret < 0)
+ goto out;
+
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOL,
+ &ctx->vpu_header[2][0],
+ &ctx->vpu_header_size[2]);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ /* No more formats need to save headers at the moment */
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->coda_mutex);
+ return ret;
+}
+
+static int coda_prepare_encode(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ int force_ipicture;
+ int quant_param = 0;
+ u32 pic_stream_buffer_addr, pic_stream_buffer_size;
+ u32 rot_mode = 0;
+ u32 dst_fourcc;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_fourcc = q_data_dst->fourcc;
+
+ src_buf->sequence = ctx->osequence;
+ dst_buf->sequence = ctx->osequence;
+ ctx->osequence++;
+
+ force_ipicture = ctx->params.force_ipicture;
+ if (force_ipicture)
+ ctx->params.force_ipicture = false;
+ else if (ctx->params.gop_size != 0 &&
+ (src_buf->sequence % ctx->params.gop_size) == 0)
+ force_ipicture = 1;
+
+ /*
+	 * Work around a coda firmware bug that marks only the first
+ * frame as IDR. This is a problem for some decoders that can't
+ * recover when a frame is lost.
+ */
+ if (!force_ipicture) {
+ src_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ src_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ } else {
+ src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
+ }
+
+ if (dev->devtype->product == CODA_960)
+ coda_set_gdi_regs(ctx);
+
+ /*
+ * Copy headers in front of the first frame and forced I frames for
+ * H.264 only. In MPEG4 they are already copied by the CODA.
+ */
+ if (src_buf->sequence == 0 || force_ipicture) {
+ pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) +
+ ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1] +
+ ctx->vpu_header_size[2];
+ pic_stream_buffer_size = q_data_dst->sizeimage -
+ ctx->vpu_header_size[0] -
+ ctx->vpu_header_size[1] -
+ ctx->vpu_header_size[2];
+ memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0),
+ &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
+ memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+ + ctx->vpu_header_size[0], &ctx->vpu_header[1][0],
+ ctx->vpu_header_size[1]);
+ memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+ + ctx->vpu_header_size[0] + ctx->vpu_header_size[1],
+ &ctx->vpu_header[2][0], ctx->vpu_header_size[2]);
+ } else {
+ pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ pic_stream_buffer_size = q_data_dst->sizeimage;
+ }
+
+ if (force_ipicture) {
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ quant_param = ctx->params.h264_intra_qp;
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ quant_param = ctx->params.mpeg4_intra_qp;
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ quant_param = 30;
+ break;
+ default:
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "cannot set intra qp, fmt not supported\n");
+ break;
+ }
+ } else {
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ quant_param = ctx->params.h264_inter_qp;
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ quant_param = ctx->params.mpeg4_inter_qp;
+ break;
+ default:
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "cannot set inter qp, fmt not supported\n");
+ break;
+ }
+ }
+
+ /* submit */
+ if (ctx->params.rot_mode)
+ rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
+ coda_write(dev, rot_mode, CODA_CMD_ENC_PIC_ROT_MODE);
+ coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
+
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
+ coda_write(dev, q_data_src->bytesperline,
+ CODA9_CMD_ENC_PIC_SRC_STRIDE);
+ coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
+
+ reg = CODA9_CMD_ENC_PIC_SRC_ADDR_Y;
+ } else {
+ reg = CODA_CMD_ENC_PIC_SRC_ADDR_Y;
+ }
+ coda_write_base(ctx, q_data_src, src_buf, reg);
+
+ coda_write(dev, force_ipicture << 1 & 0x2,
+ CODA_CMD_ENC_PIC_OPTION);
+
+ coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
+ coda_write(dev, pic_stream_buffer_size / 1024,
+ CODA_CMD_ENC_PIC_BB_SIZE);
+
+ if (!ctx->streamon_out) {
+ /* After streamoff on the output side, set stream end flag */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ }
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, ctx->iram_info.axi_sram_use,
+ CODA7_REG_BIT_AXI_SRAM_USE);
+
+ trace_coda_enc_pic_run(ctx, src_buf);
+
+ coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
+
+ return 0;
+}
+
+static void coda_finish_encode(struct coda_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr, start_ptr;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ trace_coda_enc_pic_done(ctx, dst_buf);
+
+ /* Get results from the coda */
+ start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
+ wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+
+ /* Calculate bytesused field */
+ if (dst_buf->sequence == 0 ||
+ src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
+ ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1] +
+ ctx->vpu_header_size[2]);
+ } else {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
+ }
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
+ wr_ptr - start_ptr);
+
+ coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
+ coda_read(dev, CODA_RET_ENC_PIC_FLAG);
+
+ if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
+ } else {
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->field = src_buf->field;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->flags |=
+ src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->timecode = src_buf->timecode;
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
+
+ ctx->gopcounter--;
+ if (ctx->gopcounter < 0)
+ ctx->gopcounter = ctx->params.gop_size - 1;
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: encoding frame (%d) (%s)\n",
+ dst_buf->sequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ "KEYFRAME" : "PFRAME");
+}
+
+static void coda_seq_end_work(struct work_struct *work)
+{
+ struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work);
+ struct coda_dev *dev = ctx->dev;
+
+ mutex_lock(&ctx->buffer_mutex);
+ mutex_lock(&dev->coda_mutex);
+
+ if (ctx->initialized == 0)
+ goto out;
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%d: %s: sent command 'SEQ_END' to coda\n", ctx->idx,
+ __func__);
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_END failed\n");
+ }
+
+ /*
+ * FIXME: Sometimes h.264 encoding fails with 8-byte sequences missing
+ * from the output stream after the h.264 decoder has run. Resetting the
+ * hardware after the decoder has finished seems to help.
+ */
+ if (dev->devtype->product == CODA_960)
+ coda_hw_reset(ctx);
+
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+
+ coda_free_framebuffers(ctx);
+
+ ctx->initialized = 0;
+
+out:
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+}
+
+static void coda_bit_release(struct coda_ctx *ctx)
+{
+ mutex_lock(&ctx->buffer_mutex);
+ coda_free_framebuffers(ctx);
+ coda_free_context_buffers(ctx);
+ coda_free_bitstream_buffer(ctx);
+ mutex_unlock(&ctx->buffer_mutex);
+}
+
+const struct coda_context_ops coda_bit_encode_ops = {
+ .queue_init = coda_encoder_queue_init,
+ .reqbufs = coda_encoder_reqbufs,
+ .start_streaming = coda_start_encoding,
+ .prepare_run = coda_prepare_encode,
+ .finish_run = coda_finish_encode,
+ .seq_end_work = coda_seq_end_work,
+ .release = coda_bit_release,
+};
+
+/*
+ * Decoder context operations
+ */
+
+static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
+ struct coda_q_data *q_data)
+{
+ if (ctx->bitstream.vaddr)
+ return 0;
+
+ ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
+ ctx->bitstream.vaddr = dma_alloc_wc(&ctx->dev->plat_dev->dev,
+ ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
+ if (!ctx->bitstream.vaddr) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "failed to allocate bitstream ringbuffer");
+ return -ENOMEM;
+ }
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+
+ return 0;
+}
+
+static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
+{
+ if (ctx->bitstream.vaddr == NULL)
+ return;
+
+ dma_free_wc(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ ctx->bitstream.vaddr = NULL;
+ kfifo_init(&ctx->bitstream_fifo, NULL, 0);
+}
+
+static int coda_decoder_reqbufs(struct coda_ctx *ctx,
+ struct v4l2_requestbuffers *rb)
+{
+ struct coda_q_data *q_data_src;
+ int ret;
+
+ if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return 0;
+
+ if (rb->count) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+ ret = coda_alloc_bitstream_buffer(ctx, q_data_src);
+ if (ret < 0) {
+ coda_free_context_buffers(ctx);
+ return ret;
+ }
+ } else {
+ coda_free_bitstream_buffer(ctx);
+ coda_free_context_buffers(ctx);
+ }
+
+ return 0;
+}
+
+static bool coda_reorder_enable(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ int profile;
+
+ if (dev->devtype->product != CODA_HX4 &&
+ dev->devtype->product != CODA_7541 &&
+ dev->devtype->product != CODA_960)
+ return false;
+
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
+ return false;
+
+ if (ctx->codec->src_fourcc != V4L2_PIX_FMT_H264)
+ return true;
+
+ profile = coda_h264_profile(ctx->params.h264_profile_idc);
+ if (profile < 0)
+ v4l2_warn(&dev->v4l2_dev, "Unknown H264 Profile: %u\n",
+ ctx->params.h264_profile_idc);
+
+ /* Baseline profile does not support reordering */
+ return profile > V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+}
+
+static int __coda_start_decoding(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ u32 bitstream_buf, bitstream_size;
+ struct coda_dev *dev = ctx->dev;
+ int width, height;
+ u32 src_fourcc, dst_fourcc;
+ u32 val;
+ int ret;
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "Video Data Order Adapter: %s\n",
+ ctx->use_vdoa ? "Enabled" : "Disabled");
+
+ /* Start decoding */
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ bitstream_buf = ctx->bitstream.paddr;
+ bitstream_size = ctx->bitstream.size;
+ src_fourcc = q_data_src->fourcc;
+ dst_fourcc = q_data_dst->fourcc;
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+
+ /* Update coda bitstream read and write pointers from kfifo */
+ coda_kfifo_sync_to_device_full(ctx);
+
+ ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
+ CODA9_FRAME_TILED2LINEAR);
+ if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
+ ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ ctx->frame_mem_ctrl |= (0x3 << 9) |
+ ((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
+ coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
+
+ ctx->display_idx = -1;
+ ctx->frm_dis_flg = 0;
+ coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+ coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
+ coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
+ val = 0;
+ if (coda_reorder_enable(ctx))
+ val |= CODA_REORDER_ENABLE;
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
+ val |= CODA_NO_INT_ENABLE;
+ coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ if (dev->devtype->product == CODA_960 &&
+ src_fourcc == V4L2_PIX_FMT_MPEG4)
+ ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4;
+ else
+ ctx->params.codec_mode_aux = 0;
+ if (src_fourcc == V4L2_PIX_FMT_MPEG4) {
+ coda_write(dev, CODA_MP4_CLASS_MPEG4,
+ CODA_CMD_DEC_SEQ_MP4_ASP_CLASS);
+ }
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541) {
+ coda_write(dev, ctx->psbuf.paddr,
+ CODA_CMD_DEC_SEQ_PS_BB_START);
+ coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
+ CODA_CMD_DEC_SEQ_PS_BB_SIZE);
+ }
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, 0, CODA_CMD_DEC_SEQ_X264_MV_EN);
+ coda_write(dev, 512, CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE);
+ }
+ }
+ if (src_fourcc == V4L2_PIX_FMT_JPEG)
+ coda_write(dev, 0, CODA_CMD_DEC_SEQ_JPG_THUMB_EN);
+ if (dev->devtype->product != CODA_960)
+ coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
+
+ ctx->bit_stream_param = CODA_BIT_DEC_SEQ_INIT_ESCAPE;
+ ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
+ ctx->bit_stream_param = 0;
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ return ret;
+ }
+ ctx->sequence_offset = ~0U;
+ ctx->initialized = 1;
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
+
+ if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
+ coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
+ return -EAGAIN;
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
+ if (dev->devtype->product == CODA_DX6) {
+ width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
+ height = val & CODADX6_PICHEIGHT_MASK;
+ } else {
+ width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
+ height = val & CODA7_PICHEIGHT_MASK;
+ }
+
+ if (width > q_data_dst->bytesperline || height > q_data_dst->height) {
+ v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
+ width, height, q_data_dst->bytesperline,
+ q_data_dst->height);
+ return -EINVAL;
+ }
+
+ width = round_up(width, 16);
+ height = round_up(height, 16);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s instance %d now: %dx%d\n",
+ __func__, ctx->idx, width, height);
+
+ ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED);
+ /*
+ * If the VDOA is used, the decoder needs one additional frame,
+ * because the frames are freed when the next frame is decoded.
+ * Otherwise there are visible errors in the decoded frames (green
+ * regions in displayed frames) and a broken order of frames (earlier
+ * frames are sporadically displayed after later frames).
+ */
+ if (ctx->use_vdoa)
+ ctx->num_internal_frames += 1;
+ if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough framebuffers to decode (%d < %d)\n",
+ CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
+ return -EINVAL;
+ }
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ u32 left_right;
+ u32 top_bottom;
+
+ left_right = coda_read(dev, CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT);
+ top_bottom = coda_read(dev, CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM);
+
+ q_data_dst->rect.left = (left_right >> 10) & 0x3ff;
+ q_data_dst->rect.top = (top_bottom >> 10) & 0x3ff;
+ q_data_dst->rect.width = width - q_data_dst->rect.left -
+ (left_right & 0x3ff);
+ q_data_dst->rect.height = height - q_data_dst->rect.top -
+ (top_bottom & 0x3ff);
+ }
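+	/*
+	 * Example (editor's note): a 1920x1080 H.264 stream is coded as
+	 * 1920x1088 with CROP_TOP_BOTTOM = (0 << 10) | 8, so rect.top = 0
+	 * and rect.height = 1088 - 0 - 8 = 1080, recovering the visible
+	 * size.
+	 */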
+
+ ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate framebuffers\n");
+ return ret;
+ }
+
+ /* Tell the decoder how many frame buffers we allocated. */
+ coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, width, CODA_CMD_SET_FRAME_BUF_STRIDE);
+
+ if (dev->devtype->product != CODA_DX6) {
+ /* Set secondary AXI IRAM */
+ coda_setup_iram(ctx);
+
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, ctx->iram_info.buf_btp_use,
+ CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
+
+ coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
+ coda9_set_frame_cache(ctx, dst_fourcc);
+ }
+ }
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ coda_write(dev, ctx->slicebuf.paddr,
+ CODA_CMD_SET_FRAME_SLICE_BB_START);
+ coda_write(dev, ctx->slicebuf.size / 1024,
+ CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
+ }
+
+ if (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541) {
+ int max_mb_x = 1920 / 16;
+ int max_mb_y = 1088 / 16;
+ int max_mb_num = max_mb_x * max_mb_y;
+
+ coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
+ CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
+ } else if (dev->devtype->product == CODA_960) {
+ int max_mb_x = 1920 / 16;
+ int max_mb_y = 1088 / 16;
+ int max_mb_num = max_mb_x * max_mb_y;
+
+ coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
+ CODA9_CMD_SET_FRAME_MAX_DEC_SIZE);
+ }
+
+ if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int coda_start_decoding(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&dev->coda_mutex);
+ ret = __coda_start_decoding(ctx);
+ mutex_unlock(&dev->coda_mutex);
+
+ return ret;
+}
+
+static int coda_prepare_decode(struct coda_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_dst;
+ struct coda_buffer_meta *meta;
+ unsigned long flags;
+ u32 rot_mode = 0;
+ u32 reg_addr, reg_stride;
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /* Try to copy source buffer contents into the bitstream ringbuffer */
+ mutex_lock(&ctx->bitstream_mutex);
+ coda_fill_bitstream(ctx, NULL);
+ mutex_unlock(&ctx->bitstream_mutex);
+
+ if (coda_get_bitstream_payload(ctx) < 512 &&
+ (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "bitstream payload: %d, skipping\n",
+ coda_get_bitstream_payload(ctx));
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ return -EAGAIN;
+ }
+
+ /* Run coda_start_decoding (again) if not yet initialized */
+ if (!ctx->initialized) {
+ int ret = __coda_start_decoding(ctx);
+
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ return -EAGAIN;
+ } else {
+ ctx->initialized = 1;
+ }
+ }
+
+ if (dev->devtype->product == CODA_960)
+ coda_set_gdi_regs(ctx);
+
+ if (ctx->use_vdoa &&
+ ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ vdoa_device_run(ctx->vdoa,
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0),
+ ctx->internal_frames[ctx->display_idx].paddr);
+ } else {
+ if (dev->devtype->product == CODA_960) {
+ /*
+ * The CODA960 seems to have an internal list of
+ * buffers with 64 entries that includes the
+ * registered frame buffers as well as the rotator
+ * buffer output.
+ *
+ * ROT_INDEX needs to be < 0x40, but >
+ * ctx->num_internal_frames.
+ */
+ coda_write(dev,
+ CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
+ CODA9_CMD_DEC_PIC_ROT_INDEX);
+
+ reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
+ reg_stride = CODA9_CMD_DEC_PIC_ROT_STRIDE;
+ } else {
+ reg_addr = CODA_CMD_DEC_PIC_ROT_ADDR_Y;
+ reg_stride = CODA_CMD_DEC_PIC_ROT_STRIDE;
+ }
+ coda_write_base(ctx, q_data_dst, dst_buf, reg_addr);
+ coda_write(dev, q_data_dst->bytesperline, reg_stride);
+
+ rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
+ }
+
+ coda_write(dev, rot_mode, CODA_CMD_DEC_PIC_ROT_MODE);
+
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ /* TBD */
+ case CODA_HX4:
+ case CODA_7541:
+ coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
+ break;
+ case CODA_960:
+ /* 'hardcode to use interrupt disable mode'? */
+ coda_write(dev, (1 << 10), CODA_CMD_DEC_PIC_OPTION);
+ break;
+ }
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, ctx->iram_info.axi_sram_use,
+ CODA7_REG_BIT_AXI_SRAM_USE);
+
+ spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
+ meta = list_first_entry_or_null(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+
+ if (meta && ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
+
+ /* If this is the last buffer in the bitstream, add padding */
+ if (meta->end == (ctx->bitstream_fifo.kfifo.in &
+ ctx->bitstream_fifo.kfifo.mask)) {
+ static unsigned char buf[512];
+ unsigned int pad;
+
+			/* Pad to a multiple of 256 and then add 256 more */
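+			/* e.g. meta->end == 0x1234 -> pad = 0xcc + 0x100 = 460 bytes of 0xff filler */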
+ pad = ((0 - meta->end) & 0xff) + 256;
+
+ memset(buf, 0xff, sizeof(buf));
+
+ kfifo_in(&ctx->bitstream_fifo, buf, pad);
+ }
+ }
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+
+ coda_kfifo_sync_to_device_full(ctx);
+
+ /* Clear decode success flag */
+ coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
+
+ /* Clear error return value */
+ coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
+
+ trace_coda_dec_pic_run(ctx, meta);
+
+ coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
+
+ return 0;
+}
+
+static void coda_finish_decode(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src;
+ struct coda_q_data *q_data_dst;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct coda_buffer_meta *meta;
+ unsigned long payload;
+ unsigned long flags;
+ int width, height;
+ int decoded_idx;
+ int display_idx;
+ u32 src_fourcc;
+ int success;
+ u32 err_mb;
+ int err_vdoa = 0;
+ u32 val;
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
+
+ /*
+ * in stream-end mode, the read pointer can overshoot the write pointer
+ * by up to 512 bytes
+ */
+ if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
+ if (coda_get_bitstream_payload(ctx) >= ctx->bitstream.size - 512)
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ }
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_fourcc = q_data_src->fourcc;
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
+ if (val != 1)
+ pr_err("DEC_PIC_SUCCESS = %d\n", val);
+
+ success = val & 0x1;
+ if (!success)
+ v4l2_err(&dev->v4l2_dev, "decode failed\n");
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (val & (1 << 3))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient PS buffer space (%d bytes)\n",
+ ctx->psbuf.size);
+ if (val & (1 << 2))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient slice buffer space (%d bytes)\n",
+ ctx->slicebuf.size);
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
+ width = (val >> 16) & 0xffff;
+ height = val & 0xffff;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /* frame crop information */
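+	/*
+	 * The crop registers pack the left/top offset into the upper and the
+	 * right/bottom offset into the lower 16 bits; for example, a
+	 * CROP_TOP_BOTTOM value of 0x00000008 on a 1088-line frame yields a
+	 * visible height of 1088 - 8 = 1080 lines.
+	 */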
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ u32 left_right;
+ u32 top_bottom;
+
+ left_right = coda_read(dev, CODA_RET_DEC_PIC_CROP_LEFT_RIGHT);
+ top_bottom = coda_read(dev, CODA_RET_DEC_PIC_CROP_TOP_BOTTOM);
+
+ if (left_right == 0xffffffff && top_bottom == 0xffffffff) {
+ /* Keep current crop information */
+ } else {
+ struct v4l2_rect *rect = &q_data_dst->rect;
+
+ rect->left = left_right >> 16 & 0xffff;
+ rect->top = top_bottom >> 16 & 0xffff;
+ rect->width = width - rect->left -
+ (left_right & 0xffff);
+ rect->height = height - rect->top -
+ (top_bottom & 0xffff);
+ }
+ } else {
+ /* no cropping */
+ }
+
+ err_mb = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
+ if (err_mb > 0)
+ v4l2_err(&dev->v4l2_dev,
+ "errors in %d macroblocks\n", err_mb);
+
+ if (dev->devtype->product == CODA_HX4 ||
+ dev->devtype->product == CODA_7541) {
+ val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
+ if (val == 0) {
+ /* not enough bitstream data */
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "prescan failed: %d\n", val);
+ ctx->hold = true;
+ return;
+ }
+ }
+
+ /* Wait until the VDOA finished writing the previous display frame */
+ if (ctx->use_vdoa &&
+ ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ err_vdoa = vdoa_wait_for_completion(ctx->vdoa);
+ }
+
+ ctx->frm_dis_flg = coda_read(dev,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+ /* The previous display frame was copied out and can be overwritten */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ }
+
+ /*
+ * The index of the last decoded frame, not necessarily in
+ * display order, and the index of the next display frame.
+ * The latter could have been decoded in a previous run.
+ */
+ decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
+ display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
+
+ if (decoded_idx == -1) {
+ /* no frame was decoded, but we might have a display frame */
+ if (display_idx >= 0 && display_idx < ctx->num_internal_frames)
+ ctx->sequence_offset++;
+ else if (ctx->display_idx < 0)
+ ctx->hold = true;
+ } else if (decoded_idx == -2) {
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames)
+ ctx->sequence_offset++;
+ /* no frame was decoded, we still return remaining buffers */
+ } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "decoded frame index out of range: %d\n", decoded_idx);
+ } else {
+ val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
+ if (ctx->sequence_offset == -1)
+ ctx->sequence_offset = val;
+ val -= ctx->sequence_offset;
+ spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
+ if (!list_empty(&ctx->buffer_meta_list)) {
+ meta = list_first_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+ list_del(&meta->list);
+ ctx->num_metas--;
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ /*
+ * Clamp counters to 16 bits for comparison, as the HW
+ * counter rolls over at this point for h.264. This
+ * may be different for other formats, but using 16 bits
+ * should be enough to detect most errors and saves us
+ * from doing different things based on the format.
+ */
+ if ((val & 0xffff) != (meta->sequence & 0xffff)) {
+ v4l2_err(&dev->v4l2_dev,
+ "sequence number mismatch (%d(%d) != %d)\n",
+ val, ctx->sequence_offset,
+ meta->sequence);
+ }
+ ctx->frame_metas[decoded_idx] = *meta;
+ kfree(meta);
+ } else {
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
+ memset(&ctx->frame_metas[decoded_idx], 0,
+ sizeof(struct coda_buffer_meta));
+ ctx->frame_metas[decoded_idx].sequence = val;
+ ctx->sequence_offset++;
+ }
+
+ trace_coda_dec_pic_done(ctx, &ctx->frame_metas[decoded_idx]);
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_TYPE) & 0x7;
+ if (val == 0)
+ ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_KEYFRAME;
+ else if (val == 1)
+ ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_PFRAME;
+ else
+ ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_BFRAME;
+
+ ctx->frame_errors[decoded_idx] = err_mb;
+ }
+
+ if (display_idx == -1) {
+ /*
+ * no more frames to be decoded, but there could still
+ * be rotator output to dequeue
+ */
+ ctx->hold = true;
+ } else if (display_idx == -3) {
+ /* possibly prescan failure */
+ } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "presentation frame index out of range: %d\n",
+ display_idx);
+ }
+
+ /* If a frame was copied out, return it */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf->sequence = ctx->osequence++;
+
+ dst_buf->field = V4L2_FIELD_NONE;
+ dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME);
+ dst_buf->flags |= ctx->frame_types[ctx->display_idx];
+ meta = &ctx->frame_metas[ctx->display_idx];
+ dst_buf->timecode = meta->timecode;
+ dst_buf->vb2_buf.timestamp = meta->timestamp;
+
+ trace_coda_dec_rot_done(ctx, dst_buf, meta);
+
+ switch (q_data_dst->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ payload = width * height * 2;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_NV12:
+ default:
+ payload = width * height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ payload = width * height * 2;
+ break;
+ }
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
+
+ if (ctx->frame_errors[ctx->display_idx] || err_vdoa)
+ coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
+ else
+ coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: decoding frame (%d) (%s)\n",
+ dst_buf->sequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ "KEYFRAME" : "PFRAME");
+ } else {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: no frame decoded\n");
+ }
+
+ /* The rotator will copy the current display frame next time */
+ ctx->display_idx = display_idx;
+}
+
+static void coda_decode_timeout(struct coda_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *dst_buf;
+
+ /*
+ * For now this only handles the case where we would deadlock with
+ * userspace, i.e. userspace issued DEC_CMD_STOP and waits for EOS,
+ * but after a failed decode run we would hold the context and wait for
+ * userspace to queue more buffers.
+ */
+ if (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))
+ return;
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf->sequence = ctx->qsequence - 1;
+
+ coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
+}
+
+const struct coda_context_ops coda_bit_decode_ops = {
+ .queue_init = coda_decoder_queue_init,
+ .reqbufs = coda_decoder_reqbufs,
+ .start_streaming = coda_start_decoding,
+ .prepare_run = coda_prepare_decode,
+ .finish_run = coda_finish_decode,
+ .run_timeout = coda_decode_timeout,
+ .seq_end_work = coda_seq_end_work,
+ .release = coda_bit_release,
+};
+
+irqreturn_t coda_irq_handler(int irq, void *data)
+{
+ struct coda_dev *dev = data;
+ struct coda_ctx *ctx;
+
+	/* read the status register to acknowledge the IRQ */
+ coda_read(dev, CODA_REG_BIT_INT_STATUS);
+ coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
+ CODA_REG_BIT_INT_CLEAR);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (ctx == NULL) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_HANDLED;
+ }
+
+ trace_coda_bit_done(ctx);
+
+ if (ctx->aborting) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "task has been aborted\n");
+ }
+
+ if (coda_isbusy(ctx->dev)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "coda is still busy!!!!\n");
+ return IRQ_NONE;
+ }
+
+ complete(&ctx->completion);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
new file mode 100644
index 000000000..d792122b8
--- /dev/null
+++ b/drivers/media/platform/coda/coda-common.c
@@ -0,0 +1,2873 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gcd.h>
+#include <linux/genalloc.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of.h>
+#include <linux/platform_data/media/coda.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "coda.h"
+#include "imx-vdoa.h"
+
+#define CODA_NAME "coda"
+
+#define CODADX6_MAX_INSTANCES 4
+#define CODA_MAX_FORMATS 4
+
+#define CODA_ISRAM_SIZE (2048 * 2)
+
+#define MIN_W 176
+#define MIN_H 144
+
+#define S_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN 1 /* multiple of 2 */
+#define H_ALIGN 1 /* multiple of 2 */
+
+#define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh)
+
+int coda_debug;
+module_param(coda_debug, int, 0644);
+MODULE_PARM_DESC(coda_debug, "Debug level (0-2)");
+
+static int disable_tiling;
+module_param(disable_tiling, int, 0644);
+MODULE_PARM_DESC(disable_tiling, "Disable tiled frame buffers");
+
+static int disable_vdoa;
+module_param(disable_vdoa, int, 0644);
+MODULE_PARM_DESC(disable_vdoa, "Disable Video Data Order Adapter tiled to raster-scan conversion");
+
+static int enable_bwb = 0;
+module_param(enable_bwb, int, 0644);
+MODULE_PARM_DESC(enable_bwb, "Enable BWB unit for decoding, may crash on certain streams");
+
+void coda_write(struct coda_dev *dev, u32 data, u32 reg)
+{
+ v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
+ "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+ writel(data, dev->regs_base + reg);
+}
+
+unsigned int coda_read(struct coda_dev *dev, u32 reg)
+{
+ u32 data;
+
+ data = readl(dev->regs_base + reg);
+ v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
+ "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+ return data;
+}
+
+void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
+ struct vb2_v4l2_buffer *buf, unsigned int reg_y)
+{
+ u32 base_y = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
+ u32 base_cb, base_cr;
+
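+	/*
+	 * The chroma planes directly follow the luma plane in memory; for
+	 * YUV420 with bytesperline 1920 and height 1088, for example, Cb
+	 * starts at base_y + 2088960 and Cr at base_y + 2611200 bytes.
+	 */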
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ /* Fallthrough: IN -H264-> CODA -NV12 MB-> VDOA -YUYV-> OUT */
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_YUV420:
+ default:
+ base_cb = base_y + q_data->bytesperline * q_data->height;
+ base_cr = base_cb + q_data->bytesperline * q_data->height / 4;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ /* Switch Cb and Cr for YVU420 format */
+ base_cr = base_y + q_data->bytesperline * q_data->height;
+ base_cb = base_cr + q_data->bytesperline * q_data->height / 4;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ base_cb = base_y + q_data->bytesperline * q_data->height;
+ base_cr = base_cb + q_data->bytesperline * q_data->height / 2;
+ }
+
+ coda_write(ctx->dev, base_y, reg_y);
+ coda_write(ctx->dev, base_cb, reg_y + 4);
+ coda_write(ctx->dev, base_cr, reg_y + 8);
+}
+
+#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
+ { mode, src_fourcc, dst_fourcc, max_w, max_h }
+
+/*
+ * Arrays of codecs supported by each given version of Coda:
+ * i.MX27 -> codadx6
+ * i.MX51 -> codahx4
+ * i.MX53 -> coda7
+ * i.MX6 -> coda960
+ * Use V4L2_PIX_FMT_YUV420 as placeholder for all supported YUV 4:2:0 variants
+ */
+static const struct coda_codec codadx6_codecs[] = {
+ CODA_CODEC(CODADX6_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576),
+ CODA_CODEC(CODADX6_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 720, 576),
+};
+
+static const struct coda_codec codahx4_codecs[] = {
+ CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576),
+ CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA7_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1280, 720),
+};
+
+static const struct coda_codec coda7_codecs[] = {
+ CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
+ CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
+ CODA_CODEC(CODA7_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192),
+ CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA7_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA7_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192),
+};
+
+static const struct coda_codec coda9_codecs[] = {
+ CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
+};
+
+struct coda_video_device {
+ const char *name;
+ enum coda_inst_type type;
+ const struct coda_context_ops *ops;
+ bool direct;
+ u32 src_formats[CODA_MAX_FORMATS];
+ u32 dst_formats[CODA_MAX_FORMATS];
+};
+
+static const struct coda_video_device coda_bit_encoder = {
+ .name = "coda-encoder",
+ .type = CODA_INST_ENCODER,
+ .ops = &coda_bit_encode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_H264,
+ V4L2_PIX_FMT_MPEG4,
+ },
+};
+
+static const struct coda_video_device coda_bit_jpeg_encoder = {
+ .name = "coda-jpeg-encoder",
+ .type = CODA_INST_ENCODER,
+ .ops = &coda_bit_encode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_YUV422P,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_JPEG,
+ },
+};
+
+static const struct coda_video_device coda_bit_decoder = {
+ .name = "coda-decoder",
+ .type = CODA_INST_DECODER,
+ .ops = &coda_bit_decode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_H264,
+ V4L2_PIX_FMT_MPEG2,
+ V4L2_PIX_FMT_MPEG4,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ /*
+ * If V4L2_PIX_FMT_YUYV should be default,
+ * set_default_params() must be adjusted.
+ */
+ V4L2_PIX_FMT_YUYV,
+ },
+};
+
+static const struct coda_video_device coda_bit_jpeg_decoder = {
+ .name = "coda-jpeg-decoder",
+ .type = CODA_INST_DECODER,
+ .ops = &coda_bit_decode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_JPEG,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_YUV422P,
+ },
+};
+
+static const struct coda_video_device *codadx6_video_devices[] = {
+ &coda_bit_encoder,
+};
+
+static const struct coda_video_device *codahx4_video_devices[] = {
+ &coda_bit_encoder,
+ &coda_bit_decoder,
+};
+
+static const struct coda_video_device *coda7_video_devices[] = {
+ &coda_bit_jpeg_encoder,
+ &coda_bit_jpeg_decoder,
+ &coda_bit_encoder,
+ &coda_bit_decoder,
+};
+
+static const struct coda_video_device *coda9_video_devices[] = {
+ &coda_bit_encoder,
+ &coda_bit_decoder,
+};
+
+/*
+ * Normalize all supported YUV 4:2:0 formats to the value used in the codec
+ * tables.
+ */
+static u32 coda_format_normalize_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUYV:
+ return V4L2_PIX_FMT_YUV420;
+ default:
+ return fourcc;
+ }
+}
+
+static const struct coda_codec *coda_find_codec(struct coda_dev *dev,
+ int src_fourcc, int dst_fourcc)
+{
+ const struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ int k;
+
+ src_fourcc = coda_format_normalize_yuv(src_fourcc);
+ dst_fourcc = coda_format_normalize_yuv(dst_fourcc);
+ if (src_fourcc == dst_fourcc)
+ return NULL;
+
+ for (k = 0; k < num_codecs; k++) {
+ if (codecs[k].src_fourcc == src_fourcc &&
+ codecs[k].dst_fourcc == dst_fourcc)
+ break;
+ }
+
+ if (k == num_codecs)
+ return NULL;
+
+ return &codecs[k];
+}
+
+static void coda_get_max_dimensions(struct coda_dev *dev,
+ const struct coda_codec *codec,
+ int *max_w, int *max_h)
+{
+ const struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ unsigned int w, h;
+ int k;
+
+ if (codec) {
+ w = codec->max_w;
+ h = codec->max_h;
+ } else {
+ for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
+ w = max(w, codecs[k].max_w);
+ h = max(h, codecs[k].max_h);
+ }
+ }
+
+ if (max_w)
+ *max_w = w;
+ if (max_h)
+ *max_h = h;
+}
+
+static const struct coda_video_device *to_coda_video_device(struct video_device
+ *vdev)
+{
+ struct coda_dev *dev = video_get_drvdata(vdev);
+ unsigned int i = vdev - dev->vfd;
+
+ if (i >= dev->devtype->num_vdevs)
+ return NULL;
+
+ return dev->devtype->vdevs[i];
+}
+
+const char *coda_product_name(int product)
+{
+ static char buf[9];
+
+ switch (product) {
+ case CODA_DX6:
+ return "CodaDx6";
+ case CODA_HX4:
+ return "CodaHx4";
+ case CODA_7541:
+ return "CODA7541";
+ case CODA_960:
+ return "CODA960";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", product);
+ return buf;
+ }
+}
+
+static struct vdoa_data *coda_get_vdoa_data(void)
+{
+ struct device_node *vdoa_node;
+ struct platform_device *vdoa_pdev;
+ struct vdoa_data *vdoa_data = NULL;
+
+ vdoa_node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-vdoa");
+ if (!vdoa_node)
+ return NULL;
+
+ vdoa_pdev = of_find_device_by_node(vdoa_node);
+ if (!vdoa_pdev)
+ goto out;
+
+ vdoa_data = platform_get_drvdata(vdoa_pdev);
+ if (!vdoa_data)
+ vdoa_data = ERR_PTR(-EPROBE_DEFER);
+
+ put_device(&vdoa_pdev->dev);
+out:
+ if (vdoa_node)
+ of_node_put(vdoa_node);
+
+ return vdoa_data;
+}
+
+/*
+ * V4L2 ioctl() operations.
+ */
+static int coda_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product),
+ sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int coda_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ const struct coda_video_device *cvd = to_coda_video_device(vdev);
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ const u32 *formats;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ formats = cvd->src_formats;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ formats = cvd->dst_formats;
+ else
+ return -EINVAL;
+
+ if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
+ return -EINVAL;
+
+ /* Skip YUYV if the vdoa is not available */
+ if (!ctx->vdoa && f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ formats[f->index] == V4L2_PIX_FMT_YUYV)
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index];
+
+ return 0;
+}
+
+static int coda_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_q_data *q_data;
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = q_data->fourcc;
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.bytesperline = q_data->bytesperline;
+
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+
+ return 0;
+}
+
+static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
+{
+ struct coda_q_data *q_data;
+ const u32 *formats;
+ int i;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ formats = ctx->cvd->src_formats;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ formats = ctx->cvd->dst_formats;
+ else
+ return -EINVAL;
+
+ for (i = 0; i < CODA_MAX_FORMATS; i++) {
+ /* Skip YUYV if the vdoa is not available */
+ if (!ctx->vdoa && f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ formats[i] == V4L2_PIX_FMT_YUYV)
+ continue;
+
+ if (formats[i] == f->fmt.pix.pixelformat) {
+ f->fmt.pix.pixelformat = formats[i];
+ return 0;
+ }
+ }
+
+ /* Fall back to currently set pixelformat */
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix.pixelformat = q_data->fourcc;
+
+ return 0;
+}
+
+static int coda_try_fmt_vdoa(struct coda_ctx *ctx, struct v4l2_format *f,
+ bool *use_vdoa)
+{
+ int err;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (!use_vdoa)
+ return -EINVAL;
+
+ if (!ctx->vdoa) {
+ *use_vdoa = false;
+ return 0;
+ }
+
+ err = vdoa_context_configure(NULL, round_up(f->fmt.pix.width, 16),
+ f->fmt.pix.height, f->fmt.pix.pixelformat);
+ if (err) {
+ *use_vdoa = false;
+ return 0;
+ }
+
+ *use_vdoa = true;
+ return 0;
+}
+
+static unsigned int coda_estimate_sizeimage(struct coda_ctx *ctx, u32 sizeimage,
+ u32 width, u32 height)
+{
+ /*
+ * This is a rough estimate for sensible compressed buffer
+ * sizes (between 1 and 16 bits per pixel). This could be
+ * improved by better format specific worst case estimates.
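+	 *
+	 * For a 1920x1088 stream, for example, sizeimage is clamped to the
+	 * range 1920*1088/8 = 261120 .. 1920*1088*2 = 4177920 bytes before
+	 * being rounded up to a whole page.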
+ */
+ return round_up(clamp(sizeimage, width * height / 8,
+ width * height * 2), PAGE_SIZE);
+}
+
+static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
+ struct v4l2_format *f)
+{
+ struct coda_dev *dev = ctx->dev;
+ unsigned int max_w, max_h;
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ f->fmt.pix.field = field;
+
+ coda_get_max_dimensions(dev, codec, &max_w, &max_h);
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
+ &f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
+ S_ALIGN);
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ /*
+		 * Frame stride must be at least a multiple of 8,
+		 * and a multiple of 16 for h.264 or JPEG 4:2:x
+ */
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 2;
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_MPEG2:
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = coda_estimate_sizeimage(ctx,
+ f->fmt.pix.sizeimage,
+ f->fmt.pix.width,
+ f->fmt.pix.height);
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static int coda_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ const struct coda_q_data *q_data_src;
+ const struct coda_codec *codec;
+ struct vb2_queue *src_vq;
+ int ret;
+ bool use_vdoa;
+
+ ret = coda_try_pixelformat(ctx, f);
+ if (ret < 0)
+ return ret;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ /*
+ * If the source format is already fixed, only allow the same output
+ * resolution
+ */
+ src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_streaming(src_vq)) {
+ f->fmt.pix.width = q_data_src->width;
+ f->fmt.pix.height = q_data_src->height;
+ }
+
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ f->fmt.pix.pixelformat);
+ if (!codec)
+ return -EINVAL;
+
+ ret = coda_try_fmt(ctx, codec, f);
+ if (ret < 0)
+ return ret;
+
+ /* The h.264 decoder only returns complete 16x16 macroblocks */
+ if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
+ f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+
+ ret = coda_try_fmt_vdoa(ctx, f, &use_vdoa);
+ if (ret < 0)
+ return ret;
+
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ if (!use_vdoa)
+ return -EINVAL;
+
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ }
+ }
+
+ return 0;
+}
+
+static void coda_set_default_colorspace(struct v4l2_pix_format *fmt)
+{
+ enum v4l2_colorspace colorspace;
+
+ if (fmt->pixelformat == V4L2_PIX_FMT_JPEG)
+ colorspace = V4L2_COLORSPACE_JPEG;
+ else if (fmt->width <= 720 && fmt->height <= 576)
+ colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ colorspace = V4L2_COLORSPACE_REC709;
+
+ fmt->colorspace = colorspace;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+}
+
+static int coda_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_dev *dev = ctx->dev;
+ const struct coda_q_data *q_data_dst;
+ const struct coda_codec *codec;
+ int ret;
+
+ ret = coda_try_pixelformat(ctx, f);
+ if (ret < 0)
+ return ret;
+
+ if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT)
+ coda_set_default_colorspace(&f->fmt.pix);
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ codec = coda_find_codec(dev, f->fmt.pix.pixelformat, q_data_dst->fourcc);
+
+ return coda_try_fmt(ctx, codec, f);
+}
+
+static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
+ struct v4l2_rect *r)
+{
+ struct coda_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fourcc = f->fmt.pix.pixelformat;
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->bytesperline = f->fmt.pix.bytesperline;
+ q_data->sizeimage = f->fmt.pix.sizeimage;
+ if (r) {
+ q_data->rect = *r;
+ } else {
+ q_data->rect.left = 0;
+ q_data->rect.top = 0;
+ q_data->rect.width = f->fmt.pix.width;
+ q_data->rect.height = f->fmt.pix.height;
+ }
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ if (!disable_tiling) {
+ ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
+ break;
+ }
+ /* else fall through */
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
+ break;
+ default:
+ break;
+ }
+
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP &&
+ !coda_try_fmt_vdoa(ctx, f, &ctx->use_vdoa) &&
+ ctx->use_vdoa)
+ vdoa_context_configure(ctx->vdoa,
+ round_up(f->fmt.pix.width, 16),
+ f->fmt.pix.height,
+ f->fmt.pix.pixelformat);
+ else
+ ctx->use_vdoa = false;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %4.4s %c\n",
+ f->type, q_data->width, q_data->height,
+ (char *)&q_data->fourcc,
+ (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) ? 'L' : 'T');
+
+ return 0;
+}
+
+static int coda_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_q_data *q_data_src;
+ struct v4l2_rect r;
+ int ret;
+
+ ret = coda_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ r.left = 0;
+ r.top = 0;
+ r.width = q_data_src->width;
+ r.height = q_data_src->height;
+
+ ret = coda_s_fmt(ctx, f, &r);
+ if (ret)
+ return ret;
+
+ if (ctx->inst_type != CODA_INST_ENCODER)
+ return 0;
+
+ ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quantization = f->fmt.pix.quantization;
+
+ return 0;
+}
+
+static int coda_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_format f_cap;
+ struct vb2_queue *dst_vq;
+ int ret;
+
+ ret = coda_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = coda_s_fmt(ctx, f, NULL);
+ if (ret)
+ return ret;
+
+ if (ctx->inst_type != CODA_INST_DECODER)
+ return 0;
+
+ ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quantization = f->fmt.pix.quantization;
+
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (!dst_vq)
+ return -EINVAL;
+
+ /*
+ * Setting the capture queue format is not possible while the capture
+ * queue is still busy. This is not an error, but the user will have to
+ * make sure themselves that the capture format is set correctly before
+ * starting the output queue again.
+ */
+ if (vb2_is_busy(dst_vq))
+ return 0;
+
+ memset(&f_cap, 0, sizeof(f_cap));
+ f_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ coda_g_fmt(file, priv, &f_cap);
+ f_cap.fmt.pix.width = f->fmt.pix.width;
+ f_cap.fmt.pix.height = f->fmt.pix.height;
+
+ return coda_s_fmt_vid_cap(file, priv, &f_cap);
+}
+
+static int coda_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb);
+ if (ret)
+ return ret;
+
+ /*
+	 * Allow allocating instance-specific per-context buffers, such as the
+	 * bitstream ringbuffer, slice buffer, work buffer, etc., if needed.
+ */
+ if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs)
+ return ctx->ops->reqbufs(ctx, rb);
+
+ return 0;
+}
+
+static int coda_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
+}
+
+static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
+ struct vb2_v4l2_buffer *buf)
+{
+ return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
+ (buf->sequence == (ctx->qsequence - 1)));
+}
+
+void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ if (coda_buf_is_end_of_stream(ctx, buf)) {
+ buf->flags |= V4L2_BUF_FLAG_LAST;
+
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+
+ v4l2_m2m_buf_done(buf, state);
+}
+
+static int coda_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_q_data *q_data;
+ struct v4l2_rect r, *rsel;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ r.left = 0;
+ r.top = 0;
+ r.width = q_data->width;
+ r.height = q_data->height;
+ rsel = &q_data->rect;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ rsel = &r;
+ /* fallthrough */
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ rsel = &r;
+ /* fallthrough */
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ s->r = *rsel;
+
+ return 0;
+}
+
+static int coda_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_q_data *q_data;
+
+ if (ctx->inst_type == CODA_INST_ENCODER &&
+ s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->target == V4L2_SEL_TGT_CROP) {
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = clamp(s->r.width, 2U, q_data->width);
+ s->r.height = clamp(s->r.height, 2U, q_data->height);
+
+ if (s->flags & V4L2_SEL_FLAG_LE) {
+ s->r.width = round_up(s->r.width, 2);
+ s->r.height = round_up(s->r.height, 2);
+ } else {
+ s->r.width = round_down(s->r.width, 2);
+ s->r.height = round_down(s->r.height, 2);
+ }
+
+ q_data->rect = s->r;
+
+ return 0;
+ }
+
+ return coda_g_selection(file, fh, s);
+}
+
+static int coda_try_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP)
+ return -EINVAL;
+
+ if (ec->flags & V4L2_ENC_CMD_STOP_AT_GOP_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int coda_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *dst_vq;
+ int ret;
+
+ ret = coda_try_encoder_cmd(file, fh, ec);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore encoder stop command silently in decoder context */
+ if (ctx->inst_type != CODA_INST_ENCODER)
+ return 0;
+
+ /* Set the stream-end flag on this context */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ flush_work(&ctx->pic_run_work);
+
+ /* If there is no buffer in flight, wake up */
+ if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) {
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_vq->last_buffer_dequeued = true;
+ wake_up(&dst_vq->done_wq);
+ }
+
+ return 0;
+}
+
+static int coda_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int coda_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+ ret = coda_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore decoder stop command silently in encoder context */
+ if (ctx->inst_type != CODA_INST_DECODER)
+ return 0;
+
+ /* Set the stream-end flag on this context */
+ coda_bit_stream_end_flag(ctx);
+ ctx->hold = false;
+ v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
+
+ return 0;
+}
+
+static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_fract *tpf;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ tpf = &a->parm.output.timeperframe;
+ tpf->denominator = ctx->params.framerate & CODA_FRATE_RES_MASK;
+ tpf->numerator = 1 + (ctx->params.framerate >>
+ CODA_FRATE_DIV_OFFSET);
+
+ return 0;
+}
+
+/*
+ * Approximate timeperframe v4l2_fract with values that can be written
+ * into the 16-bit CODA_FRATE_DIV and CODA_FRATE_RES fields.
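+ *
+ * For example, 1001/30000 (29.97 fps) already fits and is returned
+ * unchanged, while 1001/120000 exceeds the 16-bit denominator range and
+ * is replaced by its closest fitting convergent, 342/40999.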
+ */
+static void coda_approximate_timeperframe(struct v4l2_fract *timeperframe)
+{
+ struct v4l2_fract s = *timeperframe;
+ struct v4l2_fract f0;
+ struct v4l2_fract f1 = { 1, 0 };
+ struct v4l2_fract f2 = { 0, 1 };
+ unsigned int i, div, s_denominator;
+
+ /* Lower bound is 1/65535 */
+ if (s.numerator == 0 || s.denominator / s.numerator > 65535) {
+ timeperframe->numerator = 1;
+ timeperframe->denominator = 65535;
+ return;
+ }
+
+ /* Upper bound is 65536/1, map everything above to infinity */
+ if (s.denominator == 0 || s.numerator / s.denominator > 65536) {
+ timeperframe->numerator = 1;
+ timeperframe->denominator = 0;
+ return;
+ }
+
+ /* Reduce fraction to lowest terms */
+ div = gcd(s.numerator, s.denominator);
+ if (div > 1) {
+ s.numerator /= div;
+ s.denominator /= div;
+ }
+
+ if (s.numerator <= 65536 && s.denominator < 65536) {
+ *timeperframe = s;
+ return;
+ }
+
+ /* Find successive convergents from continued fraction expansion */
+ while (f2.numerator <= 65536 && f2.denominator < 65536) {
+ f0 = f1;
+ f1 = f2;
+
+ /* Stop when f2 exactly equals timeperframe */
+ if (s.numerator == 0)
+ break;
+
+ i = s.denominator / s.numerator;
+
+ f2.numerator = f0.numerator + i * f1.numerator;
+		f2.denominator = f0.denominator + i * f1.denominator;
+
+ s_denominator = s.numerator;
+ s.numerator = s.denominator % s.numerator;
+ s.denominator = s_denominator;
+ }
+
+ *timeperframe = f1;
+}
+
+static uint32_t coda_timeperframe_to_frate(struct v4l2_fract *timeperframe)
+{
+ return ((timeperframe->numerator - 1) << CODA_FRATE_DIV_OFFSET) |
+ timeperframe->denominator;
+}
+
+static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_fract *tpf;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ tpf = &a->parm.output.timeperframe;
+ coda_approximate_timeperframe(tpf);
+ ctx->params.framerate = coda_timeperframe_to_frate(tpf);
+
+ return 0;
+}
+
+static int coda_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops coda_ioctl_ops = {
+ .vidioc_querycap = coda_querycap,
+
+ .vidioc_enum_fmt_vid_cap = coda_enum_fmt,
+ .vidioc_g_fmt_vid_cap = coda_g_fmt,
+ .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = coda_enum_fmt,
+ .vidioc_g_fmt_vid_out = coda_g_fmt,
+ .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
+
+ .vidioc_reqbufs = coda_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+
+ .vidioc_qbuf = coda_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = coda_g_selection,
+ .vidioc_s_selection = coda_s_selection,
+
+ .vidioc_try_encoder_cmd = coda_try_encoder_cmd,
+ .vidioc_encoder_cmd = coda_encoder_cmd,
+ .vidioc_try_decoder_cmd = coda_try_decoder_cmd,
+ .vidioc_decoder_cmd = coda_decoder_cmd,
+
+ .vidioc_g_parm = coda_g_parm,
+ .vidioc_s_parm = coda_s_parm,
+
+ .vidioc_subscribe_event = coda_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Mem-to-mem operations.
+ */
+
+static void coda_device_run(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *dev = ctx->dev;
+
+ queue_work(dev->workqueue, &ctx->pic_run_work);
+}
+
+static void coda_pic_run_work(struct work_struct *work)
+{
+ struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work);
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&ctx->buffer_mutex);
+ mutex_lock(&dev->coda_mutex);
+
+ ret = ctx->ops->prepare_run(ctx);
+ if (ret < 0 && ctx->inst_type == CODA_INST_DECODER) {
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+ /* job_finish scheduled by prepare_decode */
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ctx->completion,
+ msecs_to_jiffies(1000))) {
+ dev_err(&dev->plat_dev->dev, "CODA PIC_RUN timeout\n");
+
+ ctx->hold = true;
+
+ coda_hw_reset(ctx);
+
+ if (ctx->ops->run_timeout)
+ ctx->ops->run_timeout(ctx);
+ } else if (!ctx->aborting) {
+ ctx->ops->finish_run(ctx);
+ }
+
+ if ((ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) &&
+ ctx->ops->seq_end_work)
+ queue_work(dev->workqueue, &ctx->seq_end_work);
+
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static int coda_job_ready(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
+
+ /*
+	 * For both 'P' and 'key' frames, one source picture and one
+	 * destination frame are needed. In the decoder case, the
+	 * compressed frame can already be in the bitstream ringbuffer.
+ */
+ if (!src_bufs && ctx->inst_type != CODA_INST_DECODER) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: not enough video buffers.\n");
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: not enough video capture buffers.\n");
+ return 0;
+ }
+
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
+ bool stream_end = ctx->bit_stream_param &
+ CODA_BIT_STREAM_END_FLAG;
+ int num_metas = ctx->num_metas;
+ unsigned int count;
+
+ count = hweight32(ctx->frm_dis_flg);
+ if (ctx->use_vdoa && count >= (ctx->num_internal_frames - 1)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: all internal buffers in use: %d/%d (0x%x)",
+ ctx->idx, count, ctx->num_internal_frames,
+ ctx->frm_dis_flg);
+ return 0;
+ }
+
+ if (ctx->hold && !src_bufs) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: on hold for more buffers.\n",
+ ctx->idx);
+ return 0;
+ }
+
+ if (!stream_end && (num_metas + src_bufs) < 2) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: need 2 buffers available (%d, %d)\n",
+ ctx->idx, num_metas, src_bufs);
+ return 0;
+ }
+
+ if (!src_bufs && !stream_end &&
+ (coda_get_bitstream_payload(ctx) < 512)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: not enough bitstream data (%d).\n",
+ ctx->idx, coda_get_bitstream_payload(ctx));
+ return 0;
+ }
+ }
+
+ if (ctx->aborting) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: aborting\n");
+ return 0;
+ }
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "job ready\n");
+
+ return 1;
+}
+
+static void coda_job_abort(void *priv)
+{
+ struct coda_ctx *ctx = priv;
+
+ ctx->aborting = 1;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Aborting task\n");
+}
+
+static const struct v4l2_m2m_ops coda_m2m_ops = {
+ .device_run = coda_device_run,
+ .job_ready = coda_job_ready,
+ .job_abort = coda_job_abort,
+};
+
+static void set_default_params(struct coda_ctx *ctx)
+{
+ unsigned int max_w, max_h, usize, csize;
+
+ ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0],
+ ctx->cvd->dst_formats[0]);
+ max_w = min(ctx->codec->max_w, 1920U);
+ max_h = min(ctx->codec->max_h, 1088U);
+ usize = max_w * max_h * 3 / 2;
+ csize = coda_estimate_sizeimage(ctx, usize, max_w, max_h);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_JPEG)
+ ctx->colorspace = V4L2_COLORSPACE_JPEG;
+ else
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->params.framerate = 30;
+
+ /* Default formats for output and input queues */
+ ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->cvd->src_formats[0];
+ ctx->q_data[V4L2_M2M_DST].fourcc = ctx->cvd->dst_formats[0];
+ ctx->q_data[V4L2_M2M_SRC].width = max_w;
+ ctx->q_data[V4L2_M2M_SRC].height = max_h;
+ ctx->q_data[V4L2_M2M_DST].width = max_w;
+ ctx->q_data[V4L2_M2M_DST].height = max_h;
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) {
+ ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = usize;
+ ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = csize;
+ } else {
+ ctx->q_data[V4L2_M2M_SRC].bytesperline = 0;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = csize;
+ ctx->q_data[V4L2_M2M_DST].bytesperline = max_w;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = usize;
+ }
+ ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
+ ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
+ ctx->q_data[V4L2_M2M_DST].rect.width = max_w;
+ ctx->q_data[V4L2_M2M_DST].rect.height = max_h;
+
+ /*
+ * Since the RBC2AXI logic only supports a single chroma plane,
+	 * macroblock tiling only works for the NV12 pixel format.
+ */
+ ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
+}
+
+/*
+ * Queue operations
+ */
+static int coda_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vq);
+ struct coda_q_data *q_data;
+ unsigned int size;
+
+ q_data = get_q_data(ctx, vq->type);
+ size = q_data->sizeimage;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "get %d buffer(s) of size %d each.\n", *nbuffers, size);
+
+ return 0;
+}
+
+static int coda_buf_prepare(struct vb2_buffer *vb)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct coda_q_data *q_data;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void coda_update_menu_ctrl(struct v4l2_ctrl *ctrl, int value)
+{
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+
+ /*
+ * Extend the control range if the parsed stream contains a known but
+ * unsupported value or level.
+ */
+ if (value > ctrl->maximum) {
+ __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, value,
+ ctrl->menu_skip_mask & ~(1 << value),
+ ctrl->default_value);
+ } else if (value < ctrl->minimum) {
+ __v4l2_ctrl_modify_range(ctrl, value, ctrl->maximum,
+ ctrl->menu_skip_mask & ~(1 << value),
+ ctrl->default_value);
+ }
+
+ __v4l2_ctrl_s_ctrl(ctrl, value);
+
+ v4l2_ctrl_unlock(ctrl);
+}
+
+static void coda_update_h264_profile_ctrl(struct coda_ctx *ctx)
+{
+ const char * const *profile_names;
+ int profile;
+
+ profile = coda_h264_profile(ctx->params.h264_profile_idc);
+ if (profile < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Profile: %u\n",
+ ctx->params.h264_profile_idc);
+ return;
+ }
+
+ coda_update_menu_ctrl(ctx->h264_profile_ctrl, profile);
+
+ profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Profile: %s\n",
+ profile_names[profile]);
+}
+
+static void coda_update_h264_level_ctrl(struct coda_ctx *ctx)
+{
+ const char * const *level_names;
+ int level;
+
+ level = coda_h264_level(ctx->params.h264_level_idc);
+ if (level < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Level: %u\n",
+ ctx->params.h264_level_idc);
+ return;
+ }
+
+ coda_update_menu_ctrl(ctx->h264_level_ctrl, level);
+
+ level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Level: %s\n",
+ level_names[level]);
+}
+
+static void coda_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct coda_q_data *q_data;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ /*
+ * In the decoder case, immediately try to copy the buffer into the
+ * bitstream ringbuffer and mark it as ready to be dequeued.
+ */
+ if (ctx->bitstream.size && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /*
+ * For backwards compatibility, queuing an empty buffer marks
+ * the stream end
+ */
+ if (vb2_get_plane_payload(vb, 0) == 0)
+ coda_bit_stream_end_flag(ctx);
+
+ if (q_data->fourcc == V4L2_PIX_FMT_H264) {
+ /*
+ * Unless already done, try to obtain profile_idc and
+			 * level_idc from the SPS header. This makes it possible
+			 * to decide whether to enable reordering during
+			 * sequence initialization.
+ */
+ if (!ctx->params.h264_profile_idc) {
+ coda_sps_parse_profile(ctx, vb);
+ coda_update_h264_profile_ctrl(ctx);
+ coda_update_h264_level_ctrl(ctx);
+ }
+ }
+
+ mutex_lock(&ctx->bitstream_mutex);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ if (vb2_is_streaming(vb->vb2_queue))
+			/* This sets buf->sequence = ctx->qsequence++ */
+ coda_fill_bitstream(ctx, NULL);
+ mutex_unlock(&ctx->bitstream_mutex);
+ } else {
+ if (ctx->inst_type == CODA_INST_ENCODER &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ vbuf->sequence = ctx->qsequence++;
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ }
+}
+
+int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
+ size_t size, const char *name, struct dentry *parent)
+{
+ buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr,
+ GFP_KERNEL);
+ if (!buf->vaddr) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to allocate %s buffer of size %zu\n",
+ name, size);
+ return -ENOMEM;
+ }
+
+ buf->size = size;
+
+ if (name && parent) {
+ buf->blob.data = buf->vaddr;
+ buf->blob.size = size;
+ buf->dentry = debugfs_create_blob(name, 0644, parent,
+ &buf->blob);
+ if (!buf->dentry)
+ dev_warn(&dev->plat_dev->dev,
+ "failed to create debugfs entry %s\n", name);
+ }
+
+ return 0;
+}
+
+void coda_free_aux_buf(struct coda_dev *dev,
+ struct coda_aux_buf *buf)
+{
+ if (buf->vaddr) {
+ dma_free_coherent(&dev->plat_dev->dev, buf->size,
+ buf->vaddr, buf->paddr);
+ buf->vaddr = NULL;
+ buf->size = 0;
+ debugfs_remove(buf->dentry);
+ buf->dentry = NULL;
+ }
+}
+
+static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct v4l2_m2m_buffer *m2m_buf, *tmp;
+ struct vb2_v4l2_buffer *buf;
+ struct list_head list;
+ int ret = 0;
+
+ if (count < 1)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&list);
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
+ /* copy the buffers that were queued before streamon */
+ mutex_lock(&ctx->bitstream_mutex);
+ coda_fill_bitstream(ctx, &list);
+ mutex_unlock(&ctx->bitstream_mutex);
+
+ if (coda_get_bitstream_payload(ctx) < 512) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ ctx->streamon_out = 1;
+ } else {
+ ctx->streamon_cap = 1;
+ }
+
+ /* Don't start the coda unless both queues are on */
+ if (!(ctx->streamon_out && ctx->streamon_cap))
+ goto out;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if ((q_data_src->rect.width != q_data_dst->width &&
+ round_up(q_data_src->rect.width, 16) != q_data_dst->width) ||
+ (q_data_src->rect.height != q_data_dst->height &&
+ round_up(q_data_src->rect.height, 16) != q_data_dst->height)) {
+ v4l2_err(v4l2_dev, "can't convert %dx%d to %dx%d\n",
+ q_data_src->rect.width, q_data_src->rect.height,
+ q_data_dst->width, q_data_dst->height);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Allow BIT decoder device_run with no new buffers queued */
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
+ v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
+
+ ctx->gopcounter = ctx->params.gop_size - 1;
+
+ ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ q_data_dst->fourcc);
+ if (!ctx->codec) {
+ v4l2_err(v4l2_dev, "couldn't tell instance type.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
+ ctx->params.gop_size = 1;
+ ctx->gopcounter = ctx->params.gop_size - 1;
+
+ ret = ctx->ops->start_streaming(ctx);
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ if (ret == -EAGAIN)
+ goto out;
+ }
+ if (ret < 0)
+ goto err;
+
+out:
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ list_for_each_entry_safe(m2m_buf, tmp, &list, list) {
+ list_del(&m2m_buf->list);
+ v4l2_m2m_buf_done(&m2m_buf->vb, VB2_BUF_STATE_DONE);
+ }
+ }
+ return 0;
+
+err:
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ list_for_each_entry_safe(m2m_buf, tmp, &list, list) {
+ list_del(&m2m_buf->list);
+ v4l2_m2m_buf_done(&m2m_buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ }
+ return ret;
+}
+
+static void coda_stop_streaming(struct vb2_queue *q)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ struct coda_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *buf;
+ unsigned long flags;
+ bool stop;
+
+ stop = ctx->streamon_out && ctx->streamon_cap;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: output\n", __func__);
+ ctx->streamon_out = 0;
+
+ coda_bit_stream_end_flag(ctx);
+
+ ctx->qsequence = 0;
+
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ } else {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: capture\n", __func__);
+ ctx->streamon_cap = 0;
+
+ ctx->osequence = 0;
+ ctx->sequence_offset = 0;
+
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+
+ if (stop) {
+ struct coda_buffer_meta *meta;
+
+ if (ctx->ops->seq_end_work) {
+ queue_work(dev->workqueue, &ctx->seq_end_work);
+ flush_work(&ctx->seq_end_work);
+ }
+ spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
+ while (!list_empty(&ctx->buffer_meta_list)) {
+ meta = list_first_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+ list_del(&meta->list);
+ kfree(meta);
+ }
+ ctx->num_metas = 0;
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ ctx->runcounter = 0;
+ ctx->aborting = 0;
+ ctx->hold = false;
+ }
+
+ if (!ctx->streamon_out && !ctx->streamon_cap)
+ ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
+}
+
+static const struct vb2_ops coda_qops = {
+ .queue_setup = coda_queue_setup,
+ .buf_prepare = coda_buf_prepare,
+ .buf_queue = coda_buf_queue,
+ .start_streaming = coda_start_streaming,
+ .stop_streaming = coda_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct coda_ctx *ctx =
+ container_of(ctrl->handler, struct coda_ctx, ctrls);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ ctx->params.rot_mode |= CODA_MIR_HOR;
+ else
+ ctx->params.rot_mode &= ~CODA_MIR_HOR;
+ break;
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ ctx->params.rot_mode |= CODA_MIR_VER;
+ else
+ ctx->params.rot_mode &= ~CODA_MIR_VER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctx->params.bitrate = ctrl->val / 1000;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctx->params.gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ ctx->params.h264_intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ ctx->params.h264_inter_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ ctx->params.h264_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ ctx->params.h264_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ ctx->params.h264_slice_beta_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ /* TODO: switch between baseline and constrained baseline */
+ if (ctx->inst_type == CODA_INST_ENCODER)
+ ctx->params.h264_profile_idc = 66;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ /* nothing to do, this is set by the encoder */
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+ ctx->params.mpeg4_intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+ ctx->params.mpeg4_inter_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ /* nothing to do, these are fixed */
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ ctx->params.slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ ctx->params.slice_max_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ ctx->params.slice_max_bits = ctrl->val * 8;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ break;
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+ ctx->params.intra_refresh = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ ctx->params.force_ipicture = true;
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ coda_set_jpeg_compression_quality(ctx, ctrl->val);
+ break;
+ case V4L2_CID_JPEG_RESTART_INTERVAL:
+ ctx->params.jpeg_restart_interval = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_DELAY:
+ ctx->params.vbv_delay = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
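+ /* The control value is in KiB; convert to bits (8192 bits per KiB) */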
+ ctx->params.vbv_size = min(ctrl->val * 8192, 0x7fffffff);
+ break;
+ default:
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Invalid control, id=%d, val=%d\n",
+ ctrl->id, ctrl->val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops coda_ctrl_ops = {
+ .s_ctrl = coda_s_ctrl,
+};
+
+static void coda_encode_ctrls(struct coda_ctx *ctx)
+{
+ int max_gop_size = (ctx->dev->devtype->product == CODA_DX6) ? 60 : 99;
+
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1000, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, max_gop_size, 1, 16);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 25);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 0, 51, 1, 25);
+ if (ctx->dev->devtype->product != CODA_960) {
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 0, 51, 1, 12);
+ }
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+ 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE, 0x0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE);
+ if (ctx->dev->devtype->product == CODA_HX4 ||
+ ctx->dev->devtype->product == CODA_7541) {
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_1,
+ ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1)),
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_1);
+ }
+ if (ctx->dev->devtype->product == CODA_960) {
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2)),
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+ }
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE, 0x0,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
+ if (ctx->dev->devtype->product == CODA_HX4 ||
+ ctx->dev->devtype->product == CODA_7541 ||
+ ctx->dev->devtype->product == CODA_960) {
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ ~(1 << V4L2_MPEG_VIDEO_MPEG4_LEVEL_5),
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5);
+ }
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, 1, 0x3fffffff, 1,
+ 500);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ (1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE),
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0,
+ 1920 * 1088 / 256, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VBV_DELAY, 0, 0x7fff, 1, 0);
+ /*
+ * The maximum VBV size value is 0x7fffffff bits,
+ * one bit less than 262144 KiB
+ */
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VBV_SIZE, 0, 262144, 1, 0);
+}
+
+static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
+{
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 5, 100, 1, 50);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100, 1, 0);
+}
+
+static void coda_decode_ctrls(struct coda_ctx *ctx)
+{
+ u64 mask;
+ u8 max;
+
+ ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+ if (ctx->h264_profile_ctrl)
+ ctx->h264_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ if (ctx->dev->devtype->product == CODA_HX4 ||
+ ctx->dev->devtype->product == CODA_7541) {
+ max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
+ } else if (ctx->dev->devtype->product == CODA_960) {
+ max = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1));
+ } else {
+ return;
+ }
+ ctx->h264_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, max, mask,
+ max);
+ if (ctx->h264_level_ctrl)
+ ctx->h264_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+}
+
+static int coda_ctrls_setup(struct coda_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrls, 2);
+
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ctx->inst_type == CODA_INST_ENCODER) {
+ if (ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG)
+ coda_jpeg_encode_ctrls(ctx);
+ else
+ coda_encode_ctrls(ctx);
+ } else {
+ if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_H264)
+ coda_decode_ctrls(ctx);
+ }
+
+ if (ctx->ctrls.error) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "control initialization error (%d)",
+ ctx->ctrls.error);
+ return -EINVAL;
+ }
+
+ return v4l2_ctrl_handler_setup(&ctx->ctrls);
+}
+
+static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq)
+{
+ vq->drv_priv = ctx;
+ vq->ops = &coda_qops;
+ vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ vq->lock = &ctx->dev->dev_mutex;
+ /* One way to indicate end-of-stream for coda is to set the
+ * bytesused == 0. However by default videobuf2 handles bytesused
+ * equal to 0 as a special case and changes its value to the size
+ * of the buffer. Set the allow_zero_bytesused flag, so
+ * that videobuf2 will keep the value of bytesused intact.
+ */
+ vq->allow_zero_bytesused = 1;
+ /*
+ * We might be fine with no buffers on some of the queues, but that
+ * would need to be reflected in job_ready(). Currently we expect all
+ * queues to have at least one buffer queued.
+ */
+ vq->min_buffers_needed = 1;
+ vq->dev = &ctx->dev->plat_dev->dev;
+
+ return vb2_queue_init(vq);
+}
+
+int coda_encoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ ret = coda_queue_init(priv, src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+ return coda_queue_init(priv, dst_vq);
+}
+
+int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+
+ ret = coda_queue_init(priv, src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+ return coda_queue_init(priv, dst_vq);
+}
+
+/*
+ * File operations
+ */
+
+static int coda_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct coda_dev *dev = video_get_drvdata(vdev);
+ struct coda_ctx *ctx;
+ unsigned int max = ~0;
+ char *name;
+ int ret;
+ int idx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (dev->devtype->product == CODA_DX6)
+ max = CODADX6_MAX_INSTANCES - 1;
+ idx = ida_alloc_max(&dev->ida, max, GFP_KERNEL);
+ if (idx < 0) {
+ ret = idx;
+ goto err_coda_max;
+ }
+
+ name = kasprintf(GFP_KERNEL, "context%d", idx);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_coda_name_init;
+ }
+
+ ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root);
+ kfree(name);
+
+ ctx->cvd = to_coda_video_device(vdev);
+ ctx->inst_type = ctx->cvd->type;
+ ctx->ops = ctx->cvd->ops;
+ ctx->use_bit = !ctx->cvd->direct;
+ init_completion(&ctx->completion);
+ INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
+ if (ctx->ops->seq_end_work)
+ INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->dev = dev;
+ ctx->idx = idx;
+ switch (dev->devtype->product) {
+ case CODA_960:
+ /*
+ * Enabling the BWB when decoding can hang the firmware with
+ * certain streams. The issue was tracked as ENGR00293425 by
+ * Freescale. As a workaround, disable BWB for all decoders.
+ * The enable_bwb module parameter allows overriding this.
+ */
+ if (enable_bwb || ctx->inst_type == CODA_INST_ENCODER)
+ ctx->frame_mem_ctrl = CODA9_FRAME_ENABLE_BWB;
+ /* fallthrough */
+ case CODA_HX4:
+ case CODA_7541:
+ ctx->reg_idx = 0;
+ break;
+ default:
+ ctx->reg_idx = idx;
+ }
+ if (ctx->dev->vdoa && !disable_vdoa) {
+ ctx->vdoa = vdoa_context_create(dev->vdoa);
+ if (!ctx->vdoa)
+ v4l2_warn(&dev->v4l2_dev,
+ "Failed to create vdoa context: not using vdoa");
+ }
+ ctx->use_vdoa = false;
+
+ /* Power up and upload firmware if necessary */
+ ret = pm_runtime_get_sync(&dev->plat_dev->dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to power up: %d\n", ret);
+ goto err_pm_get;
+ }
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ set_default_params(ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ ctx->ops->queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_err(&dev->v4l2_dev, "%s return error (%d)\n",
+ __func__, ret);
+ goto err_ctx_init;
+ }
+
+ ret = coda_ctrls_setup(ctx);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n");
+ goto err_ctrls_setup;
+ }
+
+ ctx->fh.ctrl_handler = &ctx->ctrls;
+
+ mutex_init(&ctx->bitstream_mutex);
+ mutex_init(&ctx->buffer_mutex);
+ INIT_LIST_HEAD(&ctx->buffer_meta_list);
+ spin_lock_init(&ctx->buffer_meta_lock);
+
+ mutex_lock(&dev->dev_mutex);
+ list_add(&ctx->list, &dev->instances);
+ mutex_unlock(&dev->dev_mutex);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
+ ctx->idx, ctx);
+
+ return 0;
+
+err_ctrls_setup:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_ctx_init:
+ clk_disable_unprepare(dev->clk_ahb);
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
+ pm_runtime_put_sync(&dev->plat_dev->dev);
+err_pm_get:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+err_coda_name_init:
+ ida_free(&dev->ida, ctx->idx);
+err_coda_max:
+ kfree(ctx);
+ return ret;
+}
+
+static int coda_release(struct file *file)
+{
+ struct coda_dev *dev = video_drvdata(file);
+ struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
+ ctx);
+
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
+ coda_bit_stream_end_flag(ctx);
+
+ /* If this instance is running, call .job_abort and wait for it to end */
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ if (ctx->vdoa)
+ vdoa_context_destroy(ctx->vdoa);
+
+ /* In case the instance was not running, we still need to call SEQ_END */
+ if (ctx->ops->seq_end_work) {
+ queue_work(dev->workqueue, &ctx->seq_end_work);
+ flush_work(&ctx->seq_end_work);
+ }
+
+ mutex_lock(&dev->dev_mutex);
+ list_del(&ctx->list);
+ mutex_unlock(&dev->dev_mutex);
+
+ if (ctx->dev->devtype->product == CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+
+ v4l2_ctrl_handler_free(&ctx->ctrls);
+ clk_disable_unprepare(dev->clk_ahb);
+ clk_disable_unprepare(dev->clk_per);
+ pm_runtime_put_sync(&dev->plat_dev->dev);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ ida_free(&dev->ida, ctx->idx);
+ if (ctx->ops->release)
+ ctx->ops->release(ctx);
+ debugfs_remove_recursive(ctx->debugfs_entry);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations coda_fops = {
+ .owner = THIS_MODULE,
+ .open = coda_open,
+ .release = coda_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int coda_hw_init(struct coda_dev *dev)
+{
+ u32 data;
+ u16 *p;
+ int i, ret;
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ reset_control_reset(dev->rstc);
+
+ /*
+ * Copy the first CODA_ISRAM_SIZE in the internal SRAM.
+ * The 16-bit chars in the code buffer are in memory access
+ * order, re-sort them to CODA order for register download.
+ * Data in this SRAM survives a reboot.
+ */
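+ /*
+ * CodaDx6 swaps adjacent 16-bit halfwords (index i ^ 1); the other
+ * variants reverse the halfword order within each group of four.
+ */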
+ p = (u16 *)dev->codebuf.vaddr;
+ if (dev->devtype->product == CODA_DX6) {
+ for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
+ data = CODA_DOWN_ADDRESS_SET(i) |
+ CODA_DOWN_DATA_SET(p[i ^ 1]);
+ coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
+ }
+ } else {
+ for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
+ data = CODA_DOWN_ADDRESS_SET(i) |
+ CODA_DOWN_DATA_SET(p[round_down(i, 4) +
+ 3 - (i % 4)]);
+ coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
+ }
+ }
+
+ /* Clear registers */
+ for (i = 0; i < 64; i++)
+ coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4);
+
+ /* Tell the BIT where to find everything it needs */
+ if (dev->devtype->product == CODA_960 ||
+ dev->devtype->product == CODA_7541 ||
+ dev->devtype->product == CODA_HX4) {
+ coda_write(dev, dev->tempbuf.paddr,
+ CODA_REG_BIT_TEMP_BUF_ADDR);
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+ } else {
+ coda_write(dev, dev->workbuf.paddr,
+ CODA_REG_BIT_WORK_BUF_ADDR);
+ }
+ coda_write(dev, dev->codebuf.paddr,
+ CODA_REG_BIT_CODE_BUF_ADDR);
+ coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
+
+ /* Set default values */
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ coda_write(dev, CODADX6_STREAM_BUF_PIC_FLUSH,
+ CODA_REG_BIT_STREAM_CTRL);
+ break;
+ default:
+ coda_write(dev, CODA7_STREAM_BUF_PIC_FLUSH,
+ CODA_REG_BIT_STREAM_CTRL);
+ }
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, CODA9_FRAME_ENABLE_BWB,
+ CODA_REG_BIT_FRAME_MEM_CTRL);
+ else
+ coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL);
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, 0, CODA7_REG_BIT_AXI_SRAM_USE);
+
+ coda_write(dev, CODA_INT_INTERRUPT_ENABLE,
+ CODA_REG_BIT_INT_ENABLE);
+
+ /* Reset VPU and start processor */
+ data = coda_read(dev, CODA_REG_BIT_CODE_RESET);
+ data |= CODA_REG_RESET_ENABLE;
+ coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+ udelay(10);
+ data &= ~CODA_REG_RESET_ENABLE;
+ coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+ coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
+
+ clk_disable_unprepare(dev->clk_ahb);
+ clk_disable_unprepare(dev->clk_per);
+
+ return 0;
+
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
+ return ret;
+}
+
+static int coda_register_device(struct coda_dev *dev, int i)
+{
+ struct video_device *vfd = &dev->vfd[i];
+
+ if (i >= dev->devtype->num_vdevs)
+ return -EINVAL;
+
+ strlcpy(vfd->name, dev->devtype->vdevs[i]->name, sizeof(vfd->name));
+ vfd->fops = &coda_fops;
+ vfd->ioctl_ops = &coda_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ video_set_drvdata(vfd, dev);
+
+ /* Not applicable, use the selection API instead */
+ v4l2_disable_ioctl(vfd, VIDIOC_CROPCAP);
+ v4l2_disable_ioctl(vfd, VIDIOC_G_CROP);
+ v4l2_disable_ioctl(vfd, VIDIOC_S_CROP);
+
+ return video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+}
+
+static void coda_copy_firmware(struct coda_dev *dev, const u8 * const buf,
+ size_t size)
+{
+ u32 *src = (u32 *)buf;
+
+ /* Check if the firmware has a 16-byte Freescale header, skip it */
+ if (buf[0] == 'M' && buf[1] == 'X')
+ src += 4;
+ /*
+ * Check whether the firmware is in native order or pre-reordered for
+ * memory access. The first instruction opcode is always 0xe40e.
+ */
+ if (__le16_to_cpup((__le16 *)src) == 0xe40e) {
+ u32 *dst = dev->codebuf.vaddr;
+ int i;
+
+ /* Firmware in native order, reorder while copying */
+ if (dev->devtype->product == CODA_DX6) {
+ for (i = 0; i < (size - 16) / 4; i++)
+ dst[i] = (src[i] << 16) | (src[i] >> 16);
+ } else {
+ for (i = 0; i < (size - 16) / 4; i += 2) {
+ dst[i] = (src[i + 1] << 16) | (src[i + 1] >> 16);
+ dst[i + 1] = (src[i] << 16) | (src[i] >> 16);
+ }
+ }
+ } else {
+ /* Copy the already reordered firmware image */
+ memcpy(dev->codebuf.vaddr, src, size);
+ }
+}
+
+static void coda_fw_callback(const struct firmware *fw, void *context);
+
+static int coda_firmware_request(struct coda_dev *dev)
+{
+ char *fw;
+
+ if (dev->firmware >= ARRAY_SIZE(dev->devtype->firmware))
+ return -EINVAL;
+
+ fw = dev->devtype->firmware[dev->firmware];
+
+ dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
+ coda_product_name(dev->devtype->product));
+
+ return request_firmware_nowait(THIS_MODULE, true, fw,
+ &dev->plat_dev->dev, GFP_KERNEL, dev,
+ coda_fw_callback);
+}
+
+static void coda_fw_callback(const struct firmware *fw, void *context)
+{
+ struct coda_dev *dev = context;
+ struct platform_device *pdev = dev->plat_dev;
+ int i, ret;
+
+ if (!fw) {
+ dev->firmware++;
+ ret = coda_firmware_request(dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
+ goto put_pm;
+ }
+ return;
+ }
+ if (dev->firmware > 0) {
+ /*
+ * Since we can't suppress warnings for failed asynchronous
+ * firmware requests, report that the fallback firmware was
+ * found.
+ */
+ dev_info(&pdev->dev, "Using fallback firmware %s\n",
+ dev->devtype->firmware[dev->firmware]);
+ }
+
+ /* allocate auxiliary per-device code buffer for the BIT processor */
+ ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf",
+ dev->debugfs_root);
+ if (ret < 0)
+ goto put_pm;
+
+ coda_copy_firmware(dev, fw->data, fw->size);
+ release_firmware(fw);
+
+ ret = coda_hw_init(dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "HW initialization failed\n");
+ goto put_pm;
+ }
+
+ ret = coda_check_firmware(dev);
+ if (ret < 0)
+ goto put_pm;
+
+ dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ goto put_pm;
+ }
+
+ for (i = 0; i < dev->devtype->num_vdevs; i++) {
+ ret = coda_register_device(dev, i);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to register %s video device: %d\n",
+ dev->devtype->vdevs[i]->name, ret);
+ goto rel_vfd;
+ }
+ }
+
+ v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video[%d-%d]\n",
+ dev->vfd[0].num, dev->vfd[i - 1].num);
+
+ pm_runtime_put_sync(&pdev->dev);
+ return;
+
+rel_vfd:
+ while (--i >= 0)
+ video_unregister_device(&dev->vfd[i]);
+ v4l2_m2m_release(dev->m2m_dev);
+put_pm:
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+enum coda_platform {
+ CODA_IMX27,
+ CODA_IMX51,
+ CODA_IMX53,
+ CODA_IMX6Q,
+ CODA_IMX6DL,
+};
+
+static const struct coda_devtype coda_devdata[] = {
+ [CODA_IMX27] = {
+ .firmware = {
+ "vpu_fw_imx27_TO2.bin",
+ "vpu/vpu_fw_imx27_TO2.bin",
+ "v4l-codadx6-imx27.bin"
+ },
+ .product = CODA_DX6,
+ .codecs = codadx6_codecs,
+ .num_codecs = ARRAY_SIZE(codadx6_codecs),
+ .vdevs = codadx6_video_devices,
+ .num_vdevs = ARRAY_SIZE(codadx6_video_devices),
+ .workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024,
+ .iram_size = 0xb000,
+ },
+ [CODA_IMX51] = {
+ .firmware = {
+ "vpu_fw_imx51.bin",
+ "vpu/vpu_fw_imx51.bin",
+ "v4l-codahx4-imx51.bin"
+ },
+ .product = CODA_HX4,
+ .codecs = codahx4_codecs,
+ .num_codecs = ARRAY_SIZE(codahx4_codecs),
+ .vdevs = codahx4_video_devices,
+ .num_vdevs = ARRAY_SIZE(codahx4_video_devices),
+ .workbuf_size = 128 * 1024,
+ .tempbuf_size = 304 * 1024,
+ .iram_size = 0x14000,
+ },
+ [CODA_IMX53] = {
+ .firmware = {
+ "vpu_fw_imx53.bin",
+ "vpu/vpu_fw_imx53.bin",
+ "v4l-coda7541-imx53.bin"
+ },
+ .product = CODA_7541,
+ .codecs = coda7_codecs,
+ .num_codecs = ARRAY_SIZE(coda7_codecs),
+ .vdevs = coda7_video_devices,
+ .num_vdevs = ARRAY_SIZE(coda7_video_devices),
+ .workbuf_size = 128 * 1024,
+ .tempbuf_size = 304 * 1024,
+ .iram_size = 0x14000,
+ },
+ [CODA_IMX6Q] = {
+ .firmware = {
+ "vpu_fw_imx6q.bin",
+ "vpu/vpu_fw_imx6q.bin",
+ "v4l-coda960-imx6q.bin"
+ },
+ .product = CODA_960,
+ .codecs = coda9_codecs,
+ .num_codecs = ARRAY_SIZE(coda9_codecs),
+ .vdevs = coda9_video_devices,
+ .num_vdevs = ARRAY_SIZE(coda9_video_devices),
+ .workbuf_size = 80 * 1024,
+ .tempbuf_size = 204 * 1024,
+ .iram_size = 0x21000,
+ },
+ [CODA_IMX6DL] = {
+ .firmware = {
+ "vpu_fw_imx6d.bin",
+ "vpu/vpu_fw_imx6d.bin",
+ "v4l-coda960-imx6dl.bin"
+ },
+ .product = CODA_960,
+ .codecs = coda9_codecs,
+ .num_codecs = ARRAY_SIZE(coda9_codecs),
+ .vdevs = coda9_video_devices,
+ .num_vdevs = ARRAY_SIZE(coda9_video_devices),
+ .workbuf_size = 80 * 1024,
+ .tempbuf_size = 204 * 1024,
+ .iram_size = 0x1f000, /* leave 4k for suspend code */
+ },
+};
+
+static const struct platform_device_id coda_platform_ids[] = {
+ { .name = "coda-imx27", .driver_data = CODA_IMX27 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, coda_platform_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id coda_dt_ids[] = {
+ { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
+ { .compatible = "fsl,imx51-vpu", .data = &coda_devdata[CODA_IMX51] },
+ { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
+ { .compatible = "fsl,imx6q-vpu", .data = &coda_devdata[CODA_IMX6Q] },
+ { .compatible = "fsl,imx6dl-vpu", .data = &coda_devdata[CODA_IMX6DL] },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, coda_dt_ids);
+#endif
+
+static int coda_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev);
+ const struct platform_device_id *pdev_id;
+ struct coda_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
+ struct gen_pool *pool;
+ struct coda_dev *dev;
+ struct resource *res;
+ int ret, irq;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+
+ if (of_id)
+ dev->devtype = of_id->data;
+ else if (pdev_id)
+ dev->devtype = &coda_devdata[pdev_id->driver_data];
+ else
+ return -EINVAL;
+
+ spin_lock_init(&dev->irqlock);
+ INIT_LIST_HEAD(&dev->instances);
+
+ dev->plat_dev = pdev;
+ dev->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(dev->clk_per)) {
+ dev_err(&pdev->dev, "Could not get per clock\n");
+ return PTR_ERR(dev->clk_per);
+ }
+
+ dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(dev->clk_ahb)) {
+ dev_err(&pdev->dev, "Could not get ahb clock\n");
+ return PTR_ERR(dev->clk_ahb);
+ }
+
+ /* Get memory for physical registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs_base))
+ return PTR_ERR(dev->regs_base);
+
+ /* IRQ */
+ irq = platform_get_irq_byname(pdev, "bit");
+ if (irq < 0)
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler,
+ IRQF_ONESHOT, dev_name(&pdev->dev), dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+ return ret;
+ }
+
+ dev->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ NULL);
+ if (IS_ERR(dev->rstc)) {
+ ret = PTR_ERR(dev->rstc);
+ dev_err(&pdev->dev, "failed get reset control: %d\n", ret);
+ return ret;
+ }
+
+ /* Get IRAM pool from device tree or platform data */
+ pool = of_gen_pool_get(np, "iram", 0);
+ if (!pool && pdata)
+ pool = gen_pool_get(pdata->iram_dev, NULL);
+ if (!pool) {
+ dev_err(&pdev->dev, "iram pool not available\n");
+ return -ENOMEM;
+ }
+ dev->iram_pool = pool;
+
+ /* Get vdoa_data if supported by the platform */
+ dev->vdoa = coda_get_vdoa_data();
+ if (PTR_ERR(dev->vdoa) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->coda_mutex);
+ ida_init(&dev->ida);
+
+ dev->debugfs_root = debugfs_create_dir("coda", NULL);
+ if (!dev->debugfs_root)
+ dev_warn(&pdev->dev, "failed to create debugfs root\n");
+
+ /* allocate auxiliary per-device buffers for the BIT processor */
+ if (dev->devtype->product == CODA_DX6) {
+ ret = coda_alloc_aux_buf(dev, &dev->workbuf,
+ dev->devtype->workbuf_size, "workbuf",
+ dev->debugfs_root);
+ if (ret < 0)
+ goto err_v4l2_register;
+ }
+
+ if (dev->devtype->tempbuf_size) {
+ ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
+ dev->devtype->tempbuf_size, "tempbuf",
+ dev->debugfs_root);
+ if (ret < 0)
+ goto err_v4l2_register;
+ }
+
+ dev->iram.size = dev->devtype->iram_size;
+ dev->iram.vaddr = gen_pool_dma_alloc(dev->iram_pool, dev->iram.size,
+ &dev->iram.paddr);
+ if (!dev->iram.vaddr) {
+ dev_warn(&pdev->dev, "unable to alloc iram\n");
+ } else {
+ memset(dev->iram.vaddr, 0, dev->iram.size);
+ dev->iram.blob.data = dev->iram.vaddr;
+ dev->iram.blob.size = dev->iram.size;
+ dev->iram.dentry = debugfs_create_blob("iram", 0644,
+ dev->debugfs_root,
+ &dev->iram.blob);
+ }
+
+ dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!dev->workqueue) {
+ dev_err(&pdev->dev, "unable to alloc workqueue\n");
+ ret = -ENOMEM;
+ goto err_v4l2_register;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Start activated so we can directly call coda_hw_init in
+ * coda_fw_callback regardless of whether CONFIG_PM is
+ * enabled or whether the device is associated with a PM domain.
+ */
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = coda_firmware_request(dev);
+ if (ret)
+ goto err_alloc_workqueue;
+ return 0;
+
+err_alloc_workqueue:
+ destroy_workqueue(dev->workqueue);
+err_v4l2_register:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+}
+
+static int coda_remove(struct platform_device *pdev)
+{
+ struct coda_dev *dev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->vfd); i++) {
+ if (video_get_drvdata(&dev->vfd[i]))
+ video_unregister_device(&dev->vfd[i]);
+ }
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ pm_runtime_disable(&pdev->dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ destroy_workqueue(dev->workqueue);
+ if (dev->iram.vaddr)
+ gen_pool_free(dev->iram_pool, (unsigned long)dev->iram.vaddr,
+ dev->iram.size);
+ coda_free_aux_buf(dev, &dev->codebuf);
+ coda_free_aux_buf(dev, &dev->tempbuf);
+ coda_free_aux_buf(dev, &dev->workbuf);
+ debugfs_remove_recursive(dev->debugfs_root);
+ ida_destroy(&dev->ida);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int coda_runtime_resume(struct device *dev)
+{
+ struct coda_dev *cdev = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (dev->pm_domain && cdev->codebuf.vaddr) {
+ ret = coda_hw_init(cdev);
+ if (ret)
+ v4l2_err(&cdev->v4l2_dev, "HW initialization failed\n");
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops coda_pm_ops = {
+ SET_RUNTIME_PM_OPS(NULL, coda_runtime_resume, NULL)
+};
+
+static struct platform_driver coda_driver = {
+ .probe = coda_probe,
+ .remove = coda_remove,
+ .driver = {
+ .name = CODA_NAME,
+ .of_match_table = of_match_ptr(coda_dt_ids),
+ .pm = &coda_pm_ops,
+ },
+ .id_table = coda_platform_ids,
+};
+
+module_platform_driver(coda_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_DESCRIPTION("Coda multi-standard codec V4L2 driver");
diff --git a/drivers/media/platform/coda/coda-gdi.c b/drivers/media/platform/coda/coda-gdi.c
new file mode 100644
index 000000000..aaa7afc68
--- /dev/null
+++ b/drivers/media/platform/coda/coda-gdi.c
@@ -0,0 +1,150 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include "coda.h"
+
+#define XY2_INVERT BIT(7)
+#define XY2_ZERO BIT(6)
+#define XY2_TB_XOR BIT(5)
+#define XY2_XYSEL BIT(4)
+#define XY2_Y (1 << 4)
+#define XY2_X (0 << 4)
+
+#define XY2(luma_sel, luma_bit, chroma_sel, chroma_bit) \
+ (((XY2_##luma_sel) | (luma_bit)) << 8 | \
+ (XY2_##chroma_sel) | (chroma_bit))
+
+static const u16 xy2ca_zero_map[16] = {
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+};
+
+static const u16 xy2ca_tiled_map[16] = {
+ XY2(Y, 0, Y, 0),
+ XY2(Y, 1, Y, 1),
+ XY2(Y, 2, Y, 2),
+ XY2(Y, 3, X, 3),
+ XY2(X, 3, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+};
+
+/*
+ * RA[15:0], CA[15:8] are hardwired to contain the 24-bit macroblock
+ * start offset (macroblock size is 16x16 for luma, 16x8 for chroma).
+ * Bits CA[4:0] are set using XY2CA above. BA[3:0] seems to be unused.
+ */
+
+#define RBC_CA (0 << 4)
+#define RBC_BA (1 << 4)
+#define RBC_RA (2 << 4)
+#define RBC_ZERO (3 << 4)
+
+#define RBC(luma_sel, luma_bit, chroma_sel, chroma_bit) \
+ (((RBC_##luma_sel) | (luma_bit)) << 6 | \
+ (RBC_##chroma_sel) | (chroma_bit))
+
+static const u16 rbc2axi_tiled_map[32] = {
+ RBC(ZERO, 0, ZERO, 0),
+ RBC(ZERO, 0, ZERO, 0),
+ RBC(ZERO, 0, ZERO, 0),
+ RBC(CA, 0, CA, 0),
+ RBC(CA, 1, CA, 1),
+ RBC(CA, 2, CA, 2),
+ RBC(CA, 3, CA, 3),
+ RBC(CA, 4, CA, 8),
+ RBC(CA, 8, CA, 9),
+ RBC(CA, 9, CA, 10),
+ RBC(CA, 10, CA, 11),
+ RBC(CA, 11, CA, 12),
+ RBC(CA, 12, CA, 13),
+ RBC(CA, 13, CA, 14),
+ RBC(CA, 14, CA, 15),
+ RBC(CA, 15, RA, 0),
+ RBC(RA, 0, RA, 1),
+ RBC(RA, 1, RA, 2),
+ RBC(RA, 2, RA, 3),
+ RBC(RA, 3, RA, 4),
+ RBC(RA, 4, RA, 5),
+ RBC(RA, 5, RA, 6),
+ RBC(RA, 6, RA, 7),
+ RBC(RA, 7, RA, 8),
+ RBC(RA, 8, RA, 9),
+ RBC(RA, 9, RA, 10),
+ RBC(RA, 10, RA, 11),
+ RBC(RA, 11, RA, 12),
+ RBC(RA, 12, RA, 13),
+ RBC(RA, 13, RA, 14),
+ RBC(RA, 14, RA, 15),
+ RBC(RA, 15, ZERO, 0),
+};
+
+void coda_set_gdi_regs(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ const u16 *xy2ca_map;
+ u32 xy2rbc_config;
+ int i;
+
+ switch (ctx->tiled_map_type) {
+ case GDI_LINEAR_FRAME_MAP:
+ default:
+ xy2ca_map = xy2ca_zero_map;
+ xy2rbc_config = 0;
+ break;
+ case GDI_TILED_FRAME_MB_RASTER_MAP:
+ xy2ca_map = xy2ca_tiled_map;
+ xy2rbc_config = CODA9_XY2RBC_TILED_MAP |
+ CODA9_XY2RBC_CA_INC_HOR |
+ (16 - 1) << 12 | (8 - 1) << 4;
+ break;
+ }
+
+ for (i = 0; i < 16; i++)
+ coda_write(dev, xy2ca_map[i],
+ CODA9_GDI_XY2_CAS_0 + 4 * i);
+ for (i = 0; i < 4; i++)
+ coda_write(dev, XY2(ZERO, 0, ZERO, 0),
+ CODA9_GDI_XY2_BA_0 + 4 * i);
+ for (i = 0; i < 16; i++)
+ coda_write(dev, XY2(ZERO, 0, ZERO, 0),
+ CODA9_GDI_XY2_RAS_0 + 4 * i);
+ coda_write(dev, xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
+ if (xy2rbc_config) {
+ for (i = 0; i < 32; i++)
+ coda_write(dev, rbc2axi_tiled_map[i],
+ CODA9_GDI_RBC2_AXI_0 + 4 * i);
+ }
+}
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
new file mode 100644
index 000000000..635356a83
--- /dev/null
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -0,0 +1,432 @@
+/*
+ * Coda multi-standard codec IP - H.264 helper functions
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+#include <coda.h>
+
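+/*
+ * Filler NAL sizes, indexed by (size % 8), used by coda_h264_padding() to pad
+ * a given size up to a multiple of 8 bytes.
+ */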
+static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
+
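+/*
+ * Find the next 00 00 00 01 start code and return a pointer to the byte
+ * following it, or NULL if no start code is found before @end.
+ */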
+static const u8 *coda_find_nal_header(const u8 *buf, const u8 *end)
+{
+ u32 val = 0xffffffff;
+
+ do {
+ val = val << 8 | *buf++;
+ if (buf >= end)
+ return NULL;
+ } while (val != 0x00000001);
+
+ return buf;
+}
+
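+/*
+ * Scan the buffer for an SPS NAL (nal_unit_type 7); the SPS payload starts
+ * with profile_idc, the constraint flags byte and level_idc, so those can be
+ * read directly from its first three bytes.
+ */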
+int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb)
+{
+ const u8 *buf = vb2_plane_vaddr(vb, 0);
+ const u8 *end = buf + vb2_get_plane_payload(vb, 0);
+
+ /* Find SPS header */
+ do {
+ buf = coda_find_nal_header(buf, end);
+ if (!buf)
+ return -EINVAL;
+ } while ((*buf++ & 0x1f) != 0x7);
+
+ ctx->params.h264_profile_idc = buf[0];
+ ctx->params.h264_level_idc = buf[2];
+
+ return 0;
+}
+
+int coda_h264_filler_nal(int size, char *p)
+{
+ if (size < 6)
+ return -EINVAL;
+
+ p[0] = 0x00;
+ p[1] = 0x00;
+ p[2] = 0x00;
+ p[3] = 0x01;
+ p[4] = 0x0c;
+ memset(p + 5, 0xff, size - 6);
+ /* Add rbsp stop bit and trailing at the end */
+ p[size - 1] = 0x80;
+
+ return 0;
+}
+
+int coda_h264_padding(int size, char *p)
+{
+ int nal_size;
+ int diff;
+
+ diff = size - (size & ~0x7);
+ if (diff == 0)
+ return 0;
+
+ nal_size = coda_filler_size[diff];
+ coda_h264_filler_nal(nal_size, p);
+
+ return nal_size;
+}
+
+int coda_h264_profile(int profile_idc)
+{
+ switch (profile_idc) {
+ case 66: return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+ case 77: return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
+ case 88: return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
+ case 100: return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+ default: return -EINVAL;
+ }
+}
+
+int coda_h264_level(int level_idc)
+{
+ switch (level_idc) {
+ case 10: return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+ case 9: return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+ case 11: return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+ case 12: return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
+ case 13: return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
+ case 20: return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
+ case 21: return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+ case 22: return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+ case 30: return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
+ case 31: return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+ case 32: return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+ case 40: return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ case 41: return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ case 42: return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ case 50: return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ case 51: return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+ default: return -EINVAL;
+ }
+}
+
+struct rbsp {
+ char *buf;
+ int size;
+ int pos;
+};
+
+static inline int rbsp_read_bit(struct rbsp *rbsp)
+{
+ int shift = 7 - (rbsp->pos % 8);
+ int ofs = rbsp->pos++ / 8;
+
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ return (rbsp->buf[ofs] >> shift) & 1;
+}
+
+static inline int rbsp_write_bit(struct rbsp *rbsp, int bit)
+{
+ int shift = 7 - (rbsp->pos % 8);
+ int ofs = rbsp->pos++ / 8;
+
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ rbsp->buf[ofs] &= ~(1 << shift);
+ rbsp->buf[ofs] |= bit << shift;
+
+ return 0;
+}
+
+static inline int rbsp_read_bits(struct rbsp *rbsp, int num, int *val)
+{
+ int i, ret;
+ int tmp = 0;
+
+ if (num > 32)
+ return -EINVAL;
+
+ for (i = 0; i < num; i++) {
+ ret = rbsp_read_bit(rbsp);
+ if (ret < 0)
+ return ret;
+ tmp |= ret << (num - i - 1);
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static int rbsp_write_bits(struct rbsp *rbsp, int num, int value)
+{
+ int ret;
+
+ while (num--) {
+ ret = rbsp_write_bit(rbsp, (value >> num) & 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
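+/*
+ * Unsigned Exp-Golomb codes (ue(v)): N leading zero bits followed by the
+ * (N + 1)-bit binary value of (val + 1), e.g. val = 3 is coded as 00100.
+ */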
+static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *val)
+{
+ int leading_zero_bits = 0;
+ unsigned int tmp = 0;
+ int ret;
+
+ while ((ret = rbsp_read_bit(rbsp)) == 0)
+ leading_zero_bits++;
+ if (ret < 0)
+ return ret;
+
+ if (leading_zero_bits > 0) {
+ ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
+ if (ret)
+ return ret;
+ }
+
+ if (val)
+ *val = (1 << leading_zero_bits) - 1 + tmp;
+
+ return 0;
+}
+
+static int rbsp_write_uev(struct rbsp *rbsp, unsigned int value)
+{
+ int i;
+ int ret;
+ int tmp = value + 1;
+ int leading_zero_bits = fls(tmp) - 1;
+
+ for (i = 0; i < leading_zero_bits; i++) {
+ ret = rbsp_write_bit(rbsp, 0);
+ if (ret)
+ return ret;
+ }
+
+ return rbsp_write_bits(rbsp, leading_zero_bits + 1, tmp);
+}
+
+static int rbsp_read_sev(struct rbsp *rbsp, int *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = rbsp_read_uev(rbsp, &tmp);
+ if (ret)
+ return ret;
+
+ if (val) {
+ if (tmp & 1)
+ *val = (tmp + 1) / 2;
+ else
+ *val = -(tmp / 2);
+ }
+
+ return 0;
+}
+
+/**
+ * coda_h264_sps_fixup - fixes frame cropping values in h.264 SPS
+ * @ctx: encoder context
+ * @width: visible width
+ * @height: visible height
+ * @buf: buffer containing h.264 SPS RBSP, starting with NAL header
+ * @size: modified RBSP size return value
+ * @max_size: available size in buf
+ *
+ * Rewrites the frame cropping values in an h.264 SPS RBSP correctly for the
+ * given visible width and height.
+ */
+int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
+ int *size, int max_size)
+{
+ int profile_idc;
+ unsigned int pic_order_cnt_type;
+ int pic_width_in_mbs_minus1, pic_height_in_map_units_minus1;
+ int frame_mbs_only_flag, frame_cropping_flag;
+ int vui_parameters_present_flag;
+ unsigned int crop_right, crop_bottom;
+ struct rbsp sps;
+ int pos;
+ int ret;
+
+ if (*size < 8 || *size >= max_size)
+ return -EINVAL;
+
+ sps.buf = buf + 5; /* Skip NAL header */
+ sps.size = *size - 5;
+
+ profile_idc = sps.buf[0];
+ /* Skip constraint_set[0-5]_flag, reserved_zero_2bits */
+ /* Skip level_idc */
+ sps.pos = 24;
+
+ /* seq_parameter_set_id */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 ||
+ profile_idc == 244 || profile_idc == 44 || profile_idc == 83 ||
+ profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ||
+ profile_idc == 138 || profile_idc == 139 || profile_idc == 134 ||
+ profile_idc == 135) {
+ dev_err(ctx->fh.vdev->dev_parent,
+ "%s: Handling profile_idc %d not implemented\n",
+ __func__, profile_idc);
+ return -EINVAL;
+ }
+
+ /* log2_max_frame_num_minus4 */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ ret = rbsp_read_uev(&sps, &pic_order_cnt_type);
+ if (ret)
+ return ret;
+
+ if (pic_order_cnt_type == 0) {
+ /* log2_max_pic_order_cnt_lsb_minus4 */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+ } else if (pic_order_cnt_type == 1) {
+ unsigned int i, num_ref_frames_in_pic_order_cnt_cycle;
+
+ /* delta_pic_order_always_zero_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ /* offset_for_non_ref_pic */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+ /* offset_for_top_to_bottom_field */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ ret = rbsp_read_uev(&sps,
+ &num_ref_frames_in_pic_order_cnt_cycle);
+ if (ret)
+ return ret;
+ for (i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
+ /* offset_for_ref_frame */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* max_num_ref_frames */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ /* gaps_in_frame_num_value_allowed_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ ret = rbsp_read_uev(&sps, &pic_width_in_mbs_minus1);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &pic_height_in_map_units_minus1);
+ if (ret)
+ return ret;
+ frame_mbs_only_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (!frame_mbs_only_flag) {
+ /* mb_adaptive_frame_field_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ }
+ /* direct_8x8_inference_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+
+ /* Mark position of the frame cropping flag */
+ pos = sps.pos;
+ frame_cropping_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (frame_cropping_flag) {
+ unsigned int crop_left, crop_top;
+
+ ret = rbsp_read_uev(&sps, &crop_left);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_right);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_top);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_bottom);
+ if (ret)
+ return ret;
+ }
+ vui_parameters_present_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (vui_parameters_present_flag) {
+ dev_err(ctx->fh.vdev->dev_parent,
+ "%s: Handling vui_parameters not implemented\n",
+ __func__);
+ return -EINVAL;
+ }
+
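+ /*
+ * For 4:2:0 streams the SPS frame cropping offsets are expressed in
+ * units of 2 pixels horizontally and 2 (frame coding) or 4 (field
+ * coding) pixels vertically, hence the divisions below.
+ */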
+ crop_right = round_up(width, 16) - width;
+ crop_bottom = round_up(height, 16) - height;
+ crop_right /= 2;
+ if (frame_mbs_only_flag)
+ crop_bottom /= 2;
+ else
+ crop_bottom /= 4;
+
+ sps.size = max_size - 5;
+ sps.pos = pos;
+ frame_cropping_flag = 1;
+ ret = rbsp_write_bit(&sps, frame_cropping_flag);
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, 0); /* crop_left */
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, crop_right);
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, 0); /* crop_top */
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, crop_bottom);
+ if (ret)
+ return ret;
+ ret = rbsp_write_bit(&sps, 0); /* vui_parameters_present_flag */
+ if (ret)
+ return ret;
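+ /* rbsp_stop_one_bit: terminate the rewritten SPS RBSP */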
+ ret = rbsp_write_bit(&sps, 1);
+ if (ret)
+ return ret;
+
+ *size = 5 + DIV_ROUND_UP(sps.pos, 8);
+
+ return 0;
+}
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
new file mode 100644
index 000000000..9f899a6ce
--- /dev/null
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -0,0 +1,253 @@
+/*
+ * Coda multi-standard codec IP - JPEG support functions
+ *
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/swab.h>
+
+#include "coda.h"
+#include "trace.h"
+
+#define SOI_MARKER 0xffd8
+#define EOI_MARKER 0xffd9
+
+/*
+ * Typical Huffman tables for 8-bit precision luminance and
+ * chrominance from JPEG ITU-T.81 (ISO/IEC 10918-1) Annex K.3
+ */
+
+static const unsigned char luma_dc_bits[16] = {
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const unsigned char luma_dc_value[12] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char chroma_dc_bits[16] = {
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const unsigned char chroma_dc_value[12] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char luma_ac_bits[16] = {
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
+ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
+};
+
+static const unsigned char luma_ac_value[162 + 2] = {
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa, /* padded to 32-bit */
+};
+
+static const unsigned char chroma_ac_bits[16] = {
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+};
+
+static const unsigned char chroma_ac_value[162 + 2] = {
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa, /* padded to 32-bit */
+};
+
+/*
+ * Quantization tables for luminance and chrominance components in
+ * zig-zag scan order from the Freescale i.MX VPU libraries
+ */
+
+static unsigned char luma_q[64] = {
+ 0x06, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x05,
+ 0x05, 0x06, 0x09, 0x06, 0x05, 0x06, 0x09, 0x0b,
+ 0x08, 0x06, 0x06, 0x08, 0x0b, 0x0c, 0x0a, 0x0a,
+ 0x0b, 0x0a, 0x0a, 0x0c, 0x10, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x10, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+};
+
+static unsigned char chroma_q[64] = {
+ 0x07, 0x07, 0x07, 0x0d, 0x0c, 0x0d, 0x18, 0x10,
+ 0x10, 0x18, 0x14, 0x0e, 0x0e, 0x0e, 0x14, 0x14,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x14, 0x11, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x11, 0x11, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+};
+
+struct coda_memcpy_desc {
+ int offset;
+ const void *src;
+ size_t len;
+};
+
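+/*
+ * Copy data into the parameter buffer, reversing the byte order within each
+ * aligned 8-byte group; the BIT processor apparently reads parameter memory
+ * as big-endian 64-bit words.
+ */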
+static void coda_memcpy_parabuf(void *parabuf,
+ const struct coda_memcpy_desc *desc)
+{
+ u32 *dst = parabuf + desc->offset;
+ const u32 *src = desc->src;
+ int len = desc->len / 4;
+ int i;
+
+ for (i = 0; i < len; i += 2) {
+ dst[i + 1] = swab32(src[i]);
+ dst[i] = swab32(src[i + 1]);
+ }
+}
+
+int coda_jpeg_write_tables(struct coda_ctx *ctx)
+{
+ int i;
+ static const struct coda_memcpy_desc huff[8] = {
+ { 0, luma_dc_bits, sizeof(luma_dc_bits) },
+ { 16, luma_dc_value, sizeof(luma_dc_value) },
+ { 32, luma_ac_bits, sizeof(luma_ac_bits) },
+ { 48, luma_ac_value, sizeof(luma_ac_value) },
+ { 216, chroma_dc_bits, sizeof(chroma_dc_bits) },
+ { 232, chroma_dc_value, sizeof(chroma_dc_value) },
+ { 248, chroma_ac_bits, sizeof(chroma_ac_bits) },
+ { 264, chroma_ac_value, sizeof(chroma_ac_value) },
+ };
+ struct coda_memcpy_desc qmat[3] = {
+ { 512, ctx->params.jpeg_qmat_tab[0], 64 },
+ { 576, ctx->params.jpeg_qmat_tab[1], 64 },
+ { 640, ctx->params.jpeg_qmat_tab[1], 64 },
+ };
+
+ /* Write huffman tables to parameter memory */
+ for (i = 0; i < ARRAY_SIZE(huff); i++)
+ coda_memcpy_parabuf(ctx->parabuf.vaddr, huff + i);
+
+ /* Write Q-matrix to parameter memory */
+ for (i = 0; i < ARRAY_SIZE(qmat); i++)
+ coda_memcpy_parabuf(ctx->parabuf.vaddr, qmat + i);
+
+ return 0;
+}
+
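+/*
+ * Check that the buffer starts with an SOI marker and that an EOI marker is
+ * found within the last 32 bytes; any trailing bytes after the EOI marker
+ * are trimmed from the plane payload.
+ */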
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
+{
+ void *vaddr = vb2_plane_vaddr(vb, 0);
+ u16 soi, eoi;
+ int len, i;
+
+ soi = be16_to_cpup((__be16 *)vaddr);
+ if (soi != SOI_MARKER)
+ return false;
+
+ len = vb2_get_plane_payload(vb, 0);
+ vaddr += len - 2;
+ for (i = 0; i < 32; i++) {
+ eoi = be16_to_cpup((__be16 *)(vaddr - i));
+ if (eoi == EOI_MARKER) {
+ if (i > 0)
+ vb2_set_plane_payload(vb, 0, len - i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Scale quantization table using nonlinear scaling factor
+ * u8 qtab[64], scale [50,190]
+ */
+static void coda_scale_quant_table(u8 *q_tab, int scale)
+{
+ unsigned int temp;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ temp = DIV_ROUND_CLOSEST((unsigned int)q_tab[i] * scale, 100);
+ if (temp <= 0)
+ temp = 1;
+ if (temp > 255)
+ temp = 255;
+ q_tab[i] = (unsigned char)temp;
+ }
+}
+
+void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality)
+{
+ unsigned int scale;
+
+ ctx->params.jpeg_quality = quality;
+
+ /* Clip quality setting to [5,100] interval */
+ if (quality > 100)
+ quality = 100;
+ if (quality < 5)
+ quality = 5;
+
+ /*
+ * Non-linear scaling factor:
+ * [5,50] -> [1000..100], [51,100] -> [98..0]
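+ * e.g. quality 5 -> scale 1000, quality 50 -> 100 (tables used unchanged),
+ * quality 100 -> 0 (each entry then clamps to the minimum of 1)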
+ */
+ if (quality < 50)
+ scale = 5000 / quality;
+ else
+ scale = 200 - 2 * quality;
+
+ if (ctx->params.jpeg_qmat_tab[0]) {
+ memcpy(ctx->params.jpeg_qmat_tab[0], luma_q, 64);
+ coda_scale_quant_table(ctx->params.jpeg_qmat_tab[0], scale);
+ }
+ if (ctx->params.jpeg_qmat_tab[1]) {
+ memcpy(ctx->params.jpeg_qmat_tab[1], chroma_q, 64);
+ coda_scale_quant_table(ctx->params.jpeg_qmat_tab[1], scale);
+ }
+}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
new file mode 100644
index 000000000..8df02c327
--- /dev/null
+++ b/drivers/media/platform/coda/coda.h
@@ -0,0 +1,321 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ * Copyright (C) 2012-2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __CODA_H__
+#define __CODA_H__
+
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/kfifo.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "coda_regs.h"
+
+#define CODA_MAX_FRAMEBUFFERS 19
+#define FMO_SLICE_SAVE_BUF_SIZE (32)
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+enum coda_inst_type {
+ CODA_INST_ENCODER,
+ CODA_INST_DECODER,
+};
+
+enum coda_product {
+ CODA_DX6 = 0xf001,
+ CODA_HX4 = 0xf00a,
+ CODA_7541 = 0xf012,
+ CODA_960 = 0xf020,
+};
+
+struct coda_video_device;
+
+struct coda_devtype {
+ char *firmware[3];
+ enum coda_product product;
+ const struct coda_codec *codecs;
+ unsigned int num_codecs;
+ const struct coda_video_device **vdevs;
+ unsigned int num_vdevs;
+ size_t workbuf_size;
+ size_t tempbuf_size;
+ size_t iram_size;
+};
+
+struct coda_aux_buf {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ struct debugfs_blob_wrapper blob;
+ struct dentry *dentry;
+};
+
+struct coda_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd[5];
+ struct platform_device *plat_dev;
+ const struct coda_devtype *devtype;
+ int firmware;
+ struct vdoa_data *vdoa;
+
+ void __iomem *regs_base;
+ struct clk *clk_per;
+ struct clk *clk_ahb;
+ struct reset_control *rstc;
+
+ struct coda_aux_buf codebuf;
+ struct coda_aux_buf tempbuf;
+ struct coda_aux_buf workbuf;
+ struct gen_pool *iram_pool;
+ struct coda_aux_buf iram;
+
+ spinlock_t irqlock;
+ struct mutex dev_mutex;
+ struct mutex coda_mutex;
+ struct workqueue_struct *workqueue;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct list_head instances;
+ struct ida ida;
+ struct dentry *debugfs_root;
+};
+
+struct coda_codec {
+ u32 mode;
+ u32 src_fourcc;
+ u32 dst_fourcc;
+ u32 max_w;
+ u32 max_h;
+};
+
+struct coda_huff_tab;
+
+struct coda_params {
+ u8 rot_mode;
+ u8 h264_intra_qp;
+ u8 h264_inter_qp;
+ u8 h264_min_qp;
+ u8 h264_max_qp;
+ u8 h264_disable_deblocking_filter_idc;
+ s8 h264_slice_alpha_c0_offset_div2;
+ s8 h264_slice_beta_offset_div2;
+ u8 h264_profile_idc;
+ u8 h264_level_idc;
+ u8 mpeg4_intra_qp;
+ u8 mpeg4_inter_qp;
+ u8 gop_size;
+ int intra_refresh;
+ u8 jpeg_quality;
+ u8 jpeg_restart_interval;
+ u8 *jpeg_qmat_tab[3];
+ int codec_mode;
+ int codec_mode_aux;
+ enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+ u32 framerate;
+ u16 bitrate;
+ u16 vbv_delay;
+ u32 vbv_size;
+ u32 slice_max_bits;
+ u32 slice_max_mb;
+ bool force_ipicture;
+};
+
+struct coda_buffer_meta {
+ struct list_head list;
+ u32 sequence;
+ struct v4l2_timecode timecode;
+ u64 timestamp;
+ u32 start;
+ u32 end;
+};
+
+/* Per-queue, driver-specific private data */
+struct coda_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+ unsigned int fourcc;
+ struct v4l2_rect rect;
+};
+
+struct coda_iram_info {
+ u32 axi_sram_use;
+ phys_addr_t buf_bit_use;
+ phys_addr_t buf_ip_ac_dc_use;
+ phys_addr_t buf_dbk_y_use;
+ phys_addr_t buf_dbk_c_use;
+ phys_addr_t buf_ovl_use;
+ phys_addr_t buf_btp_use;
+ phys_addr_t search_ram_paddr;
+ int search_ram_size;
+ int remaining;
+ phys_addr_t next_paddr;
+};
+
+#define GDI_LINEAR_FRAME_MAP 0
+#define GDI_TILED_FRAME_MB_RASTER_MAP 1
+
+struct coda_ctx;
+
+struct coda_context_ops {
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+ int (*reqbufs)(struct coda_ctx *ctx, struct v4l2_requestbuffers *rb);
+ int (*start_streaming)(struct coda_ctx *ctx);
+ int (*prepare_run)(struct coda_ctx *ctx);
+ void (*finish_run)(struct coda_ctx *ctx);
+ void (*run_timeout)(struct coda_ctx *ctx);
+ void (*seq_end_work)(struct work_struct *work);
+ void (*release)(struct coda_ctx *ctx);
+};
+
+struct coda_ctx {
+ struct coda_dev *dev;
+ struct mutex buffer_mutex;
+ struct list_head list;
+ struct work_struct pic_run_work;
+ struct work_struct seq_end_work;
+ struct completion completion;
+ const struct coda_video_device *cvd;
+ const struct coda_context_ops *ops;
+ int aborting;
+ int initialized;
+ int streamon_out;
+ int streamon_cap;
+ u32 qsequence;
+ u32 osequence;
+ u32 sequence_offset;
+ struct coda_q_data q_data[2];
+ enum coda_inst_type inst_type;
+ const struct coda_codec *codec;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ struct coda_params params;
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *h264_profile_ctrl;
+ struct v4l2_ctrl *h264_level_ctrl;
+ struct v4l2_fh fh;
+ int gopcounter;
+ int runcounter;
+ char vpu_header[3][64];
+ int vpu_header_size[3];
+ struct kfifo bitstream_fifo;
+ struct mutex bitstream_mutex;
+ struct coda_aux_buf bitstream;
+ bool hold;
+ struct coda_aux_buf parabuf;
+ struct coda_aux_buf psbuf;
+ struct coda_aux_buf slicebuf;
+ struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
+ u32 frame_types[CODA_MAX_FRAMEBUFFERS];
+ struct coda_buffer_meta frame_metas[CODA_MAX_FRAMEBUFFERS];
+ u32 frame_errors[CODA_MAX_FRAMEBUFFERS];
+ struct list_head buffer_meta_list;
+ spinlock_t buffer_meta_lock;
+ int num_metas;
+ struct coda_aux_buf workbuf;
+ int num_internal_frames;
+ int idx;
+ int reg_idx;
+ struct coda_iram_info iram_info;
+ int tiled_map_type;
+ u32 bit_stream_param;
+ u32 frm_dis_flg;
+ u32 frame_mem_ctrl;
+ int display_idx;
+ struct dentry *debugfs_entry;
+ bool use_bit;
+ bool use_vdoa;
+ struct vdoa_ctx *vdoa;
+};
+
+extern int coda_debug;
+
+void coda_write(struct coda_dev *dev, u32 data, u32 reg);
+unsigned int coda_read(struct coda_dev *dev, u32 reg);
+void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
+ struct vb2_v4l2_buffer *buf, unsigned int reg_y);
+
+int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
+ size_t size, const char *name, struct dentry *parent);
+void coda_free_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf);
+
+int coda_encoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+int coda_hw_reset(struct coda_ctx *ctx);
+
+void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list);
+
+void coda_set_gdi_regs(struct coda_ctx *ctx);
+
+static inline struct coda_q_data *get_q_data(struct coda_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &(ctx->q_data[V4L2_M2M_SRC]);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &(ctx->q_data[V4L2_M2M_DST]);
+ default:
+ return NULL;
+ }
+}
+
+const char *coda_product_name(int product);
+
+int coda_check_firmware(struct coda_dev *dev);
+
+static inline unsigned int coda_get_bitstream_payload(struct coda_ctx *ctx)
+{
+ return kfifo_len(&ctx->bitstream_fifo);
+}
+
+void coda_bit_stream_end_flag(struct coda_ctx *ctx);
+
+void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
+ enum vb2_buffer_state state);
+
+int coda_h264_filler_nal(int size, char *p);
+int coda_h264_padding(int size, char *p);
+int coda_h264_profile(int profile_idc);
+int coda_h264_level(int level_idc);
+int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb);
+int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
+ int *size, int max_size);
+
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
+int coda_jpeg_write_tables(struct coda_ctx *ctx);
+void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
+
+extern const struct coda_context_ops coda_bit_encode_ops;
+extern const struct coda_context_ops coda_bit_decode_ops;
+
+irqreturn_t coda_irq_handler(int irq, void *data);
+
+#endif /* __CODA_H__ */
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
new file mode 100644
index 000000000..e675e38f3
--- /dev/null
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -0,0 +1,466 @@
+/*
+ * linux/drivers/media/platform/coda/coda_regs.h
+ *
+ * Copyright (C) 2012 Vista Silicon SL
+ * Javier Martin <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _REGS_CODA_H_
+#define _REGS_CODA_H_
+
+/* HW registers */
+#define CODA_REG_BIT_CODE_RUN 0x000
+#define CODA_REG_RUN_ENABLE (1 << 0)
+#define CODA_REG_BIT_CODE_DOWN 0x004
+#define CODA_DOWN_ADDRESS_SET(x) (((x) & 0xffff) << 16)
+#define CODA_DOWN_DATA_SET(x) ((x) & 0xffff)
+#define CODA_REG_BIT_HOST_IN_REQ 0x008
+#define CODA_REG_BIT_INT_CLEAR 0x00c
+#define CODA_REG_BIT_INT_CLEAR_SET 0x1
+#define CODA_REG_BIT_INT_STATUS 0x010
+#define CODA_REG_BIT_CODE_RESET 0x014
+#define CODA_REG_RESET_ENABLE (1 << 0)
+#define CODA_REG_BIT_CUR_PC 0x018
+#define CODA9_REG_BIT_SW_RESET 0x024
+#define CODA9_SW_RESET_BPU_CORE 0x008
+#define CODA9_SW_RESET_BPU_BUS 0x010
+#define CODA9_SW_RESET_VCE_CORE 0x020
+#define CODA9_SW_RESET_VCE_BUS 0x040
+#define CODA9_SW_RESET_GDI_CORE 0x080
+#define CODA9_SW_RESET_GDI_BUS 0x100
+#define CODA9_REG_BIT_SW_RESET_STATUS 0x034
+
+/* Static SW registers */
+#define CODA_REG_BIT_CODE_BUF_ADDR 0x100
+#define CODA_REG_BIT_WORK_BUF_ADDR 0x104
+#define CODA_REG_BIT_PARA_BUF_ADDR 0x108
+#define CODA_REG_BIT_STREAM_CTRL 0x10c
+#define CODA7_STREAM_BUF_PIC_RESET (1 << 4)
+#define CODADX6_STREAM_BUF_PIC_RESET (1 << 3)
+#define CODA7_STREAM_BUF_PIC_FLUSH (1 << 3)
+#define CODADX6_STREAM_BUF_PIC_FLUSH (1 << 2)
+#define CODA7_STREAM_BUF_DYNALLOC_EN (1 << 5)
+#define CODADX6_STREAM_BUF_DYNALLOC_EN (1 << 4)
+#define CODADX6_STREAM_CHKDIS_OFFSET (1 << 1)
+#define CODA7_STREAM_SEL_64BITS_ENDIAN (1 << 1)
+#define CODA_STREAM_ENDIAN_SELECT (1 << 0)
+#define CODA_REG_BIT_FRAME_MEM_CTRL 0x110
+#define CODA9_FRAME_ENABLE_BWB (1 << 12)
+#define CODA9_FRAME_TILED2LINEAR (1 << 11)
+#define CODA_FRAME_CHROMA_INTERLEAVE (1 << 2)
+#define CODA_IMAGE_ENDIAN_SELECT (1 << 0)
+#define CODA_REG_BIT_BIT_STREAM_PARAM 0x114
+#define CODA_BIT_STREAM_END_FLAG (1 << 2)
+#define CODA_BIT_DEC_SEQ_INIT_ESCAPE (1 << 0)
+#define CODA_REG_BIT_TEMP_BUF_ADDR 0x118
+#define CODA_REG_BIT_RD_PTR(x) (0x120 + 8 * (x))
+#define CODA_REG_BIT_WR_PTR(x) (0x124 + 8 * (x))
+#define CODA_REG_BIT_FRM_DIS_FLG(x) (0x150 + 4 * (x))
+#define CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR 0x140
+#define CODA7_REG_BIT_AXI_SRAM_USE 0x140
+#define CODA9_USE_HOST_BTP_ENABLE (1 << 13)
+#define CODA9_USE_HOST_OVL_ENABLE (1 << 12)
+#define CODA7_USE_HOST_ME_ENABLE (1 << 11)
+#define CODA9_USE_HOST_DBK_ENABLE (3 << 10)
+#define CODA7_USE_HOST_OVL_ENABLE (1 << 10)
+#define CODA7_USE_HOST_DBK_ENABLE (1 << 9)
+#define CODA9_USE_HOST_IP_ENABLE (1 << 9)
+#define CODA7_USE_HOST_IP_ENABLE (1 << 8)
+#define CODA9_USE_HOST_BIT_ENABLE (1 << 8)
+#define CODA7_USE_HOST_BIT_ENABLE (1 << 7)
+#define CODA9_USE_BTP_ENABLE (1 << 5)
+#define CODA7_USE_ME_ENABLE (1 << 4)
+#define CODA9_USE_OVL_ENABLE (1 << 4)
+#define CODA7_USE_OVL_ENABLE (1 << 3)
+#define CODA9_USE_DBK_ENABLE (3 << 2)
+#define CODA7_USE_DBK_ENABLE (1 << 2)
+#define CODA7_USE_IP_ENABLE (1 << 1)
+#define CODA7_USE_BIT_ENABLE (1 << 0)
+
+#define CODA_REG_BIT_BUSY 0x160
+#define CODA_REG_BIT_BUSY_FLAG 1
+#define CODA_REG_BIT_RUN_COMMAND 0x164
+#define CODA_COMMAND_SEQ_INIT 1
+#define CODA_COMMAND_SEQ_END 2
+#define CODA_COMMAND_PIC_RUN 3
+#define CODA_COMMAND_SET_FRAME_BUF 4
+#define CODA_COMMAND_ENCODE_HEADER 5
+#define CODA_COMMAND_ENC_PARA_SET 6
+#define CODA_COMMAND_DEC_PARA_SET 7
+#define CODA_COMMAND_DEC_BUF_FLUSH 8
+#define CODA_COMMAND_RC_CHANGE_PARAMETER 9
+#define CODA_COMMAND_FIRMWARE_GET 0xf
+#define CODA_REG_BIT_RUN_INDEX 0x168
+#define CODA_INDEX_SET(x) ((x) & 0x3)
+#define CODA_REG_BIT_RUN_COD_STD 0x16c
+#define CODADX6_MODE_DECODE_MP4 0
+#define CODADX6_MODE_ENCODE_MP4 1
+#define CODADX6_MODE_DECODE_H264 2
+#define CODADX6_MODE_ENCODE_H264 3
+#define CODA7_MODE_DECODE_H264 0
+#define CODA7_MODE_DECODE_VC1 1
+#define CODA7_MODE_DECODE_MP2 2
+#define CODA7_MODE_DECODE_MP4 3
+#define CODA7_MODE_DECODE_DV3 3
+#define CODA7_MODE_DECODE_RV 4
+#define CODA7_MODE_DECODE_MJPG 5
+#define CODA7_MODE_ENCODE_H264 8
+#define CODA7_MODE_ENCODE_MP4 11
+#define CODA7_MODE_ENCODE_MJPG 13
+#define CODA9_MODE_DECODE_H264 0
+#define CODA9_MODE_DECODE_VC1 1
+#define CODA9_MODE_DECODE_MP2 2
+#define CODA9_MODE_DECODE_MP4 3
+#define CODA9_MODE_DECODE_DV3 3
+#define CODA9_MODE_DECODE_RV 4
+#define CODA9_MODE_DECODE_AVS 5
+#define CODA9_MODE_DECODE_MJPG 6
+#define CODA9_MODE_DECODE_VPX 7
+#define CODA9_MODE_ENCODE_H264 8
+#define CODA9_MODE_ENCODE_MP4 11
+#define CODA9_MODE_ENCODE_MJPG 13
+#define CODA_MODE_INVALID 0xffff
+#define CODA_REG_BIT_INT_ENABLE 0x170
+#define CODA_INT_INTERRUPT_ENABLE (1 << 3)
+#define CODA_REG_BIT_INT_REASON 0x174
+#define CODA7_REG_BIT_RUN_AUX_STD 0x178
+#define CODA_MP4_AUX_MPEG4 0
+#define CODA_MP4_AUX_DIVX3 1
+#define CODA_VPX_AUX_THO 0
+#define CODA_VPX_AUX_VP6 1
+#define CODA_VPX_AUX_VP8 2
+#define CODA_H264_AUX_AVC 0
+#define CODA_H264_AUX_MVC 1
+
+/*
+ * Commands' mailbox:
+ * registers with offsets in the range 0x180-0x1d0
+ * have different meanings depending on the command being
+ * issued.
+ */
+
+/* Decoder Sequence Initialization */
+#define CODA_CMD_DEC_SEQ_BB_START 0x180
+#define CODA_CMD_DEC_SEQ_BB_SIZE 0x184
+#define CODA_CMD_DEC_SEQ_OPTION 0x188
+#define CODA_NO_INT_ENABLE (1 << 10)
+#define CODA_REORDER_ENABLE (1 << 1)
+#define CODADX6_QP_REPORT (1 << 0)
+#define CODA7_MP4_DEBLK_ENABLE (1 << 0)
+#define CODA_CMD_DEC_SEQ_SRC_SIZE 0x18c
+#define CODA_CMD_DEC_SEQ_START_BYTE 0x190
+#define CODA_CMD_DEC_SEQ_PS_BB_START 0x194
+#define CODA_CMD_DEC_SEQ_PS_BB_SIZE 0x198
+#define CODA_CMD_DEC_SEQ_JPG_THUMB_EN 0x19c
+#define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS 0x19c
+#define CODA_MP4_CLASS_MPEG4 0
+#define CODA_CMD_DEC_SEQ_X264_MV_EN 0x19c
+#define CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE 0x1a0
+
+#define CODA7_RET_DEC_SEQ_ASPECT 0x1b0
+#define CODA9_RET_DEC_SEQ_BITRATE 0x1b4
+#define CODA_RET_DEC_SEQ_SUCCESS 0x1c0
+#define CODA_RET_DEC_SEQ_SRC_FMT 0x1c4 /* SRC_SIZE on CODA7 */
+#define CODA_RET_DEC_SEQ_SRC_SIZE 0x1c4
+#define CODA_RET_DEC_SEQ_SRC_F_RATE 0x1c8
+#define CODA9_RET_DEC_SEQ_ASPECT 0x1c8
+#define CODA_RET_DEC_SEQ_FRAME_NEED 0x1cc
+#define CODA_RET_DEC_SEQ_FRAME_DELAY 0x1d0
+#define CODA_RET_DEC_SEQ_INFO 0x1d4
+#define CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT 0x1d8
+#define CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM 0x1dc
+#define CODA_RET_DEC_SEQ_NEXT_FRAME_NUM 0x1e0
+#define CODA_RET_DEC_SEQ_ERR_REASON 0x1e0
+#define CODA_RET_DEC_SEQ_FRATE_NR 0x1e4
+#define CODA_RET_DEC_SEQ_FRATE_DR 0x1e8
+#define CODA_RET_DEC_SEQ_JPG_PARA 0x1e4
+#define CODA_RET_DEC_SEQ_JPG_THUMB_IND 0x1e8
+#define CODA9_RET_DEC_SEQ_HEADER_REPORT 0x1ec
+
+/* Decoder Picture Run */
+#define CODA_CMD_DEC_PIC_ROT_MODE 0x180
+#define CODA_CMD_DEC_PIC_ROT_ADDR_Y 0x184
+#define CODA9_CMD_DEC_PIC_ROT_INDEX 0x184
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CB 0x188
+#define CODA9_CMD_DEC_PIC_ROT_ADDR_Y 0x188
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CR 0x18c
+#define CODA9_CMD_DEC_PIC_ROT_ADDR_CB 0x18c
+#define CODA_CMD_DEC_PIC_ROT_STRIDE 0x190
+#define CODA9_CMD_DEC_PIC_ROT_ADDR_CR 0x190
+#define CODA9_CMD_DEC_PIC_ROT_STRIDE 0x1b8
+
+#define CODA_CMD_DEC_PIC_OPTION 0x194
+#define CODA_PRE_SCAN_EN (1 << 0)
+#define CODA_PRE_SCAN_MODE_DECODE (0 << 1)
+#define CODA_PRE_SCAN_MODE_RETURN (1 << 1)
+#define CODA_IFRAME_SEARCH_EN (1 << 2)
+#define CODA_SKIP_FRAME_MODE (0x3 << 3)
+#define CODA_CMD_DEC_PIC_SKIP_NUM 0x198
+#define CODA_CMD_DEC_PIC_CHUNK_SIZE 0x19c
+#define CODA_CMD_DEC_PIC_BB_START 0x1a0
+#define CODA_CMD_DEC_PIC_START_BYTE 0x1a4
+#define CODA_RET_DEC_PIC_SIZE 0x1bc
+#define CODA_RET_DEC_PIC_FRAME_NUM 0x1c0
+#define CODA_RET_DEC_PIC_FRAME_IDX 0x1c4
+#define CODA_RET_DEC_PIC_ERR_MB 0x1c8
+#define CODA_RET_DEC_PIC_TYPE 0x1cc
+#define CODA_PIC_TYPE_MASK 0x7
+#define CODA_PIC_TYPE_MASK_VC1 0x3f
+#define CODA9_PIC_TYPE_FIRST_MASK (0x7 << 3)
+#define CODA9_PIC_TYPE_IDR_MASK (0x3 << 6)
+#define CODA7_PIC_TYPE_H264_NPF_MASK (0x3 << 16)
+#define CODA7_PIC_TYPE_INTERLACED (1 << 18)
+#define CODA_RET_DEC_PIC_POST 0x1d0
+#define CODA_RET_DEC_PIC_MVC_REPORT 0x1d0
+#define CODA_RET_DEC_PIC_OPTION 0x1d4
+#define CODA_RET_DEC_PIC_SUCCESS 0x1d8
+#define CODA_RET_DEC_PIC_CUR_IDX 0x1dc
+#define CODA_RET_DEC_PIC_CROP_LEFT_RIGHT 0x1e0
+#define CODA_RET_DEC_PIC_CROP_TOP_BOTTOM 0x1e4
+#define CODA_RET_DEC_PIC_FRAME_NEED 0x1ec
+
+#define CODA9_RET_DEC_PIC_VP8_PIC_REPORT 0x1e8
+#define CODA9_RET_DEC_PIC_ASPECT 0x1f0
+#define CODA9_RET_DEC_PIC_VP8_SCALE_INFO 0x1f0
+#define CODA9_RET_DEC_PIC_FRATE_NR 0x1f4
+#define CODA9_RET_DEC_PIC_FRATE_DR 0x1f8
+
+/* Encoder Sequence Initialization */
+#define CODA_CMD_ENC_SEQ_BB_START 0x180
+#define CODA_CMD_ENC_SEQ_BB_SIZE 0x184
+#define CODA_CMD_ENC_SEQ_OPTION 0x188
+#define CODA7_OPTION_AVCINTRA16X16ONLY_OFFSET 9
+#define CODA9_OPTION_MVC_PREFIX_NAL_OFFSET 9
+#define CODA7_OPTION_GAMMA_OFFSET 8
+#define CODA9_OPTION_MVC_PARASET_REFRESH_OFFSET 8
+#define CODA7_OPTION_RCQPMAX_OFFSET 7
+#define CODA9_OPTION_GAMMA_OFFSET 7
+#define CODADX6_OPTION_GAMMA_OFFSET 7
+#define CODA7_OPTION_RCQPMIN_OFFSET 6
+#define CODA9_OPTION_RCQPMAX_OFFSET 6
+#define CODA_OPTION_LIMITQP_OFFSET 6
+#define CODA_OPTION_RCINTRAQP_OFFSET 5
+#define CODA_OPTION_FMO_OFFSET 4
+#define CODA9_OPTION_MVC_INTERVIEW_OFFSET 4
+#define CODA_OPTION_AVC_AUD_OFFSET 2
+#define CODA_OPTION_SLICEREPORT_OFFSET 1
+#define CODA_CMD_ENC_SEQ_COD_STD 0x18c
+#define CODA_STD_MPEG4 0
+#define CODA9_STD_H264 0
+#define CODA_STD_H263 1
+#define CODA_STD_H264 2
+#define CODA9_STD_MPEG4 3
+
+#define CODA_CMD_ENC_SEQ_SRC_SIZE 0x190
+#define CODA7_PICWIDTH_OFFSET 16
+#define CODA7_PICWIDTH_MASK 0xffff
+#define CODADX6_PICWIDTH_OFFSET 10
+#define CODADX6_PICWIDTH_MASK 0x3ff
+#define CODA_PICHEIGHT_OFFSET 0
+#define CODADX6_PICHEIGHT_MASK 0x3ff
+#define CODA7_PICHEIGHT_MASK 0xffff
+#define CODA_CMD_ENC_SEQ_SRC_F_RATE 0x194
+#define CODA_FRATE_RES_OFFSET 0
+#define CODA_FRATE_RES_MASK 0xffff
+#define CODA_FRATE_DIV_OFFSET 16
+#define CODA_FRATE_DIV_MASK 0xffff
+#define CODA_CMD_ENC_SEQ_MP4_PARA 0x198
+#define CODA_MP4PARAM_VERID_OFFSET 6
+#define CODA_MP4PARAM_VERID_MASK 0x01
+#define CODA_MP4PARAM_INTRADCVLCTHR_OFFSET 2
+#define CODA_MP4PARAM_INTRADCVLCTHR_MASK 0x07
+#define CODA_MP4PARAM_REVERSIBLEVLCENABLE_OFFSET 1
+#define CODA_MP4PARAM_REVERSIBLEVLCENABLE_MASK 0x01
+#define CODA_MP4PARAM_DATAPARTITIONENABLE_OFFSET 0
+#define CODA_MP4PARAM_DATAPARTITIONENABLE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_263_PARA 0x19c
+#define CODA_263PARAM_ANNEXJENABLE_OFFSET 2
+#define CODA_263PARAM_ANNEXJENABLE_MASK 0x01
+#define CODA_263PARAM_ANNEXKENABLE_OFFSET 1
+#define CODA_263PARAM_ANNEXKENABLE_MASK 0x01
+#define CODA_263PARAM_ANNEXTENABLE_OFFSET 0
+#define CODA_263PARAM_ANNEXTENABLE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_264_PARA 0x1a0
+#define CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET 12
+#define CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK 0x0f
+#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET 8
+#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK 0x0f
+#define CODA_264PARAM_DISABLEDEBLK_OFFSET 6
+#define CODA_264PARAM_DISABLEDEBLK_MASK 0x03
+#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET 5
+#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK 0x01
+#define CODA_264PARAM_CHROMAQPOFFSET_OFFSET 0
+#define CODA_264PARAM_CHROMAQPOFFSET_MASK 0x1f
+#define CODA_CMD_ENC_SEQ_SLICE_MODE 0x1a4
+#define CODA_SLICING_SIZE_OFFSET 2
+#define CODA_SLICING_SIZE_MASK 0x3fffffff
+#define CODA_SLICING_UNIT_OFFSET 1
+#define CODA_SLICING_UNIT_MASK 0x01
+#define CODA_SLICING_MODE_OFFSET 0
+#define CODA_SLICING_MODE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_GOP_SIZE 0x1a8
+#define CODA_GOP_SIZE_OFFSET 0
+#define CODA_GOP_SIZE_MASK 0x3f
+#define CODA_CMD_ENC_SEQ_RC_PARA 0x1ac
+#define CODA_RATECONTROL_AUTOSKIP_OFFSET 31
+#define CODA_RATECONTROL_AUTOSKIP_MASK 0x01
+#define CODA_RATECONTROL_INITIALDELAY_OFFSET 16
+#define CODA_RATECONTROL_INITIALDELAY_MASK 0x7fff
+#define CODA_RATECONTROL_BITRATE_OFFSET 1
+#define CODA_RATECONTROL_BITRATE_MASK 0x7fff
+#define CODA_RATECONTROL_ENABLE_OFFSET 0
+#define CODA_RATECONTROL_ENABLE_MASK 0x01
+#define CODA_CMD_ENC_SEQ_RC_BUF_SIZE 0x1b0
+#define CODA_CMD_ENC_SEQ_INTRA_REFRESH 0x1b4
+#define CODADX6_CMD_ENC_SEQ_FMO 0x1b8
+#define CODA_FMOPARAM_TYPE_OFFSET 4
+#define CODA_FMOPARAM_TYPE_MASK 1
+#define CODA_FMOPARAM_SLICENUM_OFFSET 0
+#define CODA_FMOPARAM_SLICENUM_MASK 0x0f
+#define CODADX6_CMD_ENC_SEQ_INTRA_QP 0x1bc
+#define CODA7_CMD_ENC_SEQ_SEARCH_BASE 0x1b8
+#define CODA7_CMD_ENC_SEQ_SEARCH_SIZE 0x1bc
+#define CODA7_CMD_ENC_SEQ_INTRA_QP 0x1c4
+#define CODA_CMD_ENC_SEQ_RC_QP_MIN_MAX 0x1c8
+#define CODA_QPMIN_OFFSET 8
+#define CODA_QPMIN_MASK 0x3f
+#define CODA_QPMAX_OFFSET 0
+#define CODA_QPMAX_MASK 0x3f
+#define CODA_CMD_ENC_SEQ_RC_GAMMA 0x1cc
+#define CODA_GAMMA_OFFSET 0
+#define CODA_GAMMA_MASK 0xffff
+#define CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE 0x1d0
+#define CODA9_CMD_ENC_SEQ_INTRA_WEIGHT 0x1d4
+#define CODA9_CMD_ENC_SEQ_ME_OPTION 0x1d8
+#define CODA_RET_ENC_SEQ_SUCCESS 0x1c0
+
+#define CODA_CMD_ENC_SEQ_JPG_PARA 0x198
+#define CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL 0x19C
+#define CODA_CMD_ENC_SEQ_JPG_THUMB_EN 0x1a0
+#define CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE 0x1a4
+#define CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET 0x1a8
+
+/* Encoder Picture Run */
+#define CODA9_CMD_ENC_PIC_SRC_INDEX 0x180
+#define CODA9_CMD_ENC_PIC_SRC_STRIDE 0x184
+#define CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC 0x1a4
+#define CODA9_CMD_ENC_PIC_SRC_ADDR_Y 0x1a8
+#define CODA9_CMD_ENC_PIC_SRC_ADDR_CB 0x1ac
+#define CODA9_CMD_ENC_PIC_SRC_ADDR_CR 0x1b0
+#define CODA_CMD_ENC_PIC_SRC_ADDR_Y 0x180
+#define CODA_CMD_ENC_PIC_SRC_ADDR_CB 0x184
+#define CODA_CMD_ENC_PIC_SRC_ADDR_CR 0x188
+#define CODA_CMD_ENC_PIC_QS 0x18c
+#define CODA_CMD_ENC_PIC_ROT_MODE 0x190
+#define CODA_ROT_MIR_ENABLE (1 << 4)
+#define CODA_ROT_0 (0x0 << 0)
+#define CODA_ROT_90 (0x1 << 0)
+#define CODA_ROT_180 (0x2 << 0)
+#define CODA_ROT_270 (0x3 << 0)
+#define CODA_MIR_NONE (0x0 << 2)
+#define CODA_MIR_VER (0x1 << 2)
+#define CODA_MIR_HOR (0x2 << 2)
+#define CODA_MIR_VER_HOR (0x3 << 2)
+#define CODA_CMD_ENC_PIC_OPTION 0x194
+#define CODA_FORCE_IPICTURE BIT(1)
+#define CODA_REPORT_MB_INFO BIT(3)
+#define CODA_REPORT_MV_INFO BIT(4)
+#define CODA_REPORT_SLICE_INFO BIT(5)
+#define CODA_CMD_ENC_PIC_BB_START 0x198
+#define CODA_CMD_ENC_PIC_BB_SIZE 0x19c
+#define CODA_RET_ENC_FRAME_NUM 0x1c0
+#define CODA_RET_ENC_PIC_TYPE 0x1c4
+#define CODA_RET_ENC_PIC_FRAME_IDX 0x1c8
+#define CODA_RET_ENC_PIC_SLICE_NUM 0x1cc
+#define CODA_RET_ENC_PIC_FLAG 0x1d0
+#define CODA_RET_ENC_PIC_SUCCESS 0x1d8
+
+/* Set Frame Buffer */
+#define CODA_CMD_SET_FRAME_BUF_NUM 0x180
+#define CODA_CMD_SET_FRAME_BUF_STRIDE 0x184
+#define CODA_CMD_SET_FRAME_SLICE_BB_START 0x188
+#define CODA_CMD_SET_FRAME_SLICE_BB_SIZE 0x18c
+#define CODA9_CMD_SET_FRAME_SUBSAMP_A 0x188
+#define CODA9_CMD_SET_FRAME_SUBSAMP_B 0x18c
+#define CODA7_CMD_SET_FRAME_AXI_BIT_ADDR 0x190
+#define CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR 0x194
+#define CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR 0x198
+#define CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR 0x19c
+#define CODA7_CMD_SET_FRAME_AXI_OVL_ADDR 0x1a0
+#define CODA7_CMD_SET_FRAME_MAX_DEC_SIZE 0x1a4
+#define CODA9_CMD_SET_FRAME_AXI_BTP_ADDR 0x1a4
+#define CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE 0x1a8
+#define CODA9_CMD_SET_FRAME_CACHE_SIZE 0x1a8
+#define CODA9_CMD_SET_FRAME_CACHE_CONFIG 0x1ac
+#define CODA9_CACHE_BYPASS_OFFSET 28
+#define CODA9_CACHE_DUALCONF_OFFSET 26
+#define CODA9_CACHE_PAGEMERGE_OFFSET 24
+#define CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET 16
+#define CODA9_CACHE_CB_BUFFER_SIZE_OFFSET 8
+#define CODA9_CACHE_CR_BUFFER_SIZE_OFFSET 0
+#define CODA9_CMD_SET_FRAME_SUBSAMP_A_MVC 0x1b0
+#define CODA9_CMD_SET_FRAME_SUBSAMP_B_MVC 0x1b4
+#define CODA9_CMD_SET_FRAME_DP_BUF_BASE 0x1b0
+#define CODA9_CMD_SET_FRAME_DP_BUF_SIZE 0x1b4
+#define CODA9_CMD_SET_FRAME_MAX_DEC_SIZE 0x1b8
+#define CODA9_CMD_SET_FRAME_DELAY 0x1bc
+
+/* Encoder Header */
+#define CODA_CMD_ENC_HEADER_CODE 0x180
+#define CODA_GAMMA_OFFSET 0
+#define CODA_HEADER_H264_SPS 0
+#define CODA_HEADER_H264_PPS 1
+#define CODA_HEADER_MP4V_VOL 0
+#define CODA_HEADER_MP4V_VOS 1
+#define CODA_HEADER_MP4V_VIS 2
+#define CODA9_HEADER_FRAME_CROP (1 << 3)
+#define CODA_CMD_ENC_HEADER_BB_START 0x184
+#define CODA_CMD_ENC_HEADER_BB_SIZE 0x188
+#define CODA9_CMD_ENC_HEADER_FRAME_CROP_H 0x18c
+#define CODA9_CMD_ENC_HEADER_FRAME_CROP_V 0x190
+
+/* Get Version */
+#define CODA_CMD_FIRMWARE_VERNUM 0x1c0
+#define CODA_FIRMWARE_PRODUCT(x) (((x) >> 16) & 0xffff)
+#define CODA_FIRMWARE_MAJOR(x) (((x) >> 12) & 0x0f)
+#define CODA_FIRMWARE_MINOR(x) (((x) >> 8) & 0x0f)
+#define CODA_FIRMWARE_RELEASE(x) ((x) & 0xff)
+#define CODA_FIRMWARE_VERNUM(product, major, minor, release) \
+ ((product) << 16 | ((major) << 12) | \
+ ((minor) << 8) | (release))
+#define CODA9_CMD_FIRMWARE_CODE_REV 0x1c4
+
+#define CODA9_GDMA_BASE 0x1000
+#define CODA9_GDI_WPROT_ERR_CLR (CODA9_GDMA_BASE + 0x0a0)
+#define CODA9_GDI_WPROT_RGN_EN (CODA9_GDMA_BASE + 0x0ac)
+
+#define CODA9_GDI_BUS_CTRL (CODA9_GDMA_BASE + 0x0f0)
+#define CODA9_GDI_BUS_STATUS (CODA9_GDMA_BASE + 0x0f4)
+
+#define CODA9_GDI_XY2_CAS_0 (CODA9_GDMA_BASE + 0x800)
+#define CODA9_GDI_XY2_CAS_F (CODA9_GDMA_BASE + 0x83c)
+
+#define CODA9_GDI_XY2_BA_0 (CODA9_GDMA_BASE + 0x840)
+#define CODA9_GDI_XY2_BA_1 (CODA9_GDMA_BASE + 0x844)
+#define CODA9_GDI_XY2_BA_2 (CODA9_GDMA_BASE + 0x848)
+#define CODA9_GDI_XY2_BA_3 (CODA9_GDMA_BASE + 0x84c)
+
+#define CODA9_GDI_XY2_RAS_0 (CODA9_GDMA_BASE + 0x850)
+#define CODA9_GDI_XY2_RAS_F (CODA9_GDMA_BASE + 0x88c)
+
+#define CODA9_GDI_XY2_RBC_CONFIG (CODA9_GDMA_BASE + 0x890)
+#define CODA9_XY2RBC_SEPARATE_MAP BIT(19)
+#define CODA9_XY2RBC_TOP_BOT_SPLIT BIT(18)
+#define CODA9_XY2RBC_TILED_MAP BIT(17)
+#define CODA9_XY2RBC_CA_INC_HOR BIT(16)
+#define CODA9_GDI_RBC2_AXI_0 (CODA9_GDMA_BASE + 0x8a0)
+#define CODA9_GDI_RBC2_AXI_1F (CODA9_GDMA_BASE + 0x91c)
+#define CODA9_GDI_TILEDBUF_BASE (CODA9_GDMA_BASE + 0x920)
+
+#endif
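
The CODA_FIRMWARE_* macros in the "Get Version" block above pack a 16-bit product ID and a
major.minor.release triple into one 32-bit word read back from the firmware. A quick standalone
check of the packing and unpacking (plain C, mirroring those macros; the product ID 0xf020
corresponds to CODA_960 in coda.h, and the version numbers are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define FIRMWARE_PRODUCT(x)  (((x) >> 16) & 0xffff)
    #define FIRMWARE_MAJOR(x)    (((x) >> 12) & 0x0f)
    #define FIRMWARE_MINOR(x)    (((x) >> 8) & 0x0f)
    #define FIRMWARE_RELEASE(x)  ((x) & 0xff)
    #define FIRMWARE_VERNUM(product, major, minor, release) \
            ((product) << 16 | ((major) << 12) | ((minor) << 8) | (release))

    int main(void)
    {
        uint32_t v = FIRMWARE_VERNUM(0xf020u, 3, 1, 5);

        printf("word 0x%08x -> product 0x%04x, version %u.%u.%u\n",
               (unsigned)v, (unsigned)FIRMWARE_PRODUCT(v), (unsigned)FIRMWARE_MAJOR(v),
               (unsigned)FIRMWARE_MINOR(v), (unsigned)FIRMWARE_RELEASE(v));
        return 0;
    }
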
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
new file mode 100644
index 000000000..36d50c3f9
--- /dev/null
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -0,0 +1,362 @@
+/*
+ * i.MX6 Video Data Order Adapter (VDOA)
+ *
+ * Copyright (C) 2014 Philipp Zabel
+ * Copyright (C) 2016 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include "imx-vdoa.h"
+
+#define VDOA_NAME "imx-vdoa"
+
+#define VDOAC 0x00
+#define VDOASRR 0x04
+#define VDOAIE 0x08
+#define VDOAIST 0x0c
+#define VDOAFP 0x10
+#define VDOAIEBA00 0x14
+#define VDOAIEBA01 0x18
+#define VDOAIEBA02 0x1c
+#define VDOAIEBA10 0x20
+#define VDOAIEBA11 0x24
+#define VDOAIEBA12 0x28
+#define VDOASL 0x2c
+#define VDOAIUBO 0x30
+#define VDOAVEBA0 0x34
+#define VDOAVEBA1 0x38
+#define VDOAVEBA2 0x3c
+#define VDOAVUBO 0x40
+#define VDOASR 0x44
+
+#define VDOAC_ISEL BIT(6)
+#define VDOAC_PFS BIT(5)
+#define VDOAC_SO BIT(4)
+#define VDOAC_SYNC BIT(3)
+#define VDOAC_NF BIT(2)
+#define VDOAC_BNDM_MASK 0x3
+#define VDOAC_BAND_HEIGHT_8 0x0
+#define VDOAC_BAND_HEIGHT_16 0x1
+#define VDOAC_BAND_HEIGHT_32 0x2
+
+#define VDOASRR_START BIT(1)
+#define VDOASRR_SWRST BIT(0)
+
+#define VDOAIE_EITERR BIT(1)
+#define VDOAIE_EIEOT BIT(0)
+
+#define VDOAIST_TERR BIT(1)
+#define VDOAIST_EOT BIT(0)
+
+#define VDOAFP_FH_MASK (0x1fff << 16)
+#define VDOAFP_FW_MASK (0x3fff)
+
+#define VDOASL_VSLY_MASK (0x3fff << 16)
+#define VDOASL_ISLY_MASK (0x7fff)
+
+#define VDOASR_ERRW BIT(4)
+#define VDOASR_EOB BIT(3)
+#define VDOASR_CURRENT_FRAME (0x3 << 1)
+#define VDOASR_CURRENT_BUFFER BIT(1)
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct vdoa_data {
+ struct vdoa_ctx *curr_ctx;
+ struct device *dev;
+ struct clk *vdoa_clk;
+ void __iomem *regs;
+};
+
+struct vdoa_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+ u32 pixelformat;
+};
+
+struct vdoa_ctx {
+ struct vdoa_data *vdoa;
+ struct completion completion;
+ struct vdoa_q_data q_data[2];
+ unsigned int submitted_job;
+ unsigned int completed_job;
+};
+
+static irqreturn_t vdoa_irq_handler(int irq, void *data)
+{
+ struct vdoa_data *vdoa = data;
+ struct vdoa_ctx *curr_ctx;
+ u32 val;
+
+ /* Disable interrupts */
+ writel(0, vdoa->regs + VDOAIE);
+
+ curr_ctx = vdoa->curr_ctx;
+ if (!curr_ctx) {
+ dev_warn(vdoa->dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_HANDLED;
+ }
+
+ val = readl(vdoa->regs + VDOAIST);
+ writel(val, vdoa->regs + VDOAIST);
+ if (val & VDOAIST_TERR) {
+ val = readl(vdoa->regs + VDOASR) & VDOASR_ERRW;
+ dev_err(vdoa->dev, "AXI %s error\n", val ? "write" : "read");
+ } else if (!(val & VDOAIST_EOT)) {
+ dev_warn(vdoa->dev, "Spurious interrupt\n");
+ }
+ curr_ctx->completed_job++;
+ complete(&curr_ctx->completion);
+
+ return IRQ_HANDLED;
+}
+
+int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
+{
+ struct vdoa_data *vdoa = ctx->vdoa;
+
+ if (ctx->submitted_job == ctx->completed_job)
+ return 0;
+
+ if (!wait_for_completion_timeout(&ctx->completion,
+ msecs_to_jiffies(300))) {
+ dev_err(vdoa->dev,
+ "Timeout waiting for transfer result\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vdoa_wait_for_completion);
+
+void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src)
+{
+ struct vdoa_q_data *src_q_data, *dst_q_data;
+ struct vdoa_data *vdoa = ctx->vdoa;
+ u32 val;
+
+ if (vdoa->curr_ctx)
+ vdoa_wait_for_completion(vdoa->curr_ctx);
+
+ vdoa->curr_ctx = ctx;
+
+ reinit_completion(&ctx->completion);
+ ctx->submitted_job++;
+
+ src_q_data = &ctx->q_data[V4L2_M2M_SRC];
+ dst_q_data = &ctx->q_data[V4L2_M2M_DST];
+
+ /* Progressive, no sync, 1 frame per run */
+ if (dst_q_data->pixelformat == V4L2_PIX_FMT_YUYV)
+ val = VDOAC_PFS;
+ else
+ val = 0;
+ writel(val, vdoa->regs + VDOAC);
+
+ writel(dst_q_data->height << 16 | dst_q_data->width,
+ vdoa->regs + VDOAFP);
+
+ val = dst;
+ writel(val, vdoa->regs + VDOAIEBA00);
+
+ writel(src_q_data->bytesperline << 16 | dst_q_data->bytesperline,
+ vdoa->regs + VDOASL);
+
+ if (dst_q_data->pixelformat == V4L2_PIX_FMT_NV12 ||
+ dst_q_data->pixelformat == V4L2_PIX_FMT_NV21)
+ val = dst_q_data->bytesperline * dst_q_data->height;
+ else
+ val = 0;
+ writel(val, vdoa->regs + VDOAIUBO);
+
+ val = src;
+ writel(val, vdoa->regs + VDOAVEBA0);
+ val = round_up(src_q_data->bytesperline * src_q_data->height, 4096);
+ writel(val, vdoa->regs + VDOAVUBO);
+
+ /* Enable interrupts and start transfer */
+ writel(VDOAIE_EITERR | VDOAIE_EIEOT, vdoa->regs + VDOAIE);
+ writel(VDOASRR_START, vdoa->regs + VDOASRR);
+}
+EXPORT_SYMBOL(vdoa_device_run);
+
+struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa)
+{
+ struct vdoa_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ err = clk_prepare_enable(vdoa->vdoa_clk);
+ if (err) {
+ kfree(ctx);
+ return NULL;
+ }
+
+ init_completion(&ctx->completion);
+ ctx->vdoa = vdoa;
+
+ return ctx;
+}
+EXPORT_SYMBOL(vdoa_context_create);
+
+void vdoa_context_destroy(struct vdoa_ctx *ctx)
+{
+ struct vdoa_data *vdoa = ctx->vdoa;
+
+ if (vdoa->curr_ctx == ctx) {
+ vdoa_wait_for_completion(vdoa->curr_ctx);
+ vdoa->curr_ctx = NULL;
+ }
+
+ clk_disable_unprepare(vdoa->vdoa_clk);
+ kfree(ctx);
+}
+EXPORT_SYMBOL(vdoa_context_destroy);
+
+int vdoa_context_configure(struct vdoa_ctx *ctx,
+ unsigned int width, unsigned int height,
+ u32 pixelformat)
+{
+ struct vdoa_q_data *src_q_data;
+ struct vdoa_q_data *dst_q_data;
+
+ if (width < 16 || width > 8192 || width % 16 != 0 ||
+ height < 16 || height > 4096 || height % 16 != 0)
+ return -EINVAL;
+
+ if (pixelformat != V4L2_PIX_FMT_YUYV &&
+ pixelformat != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+
+ /* If no context is passed, only check if the format is valid */
+ if (!ctx)
+ return 0;
+
+ src_q_data = &ctx->q_data[V4L2_M2M_SRC];
+ dst_q_data = &ctx->q_data[V4L2_M2M_DST];
+
+ src_q_data->width = width;
+ src_q_data->height = height;
+ src_q_data->bytesperline = width;
+ src_q_data->sizeimage =
+ round_up(src_q_data->bytesperline * height, 4096) +
+ src_q_data->bytesperline * height / 2;
+
+ dst_q_data->width = width;
+ dst_q_data->height = height;
+ dst_q_data->pixelformat = pixelformat;
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ dst_q_data->bytesperline = width * 2;
+ dst_q_data->sizeimage = dst_q_data->bytesperline * height;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ default:
+ dst_q_data->bytesperline = width;
+ dst_q_data->sizeimage =
+ dst_q_data->bytesperline * height * 3 / 2;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vdoa_context_configure);
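
vdoa_context_configure() above derives the buffer sizes from the frame geometry: the source plane
is a luma plane rounded up to a 4 KiB boundary plus a half-size chroma plane, and the destination
is either packed YUYV (two bytes per pixel) or NV12 (12 bits per pixel). A standalone sketch of
the same arithmetic for a 1920x1088 frame (plain C; round_up_pow2 is a local stand-in for the
kernel's round_up() macro):

    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    static unsigned int round_up_pow2(unsigned int x, unsigned int a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        unsigned int width = 1920, height = 1088;    /* multiples of 16, as required */
        unsigned int src_bpl = width;
        unsigned int src_size = round_up_pow2(src_bpl * height, 4096) +
                                src_bpl * height / 2;
        unsigned int dst_yuyv = width * 2 * height;
        unsigned int dst_nv12 = width * height * 3 / 2;

        printf("source plane:  %u bytes\n", src_size);
        printf("YUYV output:   %u bytes\n", dst_yuyv);
        printf("NV12 output:   %u bytes\n", dst_nv12);
        return 0;
    }
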
+
+static int vdoa_probe(struct platform_device *pdev)
+{
+ struct vdoa_data *vdoa;
+ struct resource *res;
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "DMA enable failed\n");
+ return ret;
+ }
+
+ vdoa = devm_kzalloc(&pdev->dev, sizeof(*vdoa), GFP_KERNEL);
+ if (!vdoa)
+ return -ENOMEM;
+
+ vdoa->dev = &pdev->dev;
+
+ vdoa->vdoa_clk = devm_clk_get(vdoa->dev, NULL);
+ if (IS_ERR(vdoa->vdoa_clk)) {
+ dev_err(vdoa->dev, "Failed to get clock\n");
+ return PTR_ERR(vdoa->vdoa_clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vdoa->regs = devm_ioremap_resource(vdoa->dev, res);
+ if (IS_ERR(vdoa->regs))
+ return PTR_ERR(vdoa->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -EINVAL;
+ ret = devm_request_threaded_irq(&pdev->dev, res->start, NULL,
+ vdoa_irq_handler, IRQF_ONESHOT,
+ "vdoa", vdoa);
+ if (ret < 0) {
+ dev_err(vdoa->dev, "Failed to get irq\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, vdoa);
+
+ return 0;
+}
+
+static int vdoa_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id vdoa_dt_ids[] = {
+ { .compatible = "fsl,imx6q-vdoa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
+
+static struct platform_driver vdoa_driver = {
+ .probe = vdoa_probe,
+ .remove = vdoa_remove,
+ .driver = {
+ .name = VDOA_NAME,
+ .of_match_table = vdoa_dt_ids,
+ },
+};
+
+module_platform_driver(vdoa_driver);
+
+MODULE_DESCRIPTION("Video Data Order Adapter");
+MODULE_AUTHOR("Philipp Zabel <philipp.zabel@gmail.com>");
+MODULE_ALIAS("platform:imx-vdoa");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/coda/imx-vdoa.h b/drivers/media/platform/coda/imx-vdoa.h
new file mode 100644
index 000000000..967576b2a
--- /dev/null
+++ b/drivers/media/platform/coda/imx-vdoa.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IMX_VDOA_H
+#define IMX_VDOA_H
+
+struct vdoa_data;
+struct vdoa_ctx;
+
+#if (defined CONFIG_VIDEO_IMX_VDOA || defined CONFIG_VIDEO_IMX_VDOA_MODULE)
+
+struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa);
+int vdoa_context_configure(struct vdoa_ctx *ctx,
+ unsigned int width, unsigned int height,
+ u32 pixelformat);
+void vdoa_context_destroy(struct vdoa_ctx *ctx);
+
+void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src);
+int vdoa_wait_for_completion(struct vdoa_ctx *ctx);
+
+#else
+
+static inline struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa)
+{
+ return NULL;
+}
+
+static inline int vdoa_context_configure(struct vdoa_ctx *ctx,
+ unsigned int width,
+ unsigned int height,
+ u32 pixelformat)
+{
+ return 0;
+}
+
+static inline void vdoa_context_destroy(struct vdoa_ctx *ctx) { };
+
+static inline void vdoa_device_run(struct vdoa_ctx *ctx,
+ dma_addr_t dst, dma_addr_t src) { };
+
+static inline int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
+{
+ return 0;
+};
+
+#endif
+
+#endif /* IMX_VDOA_H */
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
new file mode 100644
index 000000000..ca671e315
--- /dev/null
+++ b/drivers/media/platform/coda/trace.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM coda
+
+#if !defined(__CODA_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __CODA_TRACE_H__
+
+#include <linux/tracepoint.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "coda.h"
+
+TRACE_EVENT(coda_bit_run,
+ TP_PROTO(struct coda_ctx *ctx, int cmd),
+
+ TP_ARGS(ctx, cmd),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, ctx)
+ __field(int, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->ctx = ctx->idx;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("minor = %d, ctx = %d, cmd = %d",
+ __entry->minor, __entry->ctx, __entry->cmd)
+);
+
+TRACE_EVENT(coda_bit_done,
+ TP_PROTO(struct coda_ctx *ctx),
+
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, ctx = %d", __entry->minor, __entry->ctx)
+);
+
+DECLARE_EVENT_CLASS(coda_buf_class,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
+
+ TP_ARGS(ctx, buf),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, index)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->index = buf->vb2_buf.index;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, index = %d, ctx = %d",
+ __entry->minor, __entry->index, __entry->ctx)
+);
+
+DEFINE_EVENT(coda_buf_class, coda_enc_pic_run,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
+ TP_ARGS(ctx, buf)
+);
+
+DEFINE_EVENT(coda_buf_class, coda_enc_pic_done,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
+ TP_ARGS(ctx, buf)
+);
+
+DECLARE_EVENT_CLASS(coda_buf_meta_class,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
+ struct coda_buffer_meta *meta),
+
+ TP_ARGS(ctx, buf, meta),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, index)
+ __field(int, start)
+ __field(int, end)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->index = buf->vb2_buf.index;
+ __entry->start = meta->start;
+ __entry->end = meta->end;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, index = %d, start = 0x%x, end = 0x%x, ctx = %d",
+ __entry->minor, __entry->index, __entry->start, __entry->end,
+ __entry->ctx)
+);
+
+DEFINE_EVENT(coda_buf_meta_class, coda_bit_queue,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
+ struct coda_buffer_meta *meta),
+ TP_ARGS(ctx, buf, meta)
+);
+
+DECLARE_EVENT_CLASS(coda_meta_class,
+ TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
+
+ TP_ARGS(ctx, meta),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, start)
+ __field(int, end)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->start = meta ? meta->start : 0;
+ __entry->end = meta ? meta->end : 0;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, start = 0x%x, end = 0x%x, ctx = %d",
+ __entry->minor, __entry->start, __entry->end, __entry->ctx)
+);
+
+DEFINE_EVENT(coda_meta_class, coda_dec_pic_run,
+ TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
+ TP_ARGS(ctx, meta)
+);
+
+DEFINE_EVENT(coda_meta_class, coda_dec_pic_done,
+ TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
+ TP_ARGS(ctx, meta)
+);
+
+DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
+ struct coda_buffer_meta *meta),
+ TP_ARGS(ctx, buf, meta)
+);
+
+#endif /* __CODA_TRACE_H__ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/platform/cros-ec-cec/Makefile b/drivers/media/platform/cros-ec-cec/Makefile
new file mode 100644
index 000000000..9ce97f93f
--- /dev/null
+++ b/drivers/media/platform/cros-ec-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_CROS_EC_CEC) += cros-ec-cec.o
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
new file mode 100644
index 000000000..7bc4d8a9a
--- /dev/null
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CEC driver for ChromeOS Embedded Controller
+ *
+ * Copyright (c) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/cec.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+
+#define DRV_NAME "cros-ec-cec"
+
+/**
+ * struct cros_ec_cec - Driver data for EC CEC
+ *
+ * @cros_ec: Pointer to EC device
+ * @notifier: Notifier info for responding to EC events
+ * @adap: CEC adapter
+ * @notify: CEC notifier pointer
+ * @rx_msg: storage for a received message
+ */
+struct cros_ec_cec {
+ struct cros_ec_device *cros_ec;
+ struct notifier_block notifier;
+ struct cec_adapter *adap;
+ struct cec_notifier *notify;
+ struct cec_msg rx_msg;
+};
+
+static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
+{
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ uint8_t *cec_message = cros_ec->event_data.data.cec_message;
+ unsigned int len = cros_ec->event_size;
+
+ cros_ec_cec->rx_msg.len = len;
+ memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
+
+ cec_received_msg(cros_ec_cec->adap, &cros_ec_cec->rx_msg);
+}
+
+static void handle_cec_event(struct cros_ec_cec *cros_ec_cec)
+{
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ uint32_t events = cros_ec->event_data.data.cec_events;
+
+ if (events & EC_MKBP_CEC_SEND_OK)
+ cec_transmit_attempt_done(cros_ec_cec->adap,
+ CEC_TX_STATUS_OK);
+
+ /* FW takes care of all retries, tell core to avoid more retries */
+ if (events & EC_MKBP_CEC_SEND_FAILED)
+ cec_transmit_attempt_done(cros_ec_cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES |
+ CEC_TX_STATUS_NACK);
+}
+
+static int cros_ec_cec_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_cec *cros_ec_cec;
+ struct cros_ec_device *cros_ec;
+
+ cros_ec_cec = container_of(nb, struct cros_ec_cec, notifier);
+ cros_ec = cros_ec_cec->cros_ec;
+
+ if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_EVENT) {
+ handle_cec_event(cros_ec_cec);
+ return NOTIFY_OK;
+ }
+
+ if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_MESSAGE) {
+ handle_cec_message(cros_ec_cec);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_set data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_SET;
+ msg.msg.outsize = sizeof(msg.data);
+ msg.data.cmd = CEC_CMD_LOGICAL_ADDRESS;
+ msg.data.val = logical_addr;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error setting CEC logical address on EC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *cec_msg)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_write data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_WRITE_MSG;
+ msg.msg.outsize = cec_msg->len;
+ memcpy(msg.data.msg, cec_msg->msg, cec_msg->len);
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error writing CEC msg on EC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_set data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_SET;
+ msg.msg.outsize = sizeof(msg.data);
+ msg.data.cmd = CEC_CMD_ENABLE;
+ msg.data.val = enable;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error %sabling CEC on EC: %d\n",
+ (enable ? "en" : "dis"), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct cec_adap_ops cros_ec_cec_ops = {
+ .adap_enable = cros_ec_cec_adap_enable,
+ .adap_log_addr = cros_ec_cec_set_log_addr,
+ .adap_transmit = cros_ec_cec_transmit,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_cec_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(cros_ec_cec->cros_ec->irq);
+
+ return 0;
+}
+
+static int cros_ec_cec_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(cros_ec_cec->cros_ec->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops,
+ cros_ec_cec_suspend, cros_ec_cec_resume);
+
+#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI)
+
+/*
+ * The firmware only handles a single CEC interface, tied to a single HDMI
+ * connector, which we specify along with the name of the DRM device that
+ * drives the HDMI output.
+ */
+
+struct cec_dmi_match {
+ char *sys_vendor;
+ char *product_name;
+ char *devname;
+ char *conn;
+};
+
+static const struct cec_dmi_match cec_dmi_match_table[] = {
+ /* Google Fizz */
+ { "Google", "Fizz", "0000:00:02.0", "Port B" },
+};
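
Support for a new board is added by extending cec_dmi_match_table with the board's DMI vendor and
product strings, the PCI device name of the display controller, and the connector label used by
the DRM driver. A standalone sketch of the lookup, with one hypothetical second entry (the
"ExampleVendor"/"ExampleBoard" strings and "Port A" connector are made up, not a supported
platform):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct cec_dmi_match {
        const char *sys_vendor;
        const char *product_name;
        const char *devname;
        const char *conn;
    };

    static const struct cec_dmi_match table[] = {
        /* Google Fizz, as in the driver */
        { "Google", "Fizz", "0000:00:02.0", "Port B" },
        /* Hypothetical second board, placeholder strings only */
        { "ExampleVendor", "ExampleBoard", "0000:00:02.0", "Port A" },
    };

    int main(void)
    {
        /* Stand-ins for the DMI_SYS_VENDOR / DMI_PRODUCT_NAME values. */
        const char *vendor = "Google", *product = "Fizz";
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (!strcmp(table[i].sys_vendor, vendor) &&
                !strcmp(table[i].product_name, product)) {
                printf("CEC notifier: %s on %s\n", table[i].conn, table[i].devname);
                return 0;
            }
        }
        printf("CEC notifier not configured for this hardware\n");
        return 0;
    }
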
+
+static int cros_ec_cec_get_notifier(struct device *dev,
+ struct cec_notifier **notify)
+{
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(cec_dmi_match_table) ; ++i) {
+ const struct cec_dmi_match *m = &cec_dmi_match_table[i];
+
+ if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) &&
+ dmi_match(DMI_PRODUCT_NAME, m->product_name)) {
+ struct device *d;
+
+ /* Find the device, bail out if not yet registered */
+ d = bus_find_device_by_name(&pci_bus_type, NULL,
+ m->devname);
+ if (!d)
+ return -EPROBE_DEFER;
+
+ *notify = cec_notifier_get_conn(d, m->conn);
+ return 0;
+ }
+ }
+
+ /* Hardware support must be added in the cec_dmi_match_table */
+ dev_warn(dev, "CEC notifier not configured for this hardware\n");
+
+ return -ENODEV;
+}
+
+#else
+
+static int cros_ec_cec_get_notifier(struct device *dev,
+ struct cec_notifier **notify)
+{
+ return -ENODEV;
+}
+
+#endif
+
+static int cros_ec_cec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct cros_ec_cec *cros_ec_cec;
+ int ret;
+
+ cros_ec_cec = devm_kzalloc(&pdev->dev, sizeof(*cros_ec_cec),
+ GFP_KERNEL);
+ if (!cros_ec_cec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cros_ec_cec);
+ cros_ec_cec->cros_ec = cros_ec;
+
+ ret = cros_ec_cec_get_notifier(&pdev->dev, &cros_ec_cec->notify);
+ if (ret)
+ return ret;
+
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize wakeup\n");
+ return ret;
+ }
+
+ cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
+ DRV_NAME, CEC_CAP_DEFAULTS, 1);
+ if (IS_ERR(cros_ec_cec->adap))
+ return PTR_ERR(cros_ec_cec->adap);
+
+ /* Get CEC events from the EC. */
+ cros_ec_cec->notifier.notifier_call = cros_ec_cec_event;
+ ret = blocking_notifier_chain_register(&cros_ec->event_notifier,
+ &cros_ec_cec->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier\n");
+ cec_delete_adapter(cros_ec_cec->adap);
+ return ret;
+ }
+
+ ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_delete_adapter(cros_ec_cec->adap);
+ return ret;
+ }
+
+ cec_register_cec_notifier(cros_ec_cec->adap, cros_ec_cec->notify);
+
+ return 0;
+}
+
+static int cros_ec_cec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_cec *cros_ec_cec = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(
+ &cros_ec_cec->cros_ec->event_notifier,
+ &cros_ec_cec->notifier);
+
+ if (ret) {
+ dev_err(dev, "failed to unregister notifier\n");
+ return ret;
+ }
+
+ cec_unregister_adapter(cros_ec_cec->adap);
+
+ if (cros_ec_cec->notify)
+ cec_notifier_put(cros_ec_cec->notify);
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_cec_driver = {
+ .probe = cros_ec_cec_probe,
+ .remove = cros_ec_cec_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_ec_cec_pm_ops,
+ },
+};
+
+module_platform_driver(cros_ec_cec_driver);
+
+MODULE_DESCRIPTION("CEC driver for ChromeOS ECs");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
new file mode 100644
index 000000000..06b5e581f
--- /dev/null
+++ b/drivers/media/platform/davinci/Kconfig
@@ -0,0 +1,91 @@
+config VIDEO_DAVINCI_VPIF_DISPLAY
+ tristate "TI DaVinci VPIF V4L2-Display driver"
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_ADV7343 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_THS7303 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Enables Davinci VPIF module used for display devices.
+ This module is used for display on TI DM6467/DA850/OMAPL138
+ SoCs.
+
+ To compile this driver as a module, choose M here. There will
+ be two modules called vpif.ko and vpif_display.ko
+
+config VIDEO_DAVINCI_VPIF_CAPTURE
+ tristate "TI DaVinci VPIF video capture driver"
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ Enables Davinci VPIF module used for capture devices.
+ This module is used for capture on TI DM6467/DA850/OMAPL138
+ SoCs.
+
+ To compile this driver as a module, choose M here. There will
+ be two modules called vpif.ko and vpif_capture.ko
+
+config VIDEO_DM6446_CCDC
+ tristate "TI DM6446 CCDC video capture driver"
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces
+ with decoder modules such as TVP5146 over BT656 or
+ sensor module such as MT9T001 over a raw interface. This
+ module configures the interface and CCDC/ISIF to do
+ video frame capture from slave decoders.
+
+ To compile this driver as a module, choose M here. There will
+ be three modules called vpfe_capture.ko, vpss.ko and dm644x_ccdc.ko
+
+config VIDEO_DM355_CCDC
+ tristate "TI DM355 CCDC video capture driver"
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables DM355 CCD hw module. DM355 CCDC hw interfaces
+ with decoder modules such as TVP5146 over BT656 or
+ sensor module such as MT9T001 over a raw interface. This
+ module configures the interface and CCDC/ISIF to do
+	  video frame capture from slave decoders.
+
+ To compile this driver as a module, choose M here. There will
+ be three modules called vpfe_capture.ko, vpss.ko and dm355_ccdc.ko
+
+config VIDEO_DM365_ISIF
+ tristate "TI DM365 ISIF video capture driver"
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables ISIF hw module. This is the hardware module for
+ configuring ISIF in VPFE to capture Raw Bayer RGB data from
+	  an image sensor or YUV data from a YUV source.
+
+ To compile this driver as a module, choose M here. There will
+ be three modules called vpfe_capture.ko, vpss.ko and isif.ko
+
+config VIDEO_DAVINCI_VPBE_DISPLAY
+ tristate "TI DaVinci VPBE V4L2-Display driver"
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Enables Davinci VPBE module used for display devices.
+ This module is used for display on TI DM644x/DM365/DM355
+ based display devices.
+
+ To compile this driver as a module, choose M here. There will
+ be five modules created called vpss.ko, vpbe.ko, vpbe_osd.ko,
+ vpbe_venc.ko and vpbe_display.ko
diff --git a/drivers/media/platform/davinci/Makefile b/drivers/media/platform/davinci/Makefile
new file mode 100644
index 000000000..05c45bf37
--- /dev/null
+++ b/drivers/media/platform/davinci/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the davinci video device drivers.
+#
+
+#VPIF Display driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif.o vpif_display.o
+#VPIF Capture driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif.o vpif_capture.o
+
+# Capture: DM6446 and DM355
+obj-$(CONFIG_VIDEO_DM6446_CCDC) += vpfe_capture.o vpss.o dm644x_ccdc.o
+obj-$(CONFIG_VIDEO_DM355_CCDC) += vpfe_capture.o vpss.o dm355_ccdc.o
+obj-$(CONFIG_VIDEO_DM365_ISIF) += vpfe_capture.o vpss.o isif.o
+obj-$(CONFIG_VIDEO_DAVINCI_VPBE_DISPLAY) += vpss.o vpbe.o vpbe_osd.o \
+ vpbe_venc.o vpbe_display.o
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
new file mode 100644
index 000000000..3482178cb
--- /dev/null
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ccdc device API
+ */
+#ifndef _CCDC_HW_DEVICE_H
+#define _CCDC_HW_DEVICE_H
+
+#ifdef __KERNEL__
+#include <linux/videodev2.h>
+#include <linux/device.h>
+#include <media/davinci/vpfe_types.h>
+#include <media/davinci/ccdc_types.h>
+
+/*
+ * ccdc hw operations
+ */
+struct ccdc_hw_ops {
+ /* Pointer to initialize function to initialize ccdc device */
+ int (*open) (struct device *dev);
+ /* Pointer to deinitialize function */
+ int (*close) (struct device *dev);
+ /* set ccdc base address */
+ void (*set_ccdc_base)(void *base, int size);
+ /* Pointer to function to enable or disable ccdc */
+ void (*enable) (int en);
+ /* reset sbl. only for 6446 */
+ void (*reset) (void);
+ /* enable output to sdram */
+ void (*enable_out_to_sdram) (int en);
+ /* Pointer to function to set hw parameters */
+ int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
+ /* get interface parameters */
+ int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
+ /* Pointer to function to configure ccdc */
+ int (*configure) (void);
+
+ /* Pointer to function to set buffer type */
+ int (*set_buftype) (enum ccdc_buftype buf_type);
+ /* Pointer to function to get buffer type */
+ enum ccdc_buftype (*get_buftype) (void);
+ /* Pointer to function to set frame format */
+ int (*set_frame_format) (enum ccdc_frmfmt frm_fmt);
+ /* Pointer to function to get frame format */
+ enum ccdc_frmfmt (*get_frame_format) (void);
+ /* enumerate hw pix formats */
+ int (*enum_pix)(u32 *hw_pix, int i);
+ /* Pointer to function to set buffer type */
+ u32 (*get_pixel_format) (void);
+ /* Pointer to function to get pixel format. */
+ int (*set_pixel_format) (u32 pixfmt);
+ /* Pointer to function to set image window */
+ int (*set_image_window) (struct v4l2_rect *win);
+ /* Pointer to function to set image window */
+ void (*get_image_window) (struct v4l2_rect *win);
+ /* Pointer to function to get line length */
+ unsigned int (*get_line_length) (void);
+
+ /* Pointer to function to set frame buffer address */
+ void (*setfbaddr) (unsigned long addr);
+ /* Pointer to function to get field id */
+ int (*getfid) (void);
+};
+
+struct ccdc_hw_device {
+ /* ccdc device name */
+ char name[32];
+ /* module owner */
+ struct module *owner;
+ /* hw ops */
+ struct ccdc_hw_ops hw_ops;
+};
+
+/* Used by CCDC module to register & unregister with vpfe capture driver */
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev);
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev);
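+
+/*
+ * Registration happens from the CCDC module's platform driver probe
+ * (e.g. dm355_ccdc_probe() in dm355_ccdc.c); the vpfe capture driver then
+ * drives the hardware only through the hw_ops callbacks above.
+ */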
+
+#endif /* __KERNEL__ */
+#endif /* _CCDC_HW_DEVICE_H */
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
new file mode 100644
index 000000000..238d01b7f
--- /dev/null
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -0,0 +1,944 @@
+/*
+ * Copyright (C) 2005-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * CCDC hardware module for DM355
+ * ------------------------------
+ *
+ * This module is for configuring the DM355 CCD controller of the VPFE to
+ * capture raw YUV or Bayer RGB data from a decoder. CCDC has several modules
+ * such as Defect Pixel Correction, Color Space Conversion etc to
+ * pre-process the Bayer RGB data, before writing it to SDRAM.
+ *
+ * TODO: 1) Raw bayer parameter settings and bayer capture
+ * 2) Split module parameter structure to module specific ioctl structs
+ * 3) add support for lens shading correction
+ * 4) investigate whether the enums used for user space type definitions
+ * should be replaced by #defines or integers
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <media/davinci/dm355_ccdc.h>
+#include <media/davinci/vpss.h>
+
+#include "dm355_ccdc_regs.h"
+#include "ccdc_hw_device.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCDC Driver for DM355");
+MODULE_AUTHOR("Texas Instruments");
+
+static struct ccdc_oper_config {
+ struct device *dev;
+ /* CCDC interface type */
+ enum vpfe_hw_if_type if_type;
+ /* Raw Bayer configuration */
+ struct ccdc_params_raw bayer;
+ /* YCbCr configuration */
+ struct ccdc_params_ycbcr ycbcr;
+ /* ccdc base address */
+ void __iomem *base_addr;
+} ccdc_cfg = {
+ /* Raw configurations */
+ .bayer = {
+ .pix_fmt = CCDC_PIXFMT_RAW,
+ .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+ .win = CCDC_WIN_VGA,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .gain = {
+ .r_ye = 256,
+ .gb_g = 256,
+ .gr_cy = 256,
+ .b_mg = 256
+ },
+ .config_params = {
+ .datasft = 2,
+ .mfilt1 = CCDC_NO_MEDIAN_FILTER1,
+ .mfilt2 = CCDC_NO_MEDIAN_FILTER2,
+ .alaw = {
+ .gamma_wd = 2,
+ },
+ .blk_clamp = {
+ .sample_pixel = 1,
+ .dc_sub = 25
+ },
+ .col_pat_field0 = {
+ .olop = CCDC_GREEN_BLUE,
+ .olep = CCDC_BLUE,
+ .elop = CCDC_RED,
+ .elep = CCDC_GREEN_RED
+ },
+ .col_pat_field1 = {
+ .olop = CCDC_GREEN_BLUE,
+ .olep = CCDC_BLUE,
+ .elop = CCDC_RED,
+ .elep = CCDC_GREEN_RED
+ },
+ },
+ },
+ /* YCbCr configuration */
+ .ycbcr = {
+ .win = CCDC_WIN_PAL,
+ .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+ .frm_fmt = CCDC_FRMFMT_INTERLACED,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .bt656_enable = 1,
+ .pix_order = CCDC_PIXORDER_CBYCRY,
+ .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
+ },
+};
+
+
+/* Raw Bayer formats */
+static u32 ccdc_raw_bayer_pix_formats[] =
+ {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static u32 ccdc_raw_yuv_pix_formats[] =
+ {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+ return __raw_readl(ccdc_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+ __raw_writel(val, ccdc_cfg.base_addr + offset);
+}
+
+static void ccdc_enable(int en)
+{
+ unsigned int temp;
+ temp = regr(SYNCEN);
+ temp &= (~CCDC_SYNCEN_VDHDEN_MASK);
+ temp |= (en & CCDC_SYNCEN_VDHDEN_MASK);
+ regw(temp, SYNCEN);
+}
+
+static void ccdc_enable_output_to_sdram(int en)
+{
+ unsigned int temp;
+ temp = regr(SYNCEN);
+ temp &= (~(CCDC_SYNCEN_WEN_MASK));
+ temp |= ((en << CCDC_SYNCEN_WEN_SHIFT) & CCDC_SYNCEN_WEN_MASK);
+ regw(temp, SYNCEN);
+}
+
+static void ccdc_config_gain_offset(void)
+{
+ /* configure gain */
+ regw(ccdc_cfg.bayer.gain.r_ye, RYEGAIN);
+ regw(ccdc_cfg.bayer.gain.gr_cy, GRCYGAIN);
+ regw(ccdc_cfg.bayer.gain.gb_g, GBGGAIN);
+ regw(ccdc_cfg.bayer.gain.b_mg, BMGGAIN);
+ /* configure offset */
+ regw(ccdc_cfg.bayer.ccdc_offset, OFFSET);
+}
+
+/*
+ * ccdc_restore_defaults()
+ * This function restores the power-on defaults in the CCDC registers
+ */
+static int ccdc_restore_defaults(void)
+{
+ int i;
+
+ dev_dbg(ccdc_cfg.dev, "\nstarting ccdc_restore_defaults...");
+ /* set all registers to zero */
+ for (i = 0; i <= CCDC_REG_LAST; i += 4)
+ regw(0, i);
+
+ /* now override the values with power on defaults in registers */
+ regw(MODESET_DEFAULT, MODESET);
+ /* no culling support */
+ regw(CULH_DEFAULT, CULH);
+ regw(CULV_DEFAULT, CULV);
+ /* Set default Gain and Offset */
+ ccdc_cfg.bayer.gain.r_ye = GAIN_DEFAULT;
+ ccdc_cfg.bayer.gain.gb_g = GAIN_DEFAULT;
+ ccdc_cfg.bayer.gain.gr_cy = GAIN_DEFAULT;
+ ccdc_cfg.bayer.gain.b_mg = GAIN_DEFAULT;
+ ccdc_config_gain_offset();
+ regw(OUTCLIP_DEFAULT, OUTCLIP);
+ regw(LSCCFG2_DEFAULT, LSCCFG2);
+ /* select ccdc input */
+ if (vpss_select_ccdc_source(VPSS_CCDCIN)) {
+ dev_dbg(ccdc_cfg.dev, "\ncouldn't select ccdc input source");
+ return -EFAULT;
+ }
+ /* select ccdc clock */
+ if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0) {
+ dev_dbg(ccdc_cfg.dev, "\ncouldn't enable ccdc clock");
+ return -EFAULT;
+ }
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_restore_defaults...");
+ return 0;
+}
+
+static int ccdc_open(struct device *device)
+{
+ return ccdc_restore_defaults();
+}
+
+static int ccdc_close(struct device *device)
+{
+ /* disable clock */
+ vpss_enable_clock(VPSS_CCDC_CLOCK, 0);
+ /* do nothing for now */
+ return 0;
+}
+
+/*
+ * ccdc_setwin()
+ * This function will configure the window size to
+ * be captured in the CCDC registers.
+ */
+static void ccdc_setwin(struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt, int ppc)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int mid_img = 0;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
+
+ /*
+ * ppc - per pixel count. Indicates how many cells are output to SDRAM
+ * per pixel. For example, for YCbCr it is one Y and one C, so 2; for
+ * raw capture it is 1.
+ */
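+ /*
+ * Example: a 720 pixel wide YCbCr window at left = 0 with ppc = 2 gives
+ * horz_start = 0 and horz_nr_pixels = (720 << 1) - 1 = 1439.
+ */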
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
+
+ /* Writing the horizontal info into the registers */
+ regw(horz_start, SPH);
+ regw(horz_nr_pixels, NPH);
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ /* configure VDINT0 and VDINT1 */
+ regw(vert_start, VDINT0);
+ } else {
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /* configure VDINT0 and VDINT1 */
+ mid_img = vert_start + (image_win->height / 2);
+ regw(vert_start, VDINT0);
+ regw(mid_img, VDINT1);
+ }
+ regw(vert_start & CCDC_START_VER_ONE_MASK, SLV0);
+ regw(vert_start & CCDC_START_VER_TWO_MASK, SLV1);
+ regw(vert_nr_lines & CCDC_NUM_LINES_VER, NLV);
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
+}
+
+/* This function will configure CCDC for YCbCr video capture */
+static void ccdc_config_ycbcr(void)
+{
+ struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
+ u32 temp;
+
+ /* first set the CCDC power on defaults values in all registers */
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
+ ccdc_restore_defaults();
+
+ /* configure pixel format & video frame format */
+ temp = (((params->pix_fmt & CCDC_INPUT_MODE_MASK) <<
+ CCDC_INPUT_MODE_SHIFT) |
+ ((params->frm_fmt & CCDC_FRM_FMT_MASK) <<
+ CCDC_FRM_FMT_SHIFT));
+
+ /* setup BT.656 sync mode */
+ if (params->bt656_enable) {
+ regw(CCDC_REC656IF_BT656_EN, REC656IF);
+ /*
+ * configure the FID, VD, HD pin polarity fld,hd pol positive,
+ * vd negative, 8-bit pack mode
+ */
+ temp |= CCDC_VD_POL_NEGATIVE;
+ } else { /* y/c external sync mode */
+ temp |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
+ CCDC_FID_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) <<
+ CCDC_HD_POL_SHIFT) |
+ ((params->vd_pol & CCDC_VD_POL_MASK) <<
+ CCDC_VD_POL_SHIFT));
+ }
+
+ /* pack the data to 8-bit */
+ temp |= CCDC_DATA_PACK_ENABLE;
+
+ regw(temp, MODESET);
+
+ /* configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, 2);
+
+ /* configure the order of y cb cr in SD-RAM */
+ temp = (params->pix_order << CCDC_Y8POS_SHIFT);
+ temp |= CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC;
+ regw(temp, CCDCFG);
+
+ /*
+ * configure the horizontal line offset. This is done by rounding up
+ * width to a multiple of 16 pixels and multiply by two to account for
+ * y:cb:cr 4:2:2 data
+ */
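+ /* e.g. a 720 pixel wide line gives (720 * 2 + 31) >> 5 = 45 blocks of 32 bytes */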
+ regw(((params->win.width * 2 + 31) >> 5), HSIZE);
+
+ /* configure the memory line offset */
+ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
+ /* two fields are interleaved in memory */
+ regw(CCDC_SDOFST_FIELD_INTERLEAVED, SDOFST);
+ }
+
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
+}
+
+/*
+ * ccdc_config_black_clamp()
+ * configure parameters for Optical Black Clamp
+ */
+static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
+{
+ u32 val;
+
+ if (!bclamp->b_clamp_enable) {
+ /* configure DCSub */
+ regw(bclamp->dc_sub & CCDC_BLK_DC_SUB_MASK, DCSUB);
+ regw(0x0000, CLAMP);
+ return;
+ }
+ /* Enable the Black clamping, set sample lines and pixels */
+ val = (bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) |
+ ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
+ CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE;
+ regw(val, CLAMP);
+
+ /* If black clamping is enabled then make DCSub 0 */
+ val = (bclamp->sample_ln & CCDC_NUM_LINE_CALC_MASK)
+ << CCDC_NUM_LINE_CALC_SHIFT;
+ regw(val, DCSUB);
+}
+
+/*
+ * ccdc_config_black_compense()
+ * configure parameters for Black Compensation
+ */
+static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
+{
+ u32 val;
+
+ val = (bcomp->b & CCDC_BLK_COMP_MASK) |
+ ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GB_COMP_SHIFT);
+ regw(val, BLKCMP1);
+
+ val = ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GR_COMP_SHIFT) |
+ ((bcomp->r & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_R_COMP_SHIFT);
+ regw(val, BLKCMP0);
+}
+
+/*
+ * ccdc_write_dfc_entry()
+ * write an entry in the dfc table.
+ */
+static int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc)
+{
+/* TODO This is to be re-visited and adjusted */
+#define DFC_WRITE_WAIT_COUNT 1000
+ u32 val, count = DFC_WRITE_WAIT_COUNT;
+
+ regw(dfc->dft_corr_vert[index], DFCMEM0);
+ regw(dfc->dft_corr_horz[index], DFCMEM1);
+ regw(dfc->dft_corr_sub1[index], DFCMEM2);
+ regw(dfc->dft_corr_sub2[index], DFCMEM3);
+ regw(dfc->dft_corr_sub3[index], DFCMEM4);
+ /* set WR bit to write */
+ val = regr(DFCMEMCTL) | CCDC_DFCMEMCTL_DFCMWR_MASK;
+ regw(val, DFCMEMCTL);
+
+ /*
+ * Assume, it is very short. If we get an error, we need to
+ * adjust this value
+ */
+ while ((regr(DFCMEMCTL) & CCDC_DFCMEMCTL_DFCMWR_MASK) && count)
+ count--;
+ /*
+ * TODO We expect the count to be non-zero to be successful. Adjust
+ * the count if write requires more time
+ */
+
+ if (!count) {
+ dev_err(ccdc_cfg.dev, "defect table write timeout !!!\n");
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * ccdc_config_vdfc()
+ * configure parameters for Vertical Defect Correction
+ */
+static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc)
+{
+ u32 val;
+ int i;
+
+ /* Configure General Defect Correction. The table used is from IPIPE */
+ val = dfc->gen_dft_en & CCDC_DFCCTL_GDFCEN_MASK;
+
+ /* Configure Vertical Defect Correction if needed */
+ if (!dfc->ver_dft_en) {
+ /* Enable only General Defect Correction */
+ regw(val, DFCCTL);
+ return 0;
+ }
+
+ if (dfc->table_size > CCDC_DFT_TABLE_SIZE)
+ return -EINVAL;
+
+ val |= CCDC_DFCCTL_VDFC_DISABLE;
+ val |= (dfc->dft_corr_ctl.vdfcsl & CCDC_DFCCTL_VDFCSL_MASK) <<
+ CCDC_DFCCTL_VDFCSL_SHIFT;
+ val |= (dfc->dft_corr_ctl.vdfcuda & CCDC_DFCCTL_VDFCUDA_MASK) <<
+ CCDC_DFCCTL_VDFCUDA_SHIFT;
+ val |= (dfc->dft_corr_ctl.vdflsft & CCDC_DFCCTL_VDFLSFT_MASK) <<
+ CCDC_DFCCTL_VDFLSFT_SHIFT;
+ regw(val , DFCCTL);
+
+ /* clear address ptr to offset 0 */
+ val = CCDC_DFCMEMCTL_DFCMARST_MASK << CCDC_DFCMEMCTL_DFCMARST_SHIFT;
+
+ /* write defect table entries */
+ for (i = 0; i < dfc->table_size; i++) {
+ /* increment address for non zero index */
+ if (i != 0)
+ val = CCDC_DFCMEMCTL_INC_ADDR;
+ regw(val, DFCMEMCTL);
+ if (ccdc_write_dfc_entry(i, dfc) < 0)
+ return -EFAULT;
+ }
+
+ /* update saturation level and enable dfc */
+ regw(dfc->saturation_ctl & CCDC_VDC_DFCVSAT_MASK, DFCVSAT);
+ val = regr(DFCCTL) | (CCDC_DFCCTL_VDFCEN_MASK <<
+ CCDC_DFCCTL_VDFCEN_SHIFT);
+ regw(val, DFCCTL);
+ return 0;
+}
+
+/*
+ * ccdc_config_csc()
+ * configure parameters for color space conversion
+ * Each register CSCM0-7 has two values in S8Q5 format.
+ */
+static void ccdc_config_csc(struct ccdc_csc *csc)
+{
+ u32 val1 = 0, val2;
+ int i;
+
+ if (!csc->enable)
+ return;
+
+ /* Enable the CSC sub-module */
+ regw(CCDC_CSC_ENABLE, CSCCTL);
+
+ /* Converting the co-eff as per the format of the register */
+ for (i = 0; i < CCDC_CSC_COEFF_TABLE_SIZE; i++) {
+ if ((i % 2) == 0) {
+ /* CSCM - LSB */
+ val1 = (csc->coeff[i].integer &
+ CCDC_CSC_COEF_INTEG_MASK)
+ << CCDC_CSC_COEF_INTEG_SHIFT;
+ /*
+ * convert decimal part to binary. Use 2 decimal
+ * precision, user values range from .00 - 0.99
+ */
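+ /* e.g. integer 1, decimal 25 -> (1 << 5) | (25 * 32) / 100 = 0x28 */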
+ val1 |= (((csc->coeff[i].decimal &
+ CCDC_CSC_COEF_DECIMAL_MASK) *
+ CCDC_CSC_DEC_MAX) / 100);
+ } else {
+
+ /* CSCM - MSB */
+ val2 = (csc->coeff[i].integer &
+ CCDC_CSC_COEF_INTEG_MASK)
+ << CCDC_CSC_COEF_INTEG_SHIFT;
+ val2 |= (((csc->coeff[i].decimal &
+ CCDC_CSC_COEF_DECIMAL_MASK) *
+ CCDC_CSC_DEC_MAX) / 100);
+ val2 <<= CCDC_CSCM_MSB_SHIFT;
+ val2 |= val1;
+ regw(val2, (CSCM0 + ((i - 1) << 1)));
+ }
+ }
+}
+
+/*
+ * ccdc_config_color_patterns()
+ * configure parameters for color patterns
+ */
+static void ccdc_config_color_patterns(struct ccdc_col_pat *pat0,
+ struct ccdc_col_pat *pat1)
+{
+ u32 val;
+
+ val = (pat0->olop | (pat0->olep << 2) | (pat0->elop << 4) |
+ (pat0->elep << 6) | (pat1->olop << 8) | (pat1->olep << 10) |
+ (pat1->elop << 12) | (pat1->elep << 14));
+ regw(val, COLPTN);
+}
+
+/* This function will configure CCDC for Raw mode image capture */
+static int ccdc_config_raw(void)
+{
+ struct ccdc_params_raw *params = &ccdc_cfg.bayer;
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int val;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
+
+ /* restore power on defaults to register */
+ ccdc_restore_defaults();
+
+ /* CCDCFG register:
+ * set CCD Not to swap input since input is RAW data
+ * set FID detection function to Latch at V-Sync
+ * set WENLOG - ccdc valid area to AND
+ * set TRGSEL to WENBIT
+ * set EXTRG to DISABLE
+ * disable latching function on VSYNC - shadowed registers
+ */
+ regw(CCDC_YCINSWP_RAW | CCDC_CCDCFG_FIDMD_LATCH_VSYNC |
+ CCDC_CCDCFG_WENLOG_AND | CCDC_CCDCFG_TRGSEL_WEN |
+ CCDC_CCDCFG_EXTRG_DISABLE | CCDC_LATCH_ON_VSYNC_DISABLE, CCDCFG);
+
+ /*
+ * Set VDHD direction to input, input type to raw input
+ * normal data polarity, do not use external WEN
+ */
+ val = (CCDC_VDHDOUT_INPUT | CCDC_RAW_IP_MODE | CCDC_DATAPOL_NORMAL |
+ CCDC_EXWEN_DISABLE);
+
+ /*
+ * Configure the vertical sync polarity (MODESET.VDPOL), horizontal
+ * sync polarity (MODESET.HDPOL), field id polarity (MODESET.FLDPOL),
+ * frame format(progressive or interlace), & pixel format (Input mode)
+ */
+ val |= (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
+ ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
+ ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
+ ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT));
+
+ /* set pack for alaw compression */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ val |= CCDC_DATA_PACK_ENABLE;
+
+ /* Configure for LPF */
+ if (config_params->lpf_enable)
+ val |= (config_params->lpf_enable & CCDC_LPF_MASK) <<
+ CCDC_LPF_SHIFT;
+
+ /* Configure the data shift */
+ val |= (config_params->datasft & CCDC_DATASFT_MASK) <<
+ CCDC_DATASFT_SHIFT;
+ regw(val , MODESET);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to MODESET...\n", val);
+
+ /* Configure the Median Filter threshold */
+ regw((config_params->med_filt_thres) & CCDC_MED_FILT_THRESH, MEDFILT);
+
+ /* Configure GAMMAWD register. Default 11-2, and Mosaic CFA pattern */
+ val = CCDC_GAMMA_BITS_11_2 << CCDC_GAMMAWD_INPUT_SHIFT |
+ CCDC_CFA_MOSAIC;
+
+ /* Enable and configure aLaw register if needed */
+ if (config_params->alaw.enable) {
+ val |= (CCDC_ALAW_ENABLE |
+ ((config_params->alaw.gamma_wd &
+ CCDC_ALAW_GAMMA_WD_MASK) <<
+ CCDC_GAMMAWD_INPUT_SHIFT));
+ }
+
+ /* Configure Median filter1 & filter2 */
+ val |= ((config_params->mfilt1 << CCDC_MFILT1_SHIFT) |
+ (config_params->mfilt2 << CCDC_MFILT2_SHIFT));
+
+ regw(val, GAMMAWD);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to GAMMAWD...\n", val);
+
+ /* configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, 1);
+
+ /* Optical Clamp Averaging */
+ ccdc_config_black_clamp(&config_params->blk_clamp);
+
+ /* Black level compensation */
+ ccdc_config_black_compense(&config_params->blk_comp);
+
+ /* Vertical Defect Correction if needed */
+ if (ccdc_config_vdfc(&config_params->vertical_dft) < 0)
+ return -EFAULT;
+
+ /* color space conversion */
+ ccdc_config_csc(&config_params->csc);
+
+ /* color pattern */
+ ccdc_config_color_patterns(&config_params->col_pat_field0,
+ &config_params->col_pat_field1);
+
+ /* Configure the Gain & offset control */
+ ccdc_config_gain_offset();
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to COLPTN...\n", val);
+
+ /* Configure DATAOFST register */
+ val = (config_params->data_offset.horz_offset & CCDC_DATAOFST_MASK) <<
+ CCDC_DATAOFST_H_SHIFT;
+ val |= (config_params->data_offset.vert_offset & CCDC_DATAOFST_MASK) <<
+ CCDC_DATAOFST_V_SHIFT;
+ regw(val, DATAOFST);
+
+ /* configuring HSIZE register */
+ val = (params->horz_flip_enable & CCDC_HSIZE_FLIP_MASK) <<
+ CCDC_HSIZE_FLIP_SHIFT;
+
+ /* If pack 8 is enabled then 1 pixel will take 1 byte */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable) {
+ val |= (((params->win.width) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK;
+
+ /* adjust to multiple of 32 */
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
+ (((params->win.width) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK);
+ } else {
+ /* else one pixel will take 2 bytes */
+ val |= (((params->win.width * 2) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK;
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
+ (((params->win.width * 2) + 31) >> 5) &
+ CCDC_HSIZE_VAL_MASK);
+ }
+ regw(val, HSIZE);
+
+ /* Configure SDOFST register */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_enable) {
+ /* For interlace inverse mode */
+ regw(CCDC_SDOFST_INTERLACE_INVERSE, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_INTERLACE_INVERSE);
+ } else {
+ /* For interlace non inverse mode */
+ regw(CCDC_SDOFST_INTERLACE_NORMAL, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_INTERLACE_NORMAL);
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ if (params->image_invert_enable) {
+ /* For progressive inverse mode */
+ regw(CCDC_SDOFST_PROGRESSIVE_INVERSE, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_PROGRESSIVE_INVERSE);
+ } else {
+ /* For progressive non inverse mode */
+ regw(CCDC_SDOFST_PROGRESSIVE_NORMAL, SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+ CCDC_SDOFST_PROGRESSIVE_NORMAL);
+ }
+ }
+ dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
+ return 0;
+}
+
+static int ccdc_configure(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_config_raw();
+ else
+ ccdc_config_ycbcr();
+ return 0;
+}
+
+static int ccdc_set_buftype(enum ccdc_buftype buf_type)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.buf_type = buf_type;
+ else
+ ccdc_cfg.ycbcr.buf_type = buf_type;
+ return 0;
+}
+
+static enum ccdc_buftype ccdc_get_buftype(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.buf_type;
+ return ccdc_cfg.ycbcr.buf_type;
+}
+
+static int ccdc_enum_pix(u32 *pix, int i)
+{
+ int ret = -EINVAL;
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
+ *pix = ccdc_raw_bayer_pix_formats[i];
+ ret = 0;
+ }
+ } else {
+ if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
+ *pix = ccdc_raw_yuv_pix_formats[i];
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+static int ccdc_set_pixel_format(u32 pixfmt)
+{
+ struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ if (pixfmt == V4L2_PIX_FMT_SBGGR8)
+ alaw->enable = 1;
+ else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
+ return -EINVAL;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 ccdc_get_pixel_format(void)
+{
+ struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+ u32 pixfmt;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ if (alaw->enable)
+ pixfmt = V4L2_PIX_FMT_SBGGR8;
+ else
+ pixfmt = V4L2_PIX_FMT_SBGGR16;
+ else {
+ if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+ return pixfmt;
+}
+
+static int ccdc_set_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.win = *win;
+ else
+ ccdc_cfg.ycbcr.win = *win;
+ return 0;
+}
+
+static void ccdc_get_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ *win = ccdc_cfg.bayer.win;
+ else
+ *win = ccdc_cfg.ycbcr.win;
+}
+
+static unsigned int ccdc_get_line_length(void)
+{
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int len;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if ((config_params->alaw.enable) ||
+ (config_params->data_sz == CCDC_DATA_8BITS))
+ len = ccdc_cfg.bayer.win.width;
+ else
+ len = ccdc_cfg.bayer.win.width * 2;
+ } else
+ len = ccdc_cfg.ycbcr.win.width * 2;
+ return ALIGN(len, 32);
+}
+
+static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+ return 0;
+}
+
+static enum ccdc_frmfmt ccdc_get_frame_format(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.frm_fmt;
+ else
+ return ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static int ccdc_getfid(void)
+{
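+ /* the current field id is reported in bit 15 of MODESET */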
+ return (regr(MODESET) >> 15) & 1;
+}
+
+/* misc operations */
+static inline void ccdc_setfbaddr(unsigned long addr)
+{
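+ /*
+ * bits 27-21 of the DMA address go to STADRH and bits 20-5 to STADRL,
+ * so the frame buffer must be 32 byte aligned
+ */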
+ regw((addr >> 21) & 0x007f, STADRH);
+ regw((addr >> 5) & 0x0ffff, STADRL);
+}
+
+static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+ ccdc_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_YCBCR_SYNC_16:
+ case VPFE_YCBCR_SYNC_8:
+ ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+ ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+ break;
+ default:
+ /* TODO add support for raw bayer here */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct ccdc_hw_device ccdc_hw_dev = {
+ .name = "DM355 CCDC",
+ .owner = THIS_MODULE,
+ .hw_ops = {
+ .open = ccdc_open,
+ .close = ccdc_close,
+ .enable = ccdc_enable,
+ .enable_out_to_sdram = ccdc_enable_output_to_sdram,
+ .set_hw_if_params = ccdc_set_hw_if_params,
+ .configure = ccdc_configure,
+ .set_buftype = ccdc_set_buftype,
+ .get_buftype = ccdc_get_buftype,
+ .enum_pix = ccdc_enum_pix,
+ .set_pixel_format = ccdc_set_pixel_format,
+ .get_pixel_format = ccdc_get_pixel_format,
+ .set_frame_format = ccdc_set_frame_format,
+ .get_frame_format = ccdc_get_frame_format,
+ .set_image_window = ccdc_set_image_window,
+ .get_image_window = ccdc_get_image_window,
+ .get_line_length = ccdc_get_line_length,
+ .setfbaddr = ccdc_setfbaddr,
+ .getfid = ccdc_getfid,
+ },
+};
+
+static int dm355_ccdc_probe(struct platform_device *pdev)
+{
+ void (*setup_pinmux)(void);
+ struct resource *res;
+ int status = 0;
+
+ /*
+ * first try to register with vpfe. If not correct platform, then we
+ * don't have to iomap
+ */
+ status = vpfe_register_ccdc_device(&ccdc_hw_dev);
+ if (status < 0)
+ return status;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ status = -ENODEV;
+ goto fail_nores;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nores;
+ }
+
+ ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ if (!ccdc_cfg.base_addr) {
+ status = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ /* Platform data holds setup_pinmux function ptr */
+ if (NULL == pdev->dev.platform_data) {
+ status = -ENODEV;
+ goto fail_nomap;
+ }
+ setup_pinmux = pdev->dev.platform_data;
+ /*
+ * setup Mux configuration for ccdc which may be different for
+ * different SoCs using this CCDC
+ */
+ setup_pinmux();
+ ccdc_cfg.dev = &pdev->dev;
+ printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
+ return 0;
+fail_nomap:
+ iounmap(ccdc_cfg.base_addr);
+fail_nomem:
+ release_mem_region(res->start, resource_size(res));
+fail_nores:
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return status;
+}
+
+static int dm355_ccdc_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ iounmap(ccdc_cfg.base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return 0;
+}
+
+static struct platform_driver dm355_ccdc_driver = {
+ .driver = {
+ .name = "dm355_ccdc",
+ },
+ .remove = dm355_ccdc_remove,
+ .probe = dm355_ccdc_probe,
+};
+
+module_platform_driver(dm355_ccdc_driver);
diff --git a/drivers/media/platform/davinci/dm355_ccdc_regs.h b/drivers/media/platform/davinci/dm355_ccdc_regs.h
new file mode 100644
index 000000000..20ba39076
--- /dev/null
+++ b/drivers/media/platform/davinci/dm355_ccdc_regs.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2005-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DM355_CCDC_REGS_H
+#define _DM355_CCDC_REGS_H
+
+/**************************************************************************\
+* Register OFFSET Definitions
+\**************************************************************************/
+#define SYNCEN 0x00
+#define MODESET 0x04
+#define HDWIDTH 0x08
+#define VDWIDTH 0x0c
+#define PPLN 0x10
+#define LPFR 0x14
+#define SPH 0x18
+#define NPH 0x1c
+#define SLV0 0x20
+#define SLV1 0x24
+#define NLV 0x28
+#define CULH 0x2c
+#define CULV 0x30
+#define HSIZE 0x34
+#define SDOFST 0x38
+#define STADRH 0x3c
+#define STADRL 0x40
+#define CLAMP 0x44
+#define DCSUB 0x48
+#define COLPTN 0x4c
+#define BLKCMP0 0x50
+#define BLKCMP1 0x54
+#define MEDFILT 0x58
+#define RYEGAIN 0x5c
+#define GRCYGAIN 0x60
+#define GBGGAIN 0x64
+#define BMGGAIN 0x68
+#define OFFSET 0x6c
+#define OUTCLIP 0x70
+#define VDINT0 0x74
+#define VDINT1 0x78
+#define RSV0 0x7c
+#define GAMMAWD 0x80
+#define REC656IF 0x84
+#define CCDCFG 0x88
+#define FMTCFG 0x8c
+#define FMTPLEN 0x90
+#define FMTSPH 0x94
+#define FMTLNH 0x98
+#define FMTSLV 0x9c
+#define FMTLNV 0xa0
+#define FMTRLEN 0xa4
+#define FMTHCNT 0xa8
+#define FMT_ADDR_PTR_B 0xac
+#define FMT_ADDR_PTR(i) (FMT_ADDR_PTR_B + (i * 4))
+#define FMTPGM_VF0 0xcc
+#define FMTPGM_VF1 0xd0
+#define FMTPGM_AP0 0xd4
+#define FMTPGM_AP1 0xd8
+#define FMTPGM_AP2 0xdc
+#define FMTPGM_AP3 0xe0
+#define FMTPGM_AP4 0xe4
+#define FMTPGM_AP5 0xe8
+#define FMTPGM_AP6 0xec
+#define FMTPGM_AP7 0xf0
+#define LSCCFG1 0xf4
+#define LSCCFG2 0xf8
+#define LSCH0 0xfc
+#define LSCV0 0x100
+#define LSCKH 0x104
+#define LSCKV 0x108
+#define LSCMEMCTL 0x10c
+#define LSCMEMD 0x110
+#define LSCMEMQ 0x114
+#define DFCCTL 0x118
+#define DFCVSAT 0x11c
+#define DFCMEMCTL 0x120
+#define DFCMEM0 0x124
+#define DFCMEM1 0x128
+#define DFCMEM2 0x12c
+#define DFCMEM3 0x130
+#define DFCMEM4 0x134
+#define CSCCTL 0x138
+#define CSCM0 0x13c
+#define CSCM1 0x140
+#define CSCM2 0x144
+#define CSCM3 0x148
+#define CSCM4 0x14c
+#define CSCM5 0x150
+#define CSCM6 0x154
+#define CSCM7 0x158
+#define DATAOFST 0x15c
+#define CCDC_REG_LAST DATAOFST
+/**************************************************************
+* Defines for various register bit masks and shifts for CCDC
+*
+**************************************************************/
+#define CCDC_RAW_IP_MODE 0
+#define CCDC_VDHDOUT_INPUT 0
+#define CCDC_YCINSWP_RAW (0 << 4)
+#define CCDC_EXWEN_DISABLE 0
+#define CCDC_DATAPOL_NORMAL 0
+#define CCDC_CCDCFG_FIDMD_LATCH_VSYNC 0
+#define CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC (1 << 6)
+#define CCDC_CCDCFG_WENLOG_AND 0
+#define CCDC_CCDCFG_TRGSEL_WEN 0
+#define CCDC_CCDCFG_EXTRG_DISABLE 0
+#define CCDC_CFA_MOSAIC 0
+#define CCDC_Y8POS_SHIFT 11
+
+#define CCDC_VDC_DFCVSAT_MASK 0x3fff
+#define CCDC_DATAOFST_MASK 0x0ff
+#define CCDC_DATAOFST_H_SHIFT 0
+#define CCDC_DATAOFST_V_SHIFT 8
+#define CCDC_GAMMAWD_CFA_MASK 1
+#define CCDC_GAMMAWD_CFA_SHIFT 5
+#define CCDC_GAMMAWD_INPUT_SHIFT 2
+#define CCDC_FID_POL_MASK 1
+#define CCDC_FID_POL_SHIFT 4
+#define CCDC_HD_POL_MASK 1
+#define CCDC_HD_POL_SHIFT 3
+#define CCDC_VD_POL_MASK 1
+#define CCDC_VD_POL_SHIFT 2
+#define CCDC_VD_POL_NEGATIVE (1 << 2)
+#define CCDC_FRM_FMT_MASK 1
+#define CCDC_FRM_FMT_SHIFT 7
+#define CCDC_DATA_SZ_MASK 7
+#define CCDC_DATA_SZ_SHIFT 8
+#define CCDC_VDHDOUT_MASK 1
+#define CCDC_VDHDOUT_SHIFT 0
+#define CCDC_EXWEN_MASK 1
+#define CCDC_EXWEN_SHIFT 5
+#define CCDC_INPUT_MODE_MASK 3
+#define CCDC_INPUT_MODE_SHIFT 12
+#define CCDC_PIX_FMT_MASK 3
+#define CCDC_PIX_FMT_SHIFT 12
+#define CCDC_DATAPOL_MASK 1
+#define CCDC_DATAPOL_SHIFT 6
+#define CCDC_WEN_ENABLE (1 << 1)
+#define CCDC_VDHDEN_ENABLE (1 << 16)
+#define CCDC_LPF_ENABLE (1 << 14)
+#define CCDC_ALAW_ENABLE 1
+#define CCDC_ALAW_GAMMA_WD_MASK 7
+#define CCDC_REC656IF_BT656_EN 3
+
+#define CCDC_FMTCFG_FMTMODE_MASK 3
+#define CCDC_FMTCFG_FMTMODE_SHIFT 1
+#define CCDC_FMTCFG_LNUM_MASK 3
+#define CCDC_FMTCFG_LNUM_SHIFT 4
+#define CCDC_FMTCFG_ADDRINC_MASK 7
+#define CCDC_FMTCFG_ADDRINC_SHIFT 8
+
+#define CCDC_CCDCFG_FIDMD_SHIFT 6
+#define CCDC_CCDCFG_WENLOG_SHIFT 8
+#define CCDC_CCDCFG_TRGSEL_SHIFT 9
+#define CCDC_CCDCFG_EXTRG_SHIFT 10
+#define CCDC_CCDCFG_MSBINVI_SHIFT 13
+
+#define CCDC_HSIZE_FLIP_SHIFT 12
+#define CCDC_HSIZE_FLIP_MASK 1
+#define CCDC_HSIZE_VAL_MASK 0xFFF
+#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
+#define CCDC_SDOFST_INTERLACE_INVERSE 0x4B6D
+#define CCDC_SDOFST_INTERLACE_NORMAL 0x0B6D
+#define CCDC_SDOFST_PROGRESSIVE_INVERSE 0x4000
+#define CCDC_SDOFST_PROGRESSIVE_NORMAL 0
+#define CCDC_START_PX_HOR_MASK 0x7FFF
+#define CCDC_NUM_PX_HOR_MASK 0x7FFF
+#define CCDC_START_VER_ONE_MASK 0x7FFF
+#define CCDC_START_VER_TWO_MASK 0x7FFF
+#define CCDC_NUM_LINES_VER 0x7FFF
+
+#define CCDC_BLK_CLAMP_ENABLE (1 << 15)
+#define CCDC_BLK_SGAIN_MASK 0x1F
+#define CCDC_BLK_ST_PXL_MASK 0x1FFF
+#define CCDC_BLK_SAMPLE_LN_MASK 3
+#define CCDC_BLK_SAMPLE_LN_SHIFT 13
+
+#define CCDC_NUM_LINE_CALC_MASK 3
+#define CCDC_NUM_LINE_CALC_SHIFT 14
+
+#define CCDC_BLK_DC_SUB_MASK 0x3FFF
+#define CCDC_BLK_COMP_MASK 0xFF
+#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
+#define CCDC_BLK_COMP_GR_COMP_SHIFT 0
+#define CCDC_BLK_COMP_R_COMP_SHIFT 8
+#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15)
+#define CCDC_LATCH_ON_VSYNC_ENABLE (0 << 15)
+#define CCDC_FPC_ENABLE (1 << 15)
+#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
+#define CCDC_DATA_PACK_ENABLE (1 << 11)
+#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
+#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
+#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
+#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
+#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
+#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
+#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
+
+#define CCDC_CSC_COEF_INTEG_MASK 7
+#define CCDC_CSC_COEF_DECIMAL_MASK 0x1f
+#define CCDC_CSC_COEF_INTEG_SHIFT 5
+#define CCDC_CSCM_MSB_SHIFT 8
+#define CCDC_CSC_ENABLE 1
+#define CCDC_CSC_DEC_MAX 32
+
+#define CCDC_MFILT1_SHIFT 10
+#define CCDC_MFILT2_SHIFT 8
+#define CCDC_MED_FILT_THRESH 0x3FFF
+#define CCDC_LPF_MASK 1
+#define CCDC_LPF_SHIFT 14
+#define CCDC_OFFSET_MASK 0x3FF
+#define CCDC_DATASFT_MASK 7
+#define CCDC_DATASFT_SHIFT 8
+
+#define CCDC_DF_ENABLE 1
+
+#define CCDC_FMTPLEN_P0_MASK 0xF
+#define CCDC_FMTPLEN_P1_MASK 0xF
+#define CCDC_FMTPLEN_P2_MASK 7
+#define CCDC_FMTPLEN_P3_MASK 7
+#define CCDC_FMTPLEN_P0_SHIFT 0
+#define CCDC_FMTPLEN_P1_SHIFT 4
+#define CCDC_FMTPLEN_P2_SHIFT 8
+#define CCDC_FMTPLEN_P3_SHIFT 12
+
+#define CCDC_FMTSPH_MASK 0x1FFF
+#define CCDC_FMTLNH_MASK 0x1FFF
+#define CCDC_FMTSLV_MASK 0x1FFF
+#define CCDC_FMTLNV_MASK 0x7FFF
+#define CCDC_FMTRLEN_MASK 0x1FFF
+#define CCDC_FMTHCNT_MASK 0x1FFF
+
+#define CCDC_ADP_INIT_MASK 0x1FFF
+#define CCDC_ADP_LINE_SHIFT 13
+#define CCDC_ADP_LINE_MASK 3
+#define CCDC_FMTPGN_APTR_MASK 7
+
+#define CCDC_DFCCTL_GDFCEN_MASK 1
+#define CCDC_DFCCTL_VDFCEN_MASK 1
+#define CCDC_DFCCTL_VDFC_DISABLE (0 << 4)
+#define CCDC_DFCCTL_VDFCEN_SHIFT 4
+#define CCDC_DFCCTL_VDFCSL_MASK 3
+#define CCDC_DFCCTL_VDFCSL_SHIFT 5
+#define CCDC_DFCCTL_VDFCUDA_MASK 1
+#define CCDC_DFCCTL_VDFCUDA_SHIFT 7
+#define CCDC_DFCCTL_VDFLSFT_MASK 3
+#define CCDC_DFCCTL_VDFLSFT_SHIFT 8
+#define CCDC_DFCMEMCTL_DFCMARST_MASK 1
+#define CCDC_DFCMEMCTL_DFCMARST_SHIFT 2
+#define CCDC_DFCMEMCTL_DFCMWR_MASK 1
+#define CCDC_DFCMEMCTL_DFCMWR_SHIFT 0
+#define CCDC_DFCMEMCTL_INC_ADDR (0 << 2)
+
+#define CCDC_LSCCFG_GFTSF_MASK 7
+#define CCDC_LSCCFG_GFTSF_SHIFT 1
+#define CCDC_LSCCFG_GFTINV_MASK 0xf
+#define CCDC_LSCCFG_GFTINV_SHIFT 4
+#define CCDC_LSC_GFTABLE_SEL_MASK 3
+#define CCDC_LSC_GFTABLE_EPEL_SHIFT 8
+#define CCDC_LSC_GFTABLE_OPEL_SHIFT 10
+#define CCDC_LSC_GFTABLE_EPOL_SHIFT 12
+#define CCDC_LSC_GFTABLE_OPOL_SHIFT 14
+#define CCDC_LSC_GFMODE_MASK 3
+#define CCDC_LSC_GFMODE_SHIFT 4
+#define CCDC_LSC_DISABLE 0
+#define CCDC_LSC_ENABLE 1
+#define CCDC_LSC_TABLE1_SLC 0
+#define CCDC_LSC_TABLE2_SLC 1
+#define CCDC_LSC_TABLE3_SLC 2
+#define CCDC_LSC_MEMADDR_RESET (1 << 2)
+#define CCDC_LSC_MEMADDR_INCR (0 << 2)
+#define CCDC_LSC_FRAC_MASK_T1 0xFF
+#define CCDC_LSC_INT_MASK 3
+#define CCDC_LSC_FRAC_MASK 0x3FFF
+#define CCDC_LSC_CENTRE_MASK 0x3FFF
+#define CCDC_LSC_COEF_MASK 0xff
+#define CCDC_LSC_COEFL_SHIFT 0
+#define CCDC_LSC_COEFU_SHIFT 8
+#define CCDC_GAIN_MASK 0x7FF
+#define CCDC_SYNCEN_VDHDEN_MASK (1 << 0)
+#define CCDC_SYNCEN_WEN_MASK (1 << 1)
+#define CCDC_SYNCEN_WEN_SHIFT 1
+
+/* Power on Defaults in hardware */
+#define MODESET_DEFAULT 0x200
+#define CULH_DEFAULT 0xFFFF
+#define CULV_DEFAULT 0xFF
+#define GAIN_DEFAULT 256
+#define OUTCLIP_DEFAULT 0x3FFF
+#define LSCCFG2_DEFAULT 0xE
+
+#endif /* _DM355_CCDC_REGS_H */
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
new file mode 100644
index 000000000..592d3fc91
--- /dev/null
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -0,0 +1,889 @@
+/*
+ * Copyright (C) 2006-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * CCDC hardware module for DM6446
+ * ------------------------------
+ *
+ * This module is for configuring the CCD controller of the DM6446 VPFE to
+ * capture raw YUV or Bayer RGB data from a decoder. CCDC has several modules
+ * such as Defect Pixel Correction, Color Space Conversion etc to
+ * pre-process the Raw Bayer RGB data, before writing it to SDRAM.
+ * This file is named DM644x so that other variants such as DM6443
+ * may be supported using the same module.
+ *
+ * TODO: Test Raw bayer parameter settings and bayer capture
+ * Split module parameter structure to module specific ioctl structs
+ * investigate whether the enums used for user space type definitions
+ * should be replaced by #defines or integers
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <media/davinci/dm644x_ccdc.h>
+#include <media/davinci/vpss.h>
+
+#include "dm644x_ccdc_regs.h"
+#include "ccdc_hw_device.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCDC Driver for DM6446");
+MODULE_AUTHOR("Texas Instruments");
+
+static struct ccdc_oper_config {
+ struct device *dev;
+ /* CCDC interface type */
+ enum vpfe_hw_if_type if_type;
+ /* Raw Bayer configuration */
+ struct ccdc_params_raw bayer;
+ /* YCbCr configuration */
+ struct ccdc_params_ycbcr ycbcr;
+ /* ccdc base address */
+ void __iomem *base_addr;
+} ccdc_cfg = {
+ /* Raw configurations */
+ .bayer = {
+ .pix_fmt = CCDC_PIXFMT_RAW,
+ .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+ .win = CCDC_WIN_VGA,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .config_params = {
+ .data_sz = CCDC_DATA_10BITS,
+ },
+ },
+ .ycbcr = {
+ .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+ .frm_fmt = CCDC_FRMFMT_INTERLACED,
+ .win = CCDC_WIN_PAL,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .bt656_enable = 1,
+ .pix_order = CCDC_PIXORDER_CBYCRY,
+ .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
+ },
+};
+
+#define CCDC_MAX_RAW_YUV_FORMATS 2
+
+/* Raw Bayer formats */
+static u32 ccdc_raw_bayer_pix_formats[] =
+ {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static u32 ccdc_raw_yuv_pix_formats[] =
+ {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* CCDC Save/Restore context */
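+/* one 32-bit slot per register, indexed by (register offset >> 2) */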
+static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+ return __raw_readl(ccdc_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+ __raw_writel(val, ccdc_cfg.base_addr + offset);
+}
+
+static void ccdc_enable(int flag)
+{
+ regw(flag, CCDC_PCR);
+}
+
+static void ccdc_enable_vport(int flag)
+{
+ if (flag)
+ /* enable video port */
+ regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG);
+ else
+ regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG);
+}
+
+/*
+ * ccdc_setwin()
+ * This function will configure the window size
+ * to be captured in the CCDC registers
+ */
+static void ccdc_setwin(struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt,
+ int ppc)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int val = 0, mid_img = 0;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
+ /*
+ * ppc - per pixel count. Indicates how many cells are output to SDRAM
+ * per pixel. For example, for YCbCr it is one Y and one C, so 2; for
+ * raw capture it is 1.
+ */
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
+ regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels,
+ CCDC_HORZ_INFO);
+
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ /* configure VDINT0 */
+ val = (vert_start << CCDC_VDINT_VDINT0_SHIFT);
+ regw(val, CCDC_VDINT);
+
+ } else {
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /*
+ * configure VDINT0 and VDINT1. VDINT1 will be at half
+ * of image height
+ */
+ mid_img = vert_start + (image_win->height / 2);
+ val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) |
+ (mid_img & CCDC_VDINT_VDINT1_MASK);
+ regw(val, CCDC_VDINT);
+
+ }
+ regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start,
+ CCDC_VERT_START);
+ regw(vert_nr_lines, CCDC_VERT_LINES);
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
+}
+
+static void ccdc_readregs(void)
+{
+ unsigned int val = 0;
+
+ val = regr(CCDC_ALAW);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to ALAW...\n", val);
+ val = regr(CCDC_CLAMP);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to CLAMP...\n", val);
+ val = regr(CCDC_DCSUB);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to DCSUB...\n", val);
+ val = regr(CCDC_BLKCMP);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to BLKCMP...\n", val);
+ val = regr(CCDC_FPC_ADDR);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC_ADDR...\n", val);
+ val = regr(CCDC_FPC);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC...\n", val);
+ val = regr(CCDC_FMTCFG);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMTCFG...\n", val);
+ val = regr(CCDC_COLPTN);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to COLPTN...\n", val);
+ val = regr(CCDC_FMT_HORZ);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_HORZ...\n", val);
+ val = regr(CCDC_FMT_VERT);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_VERT...\n", val);
+ val = regr(CCDC_HSIZE_OFF);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HSIZE_OFF...\n", val);
+ val = regr(CCDC_SDOFST);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SDOFST...\n", val);
+ val = regr(CCDC_VP_OUT);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VP_OUT...\n", val);
+ val = regr(CCDC_SYN_MODE);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SYN_MODE...\n", val);
+ val = regr(CCDC_HORZ_INFO);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HORZ_INFO...\n", val);
+ val = regr(CCDC_VERT_START);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_START...\n", val);
+ val = regr(CCDC_VERT_LINES);
+ dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
+}
+
+static int ccdc_close(struct device *dev)
+{
+ return 0;
+}
+
+/*
+ * ccdc_restore_defaults()
+ * This function will write defaults to all CCDC registers
+ */
+static void ccdc_restore_defaults(void)
+{
+ int i;
+
+ /* disable CCDC */
+ ccdc_enable(0);
+ /* set all registers to default value */
+ for (i = 4; i <= 0x94; i += 4)
+ regw(0, i);
+ regw(CCDC_NO_CULLING, CCDC_CULLING);
+ regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW);
+}
+
+static int ccdc_open(struct device *device)
+{
+ ccdc_restore_defaults();
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_enable_vport(1);
+ return 0;
+}
+
+static void ccdc_sbl_reset(void)
+{
+ vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
+}
+
+/*
+ * ccdc_config_ycbcr()
+ * This function will configure CCDC for YCbCr video capture
+ */
+static void ccdc_config_ycbcr(void)
+{
+ struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
+ u32 syn_mode;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
+ /*
+ * first restore the CCDC registers to default values
+ * This is important since we assume default values to be set in
+ * a lot of registers that we didn't touch
+ */
+ ccdc_restore_defaults();
+
+ /*
+ * configure pixel format, frame format, configure video frame
+ * format, enable output to SDRAM, enable internal timing generator
+ * and 8bit pack mode
+ */
+ syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) <<
+ CCDC_SYN_MODE_INPMOD_SHIFT) |
+ ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) <<
+ CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE |
+ CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE);
+
+ /* setup BT.656 sync mode */
+ if (params->bt656_enable) {
+ regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF);
+
+ /*
+ * configure the FID, VD, HD pin polarity,
+ * fld,hd pol positive, vd negative, 8-bit data
+ */
+ syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= CCDC_SYN_MODE_10BITS;
+ else
+ syn_mode |= CCDC_SYN_MODE_8BITS;
+ } else {
+ /* y/c external sync mode */
+ syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
+ CCDC_FID_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) <<
+ CCDC_HD_POL_SHIFT) |
+ ((params->vd_pol & CCDC_VD_POL_MASK) <<
+ CCDC_VD_POL_SHIFT));
+ }
+ regw(syn_mode, CCDC_SYN_MODE);
+
+ /* configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, 2);
+
+ /*
+ * configure the order of y cb cr in SDRAM, and disable latch
+ * internal register on vsync
+ */
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
+ CCDC_CCDCFG);
+ else
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+
+ /*
+ * configure the horizontal line offset. This should be
+ * on a 32 byte boundary, so clear the 5 LSBs
+ */
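+ /* e.g. a 720 pixel wide line gives (720 * 2 + 31) & ~0x1f = 1440 bytes */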
+ regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF);
+
+ /* configure the memory line offset */
+ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+ /* two fields are interleaved in memory */
+ regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST);
+
+ ccdc_sbl_reset();
+ dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
+}
+
+static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
+{
+ u32 val;
+
+ if (!bclamp->enable) {
+ /* configure DCSub */
+ val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK;
+ regw(val, CCDC_DCSUB);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to DCSUB...\n", val);
+ regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to CLAMP...\n");
+ return;
+ }
+ /*
+ * Configure gain, start pixel, number of lines to be averaged,
+ * number of pixels per line to be averaged, and enable black clamping
+ */
+ val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) |
+ ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) <<
+ CCDC_BLK_ST_PXL_SHIFT) |
+ ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) <<
+ CCDC_BLK_SAMPLE_LINE_SHIFT) |
+ ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
+ CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE);
+ regw(val, CCDC_CLAMP);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to CLAMP...\n", val);
+ /* If black clamping is enabled then make DCSub 0 */
+ regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x00000000 to DCSUB...\n");
+}
+
+static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
+{
+ u32 val;
+
+ val = ((bcomp->b & CCDC_BLK_COMP_MASK) |
+ ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GB_COMP_SHIFT) |
+ ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_GR_COMP_SHIFT) |
+ ((bcomp->r & CCDC_BLK_COMP_MASK) <<
+ CCDC_BLK_COMP_R_COMP_SHIFT));
+ regw(val, CCDC_BLKCMP);
+}
+
+/*
+ * ccdc_config_raw()
+ * This function will configure CCDC for Raw capture mode
+ */
+static void ccdc_config_raw(void)
+{
+ struct ccdc_params_raw *params = &ccdc_cfg.bayer;
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int syn_mode = 0;
+ unsigned int val;
+
+ dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
+
+ /* Reset CCDC */
+ ccdc_restore_defaults();
+
+ /* Disable latching function registers on VSYNC */
+ regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+
+ /*
+ * Configure the vertical sync polarity(SYN_MODE.VDPOL),
+ * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
+ * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
+ * data size (SYN_MODE.DATSIZ), & pixel format (input mode), output to
+ * SDRAM, enable internal timing generator
+ */
+ syn_mode =
+ (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
+ ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
+ ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
+ ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
+ ((config_params->data_sz & CCDC_DATA_SZ_MASK) <<
+ CCDC_DATA_SZ_SHIFT) |
+ ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) |
+ CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE);
+
+ /* Enable and configure aLaw register if needed */
+ if (config_params->alaw.enable) {
+ val = ((config_params->alaw.gamma_wd &
+ CCDC_ALAW_GAMMA_WD_MASK) | CCDC_ALAW_ENABLE);
+ regw(val, CCDC_ALAW);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to ALAW...\n", val);
+ }
+
+ /* Configure video window */
+ ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW);
+
+ /* Configure Black Clamp */
+ ccdc_config_black_clamp(&config_params->blk_clamp);
+
+ /* Configure Black level compensation */
+ ccdc_config_black_compense(&config_params->blk_comp);
+
+ /* If data size is 8 bit then pack the data */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ syn_mode |= CCDC_DATA_PACK_ENABLE;
+
+ /* disable video port */
+ val = CCDC_DISABLE_VIDEO_PORT;
+
+ if (config_params->data_sz == CCDC_DATA_8BITS)
+ val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK)
+ << CCDC_FMTCFG_VPIN_SHIFT;
+ else
+ val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK)
+ << CCDC_FMTCFG_VPIN_SHIFT;
+ /* Write value in FMTCFG */
+ regw(val, CCDC_FMTCFG);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMTCFG...\n", val);
+ /* Configure the color pattern according to mt9t001 sensor */
+ regw(CCDC_COLPTN_VAL, CCDC_COLPTN);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0xBB11BB11 to COLPTN...\n");
+ /*
+ * Configure Data formatter(Video port) pixel selection
+ * (FMT_HORZ, FMT_VERT)
+ */
+ val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) <<
+ CCDC_FMT_HORZ_FMTSPH_SHIFT) |
+ (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK);
+ regw(val, CCDC_FMT_HORZ);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_HORZ...\n", val);
+ val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK)
+ << CCDC_FMT_VERT_FMTSLV_SHIFT;
+ if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+ val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK;
+ else
+ val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK;
+
+ dev_dbg(ccdc_cfg.dev, "\nparams->win.height 0x%x ...\n",
+ params->win.height);
+ regw(val, CCDC_FMT_VERT);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_VERT...\n", val);
+
+ dev_dbg(ccdc_cfg.dev, "\nbelow regw(val, FMT_VERT)...");
+
+ /*
+ * Configure Horizontal offset register. If pack 8 is enabled then
+ * 1 pixel will take 1 byte
+ */
+ if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) &
+ CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF);
+ else
+ /* else one pixel will take 2 bytes */
+ regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) +
+ CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK,
+ CCDC_HSIZE_OFF);
+
+ /* Set value for SDOFST */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_enable) {
+ /* For interlace inverse mode */
+ regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x4B6D to SDOFST..\n");
+ } else {
+ /* For interlace non inverse mode */
+ regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x0249 to SDOFST..\n");
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to SDOFST...\n");
+ }
+
+ /*
+ * Configure video port pixel selection (VPOUT)
+ * Here -1 is to make the height value less than FMT_VERT.FMTLNV
+ */
+ if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+ val = (((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK))
+ << CCDC_VP_OUT_VERT_NUM_SHIFT;
+ else
+ val =
+ ((((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) -
+ 1) & CCDC_VP_OUT_VERT_NUM_MASK)) <<
+ CCDC_VP_OUT_VERT_NUM_SHIFT;
+
+ val |= ((((params->win.width))) & CCDC_VP_OUT_HORZ_NUM_MASK)
+ << CCDC_VP_OUT_HORZ_NUM_SHIFT;
+ val |= (params->win.left) & CCDC_VP_OUT_HORZ_ST_MASK;
+ regw(val, CCDC_VP_OUT);
+
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to VP_OUT...\n", val);
+ regw(syn_mode, CCDC_SYN_MODE);
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode);
+
+ ccdc_sbl_reset();
+ dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
+ ccdc_readregs();
+}
+
+static int ccdc_configure(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_config_raw();
+ else
+ ccdc_config_ycbcr();
+ return 0;
+}
+
+static int ccdc_set_buftype(enum ccdc_buftype buf_type)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.buf_type = buf_type;
+ else
+ ccdc_cfg.ycbcr.buf_type = buf_type;
+ return 0;
+}
+
+static enum ccdc_buftype ccdc_get_buftype(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.buf_type;
+ return ccdc_cfg.ycbcr.buf_type;
+}
+
+static int ccdc_enum_pix(u32 *pix, int i)
+{
+ int ret = -EINVAL;
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
+ *pix = ccdc_raw_bayer_pix_formats[i];
+ ret = 0;
+ }
+ } else {
+ if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
+ *pix = ccdc_raw_yuv_pix_formats[i];
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+static int ccdc_set_pixel_format(u32 pixfmt)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ if (pixfmt == V4L2_PIX_FMT_SBGGR8)
+ ccdc_cfg.bayer.config_params.alaw.enable = 1;
+ else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
+ return -EINVAL;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 ccdc_get_pixel_format(void)
+{
+ struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+ u32 pixfmt;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ if (alaw->enable)
+ pixfmt = V4L2_PIX_FMT_SBGGR8;
+ else
+ pixfmt = V4L2_PIX_FMT_SBGGR16;
+ else {
+ if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+ return pixfmt;
+}
+
+static int ccdc_set_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.win = *win;
+ else
+ ccdc_cfg.ycbcr.win = *win;
+ return 0;
+}
+
+static void ccdc_get_image_window(struct v4l2_rect *win)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ *win = ccdc_cfg.bayer.win;
+ else
+ *win = ccdc_cfg.ycbcr.win;
+}
+
+static unsigned int ccdc_get_line_length(void)
+{
+ struct ccdc_config_params_raw *config_params =
+ &ccdc_cfg.bayer.config_params;
+ unsigned int len;
+
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ if ((config_params->alaw.enable) ||
+ (config_params->data_sz == CCDC_DATA_8BITS))
+ len = ccdc_cfg.bayer.win.width;
+ else
+ len = ccdc_cfg.bayer.win.width * 2;
+ } else
+ len = ccdc_cfg.ycbcr.win.width * 2;
+ return ALIGN(len, 32);
+}
+
+static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+ return 0;
+}
+
+static enum ccdc_frmfmt ccdc_get_frame_format(void)
+{
+ if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc_cfg.bayer.frm_fmt;
+ else
+ return ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static int ccdc_getfid(void)
+{
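+ /* the current field id is reported in bit 15 of SYN_MODE */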
+ return (regr(CCDC_SYN_MODE) >> 15) & 1;
+}
+
+/* misc operations */
+static inline void ccdc_setfbaddr(unsigned long addr)
+{
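+ /* SDR_ADDR takes a 32 byte aligned address, so the low 5 bits are cleared */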
+ regw(addr & 0xffffffe0, CCDC_SDR_ADDR);
+}
+
+static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+ ccdc_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_YCBCR_SYNC_16:
+ case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
+ ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+ ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+ break;
+ default:
+ /* TODO add support for raw bayer here */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void ccdc_save_context(void)
+{
+ ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
+ ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
+ ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
+ ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
+ ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
+ ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
+ ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
+ ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
+ ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
+ ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
+ ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
+ ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
+ ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
+ ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
+ ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
+ ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
+ ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
+ ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
+ ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
+ ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
+ ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
+ ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
+ ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
+ ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
+ ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
+ ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
+ ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
+ ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
+ ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
+ ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
+ ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
+ ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
+ ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
+ ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
+ ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
+ ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
+ ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
+}
+
+static void ccdc_restore_context(void)
+{
+ regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
+ regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
+ regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
+ regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
+ regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
+ regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
+ regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
+ regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
+ regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
+ regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
+ regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
+ regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
+ regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
+ regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
+ regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
+ regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
+ regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
+ regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
+ regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
+ regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
+ regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
+ regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
+ regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
+ regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
+ regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
+ regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
+ regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
+ regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
+ regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
+ regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
+ regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
+ regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
+ regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
+ regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
+ regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
+ regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
+ regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
+}
+
+static const struct ccdc_hw_device ccdc_hw_dev = {
+ .name = "DM6446 CCDC",
+ .owner = THIS_MODULE,
+ .hw_ops = {
+ .open = ccdc_open,
+ .close = ccdc_close,
+ .reset = ccdc_sbl_reset,
+ .enable = ccdc_enable,
+ .set_hw_if_params = ccdc_set_hw_if_params,
+ .configure = ccdc_configure,
+ .set_buftype = ccdc_set_buftype,
+ .get_buftype = ccdc_get_buftype,
+ .enum_pix = ccdc_enum_pix,
+ .set_pixel_format = ccdc_set_pixel_format,
+ .get_pixel_format = ccdc_get_pixel_format,
+ .set_frame_format = ccdc_set_frame_format,
+ .get_frame_format = ccdc_get_frame_format,
+ .set_image_window = ccdc_set_image_window,
+ .get_image_window = ccdc_get_image_window,
+ .get_line_length = ccdc_get_line_length,
+ .setfbaddr = ccdc_setfbaddr,
+ .getfid = ccdc_getfid,
+ },
+};
+
+static int dm644x_ccdc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int status = 0;
+
+ /*
+ * First try to register with vpfe. If this is not the correct
+ * platform, we don't have to iomap.
+ */
+ status = vpfe_register_ccdc_device(&ccdc_hw_dev);
+ if (status < 0)
+ return status;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ status = -ENODEV;
+ goto fail_nores;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nores;
+ }
+
+ ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ if (!ccdc_cfg.base_addr) {
+ status = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ ccdc_cfg.dev = &pdev->dev;
+ printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
+ return 0;
+fail_nomem:
+ release_mem_region(res->start, resource_size(res));
+fail_nores:
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return status;
+}
+
+static int dm644x_ccdc_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ iounmap(ccdc_cfg.base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+ return 0;
+}
+
+static int dm644x_ccdc_suspend(struct device *dev)
+{
+ /* Save CCDC context */
+ ccdc_save_context();
+ /* Disable CCDC */
+ ccdc_enable(0);
+
+ return 0;
+}
+
+static int dm644x_ccdc_resume(struct device *dev)
+{
+ /* Restore CCDC context */
+ ccdc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
+ .suspend = dm644x_ccdc_suspend,
+ .resume = dm644x_ccdc_resume,
+};
+
+static struct platform_driver dm644x_ccdc_driver = {
+ .driver = {
+ .name = "dm644x_ccdc",
+ .pm = &dm644x_ccdc_pm_ops,
+ },
+ .remove = dm644x_ccdc_remove,
+ .probe = dm644x_ccdc_probe,
+};
+
+module_platform_driver(dm644x_ccdc_driver);
diff --git a/drivers/media/platform/davinci/dm644x_ccdc_regs.h b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
new file mode 100644
index 000000000..ffd89c7ea
--- /dev/null
+++ b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2006-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DM644X_CCDC_REGS_H
+#define _DM644X_CCDC_REGS_H
+
+/**************************************************************************\
+* Register OFFSET Definitions
+\**************************************************************************/
+#define CCDC_PID 0x0
+#define CCDC_PCR 0x4
+#define CCDC_SYN_MODE 0x8
+#define CCDC_HD_VD_WID 0xc
+#define CCDC_PIX_LINES 0x10
+#define CCDC_HORZ_INFO 0x14
+#define CCDC_VERT_START 0x18
+#define CCDC_VERT_LINES 0x1c
+#define CCDC_CULLING 0x20
+#define CCDC_HSIZE_OFF 0x24
+#define CCDC_SDOFST 0x28
+#define CCDC_SDR_ADDR 0x2c
+#define CCDC_CLAMP 0x30
+#define CCDC_DCSUB 0x34
+#define CCDC_COLPTN 0x38
+#define CCDC_BLKCMP 0x3c
+#define CCDC_FPC 0x40
+#define CCDC_FPC_ADDR 0x44
+#define CCDC_VDINT 0x48
+#define CCDC_ALAW 0x4c
+#define CCDC_REC656IF 0x50
+#define CCDC_CCDCFG 0x54
+#define CCDC_FMTCFG 0x58
+#define CCDC_FMT_HORZ 0x5c
+#define CCDC_FMT_VERT 0x60
+#define CCDC_FMT_ADDR0 0x64
+#define CCDC_FMT_ADDR1 0x68
+#define CCDC_FMT_ADDR2 0x6c
+#define CCDC_FMT_ADDR3 0x70
+#define CCDC_FMT_ADDR4 0x74
+#define CCDC_FMT_ADDR5 0x78
+#define CCDC_FMT_ADDR6 0x7c
+#define CCDC_FMT_ADDR7 0x80
+#define CCDC_PRGEVEN_0 0x84
+#define CCDC_PRGEVEN_1 0x88
+#define CCDC_PRGODD_0 0x8c
+#define CCDC_PRGODD_1 0x90
+#define CCDC_VP_OUT 0x94
+#define CCDC_REG_END 0x98
+
+/***************************************************************
+* Define for various register bit mask and shifts for CCDC
+****************************************************************/
+#define CCDC_FID_POL_MASK 1
+#define CCDC_FID_POL_SHIFT 4
+#define CCDC_HD_POL_MASK 1
+#define CCDC_HD_POL_SHIFT 3
+#define CCDC_VD_POL_MASK 1
+#define CCDC_VD_POL_SHIFT 2
+#define CCDC_HSIZE_OFF_MASK 0xffffffe0
+#define CCDC_32BYTE_ALIGN_VAL 31
+#define CCDC_FRM_FMT_MASK 0x1
+#define CCDC_FRM_FMT_SHIFT 7
+#define CCDC_DATA_SZ_MASK 7
+#define CCDC_DATA_SZ_SHIFT 8
+#define CCDC_PIX_FMT_MASK 3
+#define CCDC_PIX_FMT_SHIFT 12
+#define CCDC_VP2SDR_DISABLE 0xFFFBFFFF
+#define CCDC_WEN_ENABLE (1 << 17)
+#define CCDC_SDR2RSZ_DISABLE 0xFFF7FFFF
+#define CCDC_VDHDEN_ENABLE (1 << 16)
+#define CCDC_LPF_ENABLE (1 << 14)
+#define CCDC_ALAW_ENABLE (1 << 3)
+#define CCDC_ALAW_GAMMA_WD_MASK 7
+#define CCDC_BLK_CLAMP_ENABLE (1 << 31)
+#define CCDC_BLK_SGAIN_MASK 0x1F
+#define CCDC_BLK_ST_PXL_MASK 0x7FFF
+#define CCDC_BLK_ST_PXL_SHIFT 10
+#define CCDC_BLK_SAMPLE_LN_MASK 7
+#define CCDC_BLK_SAMPLE_LN_SHIFT 28
+#define CCDC_BLK_SAMPLE_LINE_MASK 7
+#define CCDC_BLK_SAMPLE_LINE_SHIFT 25
+#define CCDC_BLK_DC_SUB_MASK 0x03FFF
+#define CCDC_BLK_COMP_MASK 0xFF
+#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
+#define CCDC_BLK_COMP_GR_COMP_SHIFT 16
+#define CCDC_BLK_COMP_R_COMP_SHIFT 24
+#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15)
+#define CCDC_FPC_ENABLE (1 << 15)
+#define CCDC_FPC_DISABLE 0
+#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
+#define CCDC_DATA_PACK_ENABLE (1 << 11)
+#define CCDC_FMTCFG_VPIN_MASK 7
+#define CCDC_FMTCFG_VPIN_SHIFT 12
+#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
+#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
+#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
+#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
+#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
+#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
+#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
+#define CCDC_HORZ_INFO_SPH_SHIFT 16
+#define CCDC_VERT_START_SLV0_SHIFT 16
+#define CCDC_VDINT_VDINT0_SHIFT 16
+#define CCDC_VDINT_VDINT1_MASK 0xFFFF
+#define CCDC_PPC_RAW 1
+#define CCDC_DCSUB_DEFAULT_VAL 0
+#define CCDC_CLAMP_DEFAULT_VAL 0
+#define CCDC_ENABLE_VIDEO_PORT 0x8000
+#define CCDC_DISABLE_VIDEO_PORT 0
+#define CCDC_COLPTN_VAL 0xBB11BB11
+#define CCDC_TWO_BYTES_PER_PIXEL 2
+#define CCDC_INTERLACED_IMAGE_INVERT 0x4B6D
+#define CCDC_INTERLACED_NO_IMAGE_INVERT 0x0249
+#define CCDC_PROGRESSIVE_IMAGE_INVERT 0x4000
+#define CCDC_PROGRESSIVE_NO_IMAGE_INVERT 0
+#define CCDC_INTERLACED_HEIGHT_SHIFT 1
+#define CCDC_SYN_MODE_INPMOD_SHIFT 12
+#define CCDC_SYN_MODE_INPMOD_MASK 3
+#define CCDC_SYN_MODE_8BITS (7 << 8)
+#define CCDC_SYN_MODE_10BITS (6 << 8)
+#define CCDC_SYN_MODE_11BITS (5 << 8)
+#define CCDC_SYN_MODE_12BITS (4 << 8)
+#define CCDC_SYN_MODE_13BITS (3 << 8)
+#define CCDC_SYN_MODE_14BITS (2 << 8)
+#define CCDC_SYN_MODE_15BITS (1 << 8)
+#define CCDC_SYN_MODE_16BITS (0 << 8)
+#define CCDC_SYN_FLDMODE_MASK 1
+#define CCDC_SYN_FLDMODE_SHIFT 7
+#define CCDC_REC656IF_BT656_EN 3
+#define CCDC_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
+#define CCDC_CCDCFG_Y8POS_SHIFT 11
+#define CCDC_CCDCFG_BW656_10BIT (1 << 5)
+#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
+#define CCDC_NO_CULLING 0xffff00ff
+#endif
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
new file mode 100644
index 000000000..80fa60a4c
--- /dev/null
+++ b/drivers/media/platform/davinci/isif.c
@@ -0,0 +1,1130 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Image Sensor Interface (ISIF) driver
+ *
+ * This driver is for configuring the ISIF IP available on DM365 or any other
+ * TI SoCs. It is used for capturing YUV or Bayer video or image data
+ * from a decoder or sensor. This IP is similar to the CCDC IP on DM355
+ * and DM6446, but with enhanced or additional IP blocks. The driver
+ * configures the ISIF upon commands from the vpfe bridge driver through
+ * ccdc_hw_device interface.
+ *
+ * TODO: 1) Raw bayer parameter settings and bayer capture
+ * 2) Add support for control ioctl
+ */
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <media/davinci/isif.h>
+#include <media/davinci/vpss.h>
+
+#include "isif_regs.h"
+#include "ccdc_hw_device.h"
+
+/* Defaults for module configuration parameters */
+static struct isif_config_params_raw isif_config_defaults = {
+ .linearize = {
+ .en = 0,
+ .corr_shft = ISIF_NO_SHIFT,
+ .scale_fact = {1, 0},
+ },
+ .df_csc = {
+ .df_or_csc = 0,
+ .csc = {
+ .en = 0,
+ },
+ },
+ .dfc = {
+ .en = 0,
+ },
+ .bclamp = {
+ .en = 0,
+ },
+ .gain_offset = {
+ .gain = {
+ .r_ye = {1, 0},
+ .gr_cy = {1, 0},
+ .gb_g = {1, 0},
+ .b_mg = {1, 0},
+ },
+ },
+ .culling = {
+ .hcpat_odd = 0xff,
+ .hcpat_even = 0xff,
+ .vcpat = 0xff,
+ },
+ .compress = {
+ .alg = ISIF_ALAW,
+ },
+};
+
+/* ISIF operation configuration */
+static struct isif_oper_config {
+ struct device *dev;
+ enum vpfe_hw_if_type if_type;
+ struct isif_ycbcr_config ycbcr;
+ struct isif_params_raw bayer;
+ enum isif_data_pack data_pack;
+ /* ISIF base address */
+ void __iomem *base_addr;
+ /* ISIF Linear Table 0 */
+ void __iomem *linear_tbl0_addr;
+ /* ISIF Linear Table 1 */
+ void __iomem *linear_tbl1_addr;
+} isif_cfg = {
+ .ycbcr = {
+ .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+ .frm_fmt = CCDC_FRMFMT_INTERLACED,
+ .win = ISIF_WIN_NTSC,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .pix_order = CCDC_PIXORDER_CBYCRY,
+ .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED,
+ },
+ .bayer = {
+ .pix_fmt = CCDC_PIXFMT_RAW,
+ .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+ .win = ISIF_WIN_VGA,
+ .fid_pol = VPFE_PINPOL_POSITIVE,
+ .vd_pol = VPFE_PINPOL_POSITIVE,
+ .hd_pol = VPFE_PINPOL_POSITIVE,
+ .gain = {
+ .r_ye = {1, 0},
+ .gr_cy = {1, 0},
+ .gb_g = {1, 0},
+ .b_mg = {1, 0},
+ },
+ .cfa_pat = ISIF_CFA_PAT_MOSAIC,
+ .data_msb = ISIF_BIT_MSB_11,
+ .config_params = {
+ .data_shift = ISIF_NO_SHIFT,
+ .col_pat_field0 = {
+ .olop = ISIF_GREEN_BLUE,
+ .olep = ISIF_BLUE,
+ .elop = ISIF_RED,
+ .elep = ISIF_GREEN_RED,
+ },
+ .col_pat_field1 = {
+ .olop = ISIF_GREEN_BLUE,
+ .olep = ISIF_BLUE,
+ .elop = ISIF_RED,
+ .elep = ISIF_GREEN_RED,
+ },
+ .test_pat_gen = 0,
+ },
+ },
+ .data_pack = ISIF_DATA_PACK8,
+};
+
+/* Raw Bayer formats */
+static const u32 isif_raw_bayer_pix_formats[] = {
+ V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static const u32 isif_raw_yuv_pix_formats[] = {
+ V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+ return __raw_readl(isif_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+ __raw_writel(val, isif_cfg.base_addr + offset);
+}
+
+/* reg_modify() - read, modify and write register */
+static inline u32 reg_modify(u32 mask, u32 val, u32 offset)
+{
+ u32 new_val = (regr(offset) & ~mask) | (val & mask);
+
+ regw(new_val, offset);
+ return new_val;
+}
+
+static inline void regw_lin_tbl(u32 val, u32 offset, int i)
+{
+ if (!i)
+ __raw_writel(val, isif_cfg.linear_tbl0_addr + offset);
+ else
+ __raw_writel(val, isif_cfg.linear_tbl1_addr + offset);
+}
+
+static void isif_disable_all_modules(void)
+{
+ /* disable BC */
+ regw(0, CLAMPCFG);
+ /* disable vdfc */
+ regw(0, DFCCTL);
+ /* disable CSC */
+ regw(0, CSCCTL);
+ /* disable linearization */
+ regw(0, LINCFG0);
+ /* disable other modules here as and when they are supported */
+}
+
+static void isif_enable(int en)
+{
+ if (!en) {
+ /* Before disabling the ISIF, disable all ISIF modules */
+ isif_disable_all_modules();
+ /*
+ * wait for the next VD. Assuming the lowest scan rate is 12 Hz,
+ * a 100 msec delay is good enough.
+ */
+ msleep(100);
+ }
+ reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN);
+}
+
+static void isif_enable_output_to_sdram(int en)
+{
+ reg_modify(ISIF_SYNCEN_WEN_MASK, en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
+}
+
+static void isif_config_culling(struct isif_cul *cul)
+{
+ u32 val;
+
+ /* Horizontal pattern */
+ val = (cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT) | cul->hcpat_odd;
+ regw(val, CULH);
+
+ /* vertical pattern */
+ regw(cul->vcpat, CULV);
+
+ /* LPF */
+ reg_modify(ISIF_LPF_MASK << ISIF_LPF_SHIFT,
+ cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
+}
+
+static void isif_config_gain_offset(void)
+{
+ struct isif_gain_offsets_adj *gain_off_p =
+ &isif_cfg.bayer.config_params.gain_offset;
+ u32 val;
+
+ val = (!!gain_off_p->gain_sdram_en << GAIN_SDRAM_EN_SHIFT) |
+ (!!gain_off_p->gain_ipipe_en << GAIN_IPIPE_EN_SHIFT) |
+ (!!gain_off_p->gain_h3a_en << GAIN_H3A_EN_SHIFT) |
+ (!!gain_off_p->offset_sdram_en << OFST_SDRAM_EN_SHIFT) |
+ (!!gain_off_p->offset_ipipe_en << OFST_IPIPE_EN_SHIFT) |
+ (!!gain_off_p->offset_h3a_en << OFST_H3A_EN_SHIFT);
+
+ reg_modify(GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
+
+ val = (gain_off_p->gain.r_ye.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.r_ye.decimal;
+ regw(val, CRGAIN);
+
+ val = (gain_off_p->gain.gr_cy.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.gr_cy.decimal;
+ regw(val, CGRGAIN);
+
+ val = (gain_off_p->gain.gb_g.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.gb_g.decimal;
+ regw(val, CGBGAIN);
+
+ val = (gain_off_p->gain.b_mg.integer << GAIN_INTEGER_SHIFT) |
+ gain_off_p->gain.b_mg.decimal;
+ regw(val, CBGAIN);
+
+ regw(gain_off_p->offset, COFSTA);
+}
+
+static void isif_restore_defaults(void)
+{
+ enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
+
+ dev_dbg(isif_cfg.dev, "\nstarting isif_restore_defaults...");
+ isif_cfg.bayer.config_params = isif_config_defaults;
+ /* Enable clock to ISIF, IPIPEIF and BL */
+ vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
+ vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
+ vpss_enable_clock(VPSS_BL_CLOCK, 1);
+ /* Set default offset and gain */
+ isif_config_gain_offset();
+ vpss_select_ccdc_source(source);
+ dev_dbg(isif_cfg.dev, "\nEnd of isif_restore_defaults...");
+}
+
+static int isif_open(struct device *device)
+{
+ isif_restore_defaults();
+ return 0;
+}
+
+/* This function configures the window size to be captured in the ISIF registers */
+static void isif_setwin(struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt, int ppc)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int mid_img = 0;
+
+ dev_dbg(isif_cfg.dev, "\nStarting isif_setwin...");
+ /*
+ * ppc - per pixel count. Indicates how many pixels per cell are
+ * output to SDRAM. For example, for YCbCr it is one Y and one C,
+ * so 2. For raw capture this is 1.
+ */
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
+
+ /* Writing the horizontal info into the registers */
+ regw(horz_start & START_PX_HOR_MASK, SPH);
+ regw(horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ } else {
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /* configure VDINT0 and VDINT1 */
+ mid_img = vert_start + (image_win->height / 2);
+ regw(mid_img, VDINT1);
+ }
+
+ regw(0, VDINT0);
+ regw(vert_start & START_VER_ONE_MASK, SLV0);
+ regw(vert_start & START_VER_TWO_MASK, SLV1);
+ regw(vert_nr_lines & NUM_LINES_VER, LNV);
+}
+
+static void isif_config_bclamp(struct isif_black_clamp *bc)
+{
+ u32 val;
+
+ /*
+ * DC Offset is always added to image data irrespective of bc enable
+ * status
+ */
+ regw(bc->dc_offset, CLDCOFST);
+
+ if (bc->en) {
+ val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT;
+
+ /* Enable BC and horizontal clamp calculation parameters */
+ val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT);
+
+ regw(val, CLAMPCFG);
+
+ if (bc->horz.mode != ISIF_HORZ_BC_DISABLE) {
+ /*
+ * Window count for calculation
+ * Base window selection
+ * pixel limit
+ * Horizontal size of window
+ * vertical size of the window
+ * Horizontal start position of the window
+ * Vertical start position of the window
+ */
+ val = bc->horz.win_count_calc |
+ ((!!bc->horz.base_win_sel_calc) <<
+ ISIF_HORZ_BC_WIN_SEL_SHIFT) |
+ ((!!bc->horz.clamp_pix_limit) <<
+ ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
+ (bc->horz.win_h_sz_calc <<
+ ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
+ (bc->horz.win_v_sz_calc <<
+ ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
+ regw(val, CLHWIN0);
+
+ regw(bc->horz.win_start_h_calc, CLHWIN1);
+ regw(bc->horz.win_start_v_calc, CLHWIN2);
+ }
+
+ /* vertical clamp calculation parameters */
+
+ /* Reset clamp value sel for previous line */
+ val |=
+ (bc->vert.reset_val_sel << ISIF_VERT_BC_RST_VAL_SEL_SHIFT) |
+ (bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT);
+ regw(val, CLVWIN0);
+
+ /* Optical Black horizontal start position */
+ regw(bc->vert.ob_start_h, CLVWIN1);
+ /* Optical Black vertical start position */
+ regw(bc->vert.ob_start_v, CLVWIN2);
+ /* Optical Black vertical size for calculation */
+ regw(bc->vert.ob_v_sz_calc, CLVWIN3);
+ /* Vertical start position for BC subtraction */
+ regw(bc->vert_start_sub, CLSV);
+ }
+}
+
+static void isif_config_linearization(struct isif_linearize *linearize)
+{
+ u32 val, i;
+
+ if (!linearize->en) {
+ regw(0, LINCFG0);
+ return;
+ }
+
+ /* shift value for correction & enable linearization (set lsb) */
+ val = (linearize->corr_shft << ISIF_LIN_CORRSFT_SHIFT) | 1;
+ regw(val, LINCFG0);
+
+ /* Scale factor */
+ val = ((!!linearize->scale_fact.integer) <<
+ ISIF_LIN_SCALE_FACT_INTEG_SHIFT) |
+ linearize->scale_fact.decimal;
+ regw(val, LINCFG1);
+
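+ /*
+ * The linearization table is split across two on-chip memories:
+ * even entries go to table 0 and odd entries to table 1, each at
+ * word offset i/2.
+ */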
+ for (i = 0; i < ISIF_LINEAR_TAB_SIZE; i++) {
+ if (i % 2)
+ regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1);
+ else
+ regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0);
+ }
+}
+
+static int isif_config_dfc(struct isif_dfc *vdfc)
+{
+ /* initialize retries to loop for max ~ 250 usec */
+ u32 val, count, retries = loops_per_jiffy / (4000/HZ);
+ int i;
+
+ if (!vdfc->en)
+ return 0;
+
+ /* Correction mode */
+ val = (vdfc->corr_mode << ISIF_VDFC_CORR_MOD_SHIFT);
+
+ /* Correct whole line or partial */
+ if (vdfc->corr_whole_line)
+ val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
+
+ /* level shift value */
+ val |= vdfc->def_level_shift << ISIF_VDFC_LEVEL_SHFT_SHIFT;
+
+ regw(val, DFCCTL);
+
+ /* Defect saturation level */
+ regw(vdfc->def_sat_level, VDFSATLV);
+
+ regw(vdfc->table[0].pos_vert, DFCMEM0);
+ regw(vdfc->table[0].pos_horz, DFCMEM1);
+ if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ regw(vdfc->table[0].level_at_pos, DFCMEM2);
+ regw(vdfc->table[0].level_up_pixels, DFCMEM3);
+ regw(vdfc->table[0].level_low_pixels, DFCMEM4);
+ }
+
+ /* set DFCMARST and set DFCMWR */
+ val = regr(DFCMEMCTL) | (1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT) | 1;
+ regw(val, DFCMEMCTL);
+
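+ /*
+ * Poll until the hardware clears the DFCMWR bit (bit 0), which is
+ * expected to indicate that the entry has been written to defect
+ * correction memory; give up after ~250 usec.
+ */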
+ count = retries;
+ while (count && (regr(DFCMEMCTL) & 0x1))
+ count--;
+
+ if (!count) {
+ dev_dbg(isif_cfg.dev, "defect table write timeout !!!\n");
+ return -1;
+ }
+
+ for (i = 1; i < vdfc->num_vdefects; i++) {
+ regw(vdfc->table[i].pos_vert, DFCMEM0);
+ regw(vdfc->table[i].pos_horz, DFCMEM1);
+ if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ regw(vdfc->table[i].level_at_pos, DFCMEM2);
+ regw(vdfc->table[i].level_up_pixels, DFCMEM3);
+ regw(vdfc->table[i].level_low_pixels, DFCMEM4);
+ }
+ val = regr(DFCMEMCTL);
+ /* clear DFCMARST and set DFCMWR */
+ val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
+ val |= 1;
+ regw(val, DFCMEMCTL);
+
+ count = retries;
+ while (count && (regr(DFCMEMCTL) & 0x1))
+ count--;
+
+ if (!count) {
+ dev_err(isif_cfg.dev,
+ "defect table write timeout !!!\n");
+ return -1;
+ }
+ }
+ if (vdfc->num_vdefects < ISIF_VDFC_TABLE_SIZE) {
+ /* Extra cycle needed */
+ regw(0, DFCMEM0);
+ regw(0x1FFF, DFCMEM1);
+ regw(1, DFCMEMCTL);
+ }
+
+ /* enable VDFC */
+ reg_modify((1 << ISIF_VDFC_EN_SHIFT), (1 << ISIF_VDFC_EN_SHIFT),
+ DFCCTL);
+ return 0;
+}
+
+static void isif_config_csc(struct isif_df_csc *df_csc)
+{
+ u32 val1 = 0, val2 = 0, i;
+
+ if (!df_csc->csc.en) {
+ regw(0, CSCCTL);
+ return;
+ }
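+
+ /*
+ * Coefficients are packed two per 32-bit CSCM register: an even
+ * entry fills the LSB half and the following odd entry is shifted
+ * into the MSB half (ISIF_CSCM_MSB_SHIFT) before the pair is
+ * written out.
+ */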
+ for (i = 0; i < ISIF_CSC_NUM_COEFF; i++) {
+ if ((i % 2) == 0) {
+ /* CSCM - LSB */
+ val1 = (df_csc->csc.coeff[i].integer <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ df_csc->csc.coeff[i].decimal;
+ } else {
+
+ /* CSCM - MSB */
+ val2 = (df_csc->csc.coeff[i].integer <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ df_csc->csc.coeff[i].decimal;
+ val2 <<= ISIF_CSCM_MSB_SHIFT;
+ val2 |= val1;
+ regw(val2, (CSCM0 + ((i - 1) << 1)));
+ }
+ }
+
+ /* program the active area */
+ regw(df_csc->start_pix, FMTSPH);
+ /*
+ * one extra pixel as required for CSC. Actually the number of
+ * pixels - 1 should be configured in this register, so we would
+ * need to subtract 1 before writing to FMTLNH, but we do not do
+ * this since CSC requires one extra pixel
+ */
+ regw(df_csc->num_pixels, FMTLNH);
+ regw(df_csc->start_line, FMTSLV);
+ /*
+ * one extra line as required for CSC. See reason documented for
+ * num_pixels
+ */
+ regw(df_csc->num_lines, FMTLNV);
+
+ /* Enable CSC */
+ regw(1, CSCCTL);
+}
+
+static int isif_config_raw(void)
+{
+ struct isif_params_raw *params = &isif_cfg.bayer;
+ struct isif_config_params_raw *module_params =
+ &isif_cfg.bayer.config_params;
+ struct vpss_pg_frame_size frame_size;
+ struct vpss_sync_pol sync;
+ u32 val;
+
+ dev_dbg(isif_cfg.dev, "\nStarting isif_config_raw..\n");
+
+ /*
+ * Configure CCDCFG register:-
+ * Set CCD Not to swap input since input is RAW data
+ * Set FID detection function to Latch at V-Sync
+ * Set WENLOG - isif valid area
+ * Set TRGSEL
+ * Set EXTRG
+ * Packed to 8 or 16 bits
+ */
+
+ val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
+ ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
+ ISIF_CCDCFG_EXTRG_DISABLE | isif_cfg.data_pack;
+
+ dev_dbg(isif_cfg.dev, "Writing 0x%x to ...CCDCFG \n", val);
+ regw(val, CCDCFG);
+
+ /*
+ * Configure the vertical sync polarity(MODESET.VDPOL)
+ * Configure the horizontal sync polarity (MODESET.HDPOL)
+ * Configure frame id polarity (MODESET.FLDPOL)
+ * Configure data polarity
+ * Configure External WEN Selection
+ * Configure frame format(progressive or interlace)
+ * Configure pixel format (Input mode)
+ * Configure the data shift
+ */
+
+ val = ISIF_VDHDOUT_INPUT | (params->vd_pol << ISIF_VD_POL_SHIFT) |
+ (params->hd_pol << ISIF_HD_POL_SHIFT) |
+ (params->fid_pol << ISIF_FID_POL_SHIFT) |
+ (ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT) |
+ (ISIF_EXWEN_DISABLE << ISIF_EXWEN_SHIFT) |
+ (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
+ (params->pix_fmt << ISIF_INPUT_SHIFT) |
+ (params->config_params.data_shift << ISIF_DATASFT_SHIFT);
+
+ regw(val, MODESET);
+ dev_dbg(isif_cfg.dev, "Writing 0x%x to MODESET...\n", val);
+
+ /*
+ * Configure GAMMAWD register
+ * CFA pattern setting
+ */
+ val = params->cfa_pat << ISIF_GAMMAWD_CFA_SHIFT;
+
+ /* Gamma msb */
+ if (module_params->compress.alg == ISIF_ALAW)
+ val |= ISIF_ALAW_ENABLE;
+
+ val |= (params->data_msb << ISIF_ALAW_GAMMA_WD_SHIFT);
+ regw(val, CGAMMAWD);
+
+ /* Configure DPCM compression settings */
+ if (module_params->compress.alg == ISIF_DPCM) {
+ val = BIT(ISIF_DPCM_EN_SHIFT) |
+ (module_params->compress.pred <<
+ ISIF_DPCM_PREDICTOR_SHIFT);
+ }
+
+ regw(val, MISC);
+
+ /* Configure Gain & Offset */
+ isif_config_gain_offset();
+
+ /* Configure Color pattern */
+ val = (params->config_params.col_pat_field0.olop) |
+ (params->config_params.col_pat_field0.olep << 2) |
+ (params->config_params.col_pat_field0.elop << 4) |
+ (params->config_params.col_pat_field0.elep << 6) |
+ (params->config_params.col_pat_field1.olop << 8) |
+ (params->config_params.col_pat_field1.olep << 10) |
+ (params->config_params.col_pat_field1.elop << 12) |
+ (params->config_params.col_pat_field1.elep << 14);
+ regw(val, CCOLP);
+ dev_dbg(isif_cfg.dev, "Writing %x to CCOLP ...\n", val);
+
+ /* Configure HSIZE register */
+ val = (!!params->horz_flip_en) << ISIF_HSIZE_FLIP_SHIFT;
+
+ /* calculate line offset in 32 bytes based on pack value */
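+ /* the per-line byte count is rounded up to whole 32-byte units */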
+ if (isif_cfg.data_pack == ISIF_PACK_8BIT)
+ val |= ((params->win.width + 31) >> 5);
+ else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
+ val |= (((params->win.width +
+ (params->win.width >> 2)) + 31) >> 5);
+ else
+ val |= (((params->win.width * 2) + 31) >> 5);
+ regw(val, HSIZE);
+
+ /* Configure SDOFST register */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_en) {
+ /* For interlace inverse mode */
+ regw(0x4B6D, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x4B6D to SDOFST...\n");
+ } else {
+ /* For interlace non inverse mode */
+ regw(0x0B6D, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x0B6D to SDOFST...\n");
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ if (params->image_invert_en) {
+ /* For progressive inverse mode */
+ regw(0x4000, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x4000 to SDOFST...\n");
+ } else {
+ /* For progressive non inverse mode */
+ regw(0x0000, SDOFST);
+ dev_dbg(isif_cfg.dev, "Writing 0x0000 to SDOFST...\n");
+ }
+ }
+
+ /* Configure video window */
+ isif_setwin(&params->win, params->frm_fmt, 1);
+
+ /* Configure Black Clamp */
+ isif_config_bclamp(&module_params->bclamp);
+
+ /* Configure Vertical Defect Correction (VDFC) */
+ if (isif_config_dfc(&module_params->dfc) < 0)
+ return -EFAULT;
+
+ if (!module_params->df_csc.df_or_csc)
+ /* Configure Color Space Conversion */
+ isif_config_csc(&module_params->df_csc);
+
+ isif_config_linearization(&module_params->linearize);
+
+ /* Configure Culling */
+ isif_config_culling(&module_params->culling);
+
+ /* Configure horizontal and vertical offsets(DFC,LSC,Gain) */
+ regw(module_params->horz_offset, DATAHOFST);
+ regw(module_params->vert_offset, DATAVOFST);
+
+ /* Setup test pattern if enabled */
+ if (params->config_params.test_pat_gen) {
+ /* Use the HD/VD pol settings from user */
+ sync.ccdpg_hdpol = params->hd_pol;
+ sync.ccdpg_vdpol = params->vd_pol;
+ dm365_vpss_set_sync_pol(sync);
+ frame_size.hlpfr = isif_cfg.bayer.win.width;
+ frame_size.pplen = isif_cfg.bayer.win.height;
+ dm365_vpss_set_pg_frame_size(frame_size);
+ vpss_select_ccdc_source(VPSS_PGLPBK);
+ }
+
+ dev_dbg(isif_cfg.dev, "\nEnd of isif_config_ycbcr...\n");
+ return 0;
+}
+
+static int isif_set_buftype(enum ccdc_buftype buf_type)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ isif_cfg.bayer.buf_type = buf_type;
+ else
+ isif_cfg.ycbcr.buf_type = buf_type;
+ return 0;
+}
+
+static enum ccdc_buftype isif_get_buftype(void)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ return isif_cfg.bayer.buf_type;
+
+ return isif_cfg.ycbcr.buf_type;
+}
+
+static int isif_enum_pix(u32 *pix, int i)
+{
+ int ret = -EINVAL;
+
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ if (i < ARRAY_SIZE(isif_raw_bayer_pix_formats)) {
+ *pix = isif_raw_bayer_pix_formats[i];
+ ret = 0;
+ }
+ } else {
+ if (i < ARRAY_SIZE(isif_raw_yuv_pix_formats)) {
+ *pix = isif_raw_yuv_pix_formats[i];
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int isif_set_pixel_format(unsigned int pixfmt)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ if (pixfmt == V4L2_PIX_FMT_SBGGR8) {
+ if ((isif_cfg.bayer.config_params.compress.alg !=
+ ISIF_ALAW) &&
+ (isif_cfg.bayer.config_params.compress.alg !=
+ ISIF_DPCM)) {
+ dev_dbg(isif_cfg.dev,
+ "Either configure A-Law or DPCM\n");
+ return -EINVAL;
+ }
+ isif_cfg.data_pack = ISIF_PACK_8BIT;
+ } else if (pixfmt == V4L2_PIX_FMT_SBGGR16) {
+ isif_cfg.bayer.config_params.compress.alg =
+ ISIF_NO_COMPRESSION;
+ isif_cfg.data_pack = ISIF_PACK_16BIT;
+ } else
+ return -EINVAL;
+ isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+ isif_cfg.data_pack = ISIF_PACK_8BIT;
+ }
+ return 0;
+}
+
+static u32 isif_get_pixel_format(void)
+{
+ u32 pixfmt;
+
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ if (isif_cfg.bayer.config_params.compress.alg == ISIF_ALAW ||
+ isif_cfg.bayer.config_params.compress.alg == ISIF_DPCM)
+ pixfmt = V4L2_PIX_FMT_SBGGR8;
+ else
+ pixfmt = V4L2_PIX_FMT_SBGGR16;
+ } else {
+ if (isif_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+ return pixfmt;
+}
+
+static int isif_set_image_window(struct v4l2_rect *win)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ isif_cfg.bayer.win.top = win->top;
+ isif_cfg.bayer.win.left = win->left;
+ isif_cfg.bayer.win.width = win->width;
+ isif_cfg.bayer.win.height = win->height;
+ } else {
+ isif_cfg.ycbcr.win.top = win->top;
+ isif_cfg.ycbcr.win.left = win->left;
+ isif_cfg.ycbcr.win.width = win->width;
+ isif_cfg.ycbcr.win.height = win->height;
+ }
+ return 0;
+}
+
+static void isif_get_image_window(struct v4l2_rect *win)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ *win = isif_cfg.bayer.win;
+ else
+ *win = isif_cfg.ycbcr.win;
+}
+
+static unsigned int isif_get_line_length(void)
+{
+ unsigned int len;
+
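+ /*
+ * The per-line byte count depends on the SDRAM pack mode for raw
+ * Bayer data; YCbCr 4:2:2 is always two bytes per pixel. The result
+ * is aligned to 32 bytes, consistent with the 32-byte line offset
+ * units programmed into HSIZE.
+ */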
+ if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+ if (isif_cfg.data_pack == ISIF_PACK_8BIT)
+ len = ((isif_cfg.bayer.win.width));
+ else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
+ len = (((isif_cfg.bayer.win.width * 2) +
+ (isif_cfg.bayer.win.width >> 2)));
+ else
+ len = (((isif_cfg.bayer.win.width * 2)));
+ } else
+ len = (((isif_cfg.ycbcr.win.width * 2)));
+ return ALIGN(len, 32);
+}
+
+static int isif_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ isif_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ isif_cfg.ycbcr.frm_fmt = frm_fmt;
+ return 0;
+}
+
+static enum ccdc_frmfmt isif_get_frame_format(void)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ return isif_cfg.bayer.frm_fmt;
+ return isif_cfg.ycbcr.frm_fmt;
+}
+
+static int isif_getfid(void)
+{
+ return (regr(MODESET) >> 15) & 0x1;
+}
+
+/* misc operations */
+static void isif_setfbaddr(unsigned long addr)
+{
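+ /*
+ * The frame buffer address is 32-byte aligned (the low 5 bits are
+ * dropped) and split across the CADU (upper) and CADL (lower)
+ * address registers.
+ */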
+ regw((addr >> 21) & 0x07ff, CADU);
+ regw((addr >> 5) & 0x0ffff, CADL);
+}
+
+static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+ isif_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_BT656_10BIT:
+ case VPFE_YCBCR_SYNC_8:
+ isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ break;
+ case VPFE_BT1120:
+ case VPFE_YCBCR_SYNC_16:
+ isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_16BIT;
+ isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ break;
+ case VPFE_RAW_BAYER:
+ isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ break;
+ default:
+ dev_dbg(isif_cfg.dev, "Invalid interface type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This function will configure ISIF for YCbCr parameters. */
+static int isif_config_ycbcr(void)
+{
+ struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
+ u32 modeset = 0, ccdcfg = 0;
+
+ dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
+
+ /* configure pixel format or input mode */
+ modeset = modeset | (params->pix_fmt << ISIF_INPUT_SHIFT) |
+ (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
+ (params->fid_pol << ISIF_FID_POL_SHIFT) |
+ (params->hd_pol << ISIF_HD_POL_SHIFT) |
+ (params->vd_pol << ISIF_VD_POL_SHIFT);
+
+ /* pack the data to 8-bit ISIFCFG */
+ switch (isif_cfg.if_type) {
+ case VPFE_BT656:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ modeset |= (VPFE_PINPOL_NEGATIVE << ISIF_VD_POL_SHIFT);
+ regw(3, REC656IF);
+ ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR;
+ break;
+ case VPFE_BT656_10BIT:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /* setup BT.656, embedded sync */
+ regw(3, REC656IF);
+ /* enable 10 bit mode in ccdcfg */
+ ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR |
+ ISIF_BW656_ENABLE;
+ break;
+ case VPFE_BT1120:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ regw(3, REC656IF);
+ break;
+
+ case VPFE_YCBCR_SYNC_8:
+ ccdcfg |= ISIF_DATA_PACK8;
+ ccdcfg |= ISIF_YCINSWP_YCBCR;
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+ case VPFE_YCBCR_SYNC_16:
+ if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
+ dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* should never come here */
+ dev_dbg(isif_cfg.dev, "Invalid interface type\n");
+ return -EINVAL;
+ }
+
+ regw(modeset, MODESET);
+
+ /* Set up pix order */
+ ccdcfg |= params->pix_order << ISIF_PIX_ORDER_SHIFT;
+
+ regw(ccdcfg, CCDCFG);
+
+ /* configure video window */
+ if ((isif_cfg.if_type == VPFE_BT1120) ||
+ (isif_cfg.if_type == VPFE_YCBCR_SYNC_16))
+ isif_setwin(&params->win, params->frm_fmt, 1);
+ else
+ isif_setwin(&params->win, params->frm_fmt, 2);
+
+ /*
+ * configure the horizontal line offset
+ * this is done by rounding the width up to a multiple of 16 pixels
+ * and multiplying by two to account for y:cb:cr 4:2:2 data
+ */
+ regw(((((params->win.width * 2) + 31) & 0xffffffe0) >> 5), HSIZE);
+
+ /* configure the memory line offset */
+ if ((params->frm_fmt == CCDC_FRMFMT_INTERLACED) &&
+ (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED))
+ /* two fields are interleaved in memory */
+ regw(0x00000249, SDOFST);
+
+ return 0;
+}
+
+static int isif_configure(void)
+{
+ if (isif_cfg.if_type == VPFE_RAW_BAYER)
+ return isif_config_raw();
+ return isif_config_ycbcr();
+}
+
+static int isif_close(struct device *device)
+{
+ /* copy defaults to module params */
+ isif_cfg.bayer.config_params = isif_config_defaults;
+ return 0;
+}
+
+static const struct ccdc_hw_device isif_hw_dev = {
+ .name = "ISIF",
+ .owner = THIS_MODULE,
+ .hw_ops = {
+ .open = isif_open,
+ .close = isif_close,
+ .enable = isif_enable,
+ .enable_out_to_sdram = isif_enable_output_to_sdram,
+ .set_hw_if_params = isif_set_hw_if_params,
+ .configure = isif_configure,
+ .set_buftype = isif_set_buftype,
+ .get_buftype = isif_get_buftype,
+ .enum_pix = isif_enum_pix,
+ .set_pixel_format = isif_set_pixel_format,
+ .get_pixel_format = isif_get_pixel_format,
+ .set_frame_format = isif_set_frame_format,
+ .get_frame_format = isif_get_frame_format,
+ .set_image_window = isif_set_image_window,
+ .get_image_window = isif_get_image_window,
+ .get_line_length = isif_get_line_length,
+ .setfbaddr = isif_setfbaddr,
+ .getfid = isif_getfid,
+ },
+};
+
+static int isif_probe(struct platform_device *pdev)
+{
+ void (*setup_pinmux)(void);
+ struct resource *res;
+ void __iomem *addr;
+ int status = 0, i;
+
+ /* Platform data holds setup_pinmux function ptr */
+ if (!pdev->dev.platform_data)
+ return -ENODEV;
+
+ /*
+ * First try to register with vpfe. If this is not the correct
+ * platform, we don't have to iomap.
+ */
+ status = vpfe_register_ccdc_device(&isif_hw_dev);
+ if (status < 0)
+ return status;
+
+ setup_pinmux = pdev->dev.platform_data;
+ /*
+ * set up the mux configuration for the ccdc, which may differ
+ * between the SoCs using this CCDC
+ */
+ setup_pinmux();
+
+ i = 0;
+ /* Get the ISIF base address, linearization table0 and table1 addr. */
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ status = -ENODEV;
+ goto fail_nobase_res;
+ }
+ res = request_mem_region(res->start, resource_size(res),
+ res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nobase_res;
+ }
+ addr = ioremap_nocache(res->start, resource_size(res));
+ if (!addr) {
+ status = -ENOMEM;
+ goto fail_base_iomap;
+ }
+ switch (i) {
+ case 0:
+ /* ISIF base address */
+ isif_cfg.base_addr = addr;
+ break;
+ case 1:
+ /* ISIF linear tbl0 address */
+ isif_cfg.linear_tbl0_addr = addr;
+ break;
+ default:
+ /* ISIF linear tbl1 address */
+ isif_cfg.linear_tbl1_addr = addr;
+ break;
+ }
+ i++;
+ }
+ isif_cfg.dev = &pdev->dev;
+
+ printk(KERN_NOTICE "%s is registered with vpfe.\n",
+ isif_hw_dev.name);
+ return 0;
+fail_base_iomap:
+ release_mem_region(res->start, resource_size(res));
+ i--;
+fail_nobase_res:
+ if (isif_cfg.base_addr)
+ iounmap(isif_cfg.base_addr);
+ if (isif_cfg.linear_tbl0_addr)
+ iounmap(isif_cfg.linear_tbl0_addr);
+
+ while (i >= 0) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ i--;
+ }
+ vpfe_unregister_ccdc_device(&isif_hw_dev);
+ return status;
+}
+
+static int isif_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ int i = 0;
+
+ iounmap(isif_cfg.base_addr);
+ iounmap(isif_cfg.linear_tbl0_addr);
+ iounmap(isif_cfg.linear_tbl1_addr);
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ i++;
+ }
+ vpfe_unregister_ccdc_device(&isif_hw_dev);
+ return 0;
+}
+
+static struct platform_driver isif_driver = {
+ .driver = {
+ .name = "isif",
+ },
+ .remove = isif_remove,
+ .probe = isif_probe,
+};
+
+module_platform_driver(isif_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/davinci/isif_regs.h b/drivers/media/platform/davinci/isif_regs.h
new file mode 100644
index 000000000..97d3ba161
--- /dev/null
+++ b/drivers/media/platform/davinci/isif_regs.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ISIF_REGS_H
+#define _ISIF_REGS_H
+
+/* ISIF registers relative offsets */
+#define SYNCEN 0x00
+#define MODESET 0x04
+#define HDW 0x08
+#define VDW 0x0c
+#define PPLN 0x10
+#define LPFR 0x14
+#define SPH 0x18
+#define LNH 0x1c
+#define SLV0 0x20
+#define SLV1 0x24
+#define LNV 0x28
+#define CULH 0x2c
+#define CULV 0x30
+#define HSIZE 0x34
+#define SDOFST 0x38
+#define CADU 0x3c
+#define CADL 0x40
+#define LINCFG0 0x44
+#define LINCFG1 0x48
+#define CCOLP 0x4c
+#define CRGAIN 0x50
+#define CGRGAIN 0x54
+#define CGBGAIN 0x58
+#define CBGAIN 0x5c
+#define COFSTA 0x60
+#define FLSHCFG0 0x64
+#define FLSHCFG1 0x68
+#define FLSHCFG2 0x6c
+#define VDINT0 0x70
+#define VDINT1 0x74
+#define VDINT2 0x78
+#define MISC 0x7c
+#define CGAMMAWD 0x80
+#define REC656IF 0x84
+#define CCDCFG 0x88
+/*****************************************************
+* Defect Correction registers
+*****************************************************/
+#define DFCCTL 0x8c
+#define VDFSATLV 0x90
+#define DFCMEMCTL 0x94
+#define DFCMEM0 0x98
+#define DFCMEM1 0x9c
+#define DFCMEM2 0xa0
+#define DFCMEM3 0xa4
+#define DFCMEM4 0xa8
+/****************************************************
+* Black Clamp registers
+****************************************************/
+#define CLAMPCFG 0xac
+#define CLDCOFST 0xb0
+#define CLSV 0xb4
+#define CLHWIN0 0xb8
+#define CLHWIN1 0xbc
+#define CLHWIN2 0xc0
+#define CLVRV 0xc4
+#define CLVWIN0 0xc8
+#define CLVWIN1 0xcc
+#define CLVWIN2 0xd0
+#define CLVWIN3 0xd4
+/****************************************************
+* Lens Shading Correction
+****************************************************/
+#define DATAHOFST 0xd8
+#define DATAVOFST 0xdc
+#define LSCHVAL 0xe0
+#define LSCVVAL 0xe4
+#define TWODLSCCFG 0xe8
+#define TWODLSCOFST 0xec
+#define TWODLSCINI 0xf0
+#define TWODLSCGRBU 0xf4
+#define TWODLSCGRBL 0xf8
+#define TWODLSCGROF 0xfc
+#define TWODLSCORBU 0x100
+#define TWODLSCORBL 0x104
+#define TWODLSCOROF 0x108
+#define TWODLSCIRQEN 0x10c
+#define TWODLSCIRQST 0x110
+/****************************************************
+* Data formatter
+****************************************************/
+#define FMTCFG 0x114
+#define FMTPLEN 0x118
+#define FMTSPH 0x11c
+#define FMTLNH 0x120
+#define FMTSLV 0x124
+#define FMTLNV 0x128
+#define FMTRLEN 0x12c
+#define FMTHCNT 0x130
+#define FMTAPTR_BASE 0x134
+/* Below macro for addresses FMTAPTR0 - FMTAPTR15 */
+#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
+#define FMTPGMVF0 0x174
+#define FMTPGMVF1 0x178
+#define FMTPGMAPU0 0x17c
+#define FMTPGMAPU1 0x180
+#define FMTPGMAPS0 0x184
+#define FMTPGMAPS1 0x188
+#define FMTPGMAPS2 0x18c
+#define FMTPGMAPS3 0x190
+#define FMTPGMAPS4 0x194
+#define FMTPGMAPS5 0x198
+#define FMTPGMAPS6 0x19c
+#define FMTPGMAPS7 0x1a0
+/************************************************
+* Color Space Converter
+************************************************/
+#define CSCCTL 0x1a4
+#define CSCM0 0x1a8
+#define CSCM1 0x1ac
+#define CSCM2 0x1b0
+#define CSCM3 0x1b4
+#define CSCM4 0x1b8
+#define CSCM5 0x1bc
+#define CSCM6 0x1c0
+#define CSCM7 0x1c4
+#define OBWIN0 0x1c8
+#define OBWIN1 0x1cc
+#define OBWIN2 0x1d0
+#define OBWIN3 0x1d4
+#define OBVAL0 0x1d8
+#define OBVAL1 0x1dc
+#define OBVAL2 0x1e0
+#define OBVAL3 0x1e4
+#define OBVAL4 0x1e8
+#define OBVAL5 0x1ec
+#define OBVAL6 0x1f0
+#define OBVAL7 0x1f4
+#define CLKCTL 0x1f8
+
+/* Masks & Shifts below */
+#define START_PX_HOR_MASK 0x7FFF
+#define NUM_PX_HOR_MASK 0x7FFF
+#define START_VER_ONE_MASK 0x7FFF
+#define START_VER_TWO_MASK 0x7FFF
+#define NUM_LINES_VER 0x7FFF
+
+/* gain - offset masks */
+#define GAIN_INTEGER_SHIFT 9
+#define OFFSET_MASK 0xFFF
+#define GAIN_SDRAM_EN_SHIFT 12
+#define GAIN_IPIPE_EN_SHIFT 13
+#define GAIN_H3A_EN_SHIFT 14
+#define OFST_SDRAM_EN_SHIFT 8
+#define OFST_IPIPE_EN_SHIFT 9
+#define OFST_H3A_EN_SHIFT 10
+#define GAIN_OFFSET_EN_MASK 0x7700
+
+/* Culling */
+#define CULL_PAT_EVEN_LINE_SHIFT 8
+
+/* CCDCFG register */
+#define ISIF_YCINSWP_RAW (0x00 << 4)
+#define ISIF_YCINSWP_YCBCR (0x01 << 4)
+#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
+#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
+#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
+#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
+#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
+#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
+#define ISIF_DATA_PACK_MASK 3
+#define ISIF_DATA_PACK16 0
+#define ISIF_DATA_PACK12 1
+#define ISIF_DATA_PACK8 2
+#define ISIF_PIX_ORDER_SHIFT 11
+#define ISIF_BW656_ENABLE (0x01 << 5)
+
+/* MODESET registers */
+#define ISIF_VDHDOUT_INPUT (0x00 << 0)
+#define ISIF_INPUT_SHIFT 12
+#define ISIF_RAW_INPUT_MODE 0
+#define ISIF_FID_POL_SHIFT 4
+#define ISIF_HD_POL_SHIFT 3
+#define ISIF_VD_POL_SHIFT 2
+#define ISIF_DATAPOL_NORMAL 0
+#define ISIF_DATAPOL_SHIFT 6
+#define ISIF_EXWEN_DISABLE 0
+#define ISIF_EXWEN_SHIFT 5
+#define ISIF_FRM_FMT_SHIFT 7
+#define ISIF_DATASFT_SHIFT 8
+#define ISIF_LPF_SHIFT 14
+#define ISIF_LPF_MASK 1
+
+/* GAMMAWD registers */
+#define ISIF_ALAW_GAMMA_WD_MASK 0xF
+#define ISIF_ALAW_GAMMA_WD_SHIFT 1
+#define ISIF_ALAW_ENABLE 1
+#define ISIF_GAMMAWD_CFA_SHIFT 5
+
+/* HSIZE registers */
+#define ISIF_HSIZE_FLIP_MASK 1
+#define ISIF_HSIZE_FLIP_SHIFT 12
+
+/* MISC registers */
+#define ISIF_DPCM_EN_SHIFT 12
+#define ISIF_DPCM_PREDICTOR_SHIFT 13
+
+/* Black clamp related */
+#define ISIF_BC_MODE_COLOR_SHIFT 4
+#define ISIF_HORZ_BC_MODE_SHIFT 1
+#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
+#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
+#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
+#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
+#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
+#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
+
+/* VDFC registers */
+#define ISIF_VDFC_EN_SHIFT 4
+#define ISIF_VDFC_CORR_MOD_SHIFT 5
+#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
+#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
+#define ISIF_VDFC_POS_MASK 0x1FFF
+#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
+
+/* CSC registers */
+#define ISIF_CSC_COEF_INTEG_MASK 7
+#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
+#define ISIF_CSC_COEF_INTEG_SHIFT 5
+#define ISIF_CSCM_MSB_SHIFT 8
+#define ISIF_DF_CSC_SPH_MASK 0x1FFF
+#define ISIF_DF_CSC_LNH_MASK 0x1FFF
+#define ISIF_DF_CSC_SLV_MASK 0x1FFF
+#define ISIF_DF_CSC_LNV_MASK 0x1FFF
+#define ISIF_DF_NUMLINES 0x7FFF
+#define ISIF_DF_NUMPIX 0x1FFF
+
+/* Offsets for LSC/DFC/Gain */
+#define ISIF_DATA_H_OFFSET_MASK 0x1FFF
+#define ISIF_DATA_V_OFFSET_MASK 0x1FFF
+
+/* Linearization */
+#define ISIF_LIN_CORRSFT_SHIFT 4
+#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
+
+
+/* Pattern registers */
+#define ISIF_PG_EN (1 << 3)
+#define ISIF_SEL_PG_SRC (3 << 4)
+#define ISIF_PG_VD_POL_SHIFT 0
+#define ISIF_PG_HD_POL_SHIFT 1
+
+/* SYNCEN register bits */
+#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
+#define ISIF_SYNCEN_WEN_MASK (1 << 1)
+#define ISIF_SYNCEN_WEN_SHIFT 1
+
+#endif
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
new file mode 100644
index 000000000..e45e062f4
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -0,0 +1,871 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpss.h>
+#include <media/davinci/vpbe_venc.h>
+
+#define VPBE_DEFAULT_OUTPUT "Composite"
+#define VPBE_DEFAULT_MODE "ntsc"
+
+static char *def_output = VPBE_DEFAULT_OUTPUT;
+static char *def_mode = VPBE_DEFAULT_MODE;
+static int debug;
+
+module_param(def_output, charp, S_IRUGO);
+module_param(def_mode, charp, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(def_output, "vpbe output name (default:Composite)");
+MODULE_PARM_DESC(def_mode, "vpbe output mode name (default:ntsc)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("TI DMXXX VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/**
+ * vpbe_current_encoder_info - Get config info for current encoder
+ * @vpbe_dev: vpbe device ptr
+ *
+ * Return ptr to current encoder config info
+ */
+static struct encoder_config_info*
+vpbe_current_encoder_info(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int index = vpbe_dev->current_sd_index;
+
+ return ((index == 0) ? &cfg->venc :
+ &cfg->ext_encoders[index-1]);
+}
+
+/**
+ * vpbe_find_encoder_sd_index - Given a name find encoder sd index
+ *
+ * @cfg: ptr to vpbe cfg
+ * @index: index used by application
+ *
+ * Return sd index of the encoder
+ */
+static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
+ int index)
+{
+ char *encoder_name = cfg->outputs[index].subdev_name;
+ int i;
+
+ /* Venc is always first */
+ if (!strcmp(encoder_name, cfg->venc.module_name))
+ return 0;
+
+ for (i = 0; i < cfg->num_ext_encoders; i++) {
+ if (!strcmp(encoder_name,
+ cfg->ext_encoders[i].module_name))
+ return i+1;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_g_cropcap - Get crop capabilities of the display
+ * @vpbe_dev: vpbe device ptr
+ * @cropcap: cropcap is a ptr to struct v4l2_cropcap
+ *
+ * Update the crop capabilities in crop cap for current
+ * mode
+ */
+static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
+ struct v4l2_cropcap *cropcap)
+{
+ if (!cropcap)
+ return -EINVAL;
+ cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.width = vpbe_dev->current_timings.xres;
+ cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ cropcap->defrect = cropcap->bounds;
+
+ return 0;
+}
+
+/**
+ * vpbe_enum_outputs - enumerate outputs
+ * @vpbe_dev: vpbe device ptr
+ * @output: ptr to v4l2_output structure
+ *
+ * Enumerates the outputs available at the vpbe display
+ * returns the status, -EINVAL if end of output list
+ */
+static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
+ struct v4l2_output *output)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ unsigned int temp_index = output->index;
+
+ if (temp_index >= cfg->num_outputs)
+ return -EINVAL;
+
+ *output = cfg->outputs[temp_index].output;
+ output->index = temp_index;
+
+ return 0;
+}
+
+static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
+ int output_index)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = output_index;
+ int i;
+
+ if (!mode)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(mode, var.name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ if (!mode_info)
+ return -EINVAL;
+
+ *mode_info = vpbe_dev->current_timings;
+
+ return 0;
+}
+
+/* Get std by std id */
+static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
+ v4l2_std_id std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if ((var.timings_type & VPBE_ENC_STD) &&
+ (var.std_id & std_id)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_std_info_by_name(struct vpbe_device *vpbe_dev,
+ char *std_name)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(var.name, std_name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_set_output - Set output
+ * @vpbe_dev: vpbe device ptr
+ * @index: index of output
+ *
+ * Set vpbe output to the output specified by the index
+ */
+static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
+{
+ struct encoder_config_info *curr_enc_info =
+ vpbe_current_encoder_info(vpbe_dev);
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct venc_platform_data *venc_device = vpbe_dev->venc_device;
+ int enc_out_index;
+ int sd_index;
+ int ret;
+
+ if (index >= cfg->num_outputs)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ sd_index = vpbe_dev->current_sd_index;
+ enc_out_index = cfg->outputs[index].output.index;
+ /*
+ * Currently we switch the encoder based on the output selected
+ * by the application. If a media controller is implemented later,
+ * an API will be added to set up the link between the venc
+ * and the external encoder. In that case the comparison below will
+ * always match and the encoder will not be switched. But if the
+ * application chose not to use the media controller, this provides
+ * the current way of switching the encoder at the venc output.
+ */
+ if (strcmp(curr_enc_info->module_name,
+ cfg->outputs[index].subdev_name)) {
+ /* Need to switch the encoder at the output */
+ sd_index = vpbe_find_encoder_sd_index(cfg, index);
+ if (sd_index < 0) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = venc_device->setup_if_config(cfg->outputs[index].if_params);
+ if (ret)
+ goto unlock;
+ }
+
+ /* Set output at the encoder */
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_routing, 0, enc_out_index, 0);
+ if (ret)
+ goto unlock;
+
+ /*
+ * It is assumed that the venc or external encoder will set a default
+ * mode in the sub device. For an external encoder or LCD panel output,
+ * we also need to set up the lcd port for the required mode. So set up
+ * the lcd port for the default mode that is configured in the board
+ * arch/arm/mach-davinci/board-dm355-evm.setup file for the external
+ * encoder.
+ */
+ ret = vpbe_get_mode_info(vpbe_dev,
+ cfg->outputs[index].default_mode, index);
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ vpbe_dev->current_sd_index = sd_index;
+ vpbe_dev->current_out_index = index;
+ }
+unlock:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
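+
+/*
+ * Illustrative userspace sketch (not part of this driver): how an
+ * application would exercise the output switching above through the
+ * standard V4L2 ioctls. The device node path and output name are
+ * assumptions for the example only.
+ */
+#if 0
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+
+static int select_output_by_name(const char *name)
+{
+	struct v4l2_output out;
+	int fd, index, ret = -1;
+
+	fd = open("/dev/video2", O_RDWR);	/* hypothetical display node */
+	if (fd < 0)
+		return -1;
+	for (index = 0; ; index++) {
+		memset(&out, 0, sizeof(out));
+		out.index = index;
+		/* enumeration ends with -EINVAL past the last output */
+		if (ioctl(fd, VIDIOC_ENUM_OUTPUT, &out) < 0)
+			break;
+		if (!strcmp((char *)out.name, name)) {
+			ret = ioctl(fd, VIDIOC_S_OUTPUT, &index);
+			break;
+		}
+	}
+	close(fd);
+	return ret;
+}
+#endif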
+
+static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int i;
+
+ for (i = 0; i < cfg->num_outputs; i++) {
+ if (!strcmp(def_output,
+ cfg->outputs[i].output.name)) {
+ int ret = vpbe_set_output(vpbe_dev, i);
+
+ if (!ret)
+ vpbe_dev->current_out_index = i;
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * vpbe_get_output - Get output
+ * @vpbe_dev: vpbe device ptr
+ *
+ * Return the index of the current vpbe output
+ */
+static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
+{
+ return vpbe_dev->current_out_index;
+}
+
+/*
+ * vpbe_s_dv_timings - Set the given preset timings in the encoder
+ *
+ * Sets the timings if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ struct vpbe_output *output = &cfg->outputs[out_index];
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret, i;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_DV_TIMINGS))
+ return -ENODATA;
+
+ for (i = 0; i < output->num_modes; i++) {
+ if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS &&
+ !memcmp(&output->modes[i].dv_timings,
+ dv_timings, sizeof(*dv_timings)))
+ break;
+ }
+ if (i >= output->num_modes)
+ return -EINVAL;
+ vpbe_dev->current_timings = output->modes[i];
+ mutex_lock(&vpbe_dev->lock);
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_dv_timings, dv_timings);
+ if (!ret && vpbe_dev->amp) {
+ /* Call amplifier subdevice */
+ ret = v4l2_subdev_call(vpbe_dev->amp, video,
+ s_dv_timings, dv_timings);
+ }
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/*
+ * vpbe_g_dv_timings - Get the timings in the current encoder
+ *
+ * Get the timings in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_dv_timings(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_DV_TIMINGS))
+ return -ENODATA;
+
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_TIMINGS) {
+ *dv_timings = vpbe_dev->current_timings.dv_timings;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * vpbe_enum_dv_timings - Enumerate the dv timings in the current encoder
+ *
+ * Enumerate the dv timings supported by the current encoder. Return the
+ * status: 0 - success, -EINVAL on error
+ */
+static int vpbe_enum_dv_timings(struct vpbe_device *vpbe_dev,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ struct vpbe_output *output = &cfg->outputs[out_index];
+ int j = 0;
+ int i;
+
+ if (!(output->output.capabilities & V4L2_OUT_CAP_DV_TIMINGS))
+ return -ENODATA;
+
+ for (i = 0; i < output->num_modes; i++) {
+ if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS) {
+ if (j == timings->index)
+ break;
+ j++;
+ }
+ }
+
+ if (i == output->num_modes)
+ return -EINVAL;
+ timings->timings = output->modes[i].dv_timings;
+ return 0;
+}
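+
+/*
+ * Illustrative userspace sketch (not part of this driver): how the DV
+ * timings enumerated above are typically listed and applied through the
+ * standard V4L2 ioctls; fd is assumed to be an open vpbe display node.
+ */
+#if 0
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+
+static int apply_first_dv_timings(int fd)
+{
+	struct v4l2_enum_dv_timings et;
+
+	memset(&et, 0, sizeof(et));
+	et.index = 0;	/* enumeration ends with -EINVAL past the last entry */
+	if (ioctl(fd, VIDIOC_ENUM_DV_TIMINGS, &et) < 0)
+		return -1;
+	/* program the first supported timings into the encoder */
+	return ioctl(fd, VIDIOC_S_DV_TIMINGS, &et.timings);
+}
+#endif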
+
+/*
+ * vpbe_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_STD))
+ return -ENODATA;
+
+ ret = vpbe_get_std_info(vpbe_dev, std_id);
+ if (ret)
+ return ret;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_std_output, std_id);
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/*
+ * vpbe_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+ struct vpbe_enc_mode_info *cur_timings = &vpbe_dev->current_timings;
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+
+ if (!(cfg->outputs[out_index].output.capabilities & V4L2_OUT_CAP_STD))
+ return -ENODATA;
+
+ if (cur_timings->timings_type & VPBE_ENC_STD) {
+ *std_id = cur_timings->std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * vpbe_set_mode - Set mode in the current encoder using mode info
+ *
+ * Use the mode string to decide what timings to set in the encoder.
+ * This is typically useful when the fbset command is used to change the
+ * current timings by specifying a string to indicate the timings.
+ */
+static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ struct vpbe_enc_mode_info *preset_mode = NULL;
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct v4l2_dv_timings dv_timings;
+ struct osd_state *osd_device;
+ int out_index = vpbe_dev->current_out_index;
+ int i;
+
+ if (!mode_info || !mode_info->name)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
+ if (!strcmp(mode_info->name,
+ cfg->outputs[out_index].modes[i].name)) {
+ preset_mode = &cfg->outputs[out_index].modes[i];
+ /*
+ * It may be one of the 3 timings types. Check and
+ * invoke the right API
+ */
+ if (preset_mode->timings_type & VPBE_ENC_STD)
+ return vpbe_s_std(vpbe_dev,
+ preset_mode->std_id);
+ if (preset_mode->timings_type &
+ VPBE_ENC_DV_TIMINGS) {
+ dv_timings =
+ preset_mode->dv_timings;
+ return vpbe_s_dv_timings(vpbe_dev, &dv_timings);
+ }
+ }
+ }
+
+ /* Only custom timing should reach here */
+ if (!preset_mode)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ osd_device = vpbe_dev->osd_device;
+ vpbe_dev->current_timings = *preset_mode;
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+
+ mutex_unlock(&vpbe_dev->lock);
+ return 0;
+}
+
+static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
+{
+ int ret;
+
+ ret = vpbe_get_std_info_by_name(vpbe_dev, def_mode);
+ if (ret)
+ return ret;
+
+ /* set the default mode in the encoder */
+ return vpbe_set_mode(vpbe_dev, &vpbe_dev->current_timings);
+}
+
+static int platform_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_device *vpbe_dev = data;
+
+ if (strstr(pdev->name, "vpbe-osd"))
+ vpbe_dev->osd_device = platform_get_drvdata(pdev);
+ if (strstr(pdev->name, "vpbe-venc"))
+ vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
+
+ return 0;
+}
+
+/**
+ * vpbe_initialize() - Initialize the vpbe display controller
+ * @dev: Master and slave device ptr
+ * @vpbe_dev: vpbe device ptr
+ *
+ * The master frame buffer device driver calls this to initialize the vpbe
+ * display controller. This registers the v4l2 device and the sub
+ * devices and sets a current encoder sub device for display. The v4l2
+ * display device driver is the master and the frame buffer display device
+ * driver is the slave. The frame buffer display driver checks the
+ * initialized flag during probe and exits if not initialized. Returns status.
+ */
+static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ struct encoder_config_info *enc_info;
+ struct amp_config_info *amp_info;
+ struct v4l2_subdev **enc_subdev;
+ struct osd_state *osd_device;
+ struct i2c_adapter *i2c_adap;
+ int num_encoders;
+ int ret = 0;
+ int err;
+ int i;
+
+ /*
+ * v4l2 and FBDev frame buffer devices get the vpbe_dev pointer
+ * from the platform device by iterating over the platform devices and
+ * matching on the device name
+ */
+ if (!vpbe_dev || !dev) {
+ printk(KERN_ERR "Null device pointers.\n");
+ return -ENODEV;
+ }
+
+ if (vpbe_dev->initialized)
+ return 0;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ /* We have dac clock available for platform */
+ vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
+ if (IS_ERR(vpbe_dev->dac_clk)) {
+ ret = PTR_ERR(vpbe_dev->dac_clk);
+ goto fail_mutex_unlock;
+ }
+ if (clk_prepare_enable(vpbe_dev->dac_clk)) {
+ ret = -ENODEV;
+ clk_put(vpbe_dev->dac_clk);
+ goto fail_mutex_unlock;
+ }
+ }
+
+ /* first enable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
+
+ /* First register a v4l2 device */
+ ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(dev->driver,
+ "Unable to register v4l2 device.\n");
+ goto fail_clk_put;
+ }
+ v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
+
+ err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
+ platform_device_get);
+ if (err < 0) {
+ ret = err;
+ goto fail_dev_unregister;
+ }
+
+ vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
+ vpbe_dev->cfg->venc.module_name);
+ /* register venc sub device */
+ if (!vpbe_dev->venc) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "vpbe unable to init venc sub device\n");
+ ret = -ENODEV;
+ goto fail_dev_unregister;
+ }
+ /* initialize osd device */
+ osd_device = vpbe_dev->osd_device;
+ if (osd_device->ops.initialize) {
+ err = osd_device->ops.initialize(osd_device);
+ if (err) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to initialize the OSD device");
+ ret = -ENOMEM;
+ goto fail_dev_unregister;
+ }
+ }
+
+ /*
+ * Register any external encoders that are configured. At index 0 we
+ * store venc sd index.
+ */
+ num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
+ vpbe_dev->encoders = kmalloc_array(num_encoders,
+ sizeof(*vpbe_dev->encoders),
+ GFP_KERNEL);
+ if (!vpbe_dev->encoders) {
+ ret = -ENOMEM;
+ goto fail_dev_unregister;
+ }
+
+ i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
+ for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) {
+ if (i == 0) {
+ /* venc is at index 0 */
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = vpbe_dev->venc;
+ continue;
+ }
+ enc_info = &vpbe_dev->cfg->ext_encoders[i];
+ if (enc_info->is_i2c) {
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = v4l2_i2c_new_subdev_board(
+ &vpbe_dev->v4l2_dev, i2c_adap,
+ &enc_info->board_info, NULL);
+ if (*enc_subdev)
+ v4l2_info(&vpbe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ enc_info->module_name);
+ else {
+ v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s failed to register",
+ enc_info->module_name);
+ ret = -ENODEV;
+ goto fail_kfree_encoders;
+ }
+ } else
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders currently not supported");
+ }
+ /* Add amplifier subdevice for dm365 */
+ if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
+ vpbe_dev->cfg->amp) {
+ amp_info = vpbe_dev->cfg->amp;
+ if (amp_info->is_i2c) {
+ vpbe_dev->amp = v4l2_i2c_new_subdev_board(
+ &vpbe_dev->v4l2_dev, i2c_adap,
+ &amp_info->board_info, NULL);
+ if (!vpbe_dev->amp) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "amplifier %s failed to register",
+ amp_info->module_name);
+ ret = -ENODEV;
+ goto fail_kfree_encoders;
+ }
+ v4l2_info(&vpbe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ amp_info->module_name);
+ } else {
+ vpbe_dev->amp = NULL;
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers currently not supported");
+ }
+ } else {
+ vpbe_dev->amp = NULL;
+ }
+
+ /* set the current encoder and output to that of venc by default */
+ vpbe_dev->current_sd_index = 0;
+ vpbe_dev->current_out_index = 0;
+
+ mutex_unlock(&vpbe_dev->lock);
+
+ printk(KERN_NOTICE "Setting default output to %s\n", def_output);
+ ret = vpbe_set_default_output(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
+ def_output);
+ goto fail_kfree_amp;
+ }
+
+ printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
+ ret = vpbe_set_default_mode(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
+ def_mode);
+ goto fail_kfree_amp;
+ }
+ vpbe_dev->initialized = 1;
+ /* TBD handling of bootargs for default output and mode */
+ return 0;
+
+fail_kfree_amp:
+ mutex_lock(&vpbe_dev->lock);
+ kfree(vpbe_dev->amp);
+fail_kfree_encoders:
+ kfree(vpbe_dev->encoders);
+fail_dev_unregister:
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+fail_clk_put:
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
+ clk_put(vpbe_dev->dac_clk);
+ }
+fail_mutex_unlock:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
+
+/**
+ * vpbe_deinitialize() - de-initialize the vpbe display controller
+ * @dev: Master and slave device ptr
+ * @vpbe_dev: vpbe device ptr
+ *
+ * The vpbe master and slave frame buffer devices call this to de-initialize
+ * the display controller. It is called when the master and slave device
+ * driver modules are removed and no longer require the display controller.
+ */
+static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
+ clk_put(vpbe_dev->dac_clk);
+ }
+
+ kfree(vpbe_dev->amp);
+ kfree(vpbe_dev->encoders);
+ vpbe_dev->initialized = 0;
+ /* disable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
+}
+
+static const struct vpbe_device_ops vpbe_dev_ops = {
+ .g_cropcap = vpbe_g_cropcap,
+ .enum_outputs = vpbe_enum_outputs,
+ .set_output = vpbe_set_output,
+ .get_output = vpbe_get_output,
+ .s_dv_timings = vpbe_s_dv_timings,
+ .g_dv_timings = vpbe_g_dv_timings,
+ .enum_dv_timings = vpbe_enum_dv_timings,
+ .s_std = vpbe_s_std,
+ .g_std = vpbe_g_std,
+ .initialize = vpbe_initialize,
+ .deinitialize = vpbe_deinitialize,
+ .get_mode_info = vpbe_get_current_mode_info,
+ .set_mode = vpbe_set_mode,
+};
+
+static int vpbe_probe(struct platform_device *pdev)
+{
+ struct vpbe_device *vpbe_dev;
+ struct vpbe_config *cfg;
+
+ if (!pdev->dev.platform_data) {
+ v4l2_err(pdev->dev.driver, "No platform data\n");
+ return -ENODEV;
+ }
+ cfg = pdev->dev.platform_data;
+
+ if (!cfg->module_name[0] ||
+ !cfg->osd.module_name[0] ||
+ !cfg->venc.module_name[0]) {
+ v4l2_err(pdev->dev.driver, "vpbe display module names not defined\n");
+ return -EINVAL;
+ }
+
+ vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
+ if (!vpbe_dev)
+ return -ENOMEM;
+
+ vpbe_dev->cfg = cfg;
+ vpbe_dev->ops = vpbe_dev_ops;
+ vpbe_dev->pdev = &pdev->dev;
+
+ if (cfg->outputs->num_modes > 0)
+ vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
+ else {
+ kfree(vpbe_dev);
+ return -ENODEV;
+ }
+
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpbe_dev);
+ mutex_init(&vpbe_dev->lock);
+
+ return 0;
+}
+
+static int vpbe_remove(struct platform_device *device)
+{
+ struct vpbe_device *vpbe_dev = platform_get_drvdata(device);
+
+ kfree(vpbe_dev);
+
+ return 0;
+}
+
+static struct platform_driver vpbe_driver = {
+ .driver = {
+ .name = "vpbe_controller",
+ },
+ .probe = vpbe_probe,
+ .remove = vpbe_remove,
+};
+
+module_platform_driver(vpbe_driver);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
new file mode 100644
index 000000000..2a073a6d8
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -0,0 +1,1534 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include <mach/cputype.h>
+#endif
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_display.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpbe_osd.h>
+#include "vpbe_venc_regs.h"
+
+#define VPBE_DISPLAY_DRIVER "vpbe-v4l2"
+
+static int debug;
+
+#define VPBE_DEFAULT_NUM_BUFS 3
+
+module_param(debug, int, 0644);
+
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer);
+
+static int venc_is_second_field(struct vpbe_display *disp_dev)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int ret, val;
+
+ ret = v4l2_subdev_call(vpbe_dev->venc,
+ core,
+ command,
+ VENC_GET_FLD,
+ &val);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in getting Field ID 0\n");
+ return 1;
+ }
+ return val;
+}
+
+static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ if (layer->cur_frm == layer->next_frm)
+ return;
+
+ layer->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ /* Make cur_frm pointing to next_frm */
+ layer->cur_frm = layer->next_frm;
+}
+
+static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ struct osd_state *osd_device = disp_obj->osd_device;
+ unsigned long addr;
+
+ spin_lock(&disp_obj->dma_queue_lock);
+ if (list_empty(&layer->dma_queue) ||
+ (layer->cur_frm != layer->next_frm)) {
+ spin_unlock(&disp_obj->dma_queue_lock);
+ return;
+ }
+ /*
+ * One field has been displayed. Configure the next frame if
+ * it is available, otherwise hold on to the current frame.
+ * Get the next buffer from the buffer queue.
+ */
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ /* Remove that from the buffer queue */
+ list_del(&layer->next_frm->list);
+ spin_unlock(&disp_obj->dma_queue_lock);
+ /* Mark state of the frame to active */
+ layer->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb.vb2_buf, 0);
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_obj->cbcr_ofst);
+}
+
+/* interrupt service routine */
+static irqreturn_t venc_isr(int irq, void *arg)
+{
+ struct vpbe_display *disp_dev = (struct vpbe_display *)arg;
+ struct vpbe_layer *layer;
+ static unsigned last_event;
+ unsigned event = 0;
+ int fid;
+ int i;
+
+ if (!arg || !disp_dev->dev[0])
+ return IRQ_HANDLED;
+
+ if (venc_is_second_field(disp_dev))
+ event |= VENC_SECOND_FIELD;
+ else
+ event |= VENC_FIRST_FIELD;
+
+ if (event == (last_event & ~VENC_END_OF_FRAME)) {
+ /*
+ * If the display is non-interlaced, then we need to flag the
+ * end-of-frame event at every interrupt regardless of the
+ * value of the FIDST bit. We can conclude that the display is
+ * non-interlaced if the value of the FIDST bit is unchanged
+ * from the previous interrupt.
+ */
+ event |= VENC_END_OF_FRAME;
+ } else if (event == VENC_SECOND_FIELD) {
+ /* end-of-frame for interlaced display */
+ event |= VENC_END_OF_FRAME;
+ }
+ last_event = event;
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ layer = disp_dev->dev[i];
+
+ if (!vb2_start_streaming_called(&layer->buffer_queue))
+ continue;
+
+ if (layer->layer_first_int) {
+ layer->layer_first_int = 0;
+ continue;
+ }
+ /* Check the field format */
+ if ((V4L2_FIELD_NONE == layer->pix_fmt.field) &&
+ (event & VENC_END_OF_FRAME)) {
+ /* Progressive mode */
+
+ vpbe_isr_even_field(disp_dev, layer);
+ vpbe_isr_odd_field(disp_dev, layer);
+ } else {
+ /* Interlaced mode */
+
+ layer->field_id ^= 1;
+ if (event & VENC_FIRST_FIELD)
+ fid = 0;
+ else
+ fid = 1;
+
+ /*
+ * If the field id does not match the stored
+ * field id
+ */
+ if (fid != layer->field_id) {
+ /* Make them in sync */
+ layer->field_id = fid;
+ continue;
+ }
+ /*
+ * Device field id and local field id are
+ * in sync. If this is the even field
+ */
+ if (0 == fid)
+ vpbe_isr_even_field(disp_dev, layer);
+ else /* odd field */
+ vpbe_isr_odd_field(disp_dev, layer);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * vpbe_buffer_prepare()
+ * This is the callback called from the vb2_qbuf() path. It sets the
+ * plane payload and checks that the buffer's DMA address meets the
+ * hardware alignment requirement.
+ */
+static int vpbe_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct vpbe_layer *layer = vb2_get_drv_priv(q);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ unsigned long addr;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_prepare\n");
+
+ vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (!IS_ALIGNED(addr, 8)) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "buffer_prepare:offset is not aligned to 32 bytes\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * vpbe_buffer_queue_setup()
+ * This function sets up the number of buffers and the plane sizes
+ */
+static int
+vpbe_buffer_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+
+{
+ /* Get the layer object */
+ struct vpbe_layer *layer = vb2_get_drv_priv(vq);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
+
+ /* Ensure a minimum of VPBE_DEFAULT_NUM_BUFS buffers are requested */
+ if (vq->num_buffers + *nbuffers < VPBE_DEFAULT_NUM_BUFS)
+ *nbuffers = VPBE_DEFAULT_NUM_BUFS - vq->num_buffers;
+
+ if (*nplanes)
+ return sizes[0] < layer->pix_fmt.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = layer->pix_fmt.sizeimage;
+
+ return 0;
+}
+
+/*
+ * vpbe_buffer_queue()
+ * This function adds the buffer to DMA queue
+ */
+static void vpbe_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ /* Get the buffer object from the vb2 buffer */
+ struct vpbe_disp_buffer *buf = container_of(vbuf,
+ struct vpbe_disp_buffer, vb);
+ struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpbe_display *disp = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_queue\n");
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&disp->dma_queue_lock, flags);
+ list_add_tail(&buf->list, &layer->dma_queue);
+ spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
+}
+
+static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpbe_layer *layer = vb2_get_drv_priv(vq);
+ struct osd_state *osd_device = layer->disp_dev->osd_device;
+ int ret;
+
+ osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
+
+ /* Get the next frame from the buffer queue */
+ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&layer->cur_frm->list);
+ /* Mark state of the current frame to active */
+ layer->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize field_id and started member */
+ layer->field_id = 0;
+
+ /* Set parameters in OSD and VENC */
+ ret = vpbe_set_osd_display_params(layer->disp_dev, layer);
+ if (ret < 0) {
+ struct vpbe_disp_buffer *buf, *tmp;
+
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+ }
+
+ /*
+ * if request format is yuv420 semiplanar, need to
+ * enable both video windows
+ */
+ layer->layer_first_int = 1;
+
+ return ret;
+}
+
+static void vpbe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpbe_layer *layer = vb2_get_drv_priv(vq);
+ struct osd_state *osd_device = layer->disp_dev->osd_device;
+ struct vpbe_display *disp = layer->disp_dev;
+ unsigned long flags;
+
+ if (!vb2_is_streaming(vq))
+ return;
+
+ osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
+
+ /* release all active buffers */
+ spin_lock_irqsave(&disp->dma_queue_lock, flags);
+ if (layer->cur_frm == layer->next_frm) {
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ } else {
+ if (layer->cur_frm)
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ if (layer->next_frm)
+ vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&layer->dma_queue)) {
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ list_del(&layer->next_frm->list);
+ vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
+}
+
+static const struct vb2_ops video_qops = {
+ .queue_setup = vpbe_buffer_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = vpbe_buffer_prepare,
+ .start_streaming = vpbe_start_streaming,
+ .stop_streaming = vpbe_stop_streaming,
+ .buf_queue = vpbe_buffer_queue,
+};
+
+static struct vpbe_layer *
+_vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+ enum vpbe_display_device_id thiswin, otherwin;
+ thiswin = layer->device_id;
+
+ otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ?
+ VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0;
+ return disp_dev->dev[otherwin];
+}
+
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ unsigned long addr;
+ int ret;
+
+ addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb.vb2_buf, 0);
+ /* Set address in the display registers */
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_dev->cbcr_ofst);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ layer->layer_info.id, 0);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 0\n");
+ return -1;
+ }
+
+ /* Enable the window */
+ layer->layer_info.enable = 1;
+ if (cfg->pixfmt == PIXFMT_NV12) {
+ struct vpbe_layer *otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ otherlayer->layer_info.id, 1);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 1\n");
+ return -1;
+ }
+ otherlayer->layer_info.enable = 1;
+ }
+ return 0;
+}
+
+static void
+vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int expected_xsize, int expected_ysize)
+{
+ struct display_layer_info *layer_info = &layer->layer_info;
+ struct v4l2_pix_format *pixfmt = &layer->pix_fmt;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int calculated_xsize;
+ int h_exp = 0;
+ int v_exp = 0;
+ int h_scale;
+ int v_scale;
+
+ v4l2_std_id standard_id = vpbe_dev->current_timings.std_id;
+
+ /*
+ * The application initially sets the image format. The current display
+ * size is obtained from the vpbe display controller. expected_xsize
+ * and expected_ysize are set through the S_SELECTION ioctl. Based on
+ * this, the driver calculates the scale factors for the vertical and
+ * horizontal directions so that the image is displayed scaled
+ * and expanded. The application uses expansion to display the image
+ * with square pixels; otherwise it is displayed using the display's
+ * pixel aspect ratio. It is expected that the application chooses
+ * the crop coordinates for cropped or scaled display. If the crop
+ * size is less than the image size, it is displayed cropped;
+ * otherwise it is displayed scaled and/or expanded.
+ *
+ * To begin with, set the crop window the same as expected. Later we
+ * will override it with the scaled window size.
+ */
+
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+ layer_info->h_zoom = ZOOM_X1; /* no horizontal zoom */
+ layer_info->v_zoom = ZOOM_X1; /* no vertical zoom */
+ layer_info->h_exp = H_EXP_OFF; /* no horizontal expansion */
+ layer_info->v_exp = V_EXP_OFF; /* no vertical expansion */
+
+ if (pixfmt->width < expected_xsize) {
+ h_scale = vpbe_dev->current_timings.xres / pixfmt->width;
+ if (h_scale < 2)
+ h_scale = 1;
+ else if (h_scale >= 4)
+ h_scale = 4;
+ else
+ h_scale = 2;
+ cfg->xsize *= h_scale;
+ if (cfg->xsize < expected_xsize) {
+ if ((standard_id & V4L2_STD_525_60) ||
+ (standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->xsize *
+ VPBE_DISPLAY_H_EXP_RATIO_N) /
+ VPBE_DISPLAY_H_EXP_RATIO_D;
+ if (calculated_xsize <= expected_xsize) {
+ h_exp = 1;
+ cfg->xsize = calculated_xsize;
+ }
+ }
+ }
+ if (h_scale == 2)
+ layer_info->h_zoom = ZOOM_X2;
+ else if (h_scale == 4)
+ layer_info->h_zoom = ZOOM_X4;
+ if (h_exp)
+ layer_info->h_exp = H_EXP_9_OVER_8;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->xsize = expected_xsize;
+ }
+
+ if (pixfmt->height < expected_ysize) {
+ v_scale = expected_ysize / pixfmt->height;
+ if (v_scale < 2)
+ v_scale = 1;
+ else if (v_scale >= 4)
+ v_scale = 4;
+ else
+ v_scale = 2;
+ cfg->ysize *= v_scale;
+ if (cfg->ysize < expected_ysize) {
+ if ((standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->ysize *
+ VPBE_DISPLAY_V_EXP_RATIO_N) /
+ VPBE_DISPLAY_V_EXP_RATIO_D;
+ if (calculated_xsize <= expected_ysize) {
+ v_exp = 1;
+ cfg->ysize = calculated_xsize;
+ }
+ }
+ }
+ if (v_scale == 2)
+ layer_info->v_zoom = ZOOM_X2;
+ else if (v_scale == 4)
+ layer_info->v_zoom = ZOOM_X4;
+ if (v_exp)
+ layer_info->v_exp = V_EXP_6_OVER_5;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->ysize = expected_ysize;
+ }
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "crop display xsize = %d, ysize = %d\n",
+ cfg->xsize, cfg->ysize);
+}
+
+static void vpbe_disp_adj_position(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int top, int left)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ cfg->xpos = min((unsigned int)left,
+ vpbe_dev->current_timings.xres - cfg->xsize);
+ cfg->ypos = min((unsigned int)top,
+ vpbe_dev->current_timings.yres - cfg->ysize);
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "new xpos = %d, ypos = %d\n",
+ cfg->xpos, cfg->ypos);
+}
+
+static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev,
+ struct v4l2_rect *c)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ if ((c->width == 0) ||
+ ((c->width + c->left) > vpbe_dev->current_timings.xres))
+ c->width = vpbe_dev->current_timings.xres - c->left;
+
+ if ((c->height == 0) || ((c->height + c->top) >
+ vpbe_dev->current_timings.yres))
+ c->height = vpbe_dev->current_timings.yres - c->top;
+
+ /* window height must be even for interlaced display */
+ if (vpbe_dev->current_timings.interlaced)
+ c->height &= (~0x01);
+
+}
+
+/*
+ * vpbe_try_format()
+ * If the user application provides width and height and has bytesperline set
+ * to zero, the driver calculates bytesperline and sizeimage based on hardware
+ * limits.
+ */
+static int vpbe_try_format(struct vpbe_display *disp_dev,
+ struct v4l2_pix_format *pixfmt, int check)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int min_height = 1;
+ int min_width = 32;
+ int max_height;
+ int max_width;
+ int bpp;
+
+ if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) &&
+ (pixfmt->pixelformat != V4L2_PIX_FMT_NV12))
+ /* choose default as V4L2_PIX_FMT_UYVY */
+ pixfmt->pixelformat = V4L2_PIX_FMT_UYVY;
+
+ /* Check the field format */
+ if ((pixfmt->field != V4L2_FIELD_INTERLACED) &&
+ (pixfmt->field != V4L2_FIELD_NONE)) {
+ if (vpbe_dev->current_timings.interlaced)
+ pixfmt->field = V4L2_FIELD_INTERLACED;
+ else
+ pixfmt->field = V4L2_FIELD_NONE;
+ }
+
+ if (pixfmt->field == V4L2_FIELD_INTERLACED)
+ min_height = 2;
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ bpp = 1;
+ else
+ bpp = 2;
+
+ max_width = vpbe_dev->current_timings.xres;
+ max_height = vpbe_dev->current_timings.yres;
+
+ min_width /= bpp;
+
+ if (!pixfmt->width || (pixfmt->width < min_width) ||
+ (pixfmt->width > max_width)) {
+ pixfmt->width = vpbe_dev->current_timings.xres;
+ }
+
+ if (!pixfmt->height || (pixfmt->height < min_height) ||
+ (pixfmt->height > max_height)) {
+ pixfmt->height = vpbe_dev->current_timings.yres;
+ }
+
+ if (pixfmt->bytesperline < (pixfmt->width * bpp))
+ pixfmt->bytesperline = pixfmt->width * bpp;
+
+ /* Make the bytesperline 32 byte aligned */
+ pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31);
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height +
+ (pixfmt->bytesperline * pixfmt->height >> 1);
+ else
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ return 0;
+}
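+
+/*
+ * Illustrative worked example (not part of this driver) of the size
+ * arithmetic above, assuming a 720x480 NV12 request: bpp = 1, so
+ * bytesperline = (720 * 1 + 31) & ~31 = 736, and
+ * sizeimage = 736 * 480 + (736 * 480) / 2 = 529920 bytes.
+ */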
+
+static int vpbe_display_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ snprintf(cap->driver, sizeof(cap->driver), "%s",
+ dev_name(vpbe_dev->pdev));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpbe_dev->pdev));
+ strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vpbe_display_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct v4l2_rect rect = sel->r;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_SELECTION, layer id = %d\n", layer->device_id);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ if (rect.top < 0)
+ rect.top = 0;
+ if (rect.left < 0)
+ rect.left = 0;
+
+ vpbe_disp_check_window_params(disp_dev, &rect);
+
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ vpbe_disp_calculate_scale_factor(disp_dev, layer,
+ rect.width,
+ rect.height);
+ vpbe_disp_adj_position(disp_dev, layer, rect.top,
+ rect.left);
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set layer config:\n");
+ return -EINVAL;
+ }
+
+ /* apply zooming and h or v expansion */
+ osd_device->ops.set_zoom(osd_device,
+ layer->layer_info.id,
+ layer->layer_info.h_zoom,
+ layer->layer_info.v_zoom);
+ ret = osd_device->ops.set_vid_expansion(osd_device,
+ layer->layer_info.h_exp,
+ layer->layer_info.v_exp);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set vid expansion:\n");
+ return -EINVAL;
+ }
+
+ if ((layer->layer_info.h_zoom != ZOOM_X1) ||
+ (layer->layer_info.v_zoom != ZOOM_X1) ||
+ (layer->layer_info.h_exp != H_EXP_OFF) ||
+ (layer->layer_info.v_exp != V_EXP_OFF))
+ /* Enable expansion filter */
+ osd_device->ops.set_interpolation_filter(osd_device, 1);
+ else
+ osd_device->ops.set_interpolation_filter(osd_device, 0);
+
+ sel->r = rect;
+ return 0;
+}
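+
+/*
+ * Illustrative userspace sketch (not part of this driver): requesting the
+ * output crop handled above; the 640x480 rectangle is an arbitrary example
+ * and fd is assumed to be an open vpbe display node.
+ */
+#if 0
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+
+static int set_output_crop(int fd)
+{
+	struct v4l2_selection sel;
+
+	memset(&sel, 0, sizeof(sel));
+	sel.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	sel.target = V4L2_SEL_TGT_CROP;
+	sel.r.left = 0;
+	sel.r.top = 0;
+	sel.r.width = 640;
+	sel.r.height = 480;
+	/* the driver may clamp sel.r to the current display timings */
+	return ioctl(fd, VIDIOC_S_SELECTION, &sel);
+}
+#endif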
+
+static int vpbe_display_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = layer->disp_dev->osd_device;
+ struct v4l2_rect *rect = &sel->r;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_SELECTION, layer id = %d\n",
+ layer->device_id);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ rect->top = cfg->ypos;
+ rect->left = cfg->xpos;
+ rect->width = cfg->xsize;
+ rect->height = cfg->ysize;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = vpbe_dev->current_timings.xres;
+ rect->height = vpbe_dev->current_timings.yres;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cropcap)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
+
+ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
+ return 0;
+}
+
+static int vpbe_display_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_FMT, layer id = %d\n",
+ layer->device_id);
+
+ /* If the buffer type is not video output, return an error */
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Fill in the information about format */
+ fmt->fmt.pix = layer->pix_fmt;
+
+ return 0;
+}
+
+static int vpbe_display_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ unsigned int index = 0;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_ENUM_FMT, layer id = %d\n",
+ layer->device_id);
+ if (fmt->index > 1) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n");
+ return -EINVAL;
+ }
+
+ /* Fill in the information about format */
+ index = fmt->index;
+ memset(fmt, 0, sizeof(*fmt));
+ fmt->index = index;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ if (index == 0) {
+ strcpy(fmt->description, "YUV 4:2:2 - UYVY");
+ fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+ } else {
+ strcpy(fmt->description, "Y/CbCr 4:2:0");
+ fmt->pixelformat = V4L2_PIX_FMT_NV12;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_FMT, layer id = %d\n",
+ layer->device_id);
+
+ if (vb2_is_busy(&layer->buffer_queue))
+ return -EBUSY;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Check for valid pixel format */
+ ret = vpbe_try_format(disp_dev, pixfmt, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * YUV420 is requested, check availability of the
+ * other video window
+ */
+
+ layer->pix_fmt = *pixfmt;
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+ struct vpbe_layer *otherlayer;
+
+ otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+ /* if other layer is available, only
+ * claim it, do not configure it
+ */
+ ret = osd_device->ops.request_layer(osd_device,
+ otherlayer->layer_info.id);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Display Manager failed to allocate layer\n");
+ return -EBUSY;
+ }
+ }
+
+ /* Get osd layer config */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ /* Update the layer configuration from the pixel format */
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+ cfg->line_length = pixfmt->bytesperline;
+ cfg->ypos = 0;
+ cfg->xpos = 0;
+ cfg->interlaced = vpbe_dev->current_timings.interlaced;
+
+ if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat)
+ cfg->pixfmt = PIXFMT_YCBCRI;
+
+ /* Change the default pixel format for both video windows */
+ if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) {
+ struct vpbe_layer *otherlayer;
+ cfg->pixfmt = PIXFMT_NV12;
+ otherlayer = _vpbe_display_get_other_win_layer(disp_dev,
+ layer);
+ otherlayer->layer_info.config.pixfmt = PIXFMT_NV12;
+ }
+
+ /* Set the layer config in the osd window */
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in S_FMT params:\n");
+ return -EINVAL;
+ }
+
+ /* Readback and fill the local copy of current pix format */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ return 0;
+}
+
+static int vpbe_display_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+
+ /* Check for valid field format */
+ return vpbe_try_format(disp_dev, pixfmt, 0);
+
+}
+
+/*
+ * vpbe_display_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_display_s_std(struct file *file, void *priv,
+ v4l2_std_id std_id)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
+
+ if (vb2_is_busy(&layer->buffer_queue))
+ return -EBUSY;
+
+ if (vpbe_dev->ops.s_std) {
+ ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set standard for sub devices\n");
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vpbe_display_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_display_g_std(struct file *file, void *priv,
+ v4l2_std_id *std_id)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
+
+ /* Get the standard from the current encoder */
+ if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
+ *std_id = vpbe_dev->current_timings.std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * vpbe_display_enum_output - enumerate outputs
+ *
+ * Enumerates the outputs available at the vpbe display.
+ * Returns the status: 0 on success, -EINVAL at the end of the output list.
+ */
+static int vpbe_display_enum_output(struct file *file, void *priv,
+ struct v4l2_output *output)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
+
+ /* Enumerate outputs */
+ if (!vpbe_dev->ops.enum_outputs)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
+ if (ret) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "Failed to enumerate outputs\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vpbe_display_s_output - Set output to
+ * the output specified by the index
+ */
+static int vpbe_display_s_output(struct file *file, void *priv,
+ unsigned int i)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
+
+ if (vb2_is_busy(&layer->buffer_queue))
+ return -EBUSY;
+
+ if (!vpbe_dev->ops.set_output)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.set_output(vpbe_dev, i);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set output for sub devices\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vpbe_display_g_output - Get the index of the
+ * currently selected output
+ */
+static int vpbe_display_g_output(struct file *file, void *priv,
+ unsigned int *i)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
+ /* Return the index of the current output */
+ *i = vpbe_dev->current_out_index;
+
+ return 0;
+}
+
+/*
+ * vpbe_display_enum_dv_timings - Enumerate the dv timings
+ *
+ * Enumerate the dv timings supported by the current encoder. Return the
+ * status: 0 - success, -EINVAL on error
+ */
+static int
+vpbe_display_enum_dv_timings(struct file *file, void *priv,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
+
+ /* Enumerate outputs */
+ if (!vpbe_dev->ops.enum_dv_timings)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to enumerate dv timings info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vpbe_display_s_dv_timings - Set the dv timings
+ *
+ * Set the timings in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int
+vpbe_display_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n");
+
+ if (vb2_is_busy(&layer->buffer_queue))
+ return -EBUSY;
+
+ /* Set the given timings in the encoder */
+ if (!vpbe_dev->ops.s_dv_timings)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.s_dv_timings(vpbe_dev, timings);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set the dv timings info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vpbe_display_g_dv_timings - Get the dv timings
+ *
+ * Get the timings in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int
+vpbe_display_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n");
+
+ /* Get the current timings from the encoder */
+
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_TIMINGS) {
+ *dv_timings = vpbe_dev->current_timings.dv_timings;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vpbe_display_open()
+ * It creates a file handle object and stores it in the private_data
+ * member of the file pointer
+ */
+static int vpbe_display_open(struct file *file)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int err;
+
+ /* creating context for file descriptor */
+ err = v4l2_fh_open(file);
+ if (err) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "v4l2_fh_open failed\n");
+ return err;
+ }
+
+ /* nothing more to do if the layer is already initialized */
+ if (!v4l2_fh_is_singular_file(file))
+ return err;
+
+ if (!layer->usrs) {
+ if (mutex_lock_interruptible(&layer->opslock))
+ return -ERESTARTSYS;
+ /* First claim the layer for this device */
+ err = osd_device->ops.request_layer(osd_device,
+ layer->layer_info.id);
+ mutex_unlock(&layer->opslock);
+ if (err < 0) {
+ /* Couldn't get layer */
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Display Manager failed to allocate layer\n");
+ v4l2_fh_release(file);
+ return -EINVAL;
+ }
+ }
+ /* Increment layer usrs counter */
+ layer->usrs++;
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe display device opened successfully\n");
+ return 0;
+}
+
+/*
+ * vpbe_display_release()
+ * This function deletes the buffer queue, frees the buffers and the davinci
+ * display file handle
+ */
+static int vpbe_display_release(struct file *file)
+{
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
+
+ mutex_lock(&layer->opslock);
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ /* Decrement layer usrs counter */
+ layer->usrs--;
+ /* If this was the last user, release the claimed OSD layer(s) */
+ if (!layer->usrs) {
+ if (cfg->pixfmt == PIXFMT_NV12) {
+ struct vpbe_layer *otherlayer;
+ otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+ osd_device->ops.disable_layer(osd_device,
+ otherlayer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ otherlayer->layer_info.id);
+ }
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ layer->layer_info.id);
+ }
+
+ _vb2_fop_release(file, NULL);
+ mutex_unlock(&layer->opslock);
+
+ disp_dev->cbcr_ofst = 0;
+
+ return 0;
+}
+
+/* vpbe display ioctl operations */
+static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
+ .vidioc_querycap = vpbe_display_querycap,
+ .vidioc_g_fmt_vid_out = vpbe_display_g_fmt,
+ .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
+ .vidioc_s_fmt_vid_out = vpbe_display_s_fmt,
+ .vidioc_try_fmt_vid_out = vpbe_display_try_fmt,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+
+ .vidioc_cropcap = vpbe_display_cropcap,
+ .vidioc_g_selection = vpbe_display_g_selection,
+ .vidioc_s_selection = vpbe_display_s_selection,
+
+ .vidioc_s_std = vpbe_display_s_std,
+ .vidioc_g_std = vpbe_display_g_std,
+
+ .vidioc_enum_output = vpbe_display_enum_output,
+ .vidioc_s_output = vpbe_display_s_output,
+ .vidioc_g_output = vpbe_display_g_output,
+
+ .vidioc_s_dv_timings = vpbe_display_s_dv_timings,
+ .vidioc_g_dv_timings = vpbe_display_g_dv_timings,
+ .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings,
+};
+
+static const struct v4l2_file_operations vpbe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpbe_display_open,
+ .release = vpbe_display_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
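+
+/*
+ * Illustrative userspace sketch (not part of this driver): an abridged
+ * output streaming sequence served by the fops/ioctl ops above. Buffer
+ * mmap()ing, queuing and error handling are omitted for brevity.
+ */
+#if 0
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+
+static int start_display(int fd)
+{
+	struct v4l2_requestbuffers req;
+	int type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+	memset(&req, 0, sizeof(req));
+	req.count = 3;		/* matches VPBE_DEFAULT_NUM_BUFS */
+	req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	req.memory = V4L2_MEMORY_MMAP;
+	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
+		return -1;
+	/* ... mmap each buffer and queue it with VIDIOC_QBUF ... */
+	return ioctl(fd, VIDIOC_STREAMON, &type);
+}
+#endif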
+
+static int vpbe_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_display *vpbe_disp = data;
+
+ if (strcmp("vpbe_controller", pdev->name) == 0)
+ vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
+
+ if (strstr(pdev->name, "vpbe-osd"))
+ vpbe_disp->osd_device = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer = NULL;
+ struct video_device *vbd = NULL;
+
+ /* Allocate memory for the display layer object */
+ disp_dev->dev[i] = kzalloc(sizeof(*disp_dev->dev[i]), GFP_KERNEL);
+ if (!disp_dev->dev[i])
+ return -ENOMEM;
+
+ spin_lock_init(&disp_dev->dev[i]->irqlock);
+ mutex_init(&disp_dev->dev[i]->opslock);
+
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ vbd = &vpbe_display_layer->video_dev;
+ /* Initialize field of video device */
+ vbd->release = video_device_release_empty;
+ vbd->fops = &vpbe_fops;
+ vbd->ioctl_ops = &vpbe_ioctl_ops;
+ vbd->minor = -1;
+ vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
+ vbd->lock = &vpbe_display_layer->opslock;
+ vbd->vfl_dir = VFL_DIR_TX;
+
+ if (disp_dev->vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_STD)
+ vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
+
+ snprintf(vbd->name, sizeof(vbd->name),
+ "DaVinci_VPBE Display_DRIVER_V%d.%d.%d",
+ (VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE) & 0xff);
+
+ vpbe_display_layer->device_id = i;
+
+ vpbe_display_layer->layer_info.id =
+ ((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1);
+
+ return 0;
+}
+
+static int register_device(struct vpbe_layer *vpbe_display_layer,
+ struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ int err;
+
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+ "Trying to register VPBE display device.\n");
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+ "layer=%p,layer->video_dev=%p\n",
+ vpbe_display_layer,
+ &vpbe_display_layer->video_dev);
+
+ vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue;
+ err = video_register_device(&vpbe_display_layer->video_dev,
+ VFL_TYPE_GRABBER,
+ -1);
+ if (err)
+ return -ENODEV;
+
+ vpbe_display_layer->disp_dev = disp_dev;
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, disp_dev);
+ video_set_drvdata(&vpbe_display_layer->video_dev,
+ vpbe_display_layer);
+
+ return 0;
+}
+
+
+/*
+ * vpbe_display_probe()
+ * This function creates device entries by registering itself to the V4L2
+ * driver and initializes the fields of each layer object
+ */
+static int vpbe_display_probe(struct platform_device *pdev)
+{
+ struct vpbe_display *disp_dev;
+ struct v4l2_device *v4l2_dev;
+ struct resource *res = NULL;
+ struct vb2_queue *q;
+ int k;
+ int i;
+ int err;
+ int irq;
+
+ printk(KERN_DEBUG "vpbe_display_probe\n");
+ /* Allocate memory for vpbe_display */
+ disp_dev = devm_kzalloc(&pdev->dev, sizeof(*disp_dev), GFP_KERNEL);
+ if (!disp_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&disp_dev->dma_queue_lock);
+ /*
+ * Scan all the platform devices to find the vpbe
+ * controller device and get the vpbe_dev object
+ */
+ err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev,
+ vpbe_device_get);
+ if (err < 0)
+ return err;
+
+ v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
+ /* Initialize the vpbe display controller */
+ if (disp_dev->vpbe_dev->ops.initialize) {
+ err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
+ disp_dev->vpbe_dev);
+ if (err) {
+ v4l2_err(v4l2_dev, "Error initing vpbe\n");
+ err = -ENOMEM;
+ goto probe_out;
+ }
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ if (init_vpbe_layer(i, disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ v4l2_err(v4l2_dev, "Unable to get VENC interrupt resource\n");
+ err = -ENODEV;
+ goto probe_out;
+ }
+
+ irq = res->start;
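+	/*
+	 * venc_isr runs on each VENC (vsync) interrupt and services the
+	 * buffer queues of the display layers.
+	 */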
+ err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
+ VPBE_DISPLAY_DRIVER, disp_dev);
+ if (err) {
+ v4l2_err(v4l2_dev, "VPBE IRQ request failed\n");
+ goto probe_out;
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ /* initialize vb2 queue */
+ q = &disp_dev->dev[i]->buffer_queue;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = disp_dev->dev[i];
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &disp_dev->dev[i]->opslock;
+ q->dev = disp_dev->vpbe_dev->pdev;
+ err = vb2_queue_init(q);
+ if (err) {
+ v4l2_err(v4l2_dev, "vb2_queue_init() failed\n");
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&disp_dev->dev[i]->dma_queue);
+
+ if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out;
+ }
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev,
+ "Successfully completed the probing of vpbe v4l2 device\n");
+
+ return 0;
+
+probe_out:
+ for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
+ /* Unregister video device */
+ if (disp_dev->dev[k]) {
+ video_unregister_device(&disp_dev->dev[k]->video_dev);
+ kfree(disp_dev->dev[k]);
+ }
+ }
+ return err;
+}
+
+/*
+ * vpbe_display_remove()
+ * It unregisters the hardware layers from the V4L2 driver
+ */
+static int vpbe_display_remove(struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer;
+ struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int i;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
+
+ /* deinitialize the vpbe display controller */
+ if (vpbe_dev->ops.deinitialize)
+ vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
+ /* un-register device */
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ /* Unregister video device */
+ video_unregister_device(&vpbe_display_layer->video_dev);
+
+ }
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ kfree(disp_dev->dev[i]);
+ disp_dev->dev[i] = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver vpbe_display_driver = {
+ .driver = {
+ .name = VPBE_DISPLAY_DRIVER,
+ .bus = &platform_bus_type,
+ },
+ .probe = vpbe_display_probe,
+ .remove = vpbe_display_remove,
+};
+
+module_platform_driver(vpbe_display_driver);
+
+MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
new file mode 100644
index 000000000..c551a25d9
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -0,0 +1,1596 @@
+/*
+ * Copyright (C) 2007-2010 Texas Instruments Inc
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * Andy Lowe (alowe@mvista.com), MontaVista Software
+ * - Initial version
+ * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
+ * - ported to sub device interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include <mach/cputype.h>
+#include <mach/hardware.h>
+#endif
+
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_osd.h>
+
+#include <linux/io.h>
+#include "vpbe_osd_regs.h"
+
+#define MODULE_NAME "davinci-vpbe-osd"
+
+static const struct platform_device_id vpbe_osd_devtype[] = {
+ {
+ .name = DM644X_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+ {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_osd_devtype);
+
+/* register access routines */
+static inline u32 osd_read(struct osd_state *sd, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ return readl(osd->osd_base + offset);
+}
+
+static inline u32 osd_write(struct osd_state *sd, u32 val, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ writel(val, osd->osd_base + offset);
+
+ return val;
+}
+
+static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ void __iomem *addr = osd->osd_base + offset;
+ u32 val = readl(addr) | mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
+static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ void __iomem *addr = osd->osd_base + offset;
+ u32 val = readl(addr) & ~mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
+static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
+ u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ void __iomem *addr = osd->osd_base + offset;
+ u32 new_val = (readl(addr) & ~mask) | (val & mask);
+
+ writel(new_val, addr);
+
+ return new_val;
+}
+
+/* define some macros for layer and pixfmt classification */
+#define is_osd_win(layer) (((layer) == WIN_OSD0) || ((layer) == WIN_OSD1))
+#define is_vid_win(layer) (((layer) == WIN_VID0) || ((layer) == WIN_VID1))
+#define is_rgb_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_RGB565) || ((pixfmt) == PIXFMT_RGB888))
+#define is_yc_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_YCBCRI) || ((pixfmt) == PIXFMT_YCRCBI) || \
+ ((pixfmt) == PIXFMT_NV12))
+#define MAX_WIN_SIZE OSD_VIDWIN0XP_V0X
+#define MAX_LINE_LENGTH (OSD_VIDWIN0OFST_V0LO << 5)
+
+/**
+ * _osd_dm6446_vid0_pingpong() - field inversion fix for DM6446
+ * @sd: ptr to struct osd_state
+ * @field_inversion: inversion flag
+ * @fb_base_phys: frame buffer address
+ * @lconfig: ptr to layer config
+ *
+ * This routine implements a workaround for the field signal inversion silicon
+ * erratum described in Advisory 1.3.8 for the DM6446. The fb_base_phys and
+ * lconfig parameters apply to the vid0 window. This routine should be called
+ * whenever the vid0 layer configuration or start address is modified, or when
+ * the OSD field inversion setting is modified.
+ * Returns: 1 if the ping-pong buffers need to be toggled in the vsync isr, or
+ * 0 otherwise
+ */
+static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
+ int field_inversion,
+ unsigned long fb_base_phys,
+ const struct osd_layer_config *lconfig)
+{
+ struct osd_platform_data *pdata;
+
+ pdata = (struct osd_platform_data *)sd->dev->platform_data;
+ if (pdata != NULL && pdata->field_inv_wa_enable) {
+
+ if (!field_inversion || !lconfig->interlaced) {
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_PPVWIN0ADR);
+ osd_modify(sd, OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, 0,
+ OSD_MISCCTL);
+ return 0;
+ } else {
+ unsigned miscctl = OSD_MISCCTL_PPRV;
+
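+			/*
+			 * Program the main window one line before the buffer
+			 * and the ping-pong window one line after it; the
+			 * vsync ISR then toggles between the two each field.
+			 */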
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) - lconfig->line_length,
+ OSD_VIDWIN0ADR);
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) + lconfig->line_length,
+ OSD_PPVWIN0ADR);
+ osd_modify(sd,
+ OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, miscctl,
+ OSD_MISCCTL);
+
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void _osd_set_field_inversion(struct osd_state *sd, int enable)
+{
+ unsigned fsinv = 0;
+
+ if (enable)
+ fsinv = OSD_MODE_FSINV;
+
+ osd_modify(sd, OSD_MODE_FSINV, fsinv, OSD_MODE);
+}
+
+static void _osd_set_blink_attribute(struct osd_state *sd, int enable,
+ enum osd_blink_interval blink)
+{
+ u32 osdatrmd = 0;
+
+ if (enable) {
+ osdatrmd |= OSD_OSDATRMD_BLNK;
+ osdatrmd |= blink << OSD_OSDATRMD_BLNKINT_SHIFT;
+ }
+ /* caller must ensure that OSD1 is configured in attribute mode */
+ osd_modify(sd, OSD_OSDATRMD_BLNKINT | OSD_OSDATRMD_BLNK, osdatrmd,
+ OSD_OSDATRMD);
+}
+
+static void _osd_set_rom_clut(struct osd_state *sd,
+ enum osd_rom_clut rom_clut)
+{
+ if (rom_clut == ROM_CLUT0)
+ osd_clear(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+ else
+ osd_set(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+}
+
+static void _osd_set_palette_map(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned char pixel_value,
+ unsigned char clut_index,
+ enum osd_pix_format pixfmt)
+{
+ static const int map_2bpp[] = { 0, 5, 10, 15 };
+ static const int map_1bpp[] = { 0, 15 };
+ int bmp_offset;
+ int bmp_shift;
+ int bmp_mask;
+ int bmp_reg;
+
+ switch (pixfmt) {
+ case PIXFMT_1BPP:
+ bmp_reg = map_1bpp[pixel_value & 0x1];
+ break;
+ case PIXFMT_2BPP:
+ bmp_reg = map_2bpp[pixel_value & 0x3];
+ break;
+ case PIXFMT_4BPP:
+ bmp_reg = pixel_value & 0xf;
+ break;
+ default:
+ return;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ bmp_offset = OSD_W0BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ case OSDWIN_OSD1:
+ bmp_offset = OSD_W1BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ default:
+ return;
+ }
+
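+	/*
+	 * Each 32-bit W0BMPnn/W1BMPnn register holds two 8-bit
+	 * palette-map entries.
+	 */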
+ if (bmp_reg & 1) {
+ bmp_shift = 8;
+ bmp_mask = 0xff << 8;
+ } else {
+ bmp_shift = 0;
+ bmp_mask = 0xff;
+ }
+
+ osd_modify(sd, bmp_mask, clut_index << bmp_shift, bmp_offset);
+}
+
+static void _osd_set_rec601_attenuation(struct osd_state *sd,
+ enum osd_win_layer osdwin, int enable)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+ enable ? OSD_OSDWIN0MD_ATN0E : 0,
+ OSD_OSDWIN0MD);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+ enable ? OSD_OSDWIN0MD_ATN0E : 0,
+ OSD_OSDWIN0MD);
+ else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2))
+ osd_modify(sd, OSD_EXTMODE_ATNOSD0EN,
+ enable ? OSD_EXTMODE_ATNOSD0EN : 0,
+ OSD_EXTMODE);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+ enable ? OSD_OSDWIN1MD_ATN1E : 0,
+ OSD_OSDWIN1MD);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+ enable ? OSD_OSDWIN1MD_ATN1E : 0,
+ OSD_OSDWIN1MD);
+ else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2))
+ osd_modify(sd, OSD_EXTMODE_ATNOSD1EN,
+ enable ? OSD_EXTMODE_ATNOSD1EN : 0,
+ OSD_EXTMODE);
+ break;
+ }
+}
+
+static void _osd_set_blending_factor(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_blending_factor blend)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_BLND0,
+ blend << OSD_OSDWIN0MD_BLND0_SHIFT, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_BLND1,
+ blend << OSD_OSDWIN1MD_BLND1_SHIFT, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_enable_rgb888_pixblend(struct osd_state *sd,
+ enum osd_win_layer osdwin)
+{
+
+ osd_modify(sd, OSD_MISCCTL_BLDSEL, 0, OSD_MISCCTL);
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_EXTMODE_OSD0BLDCHR,
+ OSD_EXTMODE_OSD0BLDCHR, OSD_EXTMODE);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_EXTMODE_OSD1BLDCHR,
+ OSD_EXTMODE_OSD1BLDCHR, OSD_EXTMODE);
+ break;
+ }
+}
+
+static void _osd_enable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned colorkey,
+ enum osd_pix_format pixfmt)
+{
+ switch (pixfmt) {
+ case PIXFMT_1BPP:
+ case PIXFMT_2BPP:
+ case PIXFMT_4BPP:
+ case PIXFMT_8BPP:
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_TRANSPBMPIDX_BMP0,
+ colorkey <<
+ OSD_TRANSPBMPIDX_BMP0_SHIFT,
+ OSD_TRANSPBMPIDX);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_TRANSPBMPIDX_BMP1,
+ colorkey <<
+ OSD_TRANSPBMPIDX_BMP1_SHIFT,
+ OSD_TRANSPBMPIDX);
+ break;
+ }
+ }
+ break;
+ case PIXFMT_RGB565:
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
+ OSD_TRANSPVAL);
+ else if (sd->vpbe_type == VPBE_VERSION_3)
+ osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+ OSD_TRANSPVALL);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ if (sd->vpbe_type == VPBE_VERSION_3)
+ osd_modify(sd, OSD_TRANSPVALU_Y, colorkey,
+ OSD_TRANSPVALU);
+ break;
+ case PIXFMT_RGB888:
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+ OSD_TRANSPVALL);
+ osd_modify(sd, OSD_TRANSPVALU_RGBU, colorkey >> 16,
+ OSD_TRANSPVALU);
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_set(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_disable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_clear(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_osd_clut(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_clut clut)
+{
+ u32 winmd = 0;
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN0MD_CLUTS0;
+ osd_modify(sd, OSD_OSDWIN0MD_CLUTS0, winmd, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN1MD_CLUTS1;
+ osd_modify(sd, OSD_OSDWIN1MD_CLUTS1, winmd, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_zoom(struct osd_state *sd, enum osd_layer layer,
+ enum osd_zoom_factor h_zoom,
+ enum osd_zoom_factor v_zoom)
+{
+ u32 winmd = 0;
+
+ switch (layer) {
+ case WIN_OSD0:
+ winmd |= (h_zoom << OSD_OSDWIN0MD_OHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN0MD_OVZ0_SHIFT);
+ osd_modify(sd, OSD_OSDWIN0MD_OHZ0 | OSD_OSDWIN0MD_OVZ0, winmd,
+ OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ0_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ0 | OSD_VIDWINMD_VVZ0, winmd,
+ OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ winmd |= (h_zoom << OSD_OSDWIN1MD_OHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN1MD_OVZ1_SHIFT);
+ osd_modify(sd, OSD_OSDWIN1MD_OHZ1 | OSD_OSDWIN1MD_OVZ1, winmd,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ1_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ1 | OSD_VIDWINMD_VVZ1, winmd,
+ OSD_VIDWINMD);
+ break;
+ }
+}
+
+static void _osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_clear(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* disable attribute mode as well as disabling the window */
+ osd_clear(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_clear(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static void osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+ win->is_enabled = 0;
+
+ _osd_disable_layer(sd, layer);
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void _osd_enable_attribute_mode(struct osd_state *sd)
+{
+ /* enable attribute mode for OSD1 */
+ osd_set(sd, OSD_OSDWIN1MD_OASW, OSD_OSDWIN1MD);
+}
+
+static void _osd_enable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_set(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* enable OSD1 and disable attribute mode */
+ osd_modify(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD_OACT1, OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_set(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
+ int otherwin)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ /*
+	 * The otherwin flag indicates that this is the other video window
+	 * in YUV420 mode; if so, skip this check
+ */
+ if (!otherwin && (!win->is_allocated ||
+ !win->fb_base_phys ||
+ !cfg->line_length ||
+ !cfg->xsize ||
+ !cfg->ysize)) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+
+ if (win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return 0;
+ }
+ win->is_enabled = 1;
+
+ if (cfg->pixfmt != PIXFMT_OSD_ATTR)
+ _osd_enable_layer(sd, layer);
+ else {
+ _osd_enable_attribute_mode(sd);
+ _osd_set_blink_attribute(sd, osd->is_blinking, osd->blink);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+#define OSD_SRC_ADDR_HIGH4 0x7800000
+#define OSD_SRC_ADDR_HIGH7 0x7F0000
+#define OSD_SRCADD_OFSET_SFT 23
+#define OSD_SRCADD_ADD_SFT 16
+#define OSD_WINADL_MASK 0xFFFF
+#define OSD_WINOFST_MASK 0x1000
+#define VPBE_REG_BASE 0x80000000
+
+static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ switch (layer) {
+ case WIN_OSD0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
+ break;
+ case WIN_VID0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ break;
+ case WIN_OSD1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
+ break;
+ case WIN_VID1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
+ break;
+ }
+ } else if (sd->vpbe_type == VPBE_VERSION_3) {
+ unsigned long fb_offset_32 =
+ (fb_base_phys - VPBE_REG_BASE) >> 5;
+
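+		/*
+		 * DM355: window start addresses are programmed as 32-byte
+		 * units relative to VPBE_REG_BASE, split across the ADH and
+		 * ADL registers.
+		 */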
+ switch (layer) {
+ case WIN_OSD0:
+ osd_modify(sd, OSD_OSDWINADH_O0AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O0AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_OSDWIN0ADL_O0AL,
+ OSD_OSDWIN0ADL);
+ break;
+ case WIN_VID0:
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_VIDWIN0ADL_V0AL,
+ OSD_VIDWIN0ADL);
+ break;
+ case WIN_OSD1:
+ osd_modify(sd, OSD_OSDWINADH_O1AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O1AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_OSDWIN1ADL_O1AL,
+ OSD_OSDWIN1ADL);
+ break;
+ case WIN_VID1:
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_VIDWIN1ADL_V1AL,
+ OSD_VIDWIN1ADL);
+ break;
+ }
+ } else if (sd->vpbe_type == VPBE_VERSION_2) {
+ struct osd_window_state *win = &sd->win[layer];
+ unsigned long fb_offset_32, cbcr_offset_32;
+
+ fb_offset_32 = fb_base_phys - VPBE_REG_BASE;
+ if (cbcr_ofst)
+ cbcr_offset_32 = cbcr_ofst;
+ else
+ cbcr_offset_32 = win->lconfig.line_length *
+ win->lconfig.ysize;
+ cbcr_offset_32 += fb_offset_32;
+ fb_offset_32 = fb_offset_32 >> 5;
+ cbcr_offset_32 = cbcr_offset_32 >> 5;
+ /*
+		 * DM365: the start address is a 27-bit address; bits b26-b23
+		 * go into bits b12-b9 of the offset register, and bit 26 has
+		 * to be '1'
+ */
+ if (win->lconfig.pixfmt == PIXFMT_NV12) {
+ switch (layer) {
+ case WIN_VID0:
+ case WIN_VID1:
+ /* Y is in VID0 */
+ osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN0ADL);
+ /* CbCr is in VID1 */
+ osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+ ((cbcr_offset_32 &
+ OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ (cbcr_offset_32 &
+ OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, cbcr_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN1ADL);
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (layer) {
+ case WIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0OFST_O0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+ OSD_OSDWIN0OFST);
+ osd_modify(sd, OSD_OSDWINADH_O0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O0AH_SHIFT), OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_OSDWIN0ADL);
+ break;
+ case WIN_VID0:
+ if (win->lconfig.pixfmt != PIXFMT_NV12) {
+ osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN0ADL);
+ }
+ break;
+ case WIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1OFST_O1AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+ OSD_OSDWIN1OFST);
+ osd_modify(sd, OSD_OSDWINADH_O1AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O1AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_OSDWIN1ADL);
+ break;
+ case WIN_VID1:
+ if (win->lconfig.pixfmt != PIXFMT_NV12) {
+ osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN1ADL);
+ }
+ break;
+ }
+ }
+}
+
+static void osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->fb_base_phys = fb_base_phys & ~0x1F;
+ _osd_start_layer(sd, layer, fb_base_phys, cbcr_ofst);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_get_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ *lconfig = win->lconfig;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+/**
+ * try_layer_config() - Try a specific configuration for the layer
+ * @sd: ptr to struct osd_state
+ * @layer: layer to configure
+ * @lconfig: layer configuration to try
+ *
+ * If the requested lconfig is completely rejected and the value of lconfig on
+ * exit is the current lconfig, then try_layer_config() returns 1. Otherwise,
+ * try_layer_config() returns 0. A return value of 0 does not necessarily mean
+ * that the value of lconfig on exit is identical to the value of lconfig on
+ * entry, but merely that it represents a change from the current lconfig.
+ */
+static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ int bad_config = 0;
+
+ /* verify that the pixel format is compatible with the layer */
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ case PIXFMT_2BPP:
+ case PIXFMT_4BPP:
+ case PIXFMT_8BPP:
+ case PIXFMT_RGB565:
+ if (osd->vpbe_type == VPBE_VERSION_1)
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_RGB888:
+ if (osd->vpbe_type == VPBE_VERSION_1)
+ bad_config = !is_vid_win(layer);
+ else if ((osd->vpbe_type == VPBE_VERSION_3) ||
+ (osd->vpbe_type == VPBE_VERSION_2))
+ bad_config = !is_osd_win(layer);
+ break;
+ case PIXFMT_NV12:
+ if (osd->vpbe_type != VPBE_VERSION_2)
+ bad_config = 1;
+ else
+ bad_config = is_osd_win(layer);
+ break;
+ case PIXFMT_OSD_ATTR:
+ bad_config = (layer != WIN_OSD1);
+ break;
+ default:
+ bad_config = 1;
+ break;
+ }
+ if (bad_config) {
+ /*
+ * The requested pixel format is incompatible with the layer,
+ * so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return bad_config;
+ }
+
+	/* DM6446: only one OSD window at a time can use RGB pixel formats */
+ if ((osd->vpbe_type == VPBE_VERSION_1) &&
+ is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
+ enum osd_pix_format pixfmt;
+
+ if (layer == WIN_OSD0)
+ pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_OSD0].lconfig.pixfmt;
+
+ if (is_rgb_pixfmt(pixfmt)) {
+ /*
+			 * The other OSD window is already configured for an
+			 * RGB pixel format, so keep the current layer
+			 * configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* DM6446: only one video window at a time can use RGB888 */
+ if ((osd->vpbe_type == VPBE_VERSION_1) && is_vid_win(layer) &&
+ lconfig->pixfmt == PIXFMT_RGB888) {
+ enum osd_pix_format pixfmt;
+
+ if (layer == WIN_VID0)
+ pixfmt = osd->win[WIN_VID1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_VID0].lconfig.pixfmt;
+
+ if (pixfmt == PIXFMT_RGB888) {
+ /*
+ * The other video window is already configured for
+ * RGB888, so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* window dimensions must be non-zero */
+ if (!lconfig->line_length || !lconfig->xsize || !lconfig->ysize) {
+ *lconfig = win->lconfig;
+ return 1;
+ }
+
+ /* round line_length up to a multiple of 32 */
+ lconfig->line_length = ((lconfig->line_length + 31) / 32) * 32;
+ lconfig->line_length =
+ min(lconfig->line_length, (unsigned)MAX_LINE_LENGTH);
+ lconfig->xsize = min(lconfig->xsize, (unsigned)MAX_WIN_SIZE);
+ lconfig->ysize = min(lconfig->ysize, (unsigned)MAX_WIN_SIZE);
+ lconfig->xpos = min(lconfig->xpos, (unsigned)MAX_WIN_SIZE);
+ lconfig->ypos = min(lconfig->ypos, (unsigned)MAX_WIN_SIZE);
+ lconfig->interlaced = (lconfig->interlaced != 0);
+ if (lconfig->interlaced) {
+ /* ysize and ypos must be even for interlaced displays */
+ lconfig->ysize &= ~1;
+ lconfig->ypos &= ~1;
+ }
+
+ return 0;
+}
+
+static void _osd_disable_vid_rgb888(struct osd_state *sd)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine disables RGB888 pixel format for both video windows.
+ * The caller must ensure that neither video window is currently
+ * configured for RGB888 pixel format.
+ */
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+}
+
+static void _osd_enable_vid_rgb888(struct osd_state *sd,
+ enum osd_layer layer)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine enables RGB888 pixel format for the specified video
+ * window. The caller must ensure that the other video window is not
+ * currently configured for RGB888 pixel format, as this routine will
+ * disable RGB888 pixel format for the other window.
+ */
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ if (layer == WIN_VID0)
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+ else if (layer == WIN_VID1)
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL);
+ }
+}
+
+static void _osd_set_cbcr_order(struct osd_state *sd,
+ enum osd_pix_format pixfmt)
+{
+ /*
+ * The caller must ensure that all windows using YC pixfmt use the same
+ * Cb/Cr order.
+ */
+ if (pixfmt == PIXFMT_YCBCRI)
+ osd_clear(sd, OSD_MODE_CS, OSD_MODE);
+ else if (pixfmt == PIXFMT_YCRCBI)
+ osd_set(sd, OSD_MODE_CS, OSD_MODE);
+}
+
+static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ const struct osd_layer_config *lconfig)
+{
+ u32 winmd = 0, winmd_mask = 0, bmw = 0;
+
+ _osd_set_cbcr_order(sd, lconfig->pixfmt);
+
+ switch (layer) {
+ case WIN_OSD0:
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN0MD_RGB0E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN0MD_RGB0E;
+ } else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2)) {
+ winmd_mask |= OSD_OSDWIN0MD_BMP0MD;
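+			/*
+			 * BMP0MD selects the window mode: 0 = bitmap,
+			 * 1 = RGB565, 2 = RGB888, 3 = YCbCr.
+			 */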
+ switch (lconfig->pixfmt) {
+ case PIXFMT_RGB565:
+ winmd |= (1 <<
+ OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ break;
+ case PIXFMT_RGB888:
+ winmd |= (2 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ _osd_enable_rgb888_pixblend(sd, OSDWIN_OSD0);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ winmd |= (3 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ break;
+ default:
+ break;
+ }
+ }
+
+ winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;
+
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN0MD_BMW0_SHIFT);
+
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN0MD_OFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN0MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN0XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN0YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN0YL);
+ }
+ break;
+ case WIN_VID0:
+ winmd_mask |= OSD_VIDWINMD_VFF0;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+ /*
+ * For YUV420P format the register contents are
+ * duplicated in both VID registers
+ */
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ (lconfig->pixfmt == PIXFMT_NV12)) {
+ /* other window also */
+ if (lconfig->interlaced) {
+ winmd_mask |= OSD_VIDWINMD_VFF1;
+ winmd |= OSD_VIDWINMD_VFF1;
+ osd_modify(sd, winmd_mask, winmd,
+ OSD_VIDWINMD);
+ }
+
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ OSD_MISCCTL_S420D, OSD_MISCCTL);
+ osd_write(sd, lconfig->line_length >> 5,
+ OSD_VIDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+ /*
+			 * If the NV12 line length is not 32-byte aligned
+			 * (e.g. NTSC), the window X pixel size must be
+			 * made 32-byte aligned as well
+ */
+ if (lconfig->xsize % 32) {
+ osd_write(sd,
+ ((lconfig->xsize + 31) & ~31),
+ OSD_VIDWIN1XL);
+ osd_write(sd,
+ ((lconfig->xsize + 31) & ~31),
+ OSD_VIDWIN0XL);
+ }
+ } else if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ (lconfig->pixfmt != PIXFMT_NV12)) {
+ osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
+ OSD_MISCCTL);
+ }
+
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos >> 1,
+ OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1,
+ OSD_VIDWIN1YL);
+ }
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ }
+ }
+ break;
+ case WIN_OSD1:
+ /*
+ * The caller must ensure that OSD1 is disabled prior to
+ * switching from a normal mode to attribute mode or from
+ * attribute mode to a normal mode.
+ */
+ if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN1MD_ATN1E |
+ OSD_OSDWIN1MD_RGB1E | OSD_OSDWIN1MD_CLUTS1 |
+ OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
+ } else {
+ winmd_mask |= OSD_OSDWIN1MD_BMP1MD |
+ OSD_OSDWIN1MD_CLUTS1 | OSD_OSDWIN1MD_BLND1 |
+ OSD_OSDWIN1MD_TE1;
+ }
+ } else {
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN1MD_RGB1E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN1MD_RGB1E;
+ } else if ((sd->vpbe_type == VPBE_VERSION_3)
+ || (sd->vpbe_type == VPBE_VERSION_2)) {
+ winmd_mask |= OSD_OSDWIN1MD_BMP1MD;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_RGB565:
+ winmd |=
+ (1 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ break;
+ case PIXFMT_RGB888:
+ winmd |=
+ (2 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ _osd_enable_rgb888_pixblend(sd,
+ OSDWIN_OSD1);
+ break;
+ case PIXFMT_YCBCRI:
+ case PIXFMT_YCRCBI:
+ winmd |=
+ (3 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ break;
+ default:
+ break;
+ }
+ }
+
+ winmd_mask |= OSD_OSDWIN1MD_BMW1;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN1MD_BMW1_SHIFT);
+ }
+
+ winmd_mask |= OSD_OSDWIN1MD_OFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN1MD_OFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN1MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN1XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN1YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN1YL);
+ }
+ break;
+ case WIN_VID1:
+ winmd_mask |= OSD_VIDWINMD_VFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+ /*
+ * For YUV420P format the register contents are
+ * duplicated in both VID registers
+ */
+ if (sd->vpbe_type == VPBE_VERSION_2) {
+ if (lconfig->pixfmt == PIXFMT_NV12) {
+ /* other window also */
+ if (lconfig->interlaced) {
+ winmd_mask |= OSD_VIDWINMD_VFF0;
+ winmd |= OSD_VIDWINMD_VFF0;
+ osd_modify(sd, winmd_mask, winmd,
+ OSD_VIDWINMD);
+ }
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ OSD_MISCCTL_S420D, OSD_MISCCTL);
+ osd_write(sd, lconfig->line_length >> 5,
+ OSD_VIDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+ } else {
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ ~OSD_MISCCTL_S420D, OSD_MISCCTL);
+ }
+ }
+
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos >> 1,
+ OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1,
+ OSD_VIDWIN0YL);
+ }
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ }
+ }
+ break;
+ }
+}
+
+static int osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+ int reject_config;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ reject_config = try_layer_config(sd, layer, lconfig);
+ if (reject_config) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return reject_config;
+ }
+
+ /* update the current Cb/Cr order */
+ if (is_yc_pixfmt(lconfig->pixfmt))
+ osd->yc_pixfmt = lconfig->pixfmt;
+
+ /*
+ * If we are switching OSD1 from normal mode to attribute mode or from
+ * attribute mode to normal mode, then we must disable the window.
+ */
+ if (layer == WIN_OSD1) {
+ if (((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) ||
+ ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR))) {
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+ }
+ }
+
+ _osd_set_layer_config(sd, layer, lconfig);
+
+ if (layer == WIN_OSD1) {
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[OSDWIN_OSD1];
+
+ if ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from attribute mode to normal
+ * mode, so we must initialize the CLUT select, the
+ * blend factor, transparency colorkey enable, and
+ * attenuation enable (DM6446 only) bits in the
+ * OSDWIN1MD register.
+ */
+ _osd_set_osd_clut(sd, OSDWIN_OSD1,
+ osdwin_state->clut);
+ _osd_set_blending_factor(sd, OSDWIN_OSD1,
+ osdwin_state->blend);
+ if (osdwin_state->colorkey_blending) {
+ _osd_enable_color_key(sd, OSDWIN_OSD1,
+ osdwin_state->
+ colorkey,
+ lconfig->pixfmt);
+ } else
+ _osd_disable_color_key(sd, OSDWIN_OSD1);
+ _osd_set_rec601_attenuation(sd, OSDWIN_OSD1,
+ osdwin_state->
+ rec601_attenuation);
+ } else if ((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from normal mode to attribute
+ * mode, so we must initialize the blink enable and
+ * blink interval bits in the OSDATRMD register.
+ */
+ _osd_set_blink_attribute(sd, osd->is_blinking,
+ osd->blink);
+ }
+ }
+
+ /*
+ * If we just switched to a 1-, 2-, or 4-bits-per-pixel bitmap format
+ * then configure a default palette map.
+ */
+ if ((lconfig->pixfmt != cfg->pixfmt) &&
+ ((lconfig->pixfmt == PIXFMT_1BPP) ||
+ (lconfig->pixfmt == PIXFMT_2BPP) ||
+ (lconfig->pixfmt == PIXFMT_4BPP))) {
+ enum osd_win_layer osdwin =
+ ((layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1);
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[osdwin];
+ unsigned char clut_index;
+ unsigned char clut_entries = 0;
+
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ clut_entries = 2;
+ break;
+ case PIXFMT_2BPP:
+ clut_entries = 4;
+ break;
+ case PIXFMT_4BPP:
+ clut_entries = 16;
+ break;
+ default:
+ break;
+ }
+ /*
+ * The default palette map maps the pixel value to the clut
+ * index, i.e. pixel value 0 maps to clut entry 0, pixel value
+ * 1 maps to clut entry 1, etc.
+ */
+ for (clut_index = 0; clut_index < 16; clut_index++) {
+ osdwin_state->palette_map[clut_index] = clut_index;
+ if (clut_index < clut_entries) {
+ _osd_set_palette_map(sd, osdwin, clut_index,
+ clut_index,
+ lconfig->pixfmt);
+ }
+ }
+ }
+
+ *cfg = *lconfig;
+ /* DM6446: configure the RGB888 enable and window selection */
+ if (osd->win[WIN_VID0].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID0);
+ else if (osd->win[WIN_VID1].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID1);
+ else
+ _osd_disable_vid_rgb888(sd);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+static void osd_init_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ enum osd_win_layer osdwin;
+ struct osd_osdwin_state *osdwin_state;
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+
+ win->h_zoom = ZOOM_X1;
+ win->v_zoom = ZOOM_X1;
+ _osd_set_zoom(sd, layer, win->h_zoom, win->v_zoom);
+
+ win->fb_base_phys = 0;
+ _osd_start_layer(sd, layer, win->fb_base_phys, 0);
+
+ cfg->line_length = 0;
+ cfg->xsize = 0;
+ cfg->ysize = 0;
+ cfg->xpos = 0;
+ cfg->ypos = 0;
+ cfg->interlaced = 0;
+ switch (layer) {
+ case WIN_OSD0:
+ case WIN_OSD1:
+ osdwin = (layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1;
+ osdwin_state = &osd->osdwin[osdwin];
+ /*
+ * Other code relies on the fact that OSD windows default to a
+ * bitmap pixel format when they are deallocated, so don't
+ * change this default pixel format.
+ */
+ cfg->pixfmt = PIXFMT_8BPP;
+ _osd_set_layer_config(sd, layer, cfg);
+ osdwin_state->clut = RAM_CLUT;
+ _osd_set_osd_clut(sd, osdwin, osdwin_state->clut);
+ osdwin_state->colorkey_blending = 0;
+ _osd_disable_color_key(sd, osdwin);
+ osdwin_state->blend = OSD_8_VID_0;
+ _osd_set_blending_factor(sd, osdwin, osdwin_state->blend);
+ osdwin_state->rec601_attenuation = 0;
+ _osd_set_rec601_attenuation(sd, osdwin,
+ osdwin_state->
+ rec601_attenuation);
+ if (osdwin == OSDWIN_OSD1) {
+ osd->is_blinking = 0;
+ osd->blink = BLINK_X1;
+ }
+ break;
+ case WIN_VID0:
+ case WIN_VID1:
+ cfg->pixfmt = osd->yc_pixfmt;
+ _osd_set_layer_config(sd, layer, cfg);
+ break;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_release_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+ osd_init_layer(sd, layer);
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_allocated = 0;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static int osd_request_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+ win->is_allocated = 1;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+static void _osd_init(struct osd_state *sd)
+{
+ osd_write(sd, 0, OSD_MODE);
+ osd_write(sd, 0, OSD_VIDWINMD);
+ osd_write(sd, 0, OSD_OSDWIN0MD);
+ osd_write(sd, 0, OSD_OSDWIN1MD);
+ osd_write(sd, 0, OSD_RECTCUR);
+ osd_write(sd, 0, OSD_MISCCTL);
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ osd_write(sd, 0, OSD_VBNDRY);
+ osd_write(sd, 0, OSD_EXTMODE);
+ osd_write(sd, OSD_MISCCTL_DMANG, OSD_MISCCTL);
+ }
+}
+
+static void osd_set_left_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPX);
+}
+
+static void osd_set_top_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPY);
+}
+
+static int osd_initialize(struct osd_state *osd)
+{
+ if (osd == NULL)
+ return -ENODEV;
+ _osd_init(osd);
+
+ /* set default Cb/Cr order */
+ osd->yc_pixfmt = PIXFMT_YCBCRI;
+
+ if (osd->vpbe_type == VPBE_VERSION_3) {
+ /*
+ * ROM CLUT1 on the DM355 is similar (identical?) to ROM CLUT0
+ * on the DM6446, so make ROM_CLUT1 the default on the DM355.
+ */
+ osd->rom_clut = ROM_CLUT1;
+ }
+
+ _osd_set_field_inversion(osd, osd->field_inversion);
+ _osd_set_rom_clut(osd, osd->rom_clut);
+
+ osd_init_layer(osd, WIN_OSD0);
+ osd_init_layer(osd, WIN_VID0);
+ osd_init_layer(osd, WIN_OSD1);
+ osd_init_layer(osd, WIN_VID1);
+
+ return 0;
+}
+
+static const struct vpbe_osd_ops osd_ops = {
+ .initialize = osd_initialize,
+ .request_layer = osd_request_layer,
+ .release_layer = osd_release_layer,
+ .enable_layer = osd_enable_layer,
+ .disable_layer = osd_disable_layer,
+ .set_layer_config = osd_set_layer_config,
+ .get_layer_config = osd_get_layer_config,
+ .start_layer = osd_start_layer,
+ .set_left_margin = osd_set_left_margin,
+ .set_top_margin = osd_set_top_margin,
+};
+
+static int osd_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *pdev_id;
+ struct osd_state *osd;
+ struct resource *res;
+
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id)
+ return -EINVAL;
+
+ osd = devm_kzalloc(&pdev->dev, sizeof(struct osd_state), GFP_KERNEL);
+ if (osd == NULL)
+ return -ENOMEM;
+
+ osd->dev = &pdev->dev;
+ osd->vpbe_type = pdev_id->driver_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ osd->osd_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(osd->osd_base))
+ return PTR_ERR(osd->osd_base);
+
+ osd->osd_base_phys = res->start;
+ osd->osd_size = resource_size(res);
+ spin_lock_init(&osd->lock);
+ osd->ops = osd_ops;
+ platform_set_drvdata(pdev, osd);
+ dev_notice(osd->dev, "OSD sub device probe success\n");
+
+ return 0;
+}
+
+static int osd_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver osd_driver = {
+ .probe = osd_probe,
+ .remove = osd_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ },
+ .id_table = vpbe_osd_devtype
+};
+
+module_platform_driver(osd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_osd_regs.h b/drivers/media/platform/davinci/vpbe_osd_regs.h
new file mode 100644
index 000000000..3db265f87
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_osd_regs.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _VPBE_OSD_REGS_H
+#define _VPBE_OSD_REGS_H
+
+/* VPBE Global Registers */
+#define VPBE_PID 0x0
+#define VPBE_PCR 0x4
+
+/* VPSS Clock Registers */
+#define VPSSCLK_PID 0x00
+#define VPSSCLK_CLKCTRL 0x04
+
+/* VPSS Buffer Logic Registers */
+#define VPSSBL_PID 0x00
+#define VPSSBL_PCR 0x04
+#define VPSSBL_BCR 0x08
+#define VPSSBL_INTSTAT 0x0C
+#define VPSSBL_INTSEL 0x10
+#define VPSSBL_EVTSEL 0x14
+#define VPSSBL_MEMCTRL 0x18
+#define VPSSBL_CCDCMUX 0x1C
+
+/* DM365 ISP5 system configuration */
+#define ISP5_PID 0x0
+#define ISP5_PCCR 0x4
+#define ISP5_BCR 0x8
+#define ISP5_INTSTAT 0xC
+#define ISP5_INTSEL1 0x10
+#define ISP5_INTSEL2 0x14
+#define ISP5_INTSEL3 0x18
+#define ISP5_EVTSEL 0x1c
+#define ISP5_CCDCMUX 0x20
+
+/* VPBE On-Screen Display Subsystem Registers (OSD) */
+#define OSD_MODE 0x00
+#define OSD_VIDWINMD 0x04
+#define OSD_OSDWIN0MD 0x08
+#define OSD_OSDWIN1MD 0x0C
+#define OSD_OSDATRMD 0x0C
+#define OSD_RECTCUR 0x10
+#define OSD_VIDWIN0OFST 0x18
+#define OSD_VIDWIN1OFST 0x1C
+#define OSD_OSDWIN0OFST 0x20
+#define OSD_OSDWIN1OFST 0x24
+#define OSD_VIDWINADH 0x28
+#define OSD_VIDWIN0ADL 0x2C
+#define OSD_VIDWIN0ADR 0x2C
+#define OSD_VIDWIN1ADL 0x30
+#define OSD_VIDWIN1ADR 0x30
+#define OSD_OSDWINADH 0x34
+#define OSD_OSDWIN0ADL 0x38
+#define OSD_OSDWIN0ADR 0x38
+#define OSD_OSDWIN1ADL 0x3C
+#define OSD_OSDWIN1ADR 0x3C
+#define OSD_BASEPX 0x40
+#define OSD_BASEPY 0x44
+#define OSD_VIDWIN0XP 0x48
+#define OSD_VIDWIN0YP 0x4C
+#define OSD_VIDWIN0XL 0x50
+#define OSD_VIDWIN0YL 0x54
+#define OSD_VIDWIN1XP 0x58
+#define OSD_VIDWIN1YP 0x5C
+#define OSD_VIDWIN1XL 0x60
+#define OSD_VIDWIN1YL 0x64
+#define OSD_OSDWIN0XP 0x68
+#define OSD_OSDWIN0YP 0x6C
+#define OSD_OSDWIN0XL 0x70
+#define OSD_OSDWIN0YL 0x74
+#define OSD_OSDWIN1XP 0x78
+#define OSD_OSDWIN1YP 0x7C
+#define OSD_OSDWIN1XL 0x80
+#define OSD_OSDWIN1YL 0x84
+#define OSD_CURXP 0x88
+#define OSD_CURYP 0x8C
+#define OSD_CURXL 0x90
+#define OSD_CURYL 0x94
+#define OSD_W0BMP01 0xA0
+#define OSD_W0BMP23 0xA4
+#define OSD_W0BMP45 0xA8
+#define OSD_W0BMP67 0xAC
+#define OSD_W0BMP89 0xB0
+#define OSD_W0BMPAB 0xB4
+#define OSD_W0BMPCD 0xB8
+#define OSD_W0BMPEF 0xBC
+#define OSD_W1BMP01 0xC0
+#define OSD_W1BMP23 0xC4
+#define OSD_W1BMP45 0xC8
+#define OSD_W1BMP67 0xCC
+#define OSD_W1BMP89 0xD0
+#define OSD_W1BMPAB 0xD4
+#define OSD_W1BMPCD 0xD8
+#define OSD_W1BMPEF 0xDC
+#define OSD_VBNDRY 0xE0
+#define OSD_EXTMODE 0xE4
+#define OSD_MISCCTL 0xE8
+#define OSD_CLUTRAMYCB 0xEC
+#define OSD_CLUTRAMCR 0xF0
+#define OSD_TRANSPVAL 0xF4
+#define OSD_TRANSPVALL 0xF4
+#define OSD_TRANSPVALU 0xF8
+#define OSD_TRANSPBMPIDX 0xFC
+#define OSD_PPVWIN0ADR 0xFC
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VPSSBL_INTSTAT_HSSIINT (1 << 14)
+#define VPSSBL_INTSTAT_CFALDINT (1 << 13)
+#define VPSSBL_INTSTAT_IPIPE_INT5 (1 << 12)
+#define VPSSBL_INTSTAT_IPIPE_INT4 (1 << 11)
+#define VPSSBL_INTSTAT_IPIPE_INT3 (1 << 10)
+#define VPSSBL_INTSTAT_IPIPE_INT2 (1 << 9)
+#define VPSSBL_INTSTAT_IPIPE_INT1 (1 << 8)
+#define VPSSBL_INTSTAT_IPIPE_INT0 (1 << 7)
+#define VPSSBL_INTSTAT_IPIPEIFINT (1 << 6)
+#define VPSSBL_INTSTAT_OSDINT (1 << 5)
+#define VPSSBL_INTSTAT_VENCINT (1 << 4)
+#define VPSSBL_INTSTAT_H3AINT (1 << 3)
+#define VPSSBL_INTSTAT_CCDC_VDINT2 (1 << 2)
+#define VPSSBL_INTSTAT_CCDC_VDINT1 (1 << 1)
+#define VPSSBL_INTSTAT_CCDC_VDINT0 (1 << 0)
+
+/* DM365 ISP5 bit definitions */
+#define ISP5_INTSTAT_VENCINT (1 << 21)
+#define ISP5_INTSTAT_OSDINT (1 << 20)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define OSD_MODE_CS (1 << 15)
+#define OSD_MODE_OVRSZ (1 << 14)
+#define OSD_MODE_OHRSZ (1 << 13)
+#define OSD_MODE_EF (1 << 12)
+#define OSD_MODE_VVRSZ (1 << 11)
+#define OSD_MODE_VHRSZ (1 << 10)
+#define OSD_MODE_FSINV (1 << 9)
+#define OSD_MODE_BCLUT (1 << 8)
+#define OSD_MODE_CABG_SHIFT 0
+#define OSD_MODE_CABG (0xff << 0)
+
+#define OSD_VIDWINMD_VFINV (1 << 15)
+#define OSD_VIDWINMD_V1EFC (1 << 14)
+#define OSD_VIDWINMD_VHZ1_SHIFT 12
+#define OSD_VIDWINMD_VHZ1 (3 << 12)
+#define OSD_VIDWINMD_VVZ1_SHIFT 10
+#define OSD_VIDWINMD_VVZ1 (3 << 10)
+#define OSD_VIDWINMD_VFF1 (1 << 9)
+#define OSD_VIDWINMD_ACT1 (1 << 8)
+#define OSD_VIDWINMD_V0EFC (1 << 6)
+#define OSD_VIDWINMD_VHZ0_SHIFT 4
+#define OSD_VIDWINMD_VHZ0 (3 << 4)
+#define OSD_VIDWINMD_VVZ0_SHIFT 2
+#define OSD_VIDWINMD_VVZ0 (3 << 2)
+#define OSD_VIDWINMD_VFF0 (1 << 1)
+#define OSD_VIDWINMD_ACT0 (1 << 0)
+
+#define OSD_OSDWIN0MD_ATN0E (1 << 14)
+#define OSD_OSDWIN0MD_RGB0E (1 << 13)
+#define OSD_OSDWIN0MD_BMP0MD_SHIFT 13
+#define OSD_OSDWIN0MD_BMP0MD (3 << 13)
+#define OSD_OSDWIN0MD_CLUTS0 (1 << 12)
+#define OSD_OSDWIN0MD_OHZ0_SHIFT 10
+#define OSD_OSDWIN0MD_OHZ0 (3 << 10)
+#define OSD_OSDWIN0MD_OVZ0_SHIFT 8
+#define OSD_OSDWIN0MD_OVZ0 (3 << 8)
+#define OSD_OSDWIN0MD_BMW0_SHIFT 6
+#define OSD_OSDWIN0MD_BMW0 (3 << 6)
+#define OSD_OSDWIN0MD_BLND0_SHIFT 3
+#define OSD_OSDWIN0MD_BLND0 (7 << 3)
+#define OSD_OSDWIN0MD_TE0 (1 << 2)
+#define OSD_OSDWIN0MD_OFF0 (1 << 1)
+#define OSD_OSDWIN0MD_OACT0 (1 << 0)
+
+#define OSD_OSDWIN1MD_OASW (1 << 15)
+#define OSD_OSDWIN1MD_ATN1E (1 << 14)
+#define OSD_OSDWIN1MD_RGB1E (1 << 13)
+#define OSD_OSDWIN1MD_BMP1MD_SHIFT 13
+#define OSD_OSDWIN1MD_BMP1MD (3 << 13)
+#define OSD_OSDWIN1MD_CLUTS1 (1 << 12)
+#define OSD_OSDWIN1MD_OHZ1_SHIFT 10
+#define OSD_OSDWIN1MD_OHZ1 (3 << 10)
+#define OSD_OSDWIN1MD_OVZ1_SHIFT 8
+#define OSD_OSDWIN1MD_OVZ1 (3 << 8)
+#define OSD_OSDWIN1MD_BMW1_SHIFT 6
+#define OSD_OSDWIN1MD_BMW1 (3 << 6)
+#define OSD_OSDWIN1MD_BLND1_SHIFT 3
+#define OSD_OSDWIN1MD_BLND1 (7 << 3)
+#define OSD_OSDWIN1MD_TE1 (1 << 2)
+#define OSD_OSDWIN1MD_OFF1 (1 << 1)
+#define OSD_OSDWIN1MD_OACT1 (1 << 0)
+
+#define OSD_OSDATRMD_OASW (1 << 15)
+#define OSD_OSDATRMD_OHZA_SHIFT 10
+#define OSD_OSDATRMD_OHZA (3 << 10)
+#define OSD_OSDATRMD_OVZA_SHIFT 8
+#define OSD_OSDATRMD_OVZA (3 << 8)
+#define OSD_OSDATRMD_BLNKINT_SHIFT 6
+#define OSD_OSDATRMD_BLNKINT (3 << 6)
+#define OSD_OSDATRMD_OFFA (1 << 1)
+#define OSD_OSDATRMD_BLNK (1 << 0)
+
+#define OSD_RECTCUR_RCAD_SHIFT 8
+#define OSD_RECTCUR_RCAD (0xff << 8)
+#define OSD_RECTCUR_CLUTSR (1 << 7)
+#define OSD_RECTCUR_RCHW_SHIFT 4
+#define OSD_RECTCUR_RCHW (7 << 4)
+#define OSD_RECTCUR_RCVW_SHIFT 1
+#define OSD_RECTCUR_RCVW (7 << 1)
+#define OSD_RECTCUR_RCACT (1 << 0)
+
+#define OSD_VIDWIN0OFST_V0LO (0x1ff << 0)
+
+#define OSD_VIDWIN1OFST_V1LO (0x1ff << 0)
+
+#define OSD_OSDWIN0OFST_O0LO (0x1ff << 0)
+
+#define OSD_OSDWIN1OFST_O1LO (0x1ff << 0)
+
+#define OSD_WINOFST_AH_SHIFT 9
+
+#define OSD_VIDWIN0OFST_V0AH (0xf << 9)
+#define OSD_VIDWIN1OFST_V1AH (0xf << 9)
+#define OSD_OSDWIN0OFST_O0AH (0xf << 9)
+#define OSD_OSDWIN1OFST_O1AH (0xf << 9)
+
+#define OSD_VIDWINADH_V1AH_SHIFT 8
+#define OSD_VIDWINADH_V1AH (0x7f << 8)
+#define OSD_VIDWINADH_V0AH_SHIFT 0
+#define OSD_VIDWINADH_V0AH (0x7f << 0)
+
+#define OSD_VIDWIN0ADL_V0AL (0xffff << 0)
+
+#define OSD_VIDWIN1ADL_V1AL (0xffff << 0)
+
+#define OSD_OSDWINADH_O1AH_SHIFT 8
+#define OSD_OSDWINADH_O1AH (0x7f << 8)
+#define OSD_OSDWINADH_O0AH_SHIFT 0
+#define OSD_OSDWINADH_O0AH (0x7f << 0)
+
+#define OSD_OSDWIN0ADL_O0AL (0xffff << 0)
+
+#define OSD_OSDWIN1ADL_O1AL (0xffff << 0)
+
+#define OSD_BASEPX_BPX (0x3ff << 0)
+
+#define OSD_BASEPY_BPY (0x1ff << 0)
+
+#define OSD_VIDWIN0XP_V0X (0x7ff << 0)
+
+#define OSD_VIDWIN0YP_V0Y (0x7ff << 0)
+
+#define OSD_VIDWIN0XL_V0W (0x7ff << 0)
+
+#define OSD_VIDWIN0YL_V0H (0x7ff << 0)
+
+#define OSD_VIDWIN1XP_V1X (0x7ff << 0)
+
+#define OSD_VIDWIN1YP_V1Y (0x7ff << 0)
+
+#define OSD_VIDWIN1XL_V1W (0x7ff << 0)
+
+#define OSD_VIDWIN1YL_V1H (0x7ff << 0)
+
+#define OSD_OSDWIN0XP_W0X (0x7ff << 0)
+
+#define OSD_OSDWIN0YP_W0Y (0x7ff << 0)
+
+#define OSD_OSDWIN0XL_W0W (0x7ff << 0)
+
+#define OSD_OSDWIN0YL_W0H (0x7ff << 0)
+
+#define OSD_OSDWIN1XP_W1X (0x7ff << 0)
+
+#define OSD_OSDWIN1YP_W1Y (0x7ff << 0)
+
+#define OSD_OSDWIN1XL_W1W (0x7ff << 0)
+
+#define OSD_OSDWIN1YL_W1H (0x7ff << 0)
+
+#define OSD_CURXP_RCSX (0x7ff << 0)
+
+#define OSD_CURYP_RCSY (0x7ff << 0)
+
+#define OSD_CURXL_RCSW (0x7ff << 0)
+
+#define OSD_CURYL_RCSH (0x7ff << 0)
+
+#define OSD_EXTMODE_EXPMDSEL (1 << 15)
+#define OSD_EXTMODE_SCRNHEXP_SHIFT 13
+#define OSD_EXTMODE_SCRNHEXP (3 << 13)
+#define OSD_EXTMODE_SCRNVEXP (1 << 12)
+#define OSD_EXTMODE_OSD1BLDCHR (1 << 11)
+#define OSD_EXTMODE_OSD0BLDCHR (1 << 10)
+#define OSD_EXTMODE_ATNOSD1EN (1 << 9)
+#define OSD_EXTMODE_ATNOSD0EN (1 << 8)
+#define OSD_EXTMODE_OSDHRSZ15 (1 << 7)
+#define OSD_EXTMODE_VIDHRSZ15 (1 << 6)
+#define OSD_EXTMODE_ZMFILV1HEN (1 << 5)
+#define OSD_EXTMODE_ZMFILV1VEN (1 << 4)
+#define OSD_EXTMODE_ZMFILV0HEN (1 << 3)
+#define OSD_EXTMODE_ZMFILV0VEN (1 << 2)
+#define OSD_EXTMODE_EXPFILHEN (1 << 1)
+#define OSD_EXTMODE_EXPFILVEN (1 << 0)
+
+#define OSD_MISCCTL_BLDSEL (1 << 15)
+#define OSD_MISCCTL_S420D (1 << 14)
+#define OSD_MISCCTL_BMAPT (1 << 13)
+#define OSD_MISCCTL_DM365M (1 << 12)
+#define OSD_MISCCTL_RGBEN (1 << 7)
+#define OSD_MISCCTL_RGBWIN (1 << 6)
+#define OSD_MISCCTL_DMANG (1 << 6)
+#define OSD_MISCCTL_TMON (1 << 5)
+#define OSD_MISCCTL_RSEL (1 << 4)
+#define OSD_MISCCTL_CPBSY (1 << 3)
+#define OSD_MISCCTL_PPSW (1 << 2)
+#define OSD_MISCCTL_PPRV (1 << 1)
+
+#define OSD_CLUTRAMYCB_Y_SHIFT 8
+#define OSD_CLUTRAMYCB_Y (0xff << 8)
+#define OSD_CLUTRAMYCB_CB_SHIFT 0
+#define OSD_CLUTRAMYCB_CB (0xff << 0)
+
+#define OSD_CLUTRAMCR_CR_SHIFT 8
+#define OSD_CLUTRAMCR_CR (0xff << 8)
+#define OSD_CLUTRAMCR_CADDR_SHIFT 0
+#define OSD_CLUTRAMCR_CADDR (0xff << 0)
+
+#define OSD_TRANSPVAL_RGBTRANS (0xffff << 0)
+
+#define OSD_TRANSPVALL_RGBL (0xffff << 0)
+
+#define OSD_TRANSPVALU_Y_SHIFT 8
+#define OSD_TRANSPVALU_Y (0xff << 8)
+#define OSD_TRANSPVALU_RGBU_SHIFT 0
+#define OSD_TRANSPVALU_RGBU (0xff << 0)
+
+#define OSD_TRANSPBMPIDX_BMP1_SHIFT 8
+#define OSD_TRANSPBMPIDX_BMP1 (0xff << 8)
+#define OSD_TRANSPBMPIDX_BMP0_SHIFT 0
+#define OSD_TRANSPBMPIDX_BMP0 0xff
+
+#endif /* _VPBE_OSD_REGS_H */
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
new file mode 100644
index 000000000..c2cfaa9c6
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include <mach/hardware.h>
+#include <mach/mux.h>
+#endif
+
+#include <linux/platform_data/i2c-davinci.h>
+
+#include <linux/io.h>
+
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+
+#include "vpbe_venc_regs.h"
+
+#define MODULE_NAME "davinci-vpbe-venc"
+
+static const struct platform_device_id vpbe_venc_devtype[] = {
+ {
+ .name = DM644X_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+ {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_venc_devtype);
+
+static int debug = 2;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-2");
+
+struct venc_state {
+ struct v4l2_subdev sd;
+ struct venc_callback *callback;
+ struct venc_platform_data *pdata;
+ struct device *pdev;
+ u32 output;
+ v4l2_std_id std;
+ spinlock_t lock;
+ void __iomem *venc_base;
+ void __iomem *vdaccfg_reg;
+ enum vpbe_version venc_type;
+};
+
+static inline struct venc_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct venc_state, sd);
+}
+
+static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset)
+{
+ struct venc_state *venc = to_state(sd);
+
+ return readl(venc->venc_base + offset);
+}
+
+static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, (venc->venc_base + offset));
+
+ return val;
+}
+
+static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset,
+ u32 val, u32 mask)
+{
+ u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask);
+
+ venc_write(sd, offset, new_val);
+
+ return new_val;
+}
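+
+/*
+ * Editor's note, illustrative only (not part of the original driver):
+ * venc_modify() does a read-modify-write so callers can update a single
+ * bit-field without disturbing the rest of a register. For example,
+ * selecting the PAL TV type in VMOD while keeping every other VMOD bit
+ * intact would look like:
+ *
+ *     venc_modify(sd, VENC_VMOD,
+ *                 SDTV_PAL << VENC_VMOD_TVTYP_SHIFT, VENC_VMOD_TVTYP);
+ *
+ * i.e. (venc_read(sd, VENC_VMOD) & ~VENC_VMOD_TVTYP) | (val & VENC_VMOD_TVTYP)
+ * is computed and written back.
+ */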
+
+static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, venc->vdaccfg_reg);
+
+ val = readl(venc->vdaccfg_reg);
+
+ return val;
+}
+
+#define VDAC_COMPONENT 0x543
+#define VDAC_S_VIDEO 0x210
+/* This function sets the DAC of the VPBE for the various outputs */
+static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
+{
+ switch (out_index) {
+ case 0:
+ v4l2_dbg(debug, 1, sd, "Setting output to Composite\n");
+ venc_write(sd, VENC_DACSEL, 0);
+ break;
+ case 1:
+ v4l2_dbg(debug, 1, sd, "Setting output to Component\n");
+ venc_write(sd, VENC_DACSEL, VDAC_COMPONENT);
+ break;
+ case 2:
+ v4l2_dbg(debug, 1, sd, "Setting output to S-video\n");
+ venc_write(sd, VENC_DACSEL, VDAC_S_VIDEO);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
+{
+ struct venc_state *venc = to_state(sd);
+
+ v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
+
+ if (benable) {
+ venc_write(sd, VENC_VMOD, 0);
+ venc_write(sd, VENC_CVBS, 0);
+ venc_write(sd, VENC_LCDOUT, 0);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_DACSEL, 0);
+
+ } else {
+ venc_write(sd, VENC_VMOD, 0);
+ /* disable VCLK output pin enable */
+ venc_write(sd, VENC_VIDCTL, 0x141);
+
+ /* Disable output sync pins */
+ venc_write(sd, VENC_SYNCCTL, 0);
+
+ /* Disable DCLOCK */
+ venc_write(sd, VENC_DCLKCTL, 0);
+ venc_write(sd, VENC_DRGBX1, 0x0000057C);
+
+ /* Disable LCD output control (accepting default polarity) */
+ venc_write(sd, VENC_LCDOUT, 0);
+ if (venc->venc_type != VPBE_VERSION_3)
+ venc_write(sd, VENC_CMPNT, 0x100);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+
+ venc_write(sd, VENC_HSDLY, 0);
+ venc_write(sd, VENC_VSDLY, 0);
+
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_VSTARTA, 0);
+
+ /* Set OSD clock and OSD Sync Advance registers */
+ venc_write(sd, VENC_OSDCLK0, 1);
+ venc_write(sd, VENC_OSDCLK1, 2);
+ }
+}
+
+static void
+venc_enable_vpss_clock(int venc_type,
+ enum vpbe_enc_timings_type type,
+ unsigned int pclock)
+{
+ if (venc_type == VPBE_VERSION_1)
+ return;
+
+ if (venc_type == VPBE_VERSION_2 && (type == VPBE_ENC_STD || (type ==
+ VPBE_ENC_DV_TIMINGS && pclock <= 27000000))) {
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
+ return;
+ }
+
+ if (venc_type == VPBE_VERSION_3 && type == VPBE_ENC_STD)
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 0);
+}
+
+#define VDAC_CONFIG_SD_V3 0x0E21A6B6
+#define VDAC_CONFIG_SD_V2 0x081141CF
+/*
+ * setting NTSC mode
+ */
+static int venc_set_ntsc(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_525_60);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_3) {
+ venc_write(sd, VENC_CLKCTL, 0x01);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+ } else if (venc->venc_type == VPBE_VERSION_2) {
+ venc_write(sd, VENC_CLKCTL, 0x01);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+ } else {
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
+ }
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * setting PAL mode
+ */
+static int venc_set_pal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+
+ v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_625_50);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_3) {
+ venc_write(sd, VENC_CLKCTL, 0x1);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+ } else if (venc->venc_type == VPBE_VERSION_2) {
+ venc_write(sd, VENC_CLKCTL, 0x1);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+ } else {
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ }
+
+ venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
+ VENC_SYNCCTL_OVD);
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD,
+ (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+#define VDAC_CONFIG_HD_V2 0x081141EF
+/*
+ * venc_set_480p59_94
+ *
+ * This function configures the video encoder to the EDTV (525p) component setting.
+ */
+static int venc_set_480p59_94(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
+ return -EINVAL;
+
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_2)
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ if (venc->venc_type == VPBE_VERSION_1) {
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ }
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_576p50
+ *
+ * This function configures the video encoder to the 625p (576p50) component setting.
+ */
+static int venc_set_576p50(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
+
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
+ return -EINVAL;
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000);
+ venc_enabledigitaloutput(sd, 0);
+
+ if (venc->venc_type == VPBE_VERSION_2)
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ if (venc->venc_type == VPBE_VERSION_1) {
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ }
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_720p60_internal - Setup 720p60 in venc for dm365 only
+ */
+static int venc_set_720p60_internal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000);
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ venc_write(sd, VENC_VMOD, 0);
+ /* DM365 component HD mode */
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_720P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+ venc_write(sd, VENC_XHINTVL, 0);
+ return 0;
+}
+
+/*
+ * venc_set_1080i30_internal - Setup 1080i30 in venc for dm365 only
+ */
+static int venc_set_1080i30_internal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0)
+ return -EINVAL;
+
+ venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000);
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+
+ venc_write(sd, VENC_VMOD, 0);
+ /* DM365 component HD mode */
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_1080I << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+ venc_write(sd, VENC_XHINTVL, 0);
+ return 0;
+}
+
+static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+ v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
+
+ if (norm & V4L2_STD_525_60)
+ return venc_set_ntsc(sd);
+ else if (norm & V4L2_STD_625_50)
+ return venc_set_pal(sd);
+
+ return -EINVAL;
+}
+
+static int venc_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct venc_state *venc = to_state(sd);
+ u32 height = dv_timings->bt.height;
+ int ret;
+
+ v4l2_dbg(debug, 1, sd, "venc_s_dv_timings\n");
+
+ if (height == 576)
+ return venc_set_576p50(sd);
+ else if (height == 480)
+ return venc_set_480p59_94(sd);
+ else if ((height == 720) &&
+ (venc->venc_type == VPBE_VERSION_2)) {
+ /* TBD setup internal 720p mode here */
+ ret = venc_set_720p60_internal(sd);
+ /* for DM365 VPBE, there is DAC inside */
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ return ret;
+ } else if ((height == 1080) &&
+ (venc->venc_type == VPBE_VERSION_2)) {
+ /* TBD setup internal 1080i mode here */
+ ret = venc_set_1080i30_internal(sd);
+ /* for DM365 VPBE, there is DAC inside */
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ return ret;
+ }
+ return -EINVAL;
+}
+
+static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
+ u32 config)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+ v4l2_dbg(debug, 1, sd, "venc_s_routing\n");
+
+ ret = venc_set_dac(sd, output);
+ if (!ret)
+ venc->output = output;
+
+ return ret;
+}
+
+static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ u32 val;
+
+ switch (cmd) {
+ case VENC_GET_FLD:
+ val = venc_read(sd, VENC_VSTAT);
+ *((int *)arg) = ((val & VENC_VSTAT_FIDST) ==
+ VENC_VSTAT_FIDST);
+ break;
+ default:
+ v4l2_err(sd, "Wrong IOCTL cmd\n");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops venc_core_ops = {
+ .command = venc_command,
+};
+
+static const struct v4l2_subdev_video_ops venc_video_ops = {
+ .s_routing = venc_s_routing,
+ .s_std_output = venc_s_std_output,
+ .s_dv_timings = venc_s_dv_timings,
+};
+
+static const struct v4l2_subdev_ops venc_ops = {
+ .core = &venc_core_ops,
+ .video = &venc_video_ops,
+};
+
+static int venc_initialize(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+ /* Set default to output to composite and std to NTSC */
+ venc->output = 0;
+ venc->std = V4L2_STD_525_60;
+
+ ret = venc_s_routing(sd, 0, venc->output, 0);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting output during init\n");
+ return -EINVAL;
+ }
+
+ ret = venc_s_std_output(sd, venc->std);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting std during init\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int venc_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct venc_state **venc = data;
+
+ if (strstr(pdev->name, "vpbe-venc") != NULL)
+ *venc = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
+ const char *venc_name)
+{
+ struct venc_state *venc = NULL;
+
+ bus_for_each_dev(&platform_bus_type, NULL, &venc,
+ venc_device_get);
+ if (venc == NULL)
+ return NULL;
+
+ v4l2_subdev_init(&venc->sd, &venc_ops);
+
+ strcpy(venc->sd.name, venc_name);
+ if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) {
+ v4l2_err(v4l2_dev,
+ "vpbe unable to register venc sub device\n");
+ return NULL;
+ }
+ if (venc_initialize(&venc->sd)) {
+ v4l2_err(v4l2_dev,
+ "vpbe venc initialization failed\n");
+ return NULL;
+ }
+
+ return &venc->sd;
+}
+EXPORT_SYMBOL(venc_sub_dev_init);
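+
+/*
+ * Editor's sketch (illustrative, not part of the original driver): the
+ * VPBE display bridge is expected to pick up the VENC sub device roughly
+ * like this, where the identifiers and the sub device name string are
+ * hypothetical:
+ *
+ *     struct v4l2_subdev *venc_sd =
+ *             venc_sub_dev_init(&vpbe_dev->v4l2_dev, "vpbe-venc");
+ *     if (!venc_sd)
+ *             return -ENODEV;
+ *
+ * The helper looks up the already-probed VENC platform device via
+ * bus_for_each_dev() above and registers it with the caller's v4l2_device.
+ */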
+
+static int venc_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *pdev_id;
+ struct venc_state *venc;
+ struct resource *res;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "No platform data for VENC sub device");
+ return -EINVAL;
+ }
+
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id)
+ return -EINVAL;
+
+ venc = devm_kzalloc(&pdev->dev, sizeof(struct venc_state), GFP_KERNEL);
+ if (venc == NULL)
+ return -ENOMEM;
+
+ venc->venc_type = pdev_id->driver_data;
+ venc->pdev = &pdev->dev;
+ venc->pdata = pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ venc->venc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(venc->venc_base))
+ return PTR_ERR(venc->venc_base);
+
+ if (venc->venc_type != VPBE_VERSION_1) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(venc->vdaccfg_reg))
+ return PTR_ERR(venc->vdaccfg_reg);
+ }
+ spin_lock_init(&venc->lock);
+ platform_set_drvdata(pdev, venc);
+ dev_notice(venc->pdev, "VENC sub device probe success\n");
+
+ return 0;
+}
+
+static int venc_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver venc_driver = {
+ .probe = venc_probe,
+ .remove = venc_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ },
+ .id_table = vpbe_venc_devtype
+};
+
+module_platform_driver(venc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPBE VENC Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_venc_regs.h b/drivers/media/platform/davinci/vpbe_venc_regs.h
new file mode 100644
index 000000000..6ad38f7ab
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_venc_regs.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _VPBE_VENC_REGS_H
+#define _VPBE_VENC_REGS_H
+
+/* VPBE Video Encoder / Digital LCD Subsystem Registers (VENC) */
+#define VENC_VMOD 0x00
+#define VENC_VIDCTL 0x04
+#define VENC_VDPRO 0x08
+#define VENC_SYNCCTL 0x0C
+#define VENC_HSPLS 0x10
+#define VENC_VSPLS 0x14
+#define VENC_HINT 0x18
+#define VENC_HSTART 0x1C
+#define VENC_HVALID 0x20
+#define VENC_VINT 0x24
+#define VENC_VSTART 0x28
+#define VENC_VVALID 0x2C
+#define VENC_HSDLY 0x30
+#define VENC_VSDLY 0x34
+#define VENC_YCCCTL 0x38
+#define VENC_RGBCTL 0x3C
+#define VENC_RGBCLP 0x40
+#define VENC_LINECTL 0x44
+#define VENC_CULLLINE 0x48
+#define VENC_LCDOUT 0x4C
+#define VENC_BRTS 0x50
+#define VENC_BRTW 0x54
+#define VENC_ACCTL 0x58
+#define VENC_PWMP 0x5C
+#define VENC_PWMW 0x60
+#define VENC_DCLKCTL 0x64
+#define VENC_DCLKPTN0 0x68
+#define VENC_DCLKPTN1 0x6C
+#define VENC_DCLKPTN2 0x70
+#define VENC_DCLKPTN3 0x74
+#define VENC_DCLKPTN0A 0x78
+#define VENC_DCLKPTN1A 0x7C
+#define VENC_DCLKPTN2A 0x80
+#define VENC_DCLKPTN3A 0x84
+#define VENC_DCLKHS 0x88
+#define VENC_DCLKHSA 0x8C
+#define VENC_DCLKHR 0x90
+#define VENC_DCLKVS 0x94
+#define VENC_DCLKVR 0x98
+#define VENC_CAPCTL 0x9C
+#define VENC_CAPDO 0xA0
+#define VENC_CAPDE 0xA4
+#define VENC_ATR0 0xA8
+#define VENC_ATR1 0xAC
+#define VENC_ATR2 0xB0
+#define VENC_VSTAT 0xB8
+#define VENC_RAMADR 0xBC
+#define VENC_RAMPORT 0xC0
+#define VENC_DACTST 0xC4
+#define VENC_YCOLVL 0xC8
+#define VENC_SCPROG 0xCC
+#define VENC_CVBS 0xDC
+#define VENC_CMPNT 0xE0
+#define VENC_ETMG0 0xE4
+#define VENC_ETMG1 0xE8
+#define VENC_ETMG2 0xEC
+#define VENC_ETMG3 0xF0
+#define VENC_DACSEL 0xF4
+#define VENC_ARGBX0 0x100
+#define VENC_ARGBX1 0x104
+#define VENC_ARGBX2 0x108
+#define VENC_ARGBX3 0x10C
+#define VENC_ARGBX4 0x110
+#define VENC_DRGBX0 0x114
+#define VENC_DRGBX1 0x118
+#define VENC_DRGBX2 0x11C
+#define VENC_DRGBX3 0x120
+#define VENC_DRGBX4 0x124
+#define VENC_VSTARTA 0x128
+#define VENC_OSDCLK0 0x12C
+#define VENC_OSDCLK1 0x130
+#define VENC_HVLDCL0 0x134
+#define VENC_HVLDCL1 0x138
+#define VENC_OSDHADV 0x13C
+#define VENC_CLKCTL 0x140
+#define VENC_GAMCTL 0x144
+#define VENC_XHINTVL 0x174
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VENC_VMOD_VDMD_SHIFT 12
+#define VENC_VMOD_VDMD_YCBCR16 0
+#define VENC_VMOD_VDMD_YCBCR8 1
+#define VENC_VMOD_VDMD_RGB666 2
+#define VENC_VMOD_VDMD_RGB8 3
+#define VENC_VMOD_VDMD_EPSON 4
+#define VENC_VMOD_VDMD_CASIO 5
+#define VENC_VMOD_VDMD_UDISPQVGA 6
+#define VENC_VMOD_VDMD_STNLCD 7
+#define VENC_VMOD_VIE_SHIFT 1
+#define VENC_VMOD_VDMD (7 << 12)
+#define VENC_VMOD_ITLCL (1 << 11)
+#define VENC_VMOD_ITLC (1 << 10)
+#define VENC_VMOD_NSIT (1 << 9)
+#define VENC_VMOD_HDMD (1 << 8)
+#define VENC_VMOD_TVTYP_SHIFT 6
+#define VENC_VMOD_TVTYP (3 << 6)
+#define VENC_VMOD_SLAVE (1 << 5)
+#define VENC_VMOD_VMD (1 << 4)
+#define VENC_VMOD_BLNK (1 << 3)
+#define VENC_VMOD_VIE (1 << 1)
+#define VENC_VMOD_VENC (1 << 0)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define VENC_VIDCTL_VCLKP (1 << 14)
+#define VENC_VIDCTL_VCLKE_SHIFT 13
+#define VENC_VIDCTL_VCLKE (1 << 13)
+#define VENC_VIDCTL_VCLKZ_SHIFT 12
+#define VENC_VIDCTL_VCLKZ (1 << 12)
+#define VENC_VIDCTL_SYDIR_SHIFT 8
+#define VENC_VIDCTL_SYDIR (1 << 8)
+#define VENC_VIDCTL_DOMD_SHIFT 4
+#define VENC_VIDCTL_DOMD (3 << 4)
+#define VENC_VIDCTL_YCDIR_SHIFT 0
+#define VENC_VIDCTL_YCDIR (1 << 0)
+
+#define VENC_VDPRO_ATYCC_SHIFT 5
+#define VENC_VDPRO_ATYCC (1 << 5)
+#define VENC_VDPRO_ATCOM_SHIFT 4
+#define VENC_VDPRO_ATCOM (1 << 4)
+#define VENC_VDPRO_DAFRQ (1 << 3)
+#define VENC_VDPRO_DAUPS (1 << 2)
+#define VENC_VDPRO_CUPS (1 << 1)
+#define VENC_VDPRO_YUPS (1 << 0)
+
+#define VENC_SYNCCTL_VPL_SHIFT 3
+#define VENC_SYNCCTL_VPL (1 << 3)
+#define VENC_SYNCCTL_HPL_SHIFT 2
+#define VENC_SYNCCTL_HPL (1 << 2)
+#define VENC_SYNCCTL_SYEV_SHIFT 1
+#define VENC_SYNCCTL_SYEV (1 << 1)
+#define VENC_SYNCCTL_SYEH_SHIFT 0
+#define VENC_SYNCCTL_SYEH (1 << 0)
+#define VENC_SYNCCTL_OVD_SHIFT 14
+#define VENC_SYNCCTL_OVD (1 << 14)
+
+#define VENC_DCLKCTL_DCKEC_SHIFT 11
+#define VENC_DCLKCTL_DCKEC (1 << 11)
+#define VENC_DCLKCTL_DCKPW_SHIFT 0
+#define VENC_DCLKCTL_DCKPW (0x3f << 0)
+
+#define VENC_VSTAT_FIDST (1 << 4)
+
+#define VENC_CMPNT_MRGB_SHIFT 14
+#define VENC_CMPNT_MRGB (1 << 14)
+
+#endif /* _VPBE_VENC_REGS_H */
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
new file mode 100644
index 000000000..8613358ed
--- /dev/null
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -0,0 +1,1945 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Driver name : VPFE Capture driver
+ * VPFE Capture driver allows applications to capture and stream video
+ * frames on DaVinci SoCs (DM6446, DM355 etc) from a YUV source such as
+ * TVP5146 or Raw Bayer RGB image data from an image sensor
+ * such as Micron's MT9T001, MT9T031, etc.
+ *
+ * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
+ * consists of a Video Processing Front End (VPFE) for capturing
+ * video/raw image data and Video Processing Back End (VPBE) for displaying
+ * YUV data through an in-built analog encoder or Digital LCD port. This
+ * driver is for capture through VPFE. A typical EVM using these SoCs has
+ * the following high-level configuration.
+ *
+ *
+ * decoder(TVP5146/ YUV/
+ * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
+ * data input | |
+ * V |
+ * SDRAM |
+ * V
+ * Image Processor
+ * |
+ * V
+ * SDRAM
+ * The data flow happens from a decoder connected to the VPFE over a
+ * YUV embedded (BT.656/BT.1120) or separate sync or raw bayer rgb interface
+ * and to the input of VPFE through an optional MUX (if more inputs are
+ * to be interfaced on the EVM). The input data is first passed through
+ * the CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
+ * does little or no processing on YUV data but pre-processes Raw
+ * Bayer RGB data through modules such as Defect Pixel Correction (DFC),
+ * Color Space Conversion (CSC), data gain/offset, etc. After this, data
+ * can be written to SDRAM or can be connected to the image processing
+ * block such as IPIPE (on DM355 only).
+ *
+ * Features supported
+ * - MMAP IO
+ * - Capture using TVP5146 over BT.656
+ * - support for interfacing decoders using sub device model
+ * - Work with DM355 or DM6446 CCDC to do Raw Bayer RGB/YUV
+ * data capture to SDRAM.
+ * TODO list
+ * - Support multiple REQBUF after open
+ * - Support for de-allocating buffers through REQBUF
+ * - Support for Raw Bayer RGB capture
+ * - Support for chaining Image Processor
+ * - Support for static allocation of buffers
+ * - Support for USERPTR IO
+ * - Support for STREAMON before QBUF
+ * - Support for control ioctls
+ */
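+
+/*
+ * Editor's sketch (illustrative, not part of the original driver): a
+ * typical userspace MMAP capture sequence against this driver uses the
+ * standard V4L2 ioctls; the device node path below is hypothetical:
+ *
+ *     fd = open("/dev/video0", O_RDWR);
+ *     ioctl(fd, VIDIOC_S_FMT, &fmt);     // e.g. V4L2_PIX_FMT_UYVY
+ *     ioctl(fd, VIDIOC_REQBUFS, &req);   // V4L2_MEMORY_MMAP, count >= 3
+ *     mmap() each buffer and ioctl(fd, VIDIOC_QBUF, &buf) for each
+ *     ioctl(fd, VIDIOC_STREAMON, &type);
+ *     loop: ioctl(fd, VIDIOC_DQBUF, &buf); process; re-queue with VIDIOC_QBUF
+ *     ioctl(fd, VIDIOC_STREAMOFF, &type);
+ *
+ * The "count >= 3" follows config_params.min_numbuffers defined below.
+ */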
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-common.h>
+#include <linux/io.h>
+#include <media/davinci/vpfe_capture.h>
+#include "ccdc_hw_device.h"
+
+static int debug;
+static u32 numbuffers = 3;
+static u32 bufsize = (720 * 576 * 2);
+
+module_param(numbuffers, uint, S_IRUGO);
+module_param(bufsize, uint, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(numbuffers, "buffer count (default:3)");
+MODULE_PARM_DESC(bufsize, "buffer size in bytes (default:720 x 576 x 2)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/* standard information */
+struct vpfe_standard {
+ v4l2_std_id std_id;
+ unsigned int width;
+ unsigned int height;
+ struct v4l2_fract pixelaspect;
+ /* 0 - progressive, 1 - interlaced */
+ int frame_format;
+};
+
+/* ccdc configuration */
+struct ccdc_config {
+ /* This makes sure vpfe is probed and ready to go */
+ int vpfe_probed;
+ /* name of ccdc device */
+ char name[32];
+};
+
+/* data structures */
+static struct vpfe_config_params config_params = {
+ .min_numbuffers = 3,
+ .numbuffers = 3,
+ .min_bufsize = 720 * 480 * 2,
+ .device_bufsize = 720 * 576 * 2,
+};
+
+/* ccdc device registered */
+static const struct ccdc_hw_device *ccdc_dev;
+/* lock for accessing ccdc information */
+static DEFINE_MUTEX(ccdc_lock);
+/* ccdc configuration */
+static struct ccdc_config *ccdc_cfg;
+
+static const struct vpfe_standard vpfe_standards[] = {
+ {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
+ {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
+};
+
+/* Used when raw Bayer image from ccdc is directly captured to SDRAM */
+static const struct vpfe_pixel_format vpfe_pix_fmts[] = {
+ {
+ .fmtdesc = {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb 8bit A-Law compr.",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ },
+ .bpp = 1,
+ },
+ {
+ .fmtdesc = {
+ .index = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb - 16bit",
+ .pixelformat = V4L2_PIX_FMT_SBGGR16,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb 8bit DPCM compr.",
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+ },
+ .bpp = 1,
+ },
+ {
+ .fmtdesc = {
+ .index = 3,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "YCbCr 4:2:2 Interleaved UYVY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 4,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "YCbCr 4:2:2 Interleaved YUYV",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 5,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Y/CbCr 4:2:0 - Semi planar",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ },
+ .bpp = 1,
+ },
+};
+
+/*
+ * vpfe_lookup_pix_format()
+ * lookup an entry in the vpfe pix format table based on pix_format
+ */
+static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) {
+ if (pix_format == vpfe_pix_fmts[i].fmtdesc.pixelformat)
+ return &vpfe_pix_fmts[i];
+ }
+ return NULL;
+}
+
+/*
+ * vpfe_register_ccdc_device. CCDC module calls this to
+ * register with vpfe capture
+ */
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
+{
+ int ret = 0;
+ printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
+
+ BUG_ON(!dev->hw_ops.open);
+ BUG_ON(!dev->hw_ops.enable);
+ BUG_ON(!dev->hw_ops.set_hw_if_params);
+ BUG_ON(!dev->hw_ops.configure);
+ BUG_ON(!dev->hw_ops.set_buftype);
+ BUG_ON(!dev->hw_ops.get_buftype);
+ BUG_ON(!dev->hw_ops.enum_pix);
+ BUG_ON(!dev->hw_ops.set_frame_format);
+ BUG_ON(!dev->hw_ops.get_frame_format);
+ BUG_ON(!dev->hw_ops.get_pixel_format);
+ BUG_ON(!dev->hw_ops.set_pixel_format);
+ BUG_ON(!dev->hw_ops.set_image_window);
+ BUG_ON(!dev->hw_ops.get_image_window);
+ BUG_ON(!dev->hw_ops.get_line_length);
+ BUG_ON(!dev->hw_ops.getfid);
+
+ mutex_lock(&ccdc_lock);
+ if (!ccdc_cfg) {
+ /*
+ * TODO. Will this ever happen? If so, we need to fix it.
+ * Probably we need to add the request to a linked list and
+ * walk through it during vpfe probe
+ */
+ printk(KERN_ERR "vpfe capture not initialized\n");
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ if (strcmp(dev->name, ccdc_cfg->name)) {
+ /* ignore this ccdc */
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (ccdc_dev) {
+ printk(KERN_ERR "ccdc already registered\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ccdc_dev = dev;
+unlock:
+ mutex_unlock(&ccdc_lock);
+ return ret;
+}
+EXPORT_SYMBOL(vpfe_register_ccdc_device);
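+
+/*
+ * Editor's sketch (illustrative, not part of the original driver): a CCDC
+ * hardware module is expected to fill a struct ccdc_hw_device and register
+ * it roughly as below; the member names follow the checks above, while the
+ * identifiers and the name string are hypothetical:
+ *
+ *     static const struct ccdc_hw_device my_ccdc_hw_dev = {
+ *             .name   = "DM6446 CCDC",
+ *             .owner  = THIS_MODULE,
+ *             .hw_ops = {
+ *                     .open   = my_ccdc_open,
+ *                     .enable = my_ccdc_enable,
+ *                     // ... every op verified by the BUG_ON()s above
+ *             },
+ *     };
+ *     vpfe_register_ccdc_device(&my_ccdc_hw_dev);
+ *
+ * and must call vpfe_unregister_ccdc_device() when it is removed.
+ */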
+
+/*
+ * vpfe_unregister_ccdc_device. CCDC module calls this to
+ * unregister with vpfe capture
+ */
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev)
+{
+ if (!dev) {
+ printk(KERN_ERR "invalid ccdc device ptr\n");
+ return;
+ }
+
+ printk(KERN_NOTICE "vpfe_unregister_ccdc_device, dev->name = %s\n",
+ dev->name);
+
+ if (strcmp(dev->name, ccdc_cfg->name)) {
+ /* ignore this ccdc */
+ return;
+ }
+
+ mutex_lock(&ccdc_lock);
+ ccdc_dev = NULL;
+ mutex_unlock(&ccdc_lock);
+}
+EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
+
+/*
+ * vpfe_config_ccdc_image_format()
+ * For a pix format, configure ccdc to setup the capture
+ */
+static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ int ret = 0;
+
+ if (ccdc_dev->hw_ops.set_pixel_format(
+ vpfe_dev->fmt.fmt.pix.pixelformat) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "couldn't set pix format in ccdc\n");
+ return -EINVAL;
+ }
+ /* configure the image window */
+ ccdc_dev->hw_ops.set_image_window(&vpfe_dev->crop);
+
+ switch (vpfe_dev->fmt.fmt.pix.field) {
+ case V4L2_FIELD_INTERLACED:
+ /* do nothing, since it is default */
+ ret = ccdc_dev->hw_ops.set_buftype(
+ CCDC_BUFTYPE_FLD_INTERLEAVED);
+ break;
+ case V4L2_FIELD_NONE:
+ frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ ret = ccdc_dev->hw_ops.set_buftype(
+ CCDC_BUFTYPE_FLD_SEPARATED);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set the frame format */
+ if (!ret)
+ ret = ccdc_dev->hw_ops.set_frame_format(frm_fmt);
+ return ret;
+}
+/*
+ * vpfe_config_image_format()
+ * For a given standard, this function sets up the default
+ * pix format & crop values in the vpfe device and ccdc. It first
+ * starts with default values from the standard table. It then checks
+ * if the sub device supports get_fmt and overrides the values based on
+ * that. Crop values are set to match the scan resolution starting at
+ * 0,0. Finally it calls vpfe_config_ccdc_image_format() to set the
+ * values in the ccdc
+ */
+static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
+ v4l2_std_id std_id)
+{
+ struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
+ struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
+ if (vpfe_standards[i].std_id & std_id) {
+ vpfe_dev->std_info.active_pixels =
+ vpfe_standards[i].width;
+ vpfe_dev->std_info.active_lines =
+ vpfe_standards[i].height;
+ vpfe_dev->std_info.frame_format =
+ vpfe_standards[i].frame_format;
+ vpfe_dev->std_index = i;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vpfe_standards)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "standard not supported\n");
+ return -EINVAL;
+ }
+
+ vpfe_dev->crop.top = 0;
+ vpfe_dev->crop.left = 0;
+ vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
+ vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
+ pix->width = vpfe_dev->crop.width;
+ pix->height = vpfe_dev->crop.height;
+
+ /* first field and frame format based on standard frame format */
+ if (vpfe_dev->std_info.frame_format) {
+ pix->field = V4L2_FIELD_INTERLACED;
+ /* assume V4L2_PIX_FMT_UYVY as default */
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ v4l2_fill_mbus_format(mbus_fmt, pix,
+ MEDIA_BUS_FMT_YUYV10_2X10);
+ } else {
+ pix->field = V4L2_FIELD_NONE;
+ /* assume V4L2_PIX_FMT_SBGGR8 */
+ pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ v4l2_fill_mbus_format(mbus_fmt, pix,
+ MEDIA_BUS_FMT_SBGGR8_1X8);
+ }
+
+ /* if sub device supports get_fmt, override the defaults */
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id, pad, get_fmt, NULL, &fmt);
+
+ if (ret && ret != -ENOIOCTLCMD) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "error in getting get_fmt from sub device\n");
+ return ret;
+ }
+ v4l2_fill_pix_format(pix, mbus_fmt);
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ /* Sets the values in CCDC */
+ ret = vpfe_config_ccdc_image_format(vpfe_dev);
+ if (ret)
+ return ret;
+
+ /* Update the values of sizeimage and bytesperline */
+ pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return 0;
+}
+
+static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
+{
+ int ret;
+
+ /* set first input of current subdevice as the current input */
+ vpfe_dev->current_input = 0;
+
+ /* set default standard */
+ vpfe_dev->std_index = 0;
+
+ /* Configure the default format information */
+ ret = vpfe_config_image_format(vpfe_dev,
+ vpfe_standards[vpfe_dev->std_index].std_id);
+ if (ret)
+ return ret;
+
+ /* now open the ccdc device to initialize it */
+ mutex_lock(&ccdc_lock);
+ if (!ccdc_dev) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (!try_module_get(ccdc_dev->owner)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Couldn't lock ccdc module\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+ ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
+ if (!ret)
+ vpfe_dev->initialized = 1;
+
+ /* Clear all VPFE/CCDC interrupts */
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(-1);
+
+unlock:
+ mutex_unlock(&ccdc_lock);
+ return ret;
+}
+
+/*
+ * vpfe_open : It creates an object of the file handle structure and
+ * stores it in the private_data member of the file pointer
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+ struct vpfe_fh *fh;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_open\n");
+
+ if (!vpfe_dev->cfg->num_subdevs) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "No decoder registered\n");
+ return -ENODEV;
+ }
+
+ /* Allocate memory for the file handle object */
+ fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+
+ /* store pointer to fh in private_data member of file */
+ file->private_data = fh;
+ fh->vpfe_dev = vpfe_dev;
+ v4l2_fh_init(&fh->fh, vdev);
+ mutex_lock(&vpfe_dev->lock);
+ /* If decoder is not initialized, initialize it */
+ if (!vpfe_dev->initialized) {
+ if (vpfe_initialize_device(vpfe_dev)) {
+ mutex_unlock(&vpfe_dev->lock);
+ v4l2_fh_exit(&fh->fh);
+ kfree(fh);
+ return -ENODEV;
+ }
+ }
+ /* Increment device usrs counter */
+ vpfe_dev->usrs++;
+ /* Set io_allowed member to false */
+ fh->io_allowed = 0;
+ v4l2_fh_add(&fh->fh);
+ mutex_unlock(&vpfe_dev->lock);
+ return 0;
+}
+
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vpfe_dev->next_frm->queue);
+ vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
+
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+ addr += vpfe_dev->field_off;
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
+{
+ v4l2_get_timestamp(&vpfe_dev->cur_frm->ts);
+ vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
+ vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ wake_up_interruptible(&vpfe_dev->cur_frm->done);
+ vpfe_dev->cur_frm = vpfe_dev->next_frm;
+}
+
+/* ISR for VINT0 */
+static irqreturn_t vpfe_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+ enum v4l2_field field;
+ int fid;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
+ field = vpfe_dev->fmt.fmt.pix.field;
+
+ /* if streaming not started, don't do anything */
+ if (!vpfe_dev->started)
+ goto clear_intr;
+
+ /* this is applicable only for DM6446 */
+ if (ccdc_dev->hw_ops.reset)
+ ccdc_dev->hw_ops.reset();
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "frame format is progressive...\n");
+ if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+ vpfe_process_buffer_complete(vpfe_dev);
+ goto clear_intr;
+ }
+
+ /* interlaced or TB capture check which field we are in hardware */
+ fid = ccdc_dev->hw_ops.getfid();
+
+ /* switch the software maintained field id */
+ vpfe_dev->field_id ^= 1;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "field id = %x:%x.\n",
+ fid, vpfe_dev->field_id);
+ if (fid == vpfe_dev->field_id) {
+ /* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the next frame
+ * is available, release the current frame and move on
+ */
+ if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+ vpfe_process_buffer_complete(vpfe_dev);
+ /*
+ * based on whether the two fields are stored
+ * interleaved or separately in memory, reconfigure
+ * the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe_dev);
+ goto clear_intr;
+ }
+ /*
+ * if one field is just being captured, configure
+ * the next frame: get the next frame from the empty
+ * queue; if no frame is available, hold on to the
+ * current buffer
+ */
+ spin_lock(&vpfe_dev->dma_queue_lock);
+ if (!list_empty(&vpfe_dev->dma_queue) &&
+ vpfe_dev->cur_frm == vpfe_dev->next_frm)
+ vpfe_schedule_next_buffer(vpfe_dev);
+ spin_unlock(&vpfe_dev->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame
+ */
+ vpfe_dev->field_id = fid;
+ }
+clear_intr:
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
+ return IRQ_HANDLED;
+}
+
+/* vdint1_isr - isr handler for VINT1 interrupt */
+static irqreturn_t vdint1_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
+
+ /* if streaming not started, don't do anything */
+ if (!vpfe_dev->started) {
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&vpfe_dev->dma_queue_lock);
+ if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
+ !list_empty(&vpfe_dev->dma_queue) &&
+ vpfe_dev->cur_frm == vpfe_dev->next_frm)
+ vpfe_schedule_next_buffer(vpfe_dev);
+ spin_unlock(&vpfe_dev->dma_queue_lock);
+
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
+ return IRQ_HANDLED;
+}
+
+static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = ccdc_dev->hw_ops.get_frame_format();
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
+}
+
+static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = ccdc_dev->hw_ops.get_frame_format();
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
+ return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
+ 0, "vpfe_capture1",
+ vpfe_dev);
+ }
+ return 0;
+}
+
+/* vpfe_stop_ccdc_capture: stop streaming in ccdc/isif */
+static void vpfe_stop_ccdc_capture(struct vpfe_device *vpfe_dev)
+{
+ vpfe_dev->started = 0;
+ ccdc_dev->hw_ops.enable(0);
+ if (ccdc_dev->hw_ops.enable_out_to_sdram)
+ ccdc_dev->hw_ops.enable_out_to_sdram(0);
+}
+
+/*
+ * vpfe_release : This function deletes the buffer queue, frees the
+ * buffers and the vpfe file handle
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
+
+ /* Get the device lock */
+ mutex_lock(&vpfe_dev->lock);
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ if (vpfe_dev->started) {
+ sdinfo = vpfe_dev->current_subdev;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id,
+ video, s_stream, 0);
+ if (ret && (ret != -ENOIOCTLCMD))
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "stream off failed in subdev\n");
+ vpfe_stop_ccdc_capture(vpfe_dev);
+ vpfe_detach_irq(vpfe_dev);
+ videobuf_streamoff(&vpfe_dev->buffer_queue);
+ }
+ vpfe_dev->io_usrs = 0;
+ vpfe_dev->numbuffers = config_params.numbuffers;
+ videobuf_stop(&vpfe_dev->buffer_queue);
+ videobuf_mmap_free(&vpfe_dev->buffer_queue);
+ }
+
+ /* Decrement device usrs counter */
+ vpfe_dev->usrs--;
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+ /* If this is the last file handle */
+ if (!vpfe_dev->usrs) {
+ vpfe_dev->initialized = 0;
+ if (ccdc_dev->hw_ops.close)
+ ccdc_dev->hw_ops.close(vpfe_dev->pdev);
+ module_put(ccdc_dev->owner);
+ }
+ mutex_unlock(&vpfe_dev->lock);
+ file->private_data = NULL;
+ /* Free memory allocated to file handle object */
+ kfree(fh);
+ return 0;
+}
+
+/*
+ * vpfe_mmap : It is used to map kernel space buffers
+ * into user space
+ */
+static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* Get the device object and file handle object */
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
+
+ return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma);
+}
+
+/*
+ * vpfe_poll: It is used for select/poll system call
+ */
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
+
+ if (vpfe_dev->started)
+ return videobuf_poll_stream(file,
+ &vpfe_dev->buffer_queue, wait);
+ return 0;
+}
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpfe_mmap,
+ .poll = vpfe_poll
+};
+
+/*
+ * vpfe_check_format()
+ * This function adjusts the input pixel format as per hardware
+ * capabilities and updates the same in pixfmt.
+ * The following algorithm is used:
+ *
+ * If the given pixformat is not in the vpfe list of pix formats or not
+ * supported by the hardware, the current value of pixformat in the device
+ * is used.
+ * If the given field is not supported, then the current field is used. If
+ * the field is different from the current one, then it is matched with that
+ * of the sub device.
+ * Minimum height is 2 lines for an interlaced or TB field and 1 line for
+ * progressive. Maximum height is clamped to the active lines of the scan.
+ * Minimum width is 32 bytes in memory and width is clamped to the active
+ * pixels of the scan.
+ * bytesperline is a multiple of 32.
+ */
+static const struct vpfe_pixel_format *
+ vpfe_check_format(struct vpfe_device *vpfe_dev,
+ struct v4l2_pix_format *pixfmt)
+{
+ u32 min_height = 1, min_width = 32, max_width, max_height;
+ const struct vpfe_pixel_format *vpfe_pix_fmt;
+ u32 pix;
+ int temp, found;
+
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ if (!vpfe_pix_fmt) {
+ /*
+ * use current pixel format in the vpfe device. We
+ * will find this pix format in the table
+ */
+ pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ }
+
+ /* check if hw supports it */
+ temp = 0;
+ found = 0;
+ while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) {
+ if (vpfe_pix_fmt->fmtdesc.pixelformat == pix) {
+ found = 1;
+ break;
+ }
+ temp++;
+ }
+
+ if (!found) {
+ /* use current pixel format */
+ pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+ /*
+ * Since this is currently used in the vpfe device, we
+ * will find this pix format in the table
+ */
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ }
+
+ /* check what field format is supported */
+ if (pixfmt->field == V4L2_FIELD_ANY) {
+ /* if field is any, use current value as default */
+ pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+ }
+
+ /*
+ * if field is not same as current field in the vpfe device
+ * try matching the field with the sub device field
+ */
+ if (vpfe_dev->fmt.fmt.pix.field != pixfmt->field) {
+ /*
+ * If field value is not in the supported fields, use current
+ * field used in the device as default
+ */
+ switch (pixfmt->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ /* if sub device is supporting progressive, use that */
+ if (!vpfe_dev->std_info.frame_format)
+ pixfmt->field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_NONE:
+ if (vpfe_dev->std_info.frame_format)
+ pixfmt->field = V4L2_FIELD_INTERLACED;
+ break;
+
+ default:
+ /* use current field as default */
+ pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+ break;
+ }
+ }
+
+ /* Now adjust image resolutions supported */
+ if (pixfmt->field == V4L2_FIELD_INTERLACED ||
+ pixfmt->field == V4L2_FIELD_SEQ_TB)
+ min_height = 2;
+
+ max_width = vpfe_dev->std_info.active_pixels;
+ max_height = vpfe_dev->std_info.active_lines;
+ min_width /= vpfe_pix_fmt->bpp;
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "width = %d, height = %d, bpp = %d\n",
+ pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp);
+
+ pixfmt->width = clamp((pixfmt->width), min_width, max_width);
+ pixfmt->height = clamp((pixfmt->height), min_height, max_height);
+
+ /* If interlaced, adjust height to be a multiple of 2 */
+ if (pixfmt->field == V4L2_FIELD_INTERLACED)
+ pixfmt->height &= (~1);
+ /*
+ * recalculate bytesperline and sizeimage since width
+ * and height might have changed
+ */
+ pixfmt->bytesperline = (((pixfmt->width * vpfe_pix_fmt->bpp) + 31)
+ & ~31);
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ pixfmt->sizeimage =
+ pixfmt->bytesperline * pixfmt->height +
+ ((pixfmt->bytesperline * pixfmt->height) >> 1);
+ else
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height = %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
+ pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
+ pixfmt->bytesperline, pixfmt->sizeimage);
+ return vpfe_pix_fmt;
+}
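+
+/*
+ * Editor's worked example (not part of the original driver) of the size
+ * arithmetic above: for UYVY (bpp = 2) at 720x576,
+ * bytesperline = ((720 * 2) + 31) & ~31 = 1440 and
+ * sizeimage = 1440 * 576 = 829440; for NV12 (bpp = 1) at the same
+ * resolution, bytesperline = ((720 * 1) + 31) & ~31 = 736 and
+ * sizeimage = 736 * 576 + (736 * 576) / 2 = 635904.
+ */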
+
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
+
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
+ strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
+ return 0;
+}
+
+static int vpfe_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n");
+ /* Fill in the information about format */
+ *fmt = vpfe_dev->fmt;
+ return 0;
+}
+
+static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmt;
+ int temp_index;
+ u32 pix;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n");
+
+ if (ccdc_dev->hw_ops.enum_pix(&pix, fmt->index) < 0)
+ return -EINVAL;
+
+ /* Fill in the information about format */
+ pix_fmt = vpfe_lookup_pix_format(pix);
+ if (pix_fmt) {
+ temp_index = fmt->index;
+ *fmt = pix_fmt->fmtdesc;
+ fmt->index = temp_index;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmts;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
+
+ /* If streaming is started, return error */
+ if (vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+
+ /* Check for valid frame format */
+ pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
+ if (!pix_fmts)
+ return -EINVAL;
+
+ /* store the pixel format in the device object */
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ /* First detach any IRQ if currently attached */
+ vpfe_detach_irq(vpfe_dev);
+ vpfe_dev->fmt = *fmt;
+ /* set image capture parameters in the ccdc */
+ ret = vpfe_config_ccdc_image_format(vpfe_dev);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmts;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
+
+ pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
+ if (!pix_fmts)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
+ * given app input index
+ */
+static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe_dev,
+ int *subdev_index,
+ int *subdev_input_index,
+ int app_input_index)
+{
+ struct vpfe_config *cfg = vpfe_dev->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < cfg->num_subdevs; i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (app_input_index < (j + sdinfo->num_inputs)) {
+ *subdev_index = i;
+ *subdev_input_index = app_input_index - j;
+ return 0;
+ }
+ j += sdinfo->num_inputs;
+ }
+ return -EINVAL;
+}
+
+/*
+ * vpfe_get_app_input_index - Get the app input index for a given subdev input
+ * index. The driver stores the input index of the current sub device and
+ * translates it when the application requests the current input.
+ */
+static int vpfe_get_app_input_index(struct vpfe_device *vpfe_dev,
+ int *app_input_index)
+{
+ struct vpfe_config *cfg = vpfe_dev->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < cfg->num_subdevs; i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (!strcmp(sdinfo->name, vpfe_dev->current_subdev->name)) {
+ if (vpfe_dev->current_input >= sdinfo->num_inputs)
+ return -1;
+ *app_input_index = j + vpfe_dev->current_input;
+ return 0;
+ }
+ j += sdinfo->num_inputs;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int subdev, index;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
+
+ if (vpfe_get_subdev_input_index(vpfe_dev,
+ &subdev,
+ &index,
+ inp->index) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "input information not found for the subdev\n");
+ return -EINVAL;
+ }
+ sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
+ *inp = sdinfo->inputs[index];
+ return 0;
+}
+
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
+
+ return vpfe_get_app_input_index(vpfe_dev, index);
+}
+
+
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ struct vpfe_subdev_info *sdinfo;
+ int subdev_index, inp_index;
+ struct vpfe_route *route;
+ u32 input, output;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ /*
+ * If streaming is started, return device busy
+ * error
+ */
+ if (vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+ ret = vpfe_get_subdev_input_index(vpfe_dev,
+ &subdev_index,
+ &inp_index,
+ index);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "invalid input index\n");
+ goto unlock_out;
+ }
+
+ sdinfo = &vpfe_dev->cfg->sub_devs[subdev_index];
+ sd = vpfe_dev->sd[subdev_index];
+ route = &sdinfo->routes[inp_index];
+ if (route && sdinfo->can_route) {
+ input = route->input;
+ output = route->output;
+ } else {
+ input = 0;
+ output = 0;
+ }
+
+ if (sd)
+ ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0);
+
+ if (ret) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "vpfe_doioctl:error in setting input in decoder\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ vpfe_dev->current_subdev = sdinfo;
+ if (sd)
+ vpfe_dev->v4l2_dev.ctrl_handler = sd->ctrl_handler;
+ vpfe_dev->current_input = index;
+ vpfe_dev->std_index = 0;
+
+ /* set the bus/interface parameter for the sub device in ccdc */
+ ret = ccdc_dev->hw_ops.set_hw_if_params(&sdinfo->ccdc_if_params);
+ if (ret)
+ goto unlock_out;
+
+ /* set the default image parameters in the device */
+ ret = vpfe_config_image_format(vpfe_dev,
+ vpfe_standards[vpfe_dev->std_index].std_id);
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ sdinfo = vpfe_dev->current_subdev;
+ if (ret)
+ return ret;
+ /* Call querystd function of decoder device */
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, querystd, std_id);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
+
+ /* Call decoder driver function to set the standard */
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ sdinfo = vpfe_dev->current_subdev;
+ /* If streaming is started, return device busy error */
+ if (vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, s_std, std_id);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
+ goto unlock_out;
+ }
+ ret = vpfe_config_image_format(vpfe_dev, std_id);
+
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
+
+ *std_id = vpfe_standards[vpfe_dev->std_index].std_id;
+ return 0;
+}
+/*
+ * Videobuf operations
+ */
+static int vpfe_videobuf_setup(struct videobuf_queue *vq,
+ unsigned int *count,
+ unsigned int *size)
+{
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
+ *size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
+ vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
+ *size = config_params.device_bufsize;
+
+ if (*count < config_params.min_numbuffers)
+ *count = config_params.min_numbuffers;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "count=%d, size=%d\n", *count, *size);
+ return 0;
+}
+
+static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long addr;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
+
+ /* If buffer is not initialized, initialize it */
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vpfe_dev->fmt.fmt.pix.width;
+ vb->height = vpfe_dev->fmt.fmt.pix.height;
+ vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ vb->field = field;
+
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0)
+ return ret;
+
+ addr = videobuf_to_dma_contig(vb);
+ /* Make sure user addresses are aligned to 32 bytes */
+ if (!IS_ALIGNED(addr, 32))
+ return -EINVAL;
+
+ vb->state = VIDEOBUF_PREPARED;
+ }
+ return 0;
+}
+
+static void vpfe_videobuf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ /* Get the file handle object and device object */
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue\n");
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
+ list_add_tail(&vb->queue, &vpfe_dev->dma_queue);
+ spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
+
+ /* Change state of the buffer */
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+static void vpfe_videobuf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct vpfe_fh *fh = vq->priv_data;
+ struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_videobuf_release\n");
+
+ /*
+ * We need to flush the buffers from the dma queue since
+ * they are de-allocated
+ */
+ spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
+ INIT_LIST_HEAD(&vpfe_dev->dma_queue);
+ spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static const struct videobuf_queue_ops vpfe_videobuf_qops = {
+ .buf_setup = vpfe_videobuf_setup,
+ .buf_prepare = vpfe_videobuf_prepare,
+ .buf_queue = vpfe_videobuf_queue,
+ .buf_release = vpfe_videobuf_release,
+};
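+
+/*
+ * Note on the ops above (descriptive only): the videobuf core calls
+ * buf_setup from REQBUFS, buf_prepare and buf_queue from QBUF (and at
+ * STREAMON), and buf_release when the buffers are freed; the capture
+ * interrupt handler then completes buffers by taking them off dma_queue.
+ */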
+
+/*
+ * vpfe_reqbufs. Currently REQBUFS is supported only once after
+ * opening the device.
+ */
+static int vpfe_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ if (vpfe_dev->io_usrs != 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ vpfe_dev->memory = req_buf->memory;
+ videobuf_queue_dma_contig_init(&vpfe_dev->buffer_queue,
+ &vpfe_videobuf_qops,
+ vpfe_dev->pdev,
+ &vpfe_dev->irqlock,
+ req_buf->type,
+ vpfe_dev->fmt.fmt.pix.field,
+ sizeof(struct videobuf_buffer),
+ fh, NULL);
+
+ fh->io_allowed = 1;
+ vpfe_dev->io_usrs = 1;
+ INIT_LIST_HEAD(&vpfe_dev->dma_queue);
+ ret = videobuf_reqbufs(&vpfe_dev->buffer_queue, req_buf);
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ if (vpfe_dev->memory != V4L2_MEMORY_MMAP) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
+ return -EINVAL;
+ }
+ /* Call videobuf_querybuf to get information */
+ return videobuf_querybuf(&vpfe_dev->buffer_queue, buf);
+}
+
+static int vpfe_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If this file handle is not allowed to do IO,
+ * return error
+ */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+ return videobuf_qbuf(&vpfe_dev->buffer_queue, p);
+}
+
+static int vpfe_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+ return videobuf_dqbuf(&vpfe_dev->buffer_queue,
+ buf, file->f_flags & O_NONBLOCK);
+}
+
+/*
+ * vpfe_calculate_offsets : This function calculates the buffer offsets
+ * for the top and bottom fields
+ */
+static void vpfe_calculate_offsets(struct vpfe_device *vpfe_dev)
+{
+ struct v4l2_rect image_win;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_calculate_offsets\n");
+
+ ccdc_dev->hw_ops.get_image_window(&image_win);
+ vpfe_dev->field_off = image_win.height * image_win.width;
+}
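+
+/*
+ * For illustration: with a hypothetical 720x480 image window, field_off
+ * becomes 720 * 480 = 345600, i.e. the offset (in pixels of the captured
+ * format) at which the second field is placed within the frame buffer.
+ */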
+
+/* vpfe_start_ccdc_capture: start streaming in ccdc/isif */
+static void vpfe_start_ccdc_capture(struct vpfe_device *vpfe_dev)
+{
+ ccdc_dev->hw_ops.enable(1);
+ if (ccdc_dev->hw_ops.enable_out_to_sdram)
+ ccdc_dev->hw_ops.enable_out_to_sdram(1);
+ vpfe_dev->started = 1;
+}
+
+/*
+ * vpfe_streamon. Assumes the DMA queue is not empty.
+ * The application is expected to call QBUF before calling
+ * this ioctl; if not, the driver returns an error.
+ */
+static int vpfe_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long addr;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ /* If file handle is not allowed IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ sdinfo = vpfe_dev->current_subdev;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, s_stream, 1);
+
+ if (ret && (ret != -ENOIOCTLCMD)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "stream on failed in subdev\n");
+ return -EINVAL;
+ }
+
+ /* If buffer queue is empty, return error */
+ if (list_empty(&vpfe_dev->buffer_queue.stream)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
+ return -EIO;
+ }
+
+ /* Call videobuf_streamon to start streaming in videobuf */
+ ret = videobuf_streamon(&vpfe_dev->buffer_queue);
+ if (ret)
+ return ret;
+
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ goto streamoff;
+ /* Get the next frame from the buffer queue */
+ vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
+ struct videobuf_buffer, queue);
+ vpfe_dev->cur_frm = vpfe_dev->next_frm;
+ /* Remove buffer from the buffer queue */
+ list_del(&vpfe_dev->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vpfe_dev->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ vpfe_dev->field_id = 0;
+ addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+
+ /* Calculate field offset */
+ vpfe_calculate_offsets(vpfe_dev);
+
+ if (vpfe_attach_irq(vpfe_dev) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error in attaching interrupt handle\n");
+ ret = -EFAULT;
+ goto unlock_out;
+ }
+ if (ccdc_dev->hw_ops.configure() < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error in configuring ccdc\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ ccdc_dev->hw_ops.setfbaddr((unsigned long)(addr));
+ vpfe_start_ccdc_capture(vpfe_dev);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+streamoff:
+ videobuf_streamoff(&vpfe_dev->buffer_queue);
+ return ret;
+}
+
+static int vpfe_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ /* If io is not allowed for this file handle, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ /* If streaming is not started, return error */
+ if (!vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "device started\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ vpfe_stop_ccdc_capture(vpfe_dev);
+ vpfe_detach_irq(vpfe_dev);
+
+ sdinfo = vpfe_dev->current_subdev;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, s_stream, 0);
+
+ if (ret && (ret != -ENOIOCTLCMD))
+ v4l2_err(&vpfe_dev->v4l2_dev, "stream off failed in subdev\n");
+ ret = videobuf_streamoff(&vpfe_dev->buffer_queue);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *crop)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_cropcap\n");
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ /* If std_index is invalid, then just return (== 1:1 aspect) */
+ if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
+ return 0;
+
+ crop->pixelaspect = vpfe_standards[vpfe_dev->std_index].pixelaspect;
+ return 0;
+}
+
+static int vpfe_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_selection\n");
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = vpfe_dev->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.width = vpfe_standards[vpfe_dev->std_index].width;
+ sel->r.height = vpfe_standards[vpfe_dev->std_index].height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vpfe_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct v4l2_rect rect = sel->r;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ if (vpfe_dev->started) {
+ /* make sure streaming is not started */
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Cannot change crop when streaming is ON\n");
+ return -EBUSY;
+ }
+
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ if (rect.top < 0 || rect.left < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "doesn't support negative values for top & left\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ /* adjust the width to 16 pixel boundary */
+ rect.width = ((rect.width + 15) & ~0xf);
+
+ /* make sure parameters are valid */
+ if ((rect.left + rect.width >
+ vpfe_dev->std_info.active_pixels) ||
+ (rect.top + rect.height >
+ vpfe_dev->std_info.active_lines)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_SELECTION params\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ ccdc_dev->hw_ops.set_image_window(&rect);
+ vpfe_dev->fmt.fmt.pix.width = rect.width;
+ vpfe_dev->fmt.fmt.pix.height = rect.height;
+ vpfe_dev->fmt.fmt.pix.bytesperline =
+ ccdc_dev->hw_ops.get_line_length();
+ vpfe_dev->fmt.fmt.pix.sizeimage =
+ vpfe_dev->fmt.fmt.pix.bytesperline *
+ vpfe_dev->fmt.fmt.pix.height;
+ vpfe_dev->crop = rect;
+ sel->r = rect;
+unlock_out:
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+ .vidioc_querycap = vpfe_querycap,
+ .vidioc_g_fmt_vid_cap = vpfe_g_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vpfe_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vpfe_try_fmt_vid_cap,
+ .vidioc_enum_input = vpfe_enum_input,
+ .vidioc_g_input = vpfe_g_input,
+ .vidioc_s_input = vpfe_s_input,
+ .vidioc_querystd = vpfe_querystd,
+ .vidioc_s_std = vpfe_s_std,
+ .vidioc_g_std = vpfe_g_std,
+ .vidioc_reqbufs = vpfe_reqbufs,
+ .vidioc_querybuf = vpfe_querybuf,
+ .vidioc_qbuf = vpfe_qbuf,
+ .vidioc_dqbuf = vpfe_dqbuf,
+ .vidioc_streamon = vpfe_streamon,
+ .vidioc_streamoff = vpfe_streamoff,
+ .vidioc_cropcap = vpfe_cropcap,
+ .vidioc_g_selection = vpfe_g_selection,
+ .vidioc_s_selection = vpfe_s_selection,
+};
+
+static struct vpfe_device *vpfe_initialize(void)
+{
+ struct vpfe_device *vpfe_dev;
+
+ /* Default number of buffers should be 3 */
+ if ((numbuffers > 0) &&
+ (numbuffers < config_params.min_numbuffers))
+ numbuffers = config_params.min_numbuffers;
+
+ /*
+ * Set the buffer size to the minimum buffer size if an invalid
+ * buffer size is given
+ */
+ if (bufsize < config_params.min_bufsize)
+ bufsize = config_params.min_bufsize;
+
+ config_params.numbuffers = numbuffers;
+
+ if (numbuffers)
+ config_params.device_bufsize = bufsize;
+
+ /* Allocate memory for device objects */
+ vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
+
+ return vpfe_dev;
+}
+
+/*
+ * vpfe_probe : This function creates device entries by registering
+ * itself with the V4L2 framework and initializes the fields of each
+ * device object
+ */
+static int vpfe_probe(struct platform_device *pdev)
+{
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_config *vpfe_cfg;
+ struct resource *res1;
+ struct vpfe_device *vpfe_dev;
+ struct i2c_adapter *i2c_adap;
+ struct video_device *vfd;
+ int ret, i, j;
+ int num_subdevs = 0;
+
+ /* Get the pointer to the device object */
+ vpfe_dev = vpfe_initialize();
+
+ if (!vpfe_dev) {
+ v4l2_err(pdev->dev.driver,
+ "Failed to allocate memory for vpfe_dev\n");
+ return -ENOMEM;
+ }
+
+ vpfe_dev->pdev = &pdev->dev;
+
+ if (!pdev->dev.platform_data) {
+ v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
+ ret = -ENODEV;
+ goto probe_free_dev_mem;
+ }
+
+ vpfe_cfg = pdev->dev.platform_data;
+ vpfe_dev->cfg = vpfe_cfg;
+ if (!vpfe_cfg->ccdc || !vpfe_cfg->card_name || !vpfe_cfg->sub_devs) {
+ v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+
+ /* Allocate memory for ccdc configuration */
+ ccdc_cfg = kmalloc(sizeof(*ccdc_cfg), GFP_KERNEL);
+ if (!ccdc_cfg) {
+ ret = -ENOMEM;
+ goto probe_free_dev_mem;
+ }
+
+ mutex_lock(&ccdc_lock);
+
+ strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
+ /* Get VINT0 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT0\n");
+ ret = -ENODEV;
+ goto probe_free_ccdc_cfg_mem;
+ }
+ vpfe_dev->ccdc_irq0 = res1->start;
+
+ /* Get VINT1 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT1\n");
+ ret = -ENODEV;
+ goto probe_free_ccdc_cfg_mem;
+ }
+ vpfe_dev->ccdc_irq1 = res1->start;
+
+ ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
+ "vpfe_capture0", vpfe_dev);
+
+ if (0 != ret) {
+ v4l2_err(pdev->dev.driver, "Unable to request interrupt\n");
+ goto probe_free_ccdc_cfg_mem;
+ }
+
+ vfd = &vpfe_dev->video_dev;
+ /* Initialize fields of the video device */
+ vfd->release = video_device_release_empty;
+ vfd->fops = &vpfe_fops;
+ vfd->ioctl_ops = &vpfe_ioctl_ops;
+ vfd->tvnorms = 0;
+ vfd->v4l2_dev = &vpfe_dev->v4l2_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "%s_V%d.%d.%d",
+ CAPTURE_DRV_NAME,
+ (VPFE_CAPTURE_VERSION_CODE >> 16) & 0xff,
+ (VPFE_CAPTURE_VERSION_CODE >> 8) & 0xff,
+ (VPFE_CAPTURE_VERSION_CODE) & 0xff);
+
+ ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to register v4l2 device.\n");
+ goto probe_out_release_irq;
+ }
+ v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
+ spin_lock_init(&vpfe_dev->irqlock);
+ spin_lock_init(&vpfe_dev->dma_queue_lock);
+ mutex_init(&vpfe_dev->lock);
+
+ /* Initialize fields of the device object */
+ vpfe_dev->numbuffers = config_params.numbuffers;
+
+ /* register video device */
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "trying to register vpfe device.\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "video_dev=%p\n", &vpfe_dev->video_dev);
+ vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ret = video_register_device(&vpfe_dev->video_dev,
+ VFL_TYPE_GRABBER, -1);
+
+ if (ret) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to register video device.\n");
+ goto probe_out_v4l2_unregister;
+ }
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "video device registered\n");
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpfe_dev);
+ /* set driver private data */
+ video_set_drvdata(&vpfe_dev->video_dev, vpfe_dev);
+ i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
+ num_subdevs = vpfe_cfg->num_subdevs;
+ vpfe_dev->sd = kmalloc_array(num_subdevs,
+ sizeof(*vpfe_dev->sd),
+ GFP_KERNEL);
+ if (!vpfe_dev->sd) {
+ ret = -ENOMEM;
+ goto probe_out_video_unregister;
+ }
+
+ for (i = 0; i < num_subdevs; i++) {
+ struct v4l2_input *inps;
+
+ sdinfo = &vpfe_cfg->sub_devs[i];
+
+ /* Load up the subdevice */
+ vpfe_dev->sd[i] =
+ v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
+ i2c_adap,
+ &sdinfo->board_info,
+ NULL);
+ if (vpfe_dev->sd[i]) {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ sdinfo->name);
+ vpfe_dev->sd[i]->grp_id = sdinfo->grp_id;
+ /* update tvnorms from the sub devices */
+ for (j = 0; j < sdinfo->num_inputs; j++) {
+ inps = &sdinfo->inputs[j];
+ vfd->tvnorms |= inps->std;
+ }
+ } else {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s register fails\n",
+ sdinfo->name);
+ ret = -ENXIO;
+ goto probe_sd_out;
+ }
+ }
+
+ /* set first sub device as current one */
+ vpfe_dev->current_subdev = &vpfe_cfg->sub_devs[0];
+ vpfe_dev->v4l2_dev.ctrl_handler = vpfe_dev->sd[0]->ctrl_handler;
+
+ /* We have at least one sub device to work with */
+ mutex_unlock(&ccdc_lock);
+ return 0;
+
+probe_sd_out:
+ kfree(vpfe_dev->sd);
+probe_out_video_unregister:
+ video_unregister_device(&vpfe_dev->video_dev);
+probe_out_v4l2_unregister:
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+probe_out_release_irq:
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+probe_free_ccdc_cfg_mem:
+ kfree(ccdc_cfg);
+ mutex_unlock(&ccdc_lock);
+probe_free_dev_mem:
+ kfree(vpfe_dev);
+ return ret;
+}
+
+/*
+ * vpfe_remove : It unregisters the device from the V4L2 framework
+ */
+static int vpfe_remove(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
+
+ v4l2_info(pdev->dev.driver, "vpfe_remove\n");
+
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ kfree(vpfe_dev->sd);
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+ video_unregister_device(&vpfe_dev->video_dev);
+ kfree(vpfe_dev);
+ kfree(ccdc_cfg);
+ return 0;
+}
+
+static int vpfe_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int vpfe_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops vpfe_dev_pm_ops = {
+ .suspend = vpfe_suspend,
+ .resume = vpfe_resume,
+};
+
+static struct platform_driver vpfe_driver = {
+ .driver = {
+ .name = CAPTURE_DRV_NAME,
+ .pm = &vpfe_dev_pm_ops,
+ },
+ .probe = vpfe_probe,
+ .remove = vpfe_remove,
+};
+
+module_platform_driver(vpfe_driver);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
new file mode 100644
index 000000000..00ce9f276
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif.c
@@ -0,0 +1,556 @@
+/*
+ * vpif - Video Port Interface driver
+ * VPIF is a receiver and transmitter for video data. It has two channels (0, 1)
+ * that receive the video byte stream and two channels (2, 3) for video output.
+ * The hardware supports SDTV and HDTV formats as well as raw data capture.
+ * Currently, the driver supports NTSC and PAL standards.
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/of_graph.h>
+
+#include "vpif.h"
+
+MODULE_DESCRIPTION("TI DaVinci Video Port Interface driver");
+MODULE_LICENSE("GPL");
+
+#define VPIF_DRIVER_NAME "vpif"
+MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
+
+#define VPIF_CH0_MAX_MODES 22
+#define VPIF_CH1_MAX_MODES 2
+#define VPIF_CH2_MAX_MODES 15
+#define VPIF_CH3_MAX_MODES 2
+
+spinlock_t vpif_lock;
+EXPORT_SYMBOL_GPL(vpif_lock);
+
+void __iomem *vpif_base;
+EXPORT_SYMBOL_GPL(vpif_base);
+
+/*
+ * vpif_ch_params: video standard configuration parameters for vpif
+ *
+ * The table must include all presets from supported subdevices.
+ */
+const struct vpif_channel_config_params vpif_ch_params[] = {
+ /* HDTV formats */
+ {
+ .name = "480p59_94",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 138-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 43,
+ .l5 = 523,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_720X480P59_94,
+ },
+ {
+ .name = "576p50",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 144-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 45,
+ .l5 = 621,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_720X576P50,
+ },
+ {
+ .name = "720p50",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 700-8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1280X720P50,
+ },
+ {
+ .name = "720p60",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 370 - 8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1280X720P60,
+ },
+ {
+ .name = "1080I50",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 720 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1920X1080I50,
+ },
+ {
+ .name = "1080I60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1920X1080I60,
+ },
+ {
+ .name = "1080p60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 42,
+ .l5 = 1122,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_timings = V4L2_DV_BT_CEA_1920X1080P60,
+ },
+
+ /* SDTV formats */
+ {
+ .name = "NTSC_M",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 268,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 263,
+ .l7 = 266,
+ .l9 = 286,
+ .l11 = 525,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_525_60,
+ },
+ {
+ .name = "PAL_BDGHIK",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 280,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 311,
+ .l7 = 313,
+ .l9 = 336,
+ .l11 = 624,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_625_50,
+ },
+};
+EXPORT_SYMBOL_GPL(vpif_ch_params);
+
+const unsigned int vpif_ch_params_count = ARRAY_SIZE(vpif_ch_params);
+EXPORT_SYMBOL_GPL(vpif_ch_params_count);
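+
+/*
+ * Illustrative usage (not part of this file): the capture/display drivers
+ * typically scan this table for the entry matching a negotiated standard,
+ * roughly as in:
+ *
+ *	for (i = 0; i < vpif_ch_params_count; i++)
+ *		if (!vpif_ch_params[i].hd_sd &&
+ *		    (vpif_ch_params[i].stdid & std_id))
+ *			break;  (SDTV entry for the given v4l2_std_id)
+ */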
+
+static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
+{
+ if (val)
+ vpif_set_bit(reg, bit);
+ else
+ vpif_clr_bit(reg, bit);
+}
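+
+/*
+ * e.g. vpif_wr_bit(VPIF_CH0_CTRL, VPIF_CH_YC_MUX_BIT, 1) is equivalent to
+ * vpif_set_bit(VPIF_CH0_CTRL, VPIF_CH_YC_MUX_BIT), while passing 0 clears
+ * the bit (illustrative only).
+ */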
+
+/* This structure is used to keep track of VPIF size register's offsets */
+struct vpif_registers {
+ u32 h_cfg, v_cfg_00, v_cfg_01, v_cfg_02, v_cfg, ch_ctrl;
+ u32 line_offset, vanc0_strt, vanc0_size, vanc1_strt;
+ u32 vanc1_size, width_mask, len_mask;
+ u8 max_modes;
+};
+
+static const struct vpif_registers vpifregs[VPIF_NUM_CHANNELS] = {
+ /* Channel0 */
+ {
+ VPIF_CH0_H_CFG, VPIF_CH0_V_CFG_00, VPIF_CH0_V_CFG_01,
+ VPIF_CH0_V_CFG_02, VPIF_CH0_V_CFG_03, VPIF_CH0_CTRL,
+ VPIF_CH0_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
+ VPIF_CH0_MAX_MODES,
+ },
+ /* Channel1 */
+ {
+ VPIF_CH1_H_CFG, VPIF_CH1_V_CFG_00, VPIF_CH1_V_CFG_01,
+ VPIF_CH1_V_CFG_02, VPIF_CH1_V_CFG_03, VPIF_CH1_CTRL,
+ VPIF_CH1_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
+ VPIF_CH1_MAX_MODES,
+ },
+ /* Channel2 */
+ {
+ VPIF_CH2_H_CFG, VPIF_CH2_V_CFG_00, VPIF_CH2_V_CFG_01,
+ VPIF_CH2_V_CFG_02, VPIF_CH2_V_CFG_03, VPIF_CH2_CTRL,
+ VPIF_CH2_IMG_ADD_OFST, VPIF_CH2_VANC0_STRT, VPIF_CH2_VANC0_SIZE,
+ VPIF_CH2_VANC1_STRT, VPIF_CH2_VANC1_SIZE, 0x7FF, 0x7FF,
+ VPIF_CH2_MAX_MODES
+ },
+ /* Channel3 */
+ {
+ VPIF_CH3_H_CFG, VPIF_CH3_V_CFG_00, VPIF_CH3_V_CFG_01,
+ VPIF_CH3_V_CFG_02, VPIF_CH3_V_CFG_03, VPIF_CH3_CTRL,
+ VPIF_CH3_IMG_ADD_OFST, VPIF_CH3_VANC0_STRT, VPIF_CH3_VANC0_SIZE,
+ VPIF_CH3_VANC1_STRT, VPIF_CH3_VANC1_SIZE, 0x7FF, 0x7FF,
+ VPIF_CH3_MAX_MODES
+ },
+};
+
+/* vpif_set_mode_info:
+ * This function is used to set horizontal and vertical config parameters
+ * As per the standard selected for the channel, configure the values of L1, L3,
+ * L5, L7, L9 and L11 in the VPIF registers; also write the width and height
+ */
+static void vpif_set_mode_info(const struct vpif_channel_config_params *config,
+ u8 channel_id, u8 config_channel_id)
+{
+ u32 value;
+
+ value = (config->eav2sav & vpifregs[config_channel_id].width_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->sav2eav & vpifregs[config_channel_id].width_mask);
+ regw(value, vpifregs[channel_id].h_cfg);
+
+ value = (config->l1 & vpifregs[config_channel_id].len_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->l3 & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg_00);
+
+ value = (config->l5 & vpifregs[config_channel_id].len_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->l7 & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg_01);
+
+ value = (config->l9 & vpifregs[config_channel_id].len_mask);
+ value <<= VPIF_CH_LEN_SHIFT;
+ value |= (config->l11 & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg_02);
+
+ value = (config->vsize & vpifregs[config_channel_id].len_mask);
+ regw(value, vpifregs[channel_id].v_cfg);
+}
+
+/* config_vpif_params
+ * Function to set the parameters of a channel
+ * Mainly modifies the channel control register
+ * It sets frame format, yc mux mode
+ */
+static void config_vpif_params(struct vpif_params *vpifparams,
+ u8 channel_id, u8 found)
+{
+ const struct vpif_channel_config_params *config = &vpifparams->std_info;
+ u32 value, ch_nip, reg;
+ u8 start, end;
+ int i;
+
+ start = channel_id;
+ end = channel_id + found;
+
+ for (i = start; i < end; i++) {
+ reg = vpifregs[i].ch_ctrl;
+ if (channel_id < 2)
+ ch_nip = VPIF_CAPTURE_CH_NIP;
+ else
+ ch_nip = VPIF_DISPLAY_CH_NIP;
+
+ vpif_wr_bit(reg, ch_nip, config->frm_fmt);
+ vpif_wr_bit(reg, VPIF_CH_YC_MUX_BIT, config->ycmux_mode);
+ vpif_wr_bit(reg, VPIF_CH_INPUT_FIELD_FRAME_BIT,
+ vpifparams->video_params.storage_mode);
+
+ /* Set raster scanning SDR Format */
+ vpif_clr_bit(reg, VPIF_CH_SDR_FMT_BIT);
+ vpif_wr_bit(reg, VPIF_CH_DATA_MODE_BIT, config->capture_format);
+
+ if (channel_id > 1) /* Set the Pixel enable bit */
+ vpif_set_bit(reg, VPIF_DISPLAY_PIX_EN_BIT);
+ else if (config->capture_format) {
+ /* Set the polarity of various pins */
+ vpif_wr_bit(reg, VPIF_CH_FID_POLARITY_BIT,
+ vpifparams->iface.fid_pol);
+ vpif_wr_bit(reg, VPIF_CH_V_VALID_POLARITY_BIT,
+ vpifparams->iface.vd_pol);
+ vpif_wr_bit(reg, VPIF_CH_H_VALID_POLARITY_BIT,
+ vpifparams->iface.hd_pol);
+
+ value = regr(reg);
+ /* Set data width */
+ value &= ~(0x3u <<
+ VPIF_CH_DATA_WIDTH_BIT);
+ value |= ((vpifparams->params.data_sz) <<
+ VPIF_CH_DATA_WIDTH_BIT);
+ regw(value, reg);
+ }
+
+ /* Write the pitch to the channel's line offset (IMG_ADD_OFST) register */
+ regw((vpifparams->video_params.hpitch),
+ vpifregs[i].line_offset);
+ }
+}
+
+/* vpif_set_video_params
+ * This function is used to set video parameters in VPIF register
+ */
+int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id)
+{
+ const struct vpif_channel_config_params *config = &vpifparams->std_info;
+ int found = 1;
+
+ vpif_set_mode_info(config, channel_id, channel_id);
+ if (!config->ycmux_mode) {
+ /* YC are on separate channels (HDTV formats) */
+ vpif_set_mode_info(config, channel_id + 1, channel_id);
+ found = 2;
+ }
+
+ config_vpif_params(vpifparams, channel_id, found);
+
+ regw(0x80, VPIF_REQ_SIZE);
+ regw(0x01, VPIF_EMULATION_CTRL);
+
+ return found;
+}
+EXPORT_SYMBOL(vpif_set_video_params);
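+
+/*
+ * Rough usage sketch (illustrative; "ch" is a hypothetical channel object):
+ * callers invoke this once the standard is known and use the returned
+ * channel count, e.g.
+ *
+ *	ycmux_mode = vpif_set_video_params(&ch->vpifparams, ch->channel_id);
+ *	enable_channel0(1);
+ *	if (ycmux_mode == 2)
+ *		enable_channel1(1);  (Y and C carried on separate channels)
+ */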
+
+void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams,
+ u8 channel_id)
+{
+ u32 value;
+
+ value = 0x3F8 & (vbiparams->hstart0);
+ value |= 0x3FFFFFF & ((vbiparams->vstart0) << 16);
+ regw(value, vpifregs[channel_id].vanc0_strt);
+
+ value = 0x3F8 & (vbiparams->hstart1);
+ value |= 0x3FFFFFF & ((vbiparams->vstart1) << 16);
+ regw(value, vpifregs[channel_id].vanc1_strt);
+
+ value = 0x3F8 & (vbiparams->hsize0);
+ value |= 0x3FFFFFF & ((vbiparams->vsize0) << 16);
+ regw(value, vpifregs[channel_id].vanc0_size);
+
+ value = 0x3F8 & (vbiparams->hsize1);
+ value |= 0x3FFFFFF & ((vbiparams->vsize1) << 16);
+ regw(value, vpifregs[channel_id].vanc1_size);
+
+}
+EXPORT_SYMBOL(vpif_set_vbi_display_params);
+
+int vpif_channel_getfid(u8 channel_id)
+{
+ return (regr(vpifregs[channel_id].ch_ctrl) & VPIF_CH_FID_MASK)
+ >> VPIF_CH_FID_SHIFT;
+}
+EXPORT_SYMBOL(vpif_channel_getfid);
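+
+/*
+ * Usage note: this returns the FID value (0 or 1) latched in the channel
+ * control register; capture/display interrupt handlers typically use it to
+ * tell which field of an interlaced frame has just completed.
+ */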
+
+static int vpif_probe(struct platform_device *pdev)
+{
+ static struct resource *res, *res_irq;
+ struct platform_device *pdev_capture, *pdev_display;
+ struct device_node *endpoint = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vpif_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vpif_base))
+ return PTR_ERR(vpif_base);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get(&pdev->dev);
+
+ spin_lock_init(&vpif_lock);
+ dev_info(&pdev->dev, "vpif probe success\n");
+
+ /*
+ * If VPIF Node has endpoints, assume "new" DT support,
+ * where capture and display drivers don't have DT nodes
+ * so their devices need to be registered manually here
+ * for their legacy platform_drivers to work.
+ */
+ endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
+ endpoint);
+ if (!endpoint)
+ return 0;
+
+ /*
+ * For DT platforms, manually create platform_devices for
+ * capture/display drivers.
+ */
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq) {
+ dev_warn(&pdev->dev, "Missing IRQ resource.\n");
+ return -EINVAL;
+ }
+
+ pdev_capture = devm_kzalloc(&pdev->dev, sizeof(*pdev_capture),
+ GFP_KERNEL);
+ if (pdev_capture) {
+ pdev_capture->name = "vpif_capture";
+ pdev_capture->id = -1;
+ pdev_capture->resource = res_irq;
+ pdev_capture->num_resources = 1;
+ pdev_capture->dev.dma_mask = pdev->dev.dma_mask;
+ pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ pdev_capture->dev.parent = &pdev->dev;
+ platform_device_register(pdev_capture);
+ } else {
+ dev_warn(&pdev->dev, "Unable to allocate memory for pdev_capture.\n");
+ }
+
+ pdev_display = devm_kzalloc(&pdev->dev, sizeof(*pdev_display),
+ GFP_KERNEL);
+ if (pdev_display) {
+ pdev_display->name = "vpif_display";
+ pdev_display->id = -1;
+ pdev_display->resource = res_irq;
+ pdev_display->num_resources = 1;
+ pdev_display->dev.dma_mask = pdev->dev.dma_mask;
+ pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ pdev_display->dev.parent = &pdev->dev;
+ platform_device_register(pdev_display);
+ } else {
+ dev_warn(&pdev->dev, "Unable to allocate memory for pdev_display.\n");
+ }
+
+ return 0;
+}
+
+static int vpif_remove(struct platform_device *pdev)
+{
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int vpif_suspend(struct device *dev)
+{
+ pm_runtime_put(dev);
+ return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+ pm_runtime_get(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops vpif_pm = {
+ .suspend = vpif_suspend,
+ .resume = vpif_resume,
+};
+
+#define vpif_pm_ops (&vpif_pm)
+#else
+#define vpif_pm_ops NULL
+#endif
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id vpif_of_match[] = {
+ { .compatible = "ti,da850-vpif", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, vpif_of_match);
+#endif
+
+static struct platform_driver vpif_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(vpif_of_match),
+ .name = VPIF_DRIVER_NAME,
+ .pm = vpif_pm_ops,
+ },
+ .remove = vpif_remove,
+ .probe = vpif_probe,
+};
+
+static void vpif_exit(void)
+{
+ platform_driver_unregister(&vpif_driver);
+}
+
+static int __init vpif_init(void)
+{
+ return platform_driver_register(&vpif_driver);
+}
+subsys_initcall(vpif_init);
+module_exit(vpif_exit);
+
diff --git a/drivers/media/platform/davinci/vpif.h b/drivers/media/platform/davinci/vpif.h
new file mode 100644
index 000000000..2466c7c77
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif.h
@@ -0,0 +1,688 @@
+/*
+ * VPIF header file
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef VPIF_H
+#define VPIF_H
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <media/davinci/vpif_types.h>
+
+/* Maximum channel allowed */
+#define VPIF_NUM_CHANNELS (4)
+#define VPIF_CAPTURE_NUM_CHANNELS (2)
+#define VPIF_DISPLAY_NUM_CHANNELS (2)
+
+/* Macros to read/write registers */
+extern void __iomem *vpif_base;
+extern spinlock_t vpif_lock;
+
+#define regr(reg) readl((reg) + vpif_base)
+#define regw(value, reg) writel(value, (reg + vpif_base))
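+
+/*
+ * Register offsets below are relative to vpif_base, e.g. regr(VPIF_CH0_CTRL)
+ * reads the channel 0 control register. Read-modify-write sequences go
+ * through the vpif_set_bit()/vpif_clr_bit() helpers further down.
+ */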
+
+/* Register Address Offsets */
+#define VPIF_PID (0x0000)
+#define VPIF_CH0_CTRL (0x0004)
+#define VPIF_CH1_CTRL (0x0008)
+#define VPIF_CH2_CTRL (0x000C)
+#define VPIF_CH3_CTRL (0x0010)
+
+#define VPIF_INTEN (0x0020)
+#define VPIF_INTEN_SET (0x0024)
+#define VPIF_INTEN_CLR (0x0028)
+#define VPIF_STATUS (0x002C)
+#define VPIF_STATUS_CLR (0x0030)
+#define VPIF_EMULATION_CTRL (0x0034)
+#define VPIF_REQ_SIZE (0x0038)
+
+#define VPIF_CH0_TOP_STRT_ADD_LUMA (0x0040)
+#define VPIF_CH0_BTM_STRT_ADD_LUMA (0x0044)
+#define VPIF_CH0_TOP_STRT_ADD_CHROMA (0x0048)
+#define VPIF_CH0_BTM_STRT_ADD_CHROMA (0x004c)
+#define VPIF_CH0_TOP_STRT_ADD_HANC (0x0050)
+#define VPIF_CH0_BTM_STRT_ADD_HANC (0x0054)
+#define VPIF_CH0_TOP_STRT_ADD_VANC (0x0058)
+#define VPIF_CH0_BTM_STRT_ADD_VANC (0x005c)
+#define VPIF_CH0_SP_CFG (0x0060)
+#define VPIF_CH0_IMG_ADD_OFST (0x0064)
+#define VPIF_CH0_HANC_ADD_OFST (0x0068)
+#define VPIF_CH0_H_CFG (0x006c)
+#define VPIF_CH0_V_CFG_00 (0x0070)
+#define VPIF_CH0_V_CFG_01 (0x0074)
+#define VPIF_CH0_V_CFG_02 (0x0078)
+#define VPIF_CH0_V_CFG_03 (0x007c)
+
+#define VPIF_CH1_TOP_STRT_ADD_LUMA (0x0080)
+#define VPIF_CH1_BTM_STRT_ADD_LUMA (0x0084)
+#define VPIF_CH1_TOP_STRT_ADD_CHROMA (0x0088)
+#define VPIF_CH1_BTM_STRT_ADD_CHROMA (0x008c)
+#define VPIF_CH1_TOP_STRT_ADD_HANC (0x0090)
+#define VPIF_CH1_BTM_STRT_ADD_HANC (0x0094)
+#define VPIF_CH1_TOP_STRT_ADD_VANC (0x0098)
+#define VPIF_CH1_BTM_STRT_ADD_VANC (0x009c)
+#define VPIF_CH1_SP_CFG (0x00a0)
+#define VPIF_CH1_IMG_ADD_OFST (0x00a4)
+#define VPIF_CH1_HANC_ADD_OFST (0x00a8)
+#define VPIF_CH1_H_CFG (0x00ac)
+#define VPIF_CH1_V_CFG_00 (0x00b0)
+#define VPIF_CH1_V_CFG_01 (0x00b4)
+#define VPIF_CH1_V_CFG_02 (0x00b8)
+#define VPIF_CH1_V_CFG_03 (0x00bc)
+
+#define VPIF_CH2_TOP_STRT_ADD_LUMA (0x00c0)
+#define VPIF_CH2_BTM_STRT_ADD_LUMA (0x00c4)
+#define VPIF_CH2_TOP_STRT_ADD_CHROMA (0x00c8)
+#define VPIF_CH2_BTM_STRT_ADD_CHROMA (0x00cc)
+#define VPIF_CH2_TOP_STRT_ADD_HANC (0x00d0)
+#define VPIF_CH2_BTM_STRT_ADD_HANC (0x00d4)
+#define VPIF_CH2_TOP_STRT_ADD_VANC (0x00d8)
+#define VPIF_CH2_BTM_STRT_ADD_VANC (0x00dc)
+#define VPIF_CH2_SP_CFG (0x00e0)
+#define VPIF_CH2_IMG_ADD_OFST (0x00e4)
+#define VPIF_CH2_HANC_ADD_OFST (0x00e8)
+#define VPIF_CH2_H_CFG (0x00ec)
+#define VPIF_CH2_V_CFG_00 (0x00f0)
+#define VPIF_CH2_V_CFG_01 (0x00f4)
+#define VPIF_CH2_V_CFG_02 (0x00f8)
+#define VPIF_CH2_V_CFG_03 (0x00fc)
+#define VPIF_CH2_HANC0_STRT (0x0100)
+#define VPIF_CH2_HANC0_SIZE (0x0104)
+#define VPIF_CH2_HANC1_STRT (0x0108)
+#define VPIF_CH2_HANC1_SIZE (0x010c)
+#define VPIF_CH2_VANC0_STRT (0x0110)
+#define VPIF_CH2_VANC0_SIZE (0x0114)
+#define VPIF_CH2_VANC1_STRT (0x0118)
+#define VPIF_CH2_VANC1_SIZE (0x011c)
+
+#define VPIF_CH3_TOP_STRT_ADD_LUMA (0x0140)
+#define VPIF_CH3_BTM_STRT_ADD_LUMA (0x0144)
+#define VPIF_CH3_TOP_STRT_ADD_CHROMA (0x0148)
+#define VPIF_CH3_BTM_STRT_ADD_CHROMA (0x014c)
+#define VPIF_CH3_TOP_STRT_ADD_HANC (0x0150)
+#define VPIF_CH3_BTM_STRT_ADD_HANC (0x0154)
+#define VPIF_CH3_TOP_STRT_ADD_VANC (0x0158)
+#define VPIF_CH3_BTM_STRT_ADD_VANC (0x015c)
+#define VPIF_CH3_SP_CFG (0x0160)
+#define VPIF_CH3_IMG_ADD_OFST (0x0164)
+#define VPIF_CH3_HANC_ADD_OFST (0x0168)
+#define VPIF_CH3_H_CFG (0x016c)
+#define VPIF_CH3_V_CFG_00 (0x0170)
+#define VPIF_CH3_V_CFG_01 (0x0174)
+#define VPIF_CH3_V_CFG_02 (0x0178)
+#define VPIF_CH3_V_CFG_03 (0x017c)
+#define VPIF_CH3_HANC0_STRT (0x0180)
+#define VPIF_CH3_HANC0_SIZE (0x0184)
+#define VPIF_CH3_HANC1_STRT (0x0188)
+#define VPIF_CH3_HANC1_SIZE (0x018c)
+#define VPIF_CH3_VANC0_STRT (0x0190)
+#define VPIF_CH3_VANC0_SIZE (0x0194)
+#define VPIF_CH3_VANC1_STRT (0x0198)
+#define VPIF_CH3_VANC1_SIZE (0x019c)
+
+#define VPIF_IODFT_CTRL (0x01c0)
+
+/* Functions for bit Manipulation */
+static inline void vpif_set_bit(u32 reg, u32 bit)
+{
+ regw((regr(reg)) | (0x01 << bit), reg);
+}
+
+static inline void vpif_clr_bit(u32 reg, u32 bit)
+{
+ regw(((regr(reg)) & ~(0x01 << bit)), reg);
+}
+
+/* Macro for Generating mask */
+#ifdef GENERATE_MASK
+#undef GENERATE_MASK
+#endif
+
+#define GENERATE_MASK(bits, pos) \
+ ((((0xFFFFFFFF) << (32 - bits)) >> (32 - bits)) << pos)
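+
+/*
+ * For example, GENERATE_MASK(13, 0) evaluates to 0x1FFF and
+ * GENERATE_MASK(12, 0) to 0xFFF, i.e. masks of 13 and 12 low-order bits,
+ * matching the channel 0/1 width and length masks used in vpif.c.
+ */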
+
+/* Bit positions in the channel control registers */
+#define VPIF_CH_DATA_MODE_BIT (2)
+#define VPIF_CH_YC_MUX_BIT (3)
+#define VPIF_CH_SDR_FMT_BIT (4)
+#define VPIF_CH_HANC_EN_BIT (8)
+#define VPIF_CH_VANC_EN_BIT (9)
+
+#define VPIF_CAPTURE_CH_NIP (10)
+#define VPIF_DISPLAY_CH_NIP (11)
+
+#define VPIF_DISPLAY_PIX_EN_BIT (10)
+
+#define VPIF_CH_INPUT_FIELD_FRAME_BIT (12)
+
+#define VPIF_CH_FID_POLARITY_BIT (15)
+#define VPIF_CH_V_VALID_POLARITY_BIT (14)
+#define VPIF_CH_H_VALID_POLARITY_BIT (13)
+#define VPIF_CH_DATA_WIDTH_BIT (28)
+
+#define VPIF_CH_CLK_EDGE_CTRL_BIT (31)
+
+/* Masks for various lengths */
+#define VPIF_CH_EAVSAV_MASK GENERATE_MASK(13, 0)
+#define VPIF_CH_LEN_MASK GENERATE_MASK(12, 0)
+#define VPIF_CH_WIDTH_MASK GENERATE_MASK(13, 0)
+#define VPIF_CH_LEN_SHIFT (16)
+
+/* VPIF masks for registers */
+#define VPIF_REQ_SIZE_MASK (0x1ff)
+
+/* bit positions of the frame interrupts in the vpif interrupt registers */
+#define VPIF_INTEN_FRAME_CH0 (0x00000001)
+#define VPIF_INTEN_FRAME_CH1 (0x00000002)
+#define VPIF_INTEN_FRAME_CH2 (0x00000004)
+#define VPIF_INTEN_FRAME_CH3 (0x00000008)
+
+/* bit position of clock and channel enable in vpif_chn_ctrl register */
+
+#define VPIF_CH0_CLK_EN (0x00000002)
+#define VPIF_CH0_EN (0x00000001)
+#define VPIF_CH1_CLK_EN (0x00000002)
+#define VPIF_CH1_EN (0x00000001)
+#define VPIF_CH2_CLK_EN (0x00000002)
+#define VPIF_CH2_EN (0x00000001)
+#define VPIF_CH3_CLK_EN (0x00000002)
+#define VPIF_CH3_EN (0x00000001)
+#define VPIF_CH_CLK_EN (0x00000002)
+#define VPIF_CH_EN (0x00000001)
+
+#define VPIF_INT_TOP (0x00)
+#define VPIF_INT_BOTTOM (0x01)
+#define VPIF_INT_BOTH (0x02)
+
+#define VPIF_CH0_INT_CTRL_SHIFT (6)
+#define VPIF_CH1_INT_CTRL_SHIFT (6)
+#define VPIF_CH2_INT_CTRL_SHIFT (6)
+#define VPIF_CH3_INT_CTRL_SHIFT (6)
+#define VPIF_CH_INT_CTRL_SHIFT (6)
+
+#define VPIF_CH2_CLIP_ANC_EN 14
+#define VPIF_CH2_CLIP_ACTIVE_EN 13
+
+#define VPIF_CH3_CLIP_ANC_EN 14
+#define VPIF_CH3_CLIP_ACTIVE_EN 13
+
+/* enable interrupts on both fields in the vpif_ch0_ctrl register */
+#define channel0_intr_assert() (regw((regr(VPIF_CH0_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH0_INT_CTRL_SHIFT)), VPIF_CH0_CTRL))
+
+/* enable interrupts on both fields in the vpif_ch1_ctrl register */
+#define channel1_intr_assert() (regw((regr(VPIF_CH1_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH1_INT_CTRL_SHIFT)), VPIF_CH1_CTRL))
+
+/* enable interrupts on both fields in the vpif_ch2_ctrl register */
+#define channel2_intr_assert() (regw((regr(VPIF_CH2_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH2_INT_CTRL_SHIFT)), VPIF_CH2_CTRL))
+
+/* enable interrupts on both fields in the vpif_ch3_ctrl register */
+#define channel3_intr_assert() (regw((regr(VPIF_CH3_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH3_INT_CTRL_SHIFT)), VPIF_CH3_CTRL))
+
+#define VPIF_CH_FID_MASK (0x20)
+#define VPIF_CH_FID_SHIFT (5)
+
+#define VPIF_NTSC_VBI_START_FIELD0 (1)
+#define VPIF_NTSC_VBI_START_FIELD1 (263)
+#define VPIF_PAL_VBI_START_FIELD0 (624)
+#define VPIF_PAL_VBI_START_FIELD1 (311)
+
+#define VPIF_NTSC_HBI_START_FIELD0 (1)
+#define VPIF_NTSC_HBI_START_FIELD1 (263)
+#define VPIF_PAL_HBI_START_FIELD0 (624)
+#define VPIF_PAL_HBI_START_FIELD1 (311)
+
+#define VPIF_NTSC_VBI_COUNT_FIELD0 (20)
+#define VPIF_NTSC_VBI_COUNT_FIELD1 (19)
+#define VPIF_PAL_VBI_COUNT_FIELD0 (24)
+#define VPIF_PAL_VBI_COUNT_FIELD1 (25)
+
+#define VPIF_NTSC_HBI_COUNT_FIELD0 (263)
+#define VPIF_NTSC_HBI_COUNT_FIELD1 (262)
+#define VPIF_PAL_HBI_COUNT_FIELD0 (312)
+#define VPIF_PAL_HBI_COUNT_FIELD1 (313)
+
+#define VPIF_NTSC_VBI_SAMPLES_PER_LINE (720)
+#define VPIF_PAL_VBI_SAMPLES_PER_LINE (720)
+#define VPIF_NTSC_HBI_SAMPLES_PER_LINE (268)
+#define VPIF_PAL_HBI_SAMPLES_PER_LINE (280)
+
+#define VPIF_CH_VANC_EN (0x20)
+#define VPIF_DMA_REQ_SIZE (0x080)
+#define VPIF_EMULATION_DISABLE (0x01)
+
+extern u8 irq_vpif_capture_channel[VPIF_NUM_CHANNELS];
+
+/* inline function to enable/disable channel0 */
+static inline void enable_channel0(int enable)
+{
+ if (enable)
+ regw((regr(VPIF_CH0_CTRL) | (VPIF_CH0_EN)), VPIF_CH0_CTRL);
+ else
+ regw((regr(VPIF_CH0_CTRL) & (~VPIF_CH0_EN)), VPIF_CH0_CTRL);
+}
+
+/* inline function to enable/disable channel1 */
+static inline void enable_channel1(int enable)
+{
+ if (enable)
+ regw((regr(VPIF_CH1_CTRL) | (VPIF_CH1_EN)), VPIF_CH1_CTRL);
+ else
+ regw((regr(VPIF_CH1_CTRL) & (~VPIF_CH1_EN)), VPIF_CH1_CTRL);
+}
+
+/* inline function to enable interrupt for channel0 */
+static inline void channel0_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH0), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH0)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable interrupt for channel1 */
+static inline void channel1_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH1), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH1)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to set buffer addresses in case of Y/C non mux mode */
+static inline void ch0_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for video data */
+static inline void ch0_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH0_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH0_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch1_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+
+ regw(top_strt_luma, VPIF_CH1_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH1_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch0_set_vbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_VANC);
+ regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch0_set_hbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_HANC);
+ regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_HANC);
+}
+
+static inline void ch1_set_vbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_VANC);
+ regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch1_set_hbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_HANC);
+ regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_HANC);
+}
+
+/* Inline function to disable raw vbi in the given channel */
+static inline void disable_raw_feature(u8 channel_id, u8 index)
+{
+ u32 ctrl_reg;
+ if (0 == channel_id)
+ ctrl_reg = VPIF_CH0_CTRL;
+ else
+ ctrl_reg = VPIF_CH1_CTRL;
+
+ if (1 == index)
+ vpif_clr_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+ else
+ vpif_clr_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+static inline void enable_raw_feature(u8 channel_id, u8 index)
+{
+ u32 ctrl_reg;
+ if (0 == channel_id)
+ ctrl_reg = VPIF_CH0_CTRL;
+ else
+ ctrl_reg = VPIF_CH1_CTRL;
+
+ if (1 == index)
+ vpif_set_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+ else
+ vpif_set_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+/* inline function to enable/disable channel2 */
+static inline void enable_channel2(int enable)
+{
+ if (enable) {
+ regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+ regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_EN)), VPIF_CH2_CTRL);
+ } else {
+ regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+ regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_EN)), VPIF_CH2_CTRL);
+ }
+}
+
+/* inline function to enable/disable channel3 */
+static inline void enable_channel3(int enable)
+{
+ if (enable) {
+ regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL);
+ regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_EN)), VPIF_CH3_CTRL);
+ } else {
+ regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL);
+ regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_EN)), VPIF_CH3_CTRL);
+ }
+}
+
+/* inline function to enable interrupt for channel2 */
+static inline void channel2_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH2), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH2)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable interrupt for channel3 */
+static inline void channel3_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH3), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH3)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable raw vbi data for channel2 */
+static inline void channel2_raw_enable(int enable, u8 index)
+{
+ u32 mask;
+
+ if (1 == index)
+ mask = VPIF_CH_VANC_EN_BIT;
+ else
+ mask = VPIF_CH_HANC_EN_BIT;
+
+ if (enable)
+ vpif_set_bit(VPIF_CH2_CTRL, mask);
+ else
+ vpif_clr_bit(VPIF_CH2_CTRL, mask);
+}
+
+/* inline function to enable raw vbi data for channel3*/
+static inline void channel3_raw_enable(int enable, u8 index)
+{
+ u32 mask;
+
+ if (1 == index)
+ mask = VPIF_CH_VANC_EN_BIT;
+ else
+ mask = VPIF_CH_HANC_EN_BIT;
+
+ if (enable)
+ vpif_set_bit(VPIF_CH3_CTRL, mask);
+ else
+ vpif_clr_bit(VPIF_CH3_CTRL, mask);
+}
+
+/* function to enable clipping (for both active and blanking regions) on ch 2 */
+static inline void channel2_clipping_enable(int enable)
+{
+ if (enable) {
+ vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+ vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+ } else {
+ vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+ vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+ }
+}
+
+/* function to enable clipping (for both active and blanking regions) on ch 3 */
+static inline void channel3_clipping_enable(int enable)
+{
+ if (enable) {
+ vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+ vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+ } else {
+ vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+ vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+ }
+}
+
+/* inline function to set buffer addresses in case of Y/C non mux mode */
+static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for video data */
+static inline void ch2_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH2_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH2_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch3_set_videobuf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for vbi data */
+static inline void ch2_set_vbi_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_VANC);
+ regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch3_set_vbi_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_VANC);
+ regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_VANC);
+}
+
+static inline int vpif_intr_status(int channel)
+{
+ int status = 0;
+ int mask;
+
+ if (channel < 0 || channel > 3)
+ return 0;
+
+ mask = 1 << channel;
+ status = regr(VPIF_STATUS) & mask;
+ regw(status, VPIF_STATUS_CLR);
+
+ return status;
+}
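+
+/*
+ * Illustrative only: an interrupt handler would typically call this once per
+ * channel it services, e.g. "if (vpif_intr_status(channel_id)) ..." where
+ * channel_id is 0-3; the read also clears the status via VPIF_STATUS_CLR.
+ */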
+
+#define VPIF_MAX_NAME (30)
+
+/* This structure will store size parameters as per the mode selected by user */
+struct vpif_channel_config_params {
+ char name[VPIF_MAX_NAME]; /* Name of the mode */
+ u16 width; /* Indicates width of the image */
+ u16 height; /* Indicates height of the image */
+ u8 frm_fmt; /* Interlaced (0) or progressive (1) */
+ u8 ycmux_mode; /* This mode requires one (0) or two (1)
+ channels */
+ u16 eav2sav; /* length of eav 2 sav */
+ u16 sav2eav; /* length of sav 2 eav */
+ u16 l1, l3, l5, l7, l9, l11; /* Other parameter configurations */
+ u16 vsize; /* Vertical size of the image */
+ u8 capture_format; /* Indicates whether capture format
+ * is in BT or in CCD/CMOS */
+ u8 vbi_supported; /* Indicates whether this mode
+ * supports capturing vbi or not */
+ u8 hd_sd; /* HDTV (1) or SDTV (0) format */
+ v4l2_std_id stdid; /* SDTV format */
+ struct v4l2_dv_timings dv_timings; /* HDTV format */
+};
+
+extern const unsigned int vpif_ch_params_count;
+extern const struct vpif_channel_config_params vpif_ch_params[];
+
+struct vpif_video_params;
+struct vpif_params;
+struct vpif_vbi_params;
+
+int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id);
+void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams,
+ u8 channel_id);
+int vpif_channel_getfid(u8 channel_id);
+
+enum data_size {
+ _8BITS = 0,
+ _10BITS,
+ _12BITS,
+};
+
+/* Structure for vpif parameters for raw vbi data */
+struct vpif_vbi_params {
+ __u32 hstart0; /* Horizontal start of raw vbi data for first field */
+ __u32 vstart0; /* Vertical start of raw vbi data for first field */
+ __u32 hsize0; /* Horizontal size of raw vbi data for first field */
+ __u32 vsize0; /* Vertical size of raw vbi data for first field */
+ __u32 hstart1; /* Horizontal start of raw vbi data for second field */
+ __u32 vstart1; /* Vertical start of raw vbi data for second field */
+ __u32 hsize1; /* Horizontal size of raw vbi data for second field */
+ __u32 vsize1; /* Vertical size of raw vbi data for second field */
+};
+
+/* structure for vpif parameters */
+struct vpif_video_params {
+ __u8 storage_mode; /* Indicates field or frame mode */
+ unsigned long hpitch;
+ v4l2_std_id stdid;
+};
+
+struct vpif_params {
+ struct vpif_interface iface;
+ struct vpif_video_params video_params;
+ struct vpif_channel_config_params std_info;
+ union param {
+ struct vpif_vbi_params vbi_params;
+ enum data_size data_sz;
+ } params;
+};
+
+#endif /* End of #ifndef VPIF_H */
+
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
new file mode 100644
index 000000000..a96f53ce8
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -0,0 +1,1832 @@
+/*
+ * Copyright (C) 2009 Texas Instruments Inc
+ * Copyright (C) 2014 Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO : add support for VBI & HBI data service
+ * add static buffer allocation
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/i2c/tvp514x.h>
+#include <media/v4l2-mediabus.h>
+
+#include <linux/videodev2.h>
+
+#include "vpif.h"
+#include "vpif_capture.h"
+
+MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_CAPTURE_VERSION);
+
+#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
+#define vpif_dbg(level, debug, fmt, arg...) \
+ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)
+
+static int debug = 1;
+
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+#define VPIF_DRIVER_NAME "vpif_capture"
+MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
+
+/* global variables */
+static struct vpif_device vpif_obj = { {NULL} };
+static struct device *vpif_dev;
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
+
+static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} };
+
+/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
+static int ycmux_mode;
+
+static inline
+struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct vpif_cap_buffer, vb);
+}
+
+/**
+ * vpif_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the callback function for buffer prepare. It is called when
+ * vb2_qbuf() is invoked; the plane payload is set up and the DMA
+ * address alignment is verified
+ */
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *q = vb->vb2_queue;
+ struct channel_obj *ch = vb2_get_drv_priv(q);
+ struct common_obj *common;
+ unsigned long addr;
+
+ vpif_dbg(2, debug, "vpif_buffer_prepare\n");
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vbuf->field = common->fmt.fmt.pix.field;
+
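+ /* VPIF requires 8-byte aligned luma/chroma field start addresses */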
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
+ !IS_ALIGNED((addr + common->ybtm_off), 8) ||
+ !IS_ALIGNED((addr + common->ctop_off), 8) ||
+ !IS_ALIGNED((addr + common->cbtm_off), 8)) {
+ vpif_dbg(1, debug, "offset is not aligned\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpif_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains number of distinct video planes needed to hold a frame
+ * @sizes: contains the size (in bytes) of each plane.
+ * @alloc_devs: ptr to allocation context
+ *
+ * This callback function is called when reqbuf() is called to adjust
+ * the buffer count and buffer size
+ */
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ unsigned size = common->fmt.fmt.pix.sizeimage;
+
+ vpif_dbg(2, debug, "vpif_buffer_setup\n");
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
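+ /* Ensure at least three buffers are queued so capture never starves */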
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ /* Calculate the offset for Y and C data in the buffer */
+ vpif_calculate_offsets(ch);
+
+ return 0;
+}
+
+/**
+ * vpif_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ */
+static void vpif_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf);
+ struct common_obj *common;
+ unsigned long flags;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vpif_dbg(2, debug, "vpif_buffer_queue\n");
+
+ spin_lock_irqsave(&common->irqlock, flags);
+ /* add the buffer to the DMA queue */
+ list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+/**
+ * vpif_start_streaming : Starts the DMA engine for streaming
+ * @vq: ptr to vb2_queue
+ * @count: number of buffers
+ */
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpif_capture_config *vpif_config_data =
+ vpif_dev->platform_data;
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpif = &ch->vpifparams;
+ struct vpif_cap_buffer *buf, *tmp;
+ unsigned long addr, flags;
+ int ret;
+
+ /* Initialize field_id */
+ ch->field_id = 0;
+
+ /* configure 1 or 2 channel mode */
+ if (vpif_config_data->setup_input_channel_mode) {
+ ret = vpif_config_data->
+ setup_input_channel_mode(vpif->std_info.ycmux_mode);
+ if (ret < 0) {
+ vpif_dbg(1, debug, "can't set vpif channel mode\n");
+ goto err;
+ }
+ }
+
+ ret = v4l2_subdev_call(ch->sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+ vpif_dbg(1, debug, "stream on failed in subdev\n");
+ goto err;
+ }
+
+ /* Call vpif_set_params function to set the parameters and addresses */
+ ret = vpif_set_video_params(vpif, ch->channel_id);
+ if (ret < 0) {
+ vpif_dbg(1, debug, "can't set video params\n");
+ goto err;
+ }
+
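+ /* vpif_set_video_params() returns the number of channels used (1 or 2) */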
+ ycmux_mode = ret;
+ vpif_config_addr(ch, ret);
+
+ /* Get the next frame from the buffer queue */
+ spin_lock_irqsave(&common->irqlock, flags);
+ common->cur_frm = common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
+
+ common->set_addr(addr + common->ytop_off,
+ addr + common->ybtm_off,
+ addr + common->ctop_off,
+ addr + common->cbtm_off);
+
+ /*
+ * Set the interrupt for both fields and enable the channel in the
+ * VPIF registers
+ */
+ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+ channel0_intr_assert();
+ channel0_intr_enable(1);
+ enable_channel0(1);
+ }
+ if (VPIF_CHANNEL1_VIDEO == ch->channel_id ||
+ ycmux_mode == 2) {
+ channel1_intr_assert();
+ channel1_intr_enable(1);
+ enable_channel1(1);
+ }
+
+ return 0;
+
+err:
+ spin_lock_irqsave(&common->irqlock, flags);
+ list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+
+ return ret;
+}
+
+/**
+ * vpif_stop_streaming : Stop the DMA engine
+ * @vq: ptr to vb2_queue
+ *
+ * This callback stops the DMA engine and any remaining buffers
+ * in the DMA queue are released.
+ */
+static void vpif_stop_streaming(struct vb2_queue *vq)
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common;
+ unsigned long flags;
+ int ret;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* Disable channel as per its device type and channel id */
+ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+ enable_channel0(0);
+ channel0_intr_enable(0);
+ }
+ if (VPIF_CHANNEL1_VIDEO == ch->channel_id ||
+ ycmux_mode == 2) {
+ enable_channel1(0);
+ channel1_intr_enable(0);
+ }
+
+ ycmux_mode = 0;
+
+ ret = v4l2_subdev_call(ch->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ vpif_dbg(1, debug, "stream off failed in subdev\n");
+
+ /* release all active buffers */
+ if (common->cur_frm == common->next_frm) {
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ } else {
+ if (common->cur_frm)
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ if (common->next_frm)
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ spin_lock_irqsave(&common->irqlock, flags);
+ while (!list_empty(&common->dma_queue)) {
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ list_del(&common->next_frm->list);
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+static const struct vb2_ops video_qops = {
+ .queue_setup = vpif_buffer_queue_setup,
+ .buf_prepare = vpif_buffer_prepare,
+ .start_streaming = vpif_start_streaming,
+ .stop_streaming = vpif_stop_streaming,
+ .buf_queue = vpif_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/**
+ * vpif_process_buffer_complete: process a completed buffer
+ * @common: ptr to common channel object
+ *
+ * This function timestamps the buffer, marks it as DONE, wakes up any
+ * process waiting on the queue and makes the next buffer the current
+ * one
+ */
+static void vpif_process_buffer_complete(struct common_obj *common)
+{
+ common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ /* Make curFrm pointing to nextFrm */
+ common->cur_frm = common->next_frm;
+}
+
+/**
+ * vpif_schedule_next_buffer: set next buffer address for capture
+ * @common : ptr to common channel object
+ *
+ * This function gets the next buffer from the DMA queue, sets its
+ * address in the VPIF registers for capture and marks the buffer
+ * as active
+ */
+static void vpif_schedule_next_buffer(struct common_obj *common)
+{
+ unsigned long addr = 0;
+
+ spin_lock(&common->irqlock);
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ /* Remove that buffer from the buffer queue */
+ list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
+
+ /* Set top and bottom field addresses in VPIF registers */
+ common->set_addr(addr + common->ytop_off,
+ addr + common->ybtm_off,
+ addr + common->ctop_off,
+ addr + common->cbtm_off);
+}
+
+/**
+ * vpif_channel_isr : ISR handler for vpif capture
+ * @irq: irq number
+ * @dev_id: dev_id ptr
+ *
+ * It changes status of the captured buffer, takes next buffer from the queue
+ * and sets its address in VPIF registers
+ */
+static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
+{
+ struct vpif_device *dev = &vpif_obj;
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int channel_id;
+ int fid = -1, i;
+
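+ /* dev_id points at the channel_id member set up in vpif_probe() */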
+ channel_id = *(int *)(dev_id);
+ if (!vpif_intr_status(channel_id))
+ return IRQ_NONE;
+
+ ch = dev->dev[channel_id];
+
+ for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) {
+ common = &ch->common[i];
+ /* skip if streaming is not started on this channel */
+ /* Check the field format */
+ if (1 == ch->vpifparams.std_info.frm_fmt ||
+ common->fmt.fmt.pix.field == V4L2_FIELD_NONE) {
+ /* Progressive mode */
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
+ continue;
+ }
+ spin_unlock(&common->irqlock);
+
+ if (!channel_first_int[i][channel_id])
+ vpif_process_buffer_complete(common);
+
+ channel_first_int[i][channel_id] = 0;
+
+ vpif_schedule_next_buffer(common);
+ } else {
+ /**
+ * Interlaced mode. If it is first interrupt, ignore
+ * it
+ */
+ if (channel_first_int[i][channel_id]) {
+ channel_first_int[i][channel_id] = 0;
+ continue;
+ }
+ if (0 == i) {
+ ch->field_id ^= 1;
+ /* Get field id from VPIF registers */
+ fid = vpif_channel_getfid(ch->channel_id);
+ if (fid != ch->field_id) {
+ /**
+ * If field id does not match stored
+ * field id, make them in sync
+ */
+ if (0 == fid)
+ ch->field_id = fid;
+ return IRQ_HANDLED;
+ }
+ }
+ /* device field id and local field id are in sync */
+ if (0 == fid) {
+ /* this is even field */
+ if (common->cur_frm == common->next_frm)
+ continue;
+
+ /* mark the current buffer as done */
+ vpif_process_buffer_complete(common);
+ } else if (1 == fid) {
+ /* odd field */
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue) ||
+ (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
+ continue;
+ }
+ spin_unlock(&common->irqlock);
+
+ vpif_schedule_next_buffer(common);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * vpif_update_std_info() - update standard related info
+ * @ch: ptr to channel object
+ *
+ * For a given standard selected by application, update values
+ * in the device data structures
+ */
+static int vpif_update_std_info(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ const struct vpif_channel_config_params *config;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ int index;
+ struct v4l2_pix_format *pixfmt = &common->fmt.fmt.pix;
+
+ vpif_dbg(2, debug, "vpif_update_std_info\n");
+
+ /*
+ * if called after try_fmt or g_fmt, there will already be a size
+ * so use that by default.
+ */
+ if (pixfmt->width && pixfmt->height) {
+ if (pixfmt->field == V4L2_FIELD_ANY ||
+ pixfmt->field == V4L2_FIELD_NONE)
+ pixfmt->field = V4L2_FIELD_NONE;
+
+ vpifparams->iface.if_type = VPIF_IF_BT656;
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10 ||
+ pixfmt->pixelformat == V4L2_PIX_FMT_SBGGR8)
+ vpifparams->iface.if_type = VPIF_IF_RAW_BAYER;
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10)
+ vpifparams->params.data_sz = 1; /* 10 bits/pixel. */
+
+ /*
+ * For raw formats from camera sensors, we don't need
+ * the std_info from table lookup, so nothing else to do here.
+ */
+ if (vpifparams->iface.if_type == VPIF_IF_RAW_BAYER) {
+ memset(std_info, 0, sizeof(struct vpif_channel_config_params));
+ vpifparams->std_info.capture_format = 1; /* CCD/raw mode */
+ return 0;
+ }
+ }
+
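+ /* Look up the selected SD standard or DV timings in the config table */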
+ for (index = 0; index < vpif_ch_params_count; index++) {
+ config = &vpif_ch_params[index];
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ } else {
+ vpif_dbg(2, debug, "HD format\n");
+ if (!memcmp(&config->dv_timings, &vid_ch->dv_timings,
+ sizeof(vid_ch->dv_timings))) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ }
+ }
+
+ /* standard not found */
+ if (index == vpif_ch_params_count)
+ return -EINVAL;
+
+ common->fmt.fmt.pix.width = std_info->width;
+ common->width = std_info->width;
+ common->fmt.fmt.pix.height = std_info->height;
+ common->height = std_info->height;
+ common->fmt.fmt.pix.sizeimage = common->height * common->width * 2;
+ common->fmt.fmt.pix.bytesperline = std_info->width;
+ vpifparams->video_params.hpitch = std_info->width;
+ vpifparams->video_params.storage_mode = std_info->frm_fmt;
+
+ if (vid_ch->stdid)
+ common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ if (ch->vpifparams.std_info.frm_fmt)
+ common->fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ else
+ common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
+ common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
+ else
+ common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV16;
+
+ common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+/**
+ * vpif_calculate_offsets : This function calculates buffer offsets
+ * @ch : ptr to channel object
+ *
+ * This function calculates buffer offsets for Y and C in the top and
+ * bottom field
+ */
+static void vpif_calculate_offsets(struct channel_obj *ch)
+{
+ unsigned int hpitch, sizeimage;
+ struct video_obj *vid_ch = &(ch->video);
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ enum v4l2_field field = common->fmt.fmt.pix.field;
+
+ vpif_dbg(2, debug, "vpif_calculate_offsets\n");
+
+ if (V4L2_FIELD_ANY == field) {
+ if (vpifparams->std_info.frm_fmt)
+ vid_ch->buf_field = V4L2_FIELD_NONE;
+ else
+ vid_ch->buf_field = V4L2_FIELD_INTERLACED;
+ } else
+ vid_ch->buf_field = common->fmt.fmt.pix.field;
+
+ sizeimage = common->fmt.fmt.pix.sizeimage;
+
+ hpitch = common->fmt.fmt.pix.bytesperline;
+
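+ /*
+ * Y data fills the first half of the buffer and C data the second
+ * half. Interleaved fields alternate line by line (hpitch apart),
+ * sequential fields are a quarter of the buffer apart.
+ */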
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+ /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+ common->ytop_off = 0;
+ common->ybtm_off = hpitch;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = sizeimage / 2 + hpitch;
+ } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
+ /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+ common->ytop_off = 0;
+ common->ybtm_off = sizeimage / 4;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = common->ctop_off + sizeimage / 4;
+ } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
+ /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+ common->ybtm_off = 0;
+ common->ytop_off = sizeimage / 4;
+ common->cbtm_off = sizeimage / 2;
+ common->ctop_off = common->cbtm_off + sizeimage / 4;
+ }
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field))
+ vpifparams->video_params.storage_mode = 1;
+ else
+ vpifparams->video_params.storage_mode = 0;
+
+ if (1 == vpifparams->std_info.frm_fmt)
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ else {
+ if ((field == V4L2_FIELD_ANY)
+ || (field == V4L2_FIELD_INTERLACED))
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline * 2;
+ else
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ }
+
+ ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid;
+}
+
+/**
+ * vpif_get_default_field() - Get default field type based on interface
+ * @iface: ptr to vpif interface
+ */
+static inline enum v4l2_field vpif_get_default_field(
+ struct vpif_interface *iface)
+{
+ return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE :
+ V4L2_FIELD_INTERLACED;
+}
+
+/**
+ * vpif_config_addr() - function to configure buffer address in vpif
+ * @ch: channel ptr
+ * @muxmode: channel mux mode
+ */
+static void vpif_config_addr(struct channel_obj *ch, int muxmode)
+{
+ struct common_obj *common;
+
+ vpif_dbg(2, debug, "vpif_config_addr\n");
+
+ common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+ if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
+ common->set_addr = ch1_set_videobuf_addr;
+ else if (2 == muxmode)
+ common->set_addr = ch0_set_videobuf_addr_yc_nmux;
+ else
+ common->set_addr = ch0_set_videobuf_addr;
+}
+
+/**
+ * vpif_input_to_subdev() - Maps input to sub device
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @input_index: Given input index from application
+ *
+ * Looks up the sub device information for a given input index.
+ * All inputs are reported to the application; the inputs table
+ * also holds the sub device name for each input.
+ */
+static int vpif_input_to_subdev(
+ struct vpif_capture_config *vpif_cfg,
+ struct vpif_capture_chan_config *chan_cfg,
+ int input_index)
+{
+ struct vpif_subdev_info *subdev_info;
+ const char *subdev_name;
+ int i;
+
+ vpif_dbg(2, debug, "vpif_input_to_subdev\n");
+
+ if (!chan_cfg)
+ return -1;
+ if (input_index >= chan_cfg->input_count)
+ return -1;
+ subdev_name = chan_cfg->inputs[input_index].subdev_name;
+ if (!subdev_name)
+ return -1;
+
+ /* loop through the sub device list to get the sub device info */
+ for (i = 0; i < vpif_cfg->subdev_count; i++) {
+ subdev_info = &vpif_cfg->subdev_info[i];
+ if (subdev_info && !strcmp(subdev_info->name, subdev_name))
+ return i;
+ }
+ return -1;
+}
+
+/**
+ * vpif_set_input() - Select an input
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given input index from application
+ *
+ * Select the given input.
+ */
+static int vpif_set_input(
+ struct vpif_capture_config *vpif_cfg,
+ struct channel_obj *ch,
+ int index)
+{
+ struct vpif_capture_chan_config *chan_cfg =
+ &vpif_cfg->chan_config[ch->channel_id];
+ struct vpif_subdev_info *subdev_info = NULL;
+ struct v4l2_subdev *sd = NULL;
+ u32 input = 0, output = 0;
+ int sd_index;
+ int ret;
+
+ sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index);
+ if (sd_index >= 0) {
+ sd = vpif_obj.sd[sd_index];
+ subdev_info = &vpif_cfg->subdev_info[sd_index];
+ } else {
+ /* no subdevice, no input to setup */
+ return 0;
+ }
+
+ /* first setup input path from sub device to vpif */
+ if (sd && vpif_cfg->setup_input_path) {
+ ret = vpif_cfg->setup_input_path(ch->channel_id,
+ subdev_info->name);
+ if (ret < 0) {
+ vpif_dbg(1, debug, "couldn't setup input path for the" \
+ " sub device %s, for input index %d\n",
+ subdev_info->name, index);
+ return ret;
+ }
+ }
+
+ if (sd) {
+ input = chan_cfg->inputs[index].input_route;
+ output = chan_cfg->inputs[index].output_route;
+ ret = v4l2_subdev_call(sd, video, s_routing,
+ input, output, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ vpif_dbg(1, debug, "Failed to set input\n");
+ return ret;
+ }
+ }
+ ch->input_idx = index;
+ ch->sd = sd;
+ /* copy interface parameters to vpif */
+ ch->vpifparams.iface = chan_cfg->vpif_if;
+
+ /* update tvnorms from the sub device input info */
+ ch->video_dev.tvnorms = chan_cfg->inputs[index].input.std;
+ return 0;
+}
+
+/**
+ * vpif_querystd() - querystd handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std_id: ptr to std id
+ *
+ * This function is called to detect standard at the selected input
+ */
+static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ int ret;
+
+ vpif_dbg(2, debug, "vpif_querystd\n");
+
+ /* Call querystd function of decoder device */
+ ret = v4l2_subdev_call(ch->sd, video, querystd, std_id);
+
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -ENODATA;
+ if (ret) {
+ vpif_dbg(1, debug, "Failed to query standard for sub devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vpif_g_std() - get STD handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std: ptr to std id
+ */
+static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+
+ vpif_dbg(2, debug, "vpif_g_std\n");
+
+ if (!config->chan_config[ch->channel_id].inputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_STD)
+ return -ENODATA;
+
+ *std = ch->video.stdid;
+ return 0;
+}
+
+/**
+ * vpif_s_std() - set STD handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std_id: ptr to std id
+ */
+static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ vpif_dbg(2, debug, "vpif_s_std\n");
+
+ if (!config->chan_config[ch->channel_id].inputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_STD)
+ return -ENODATA;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ /* Call decoder subdevice function to set the standard */
+ ch->video.stdid = std_id;
+ memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+
+ /* Get the information about the standard */
+ if (vpif_update_std_info(ch)) {
+ vpif_err("Error getting the standard info\n");
+ return -EINVAL;
+ }
+
+ /* set standard in the sub device */
+ ret = v4l2_subdev_call(ch->sd, video, s_std, std_id);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+ vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * vpif_enum_input() - ENUMINPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @input: ptr to input structure
+ */
+static int vpif_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+
+ if (input->index >= chan_cfg->input_count)
+ return -EINVAL;
+
+ memcpy(input, &chan_cfg->inputs[input->index].input,
+ sizeof(*input));
+ return 0;
+}
+
+/**
+ * vpif_g_input() - Get INPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @index: ptr to input index
+ */
+static int vpif_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+
+ *index = ch->input_idx;
+ return 0;
+}
+
+/**
+ * vpif_s_input() - Set INPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @index: input index
+ */
+static int vpif_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_capture_chan_config *chan_cfg;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+
+ if (index >= chan_cfg->input_count)
+ return -EINVAL;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ return vpif_set_input(config, ch, index);
+}
+
+/**
+ * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to V4L2 format descriptor
+ */
+static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+
+ if (fmt->index != 0) {
+ vpif_dbg(1, debug, "Invalid format index\n");
+ return -EINVAL;
+ }
+
+ /* Fill in the information about format */
+ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) {
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb");
+ fmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ } else {
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ strcpy(fmt->description, "YCbCr4:2:2 Semi-Planar");
+ fmt->pixelformat = V4L2_PIX_FMT_NV16;
+ }
+ return 0;
+}
+
+/**
+ * vpif_try_fmt_vid_cap() - TRY_FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+ common->fmt = *fmt;
+ vpif_update_std_info(ch);
+
+ pixfmt->field = common->fmt.fmt.pix.field;
+ pixfmt->colorspace = common->fmt.fmt.pix.colorspace;
+ pixfmt->bytesperline = common->fmt.fmt.pix.width;
+ pixfmt->width = common->fmt.fmt.pix.width;
+ pixfmt->height = common->fmt.fmt.pix.height;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2;
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10) {
+ pixfmt->bytesperline = common->fmt.fmt.pix.width * 2;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+ }
+ pixfmt->priv = 0;
+
+ dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d pixelformat=0x%08x, field=%d, size=%d\n", __func__,
+ pixfmt->width, pixfmt->height,
+ pixfmt->bytesperline, pixfmt->pixelformat,
+ pixfmt->field, pixfmt->sizeimage);
+
+ return 0;
+}
+
+
+/**
+ * vpif_g_fmt_vid_cap() - Get FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct v4l2_pix_format *pix_fmt = &fmt->fmt.pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mbus_fmt = &format.format;
+ int ret;
+
+ /* Check the validity of the buffer type */
+ if (common->fmt.type != fmt->type)
+ return -EINVAL;
+
+ /* By default, use currently set fmt */
+ *fmt = common->fmt;
+
+ /* If subdev has get_fmt, use that to override */
+ ret = v4l2_subdev_call(ch->sd, pad, get_fmt, NULL, &format);
+ if (!ret && mbus_fmt->code) {
+ v4l2_fill_pix_format(pix_fmt, mbus_fmt);
+ pix_fmt->bytesperline = pix_fmt->width;
+ if (mbus_fmt->code == MEDIA_BUS_FMT_SGRBG10_1X10) {
+ /* e.g. mt9v032 */
+ pix_fmt->pixelformat = V4L2_PIX_FMT_SGRBG10;
+ pix_fmt->bytesperline = pix_fmt->width * 2;
+ } else if (mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) {
+ /* e.g. tvp514x */
+ pix_fmt->pixelformat = V4L2_PIX_FMT_NV16;
+ pix_fmt->bytesperline = pix_fmt->width * 2;
+ } else {
+ dev_warn(vpif_dev, "%s: Unhandled media-bus format 0x%x\n",
+ __func__, mbus_fmt->code);
+ }
+ pix_fmt->sizeimage = pix_fmt->bytesperline * pix_fmt->height;
+ dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d, pixelformat=0x%08x, code=0x%x, field=%d, size=%d\n", __func__,
+ pix_fmt->width, pix_fmt->height,
+ pix_fmt->bytesperline, pix_fmt->pixelformat,
+ mbus_fmt->code, pix_fmt->field, pix_fmt->sizeimage);
+
+ common->fmt = *fmt;
+ vpif_update_std_info(ch);
+ }
+
+ return 0;
+}
+
+/**
+ * vpif_s_fmt_vid_cap() - Set FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ int ret;
+
+ vpif_dbg(2, debug, "%s\n", __func__);
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ ret = vpif_try_fmt_vid_cap(file, priv, fmt);
+ if (ret)
+ return ret;
+
+ /* store the format in the channel object */
+ common->fmt = *fmt;
+ return 0;
+}
+
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
+static int vpif_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ strlcpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpif_dev));
+ strlcpy(cap->card, config->card_name, sizeof(cap->card));
+
+ return 0;
+}
+
+/**
+ * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: input timings
+ */
+static int
+vpif_enum_dv_timings(struct file *file, void *priv,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ if (!config->chan_config[ch->channel_id].inputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ timings->pad = 0;
+
+ ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -EINVAL;
+
+ return ret;
+}
+
+/**
+ * vpif_query_dv_timings() - QUERY_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: input timings
+ */
+static int
+vpif_query_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ if (!config->chan_config[ch->channel_id].inputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -ENODATA;
+
+ return ret;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt;
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+ int ret;
+
+ if (!config->chan_config[ch->channel_id].inputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ ret = 0;
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
+ return -EINVAL;
+ }
+
+ vid_ch->dv_timings = *timings;
+
+ /* Configure video port timings */
+
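+ /*
+ * eav2sav is derived from the horizontal blanking width (less the
+ * EAV/SAV code bytes); sav2eav is the active line width.
+ */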
+ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+
+ vid_ch->stdid = 0;
+ return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_capture_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_capture_chan_config *chan_cfg;
+ struct v4l2_input input;
+
+ if (!config->chan_config[ch->channel_id].inputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ input = chan_cfg->inputs[ch->input_idx].input;
+ if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ *timings = vid_ch->dv_timings;
+
+ return 0;
+}
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *file, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
+/* vpif capture ioctl operations */
+static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
+ .vidioc_querycap = vpif_querycap,
+ .vidioc_enum_fmt_vid_cap = vpif_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vpif_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vpif_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vpif_try_fmt_vid_cap,
+
+ .vidioc_enum_input = vpif_enum_input,
+ .vidioc_s_input = vpif_s_input,
+ .vidioc_g_input = vpif_g_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_querystd = vpif_querystd,
+ .vidioc_s_std = vpif_s_std,
+ .vidioc_g_std = vpif_g_std,
+
+ .vidioc_enum_dv_timings = vpif_enum_dv_timings,
+ .vidioc_query_dv_timings = vpif_query_dv_timings,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+
+ .vidioc_log_status = vpif_log_status,
+};
+
+/* vpif file operations */
+static const struct v4l2_file_operations vpif_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll
+};
+
+/**
+ * initialize_vpif() - Initialize vpif data structures
+ *
+ * Allocate memory for data structures and initialize them
+ */
+static int initialize_vpif(void)
+{
+ int err, i, j;
+ int free_channel_objects_index;
+
+ /* Allocate memory for the channel objects */
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ vpif_obj.dev[i] =
+ kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL);
+ /* If memory allocation fails, return error */
+ if (!vpif_obj.dev[i]) {
+ free_channel_objects_index = i;
+ err = -ENOMEM;
+ goto vpif_init_free_channel_objects;
+ }
+ }
+ return 0;
+
+vpif_init_free_channel_objects:
+ for (j = 0; j < free_channel_objects_index; j++)
+ kfree(vpif_obj.dev[j]);
+ return err;
+}
+
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int i;
+
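+ /* Try the DT fwnode matches first, then fall back to name matching */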
+ for (i = 0; i < vpif_obj.config->asd_sizes[0]; i++) {
+ struct v4l2_async_subdev *_asd = vpif_obj.config->asd[i];
+ const struct fwnode_handle *fwnode = _asd->match.fwnode;
+
+ if (fwnode == subdev->fwnode) {
+ vpif_obj.sd[i] = subdev;
+ vpif_obj.config->chan_config->inputs[i].subdev_name =
+ (char *)to_of_node(subdev->fwnode)->full_name;
+ vpif_dbg(2, debug,
+ "%s: setting input %d subdev_name = %s\n",
+ __func__, i,
+ vpif_obj.config->chan_config->inputs[i].subdev_name);
+ return 0;
+ }
+ }
+
+ for (i = 0; i < vpif_obj.config->subdev_count; i++)
+ if (!strcmp(vpif_obj.config->subdev_info[i].name,
+ subdev->name)) {
+ vpif_obj.sd[i] = subdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+ struct common_obj *common;
+ struct video_device *vdev;
+ struct channel_obj *ch;
+ struct vb2_queue *q;
+ int j, err, k;
+
+ for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ ch->channel_id = j;
+ common = &(ch->common[VPIF_VIDEO_INDEX]);
+ spin_lock_init(&common->irqlock);
+ mutex_init(&common->lock);
+
+ /* select input 0 */
+ err = vpif_set_input(vpif_obj.config, ch, 0);
+ if (err)
+ goto probe_out;
+
+ /* set initial format */
+ ch->video.stdid = V4L2_STD_525_60;
+ memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+ common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpif_update_std_info(ch);
+
+ /* Initialize vb2 queue */
+ q = &common->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = ch;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpif_cap_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &common->lock;
+ q->dev = vpif_dev;
+
+ err = vb2_queue_init(q);
+ if (err) {
+ vpif_err("vpif_capture: vb2_queue_init() failed\n");
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&common->dma_queue);
+
+ /* Initialize the video_device structure */
+ vdev = &ch->video_dev;
+ strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &vpif_fops;
+ vdev->ioctl_ops = &vpif_ioctl_ops;
+ vdev->v4l2_dev = &vpif_obj.v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &common->lock;
+ video_set_drvdata(&ch->video_dev, ch);
+ err = video_register_device(vdev,
+ VFL_TYPE_GRABBER, (j ? 1 : 0));
+ if (err)
+ goto probe_out;
+ }
+
+ v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
+ return 0;
+
+probe_out:
+ for (k = 0; k < j; k++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[k];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ /* Unregister video device */
+ video_unregister_device(&ch->video_dev);
+ }
+ kfree(vpif_obj.sd);
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+ return vpif_probe_complete();
+}
+
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
+static struct vpif_capture_config *
+vpif_capture_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *endpoint = NULL;
+ struct v4l2_fwnode_endpoint bus_cfg;
+ struct vpif_capture_config *pdata;
+ struct vpif_subdev_info *sdinfo;
+ struct vpif_capture_chan_config *chan;
+ unsigned int i;
+
+ /*
+ * DT boot: OF node from parent device contains
+ * video ports & endpoints data.
+ */
+ if (pdev->dev.parent && pdev->dev.parent->of_node)
+ pdev->dev.of_node = pdev->dev.parent->of_node;
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return pdev->dev.platform_data;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+ pdata->subdev_info =
+ devm_kcalloc(&pdev->dev,
+ VPIF_CAPTURE_NUM_CHANNELS,
+ sizeof(*pdata->subdev_info),
+ GFP_KERNEL);
+
+ if (!pdata->subdev_info)
+ return NULL;
+
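+ /* Walk the port endpoints of the VPIF node, one per capture channel */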
+ for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) {
+ struct device_node *rem;
+ unsigned int flags;
+ int err;
+
+ endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
+ endpoint);
+ if (!endpoint)
+ break;
+
+ sdinfo = &pdata->subdev_info[i];
+ chan = &pdata->chan_config[i];
+ chan->inputs = devm_kcalloc(&pdev->dev,
+ VPIF_CAPTURE_NUM_CHANNELS,
+ sizeof(*chan->inputs),
+ GFP_KERNEL);
+ if (!chan->inputs)
+ return NULL;
+
+ chan->input_count++;
+ chan->inputs[i].input.type = V4L2_INPUT_TYPE_CAMERA;
+ chan->inputs[i].input.std = V4L2_STD_ALL;
+ chan->inputs[i].input.capabilities = V4L2_IN_CAP_STD;
+
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &bus_cfg);
+ if (err) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ goto done;
+ }
+ dev_dbg(&pdev->dev, "Endpoint %pOF, bus_width = %d\n",
+ endpoint, bus_cfg.bus.parallel.bus_width);
+ flags = bus_cfg.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ chan->vpif_if.hd_pol = 1;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ chan->vpif_if.vd_pol = 1;
+
+ rem = of_graph_get_remote_port_parent(endpoint);
+ if (!rem) {
+ dev_dbg(&pdev->dev, "Remote device at %pOF not found\n",
+ endpoint);
+ goto done;
+ }
+
+ dev_dbg(&pdev->dev, "Remote device %s, %pOF found\n",
+ rem->name, rem);
+ sdinfo->name = rem->full_name;
+
+ pdata->asd[i] = devm_kzalloc(&pdev->dev,
+ sizeof(struct v4l2_async_subdev),
+ GFP_KERNEL);
+ if (!pdata->asd[i]) {
+ of_node_put(rem);
+ pdata = NULL;
+ goto done;
+ }
+
+ pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ pdata->asd[i]->match.fwnode = of_fwnode_handle(rem);
+ of_node_put(rem);
+ }
+
+done:
+ if (pdata) {
+ pdata->asd_sizes[0] = i;
+ pdata->subdev_count = i;
+ pdata->card_name = "DA850/OMAP-L138 Video Capture";
+ }
+
+ return pdata;
+}
+
+/**
+ * vpif_probe : This function probes the vpif capture driver
+ * @pdev: platform device pointer
+ *
+ * This creates device entries by registering itself to the V4L2 driver and
+ * initializes fields of each channel object
+ */
+static __init int vpif_probe(struct platform_device *pdev)
+{
+ struct vpif_subdev_info *subdevdata;
+ struct i2c_adapter *i2c_adap;
+ struct resource *res;
+ int subdev_count;
+ int res_idx = 0;
+ int i, err;
+
+ pdev->dev.platform_data = vpif_capture_get_pdata(pdev);
+ if (!pdev->dev.platform_data) {
+ dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
+ return -EINVAL;
+ }
+
+ vpif_dev = &pdev->dev;
+
+ err = initialize_vpif();
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
+ return err;
+ }
+
+ err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+ return err;
+ }
+
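+ /* Request one shared IRQ per channel, passing its channel_id as dev_id */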
+ while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
+ err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr,
+ IRQF_SHARED, VPIF_DRIVER_NAME,
+ (void *)(&vpif_obj.dev[res_idx]->
+ channel_id));
+ if (err) {
+ err = -EINVAL;
+ goto vpif_unregister;
+ }
+ res_idx++;
+ }
+
+ vpif_obj.config = pdev->dev.platform_data;
+
+ subdev_count = vpif_obj.config->subdev_count;
+ vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+ if (!vpif_obj.sd) {
+ err = -ENOMEM;
+ goto vpif_unregister;
+ }
+
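+ /* Legacy boards register I2C subdevs directly, DT uses the async notifier */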
+ if (!vpif_obj.config->asd_sizes[0]) {
+ int i2c_id = vpif_obj.config->i2c_adapter_id;
+
+ i2c_adap = i2c_get_adapter(i2c_id);
+ WARN_ON(!i2c_adap);
+ for (i = 0; i < subdev_count; i++) {
+ subdevdata = &vpif_obj.config->subdev_info[i];
+ vpif_obj.sd[i] =
+ v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+ i2c_adap,
+ &subdevdata->
+ board_info,
+ NULL);
+
+ if (!vpif_obj.sd[i]) {
+ vpif_err("Error registering v4l2 subdevice\n");
+ err = -ENODEV;
+ goto probe_subdev_out;
+ }
+ v4l2_info(&vpif_obj.v4l2_dev,
+ "registered sub device %s\n",
+ subdevdata->name);
+ }
+ vpif_probe_complete();
+ } else {
+ vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+ vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+ vpif_obj.notifier.ops = &vpif_async_ops;
+ err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
+ if (err) {
+ vpif_err("Error registering async notifier\n");
+ err = -EINVAL;
+ goto probe_subdev_out;
+ }
+ }
+
+ return 0;
+
+probe_subdev_out:
+ /* free sub devices memory */
+ kfree(vpif_obj.sd);
+vpif_unregister:
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ return err;
+}
+
+/**
+ * vpif_remove() - driver remove handler
+ * @device: ptr to platform device structure
+ *
+ * The video device is unregistered
+ */
+static int vpif_remove(struct platform_device *device)
+{
+ struct channel_obj *ch;
+ int i;
+
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ kfree(vpif_obj.sd);
+ /* un-register device */
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ /* Unregister video device */
+ video_unregister_device(&ch->video_dev);
+ kfree(vpif_obj.dev[i]);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * vpif_suspend: vpif device suspend
+ * @dev: pointer to &struct device
+ */
+static int vpif_suspend(struct device *dev)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_start_streaming_called(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Disable channel */
+ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+ enable_channel0(0);
+ channel0_intr_enable(0);
+ }
+ if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel1(0);
+ channel1_intr_enable(0);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+/*
+ * vpif_resume: vpif device resume
+ */
+static int vpif_resume(struct device *dev)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_start_streaming_called(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Enable channel */
+ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+ enable_channel0(1);
+ channel0_intr_enable(1);
+ }
+ if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel1(1);
+ channel1_intr_enable(1);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
+
+static __refdata struct platform_driver vpif_driver = {
+ .driver = {
+ .name = VPIF_DRIVER_NAME,
+ .pm = &vpif_pm_ops,
+ },
+ .probe = vpif_probe,
+ .remove = vpif_remove,
+};
+
+module_platform_driver(vpif_driver);
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
new file mode 100644
index 000000000..cf494a596
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef VPIF_CAPTURE_H
+#define VPIF_CAPTURE_H
+
+/* Header files */
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include "vpif.h"
+
+/* Macros */
+#define VPIF_CAPTURE_VERSION "0.0.2"
+
+#define VPIF_VALID_FIELD(field) (((V4L2_FIELD_ANY == field) || \
+ (V4L2_FIELD_NONE == field)) || \
+ (((V4L2_FIELD_INTERLACED == field) || \
+ (V4L2_FIELD_SEQ_TB == field)) || \
+ (V4L2_FIELD_SEQ_BT == field)))
+
+#define VPIF_CAPTURE_MAX_DEVICES 2
+#define VPIF_VIDEO_INDEX 0
+#define VPIF_NUMBER_OF_OBJECTS 1
+
+/* Enumerated data type to give id to each device per channel */
+enum vpif_channel_id {
+ VPIF_CHANNEL0_VIDEO = 0,
+ VPIF_CHANNEL1_VIDEO,
+};
+
+struct video_obj {
+ enum v4l2_field buf_field;
+ /* Currently selected or default standard */
+ v4l2_std_id stdid;
+ struct v4l2_dv_timings dv_timings;
+};
+
+struct vpif_cap_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct common_obj {
+ /* Pointer pointing to current v4l2_buffer */
+ struct vpif_cap_buffer *cur_frm;
+ /* Pointer pointing to next v4l2_buffer */
+ struct vpif_cap_buffer *next_frm;
+ /* Used to store pixel format */
+ struct v4l2_format fmt;
+ /* Buffer queue used in video-buf */
+ struct vb2_queue buffer_queue;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ /* Protects the dma_queue field */
+ spinlock_t irqlock;
+ /* lock used to access this structure */
+ struct mutex lock;
+ /* Function pointer to set the addresses */
+ void (*set_addr) (unsigned long, unsigned long, unsigned long,
+ unsigned long);
+ /* offset where Y top starts from the starting of the buffer */
+ u32 ytop_off;
+ /* offset where Y bottom starts from the starting of the buffer */
+ u32 ybtm_off;
+ /* offset where C top starts from the starting of the buffer */
+ u32 ctop_off;
+ /* offset where C bottom starts from the starting of the buffer */
+ u32 cbtm_off;
+ /* Indicates width of the image data */
+ u32 width;
+ /* Indicates height of the image data */
+ u32 height;
+};
+
+struct channel_obj {
+ /* Identifies video device for this channel */
+ struct video_device video_dev;
+ /* Indicates id of the field which is being captured */
+ u32 field_id;
+ /* flag to indicate whether decoder is initialized */
+ u8 initialized;
+ /* Identifies channel */
+ enum vpif_channel_id channel_id;
+ /* Current input */
+ u32 input_idx;
+ /* subdev corresponding to the current input, may be NULL */
+ struct v4l2_subdev *sd;
+ /* vpif configuration params */
+ struct vpif_params vpifparams;
+ /* common object array */
+ struct common_obj common[VPIF_NUMBER_OF_OBJECTS];
+ /* video object */
+ struct video_obj video;
+};
+
+struct vpif_device {
+ struct v4l2_device v4l2_dev;
+ struct channel_obj *dev[VPIF_CAPTURE_NUM_CHANNELS];
+ struct v4l2_subdev **sd;
+ struct v4l2_async_notifier notifier;
+ struct vpif_capture_config *config;
+};
+
+#endif /* VPIF_CAPTURE_H */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
new file mode 100644
index 000000000..0f324055c
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -0,0 +1,1449 @@
+/*
+ * vpif-display - VPIF display driver
+ * Display driver for TI DaVinci VPIF
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "vpif.h"
+#include "vpif_display.h"
+
+MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_DISPLAY_VERSION);
+
+#define VPIF_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
+
+#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
+#define vpif_dbg(level, debug, fmt, arg...) \
+ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)
+
+static int debug = 1;
+
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+#define VPIF_DRIVER_NAME "vpif_display"
+MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
+
+/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
+static int ycmux_mode;
+
+static u8 channel_first_int[VPIF_NUMOBJECTS][2] = { {1, 1} };
+
+static struct vpif_device vpif_obj = { {NULL} };
+static struct device *vpif_dev;
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
+
+static inline
+struct vpif_disp_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct vpif_disp_buffer, vb);
+}
+
+/**
+ * vpif_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the callback function for buffer prepare, called when vb2_qbuf()
+ * is invoked. The buffer is prepared and its DMA address offsets are
+ * checked for the required alignment.
+ */
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
+ struct common_obj *common;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vbuf->field = common->fmt.fmt.pix.field;
+
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (!ISALIGNED(addr + common->ytop_off) ||
+ !ISALIGNED(addr + common->ybtm_off) ||
+ !ISALIGNED(addr + common->ctop_off) ||
+ !ISALIGNED(addr + common->cbtm_off)) {
+ vpif_err("buffer offset not aligned to 8 bytes\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vpif_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains number of distinct video planes needed to hold a frame
+ * @sizes: contains the size (in bytes) of each plane.
+ * @alloc_devs: ptr to allocation context
+ *
+ * This callback function is called when reqbuf() is called to adjust
+ * the buffer count and buffer size
+ */
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ unsigned size = common->fmt.fmt.pix.sizeimage;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ /* Calculate the offset for Y and C data in the buffer */
+ vpif_calculate_offsets(ch);
+
+ return 0;
+}
+
+/**
+ * vpif_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ *
+ * This callback function queues the buffer to the DMA engine
+ */
+static void vpif_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpif_disp_buffer *buf = to_vpif_buffer(vbuf);
+ struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
+ struct common_obj *common;
+ unsigned long flags;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&common->irqlock, flags);
+ list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+/**
+ * vpif_start_streaming : Starts the DMA engine for streaming
+ * @vq: ptr to vb2_queue
+ * @count: number of buffers
+ */
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpif_display_config *vpif_config_data =
+ vpif_dev->platform_data;
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpif = &ch->vpifparams;
+ struct vpif_disp_buffer *buf, *tmp;
+ unsigned long addr, flags;
+ int ret;
+
+ spin_lock_irqsave(&common->irqlock, flags);
+
+ /* Initialize field_id */
+ ch->field_id = 0;
+
+ /* clock settings */
+ if (vpif_config_data->set_clock) {
+ ret = vpif_config_data->set_clock(ch->vpifparams.std_info.
+ ycmux_mode, ch->vpifparams.std_info.hd_sd);
+ if (ret < 0) {
+ vpif_err("can't set clock\n");
+ goto err;
+ }
+ }
+
+ /* set the parameters and addresses */
+ ret = vpif_set_video_params(vpif, ch->channel_id + 2);
+ if (ret < 0)
+ goto err;
+
+ ycmux_mode = ret;
+ vpif_config_addr(ch, ret);
+ /* Get the next frame from the buffer queue */
+ common->next_frm = common->cur_frm =
+ list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+
+ list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
+ common->set_addr((addr + common->ytop_off),
+ (addr + common->ybtm_off),
+ (addr + common->ctop_off),
+ (addr + common->cbtm_off));
+
+ /*
+ * Set interrupt for both the fields in VPIF
+ * Register enable channel in VPIF register
+ */
+ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+ if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+ channel2_intr_assert();
+ channel2_intr_enable(1);
+ enable_channel2(1);
+ if (vpif_config_data->chan_config[VPIF_CHANNEL2_VIDEO].clip_en)
+ channel2_clipping_enable(1);
+ }
+
+ if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) {
+ channel3_intr_assert();
+ channel3_intr_enable(1);
+ enable_channel3(1);
+ if (vpif_config_data->chan_config[VPIF_CHANNEL3_VIDEO].clip_en)
+ channel3_clipping_enable(1);
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+
+ return ret;
+}
+
+/**
+ * vpif_stop_streaming : Stop the DMA engine
+ * @vq: ptr to vb2_queue
+ *
+ * This callback stops the DMA engine and releases any remaining
+ * buffers in the DMA queue.
+ */
+static void vpif_stop_streaming(struct vb2_queue *vq)
+{
+ struct channel_obj *ch = vb2_get_drv_priv(vq);
+ struct common_obj *common;
+ unsigned long flags;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* Disable channel */
+ if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+ enable_channel2(0);
+ channel2_intr_enable(0);
+ }
+ if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) {
+ enable_channel3(0);
+ channel3_intr_enable(0);
+ }
+
+ /* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
+ if (common->cur_frm == common->next_frm) {
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ } else {
+ if (common->cur_frm)
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ if (common->next_frm)
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&common->dma_queue)) {
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+ list_del(&common->next_frm->list);
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+static const struct vb2_ops video_qops = {
+ .queue_setup = vpif_buffer_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = vpif_buffer_prepare,
+ .start_streaming = vpif_start_streaming,
+ .stop_streaming = vpif_stop_streaming,
+ .buf_queue = vpif_buffer_queue,
+};
+
+static void process_progressive_mode(struct common_obj *common)
+{
+ unsigned long addr;
+
+ spin_lock(&common->irqlock);
+ /* Get the next buffer from buffer queue */
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+ /* Remove that buffer from the buffer queue */
+ list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
+
+ /* Set top and bottom field addrs in VPIF registers */
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
+ common->set_addr(addr + common->ytop_off,
+ addr + common->ybtm_off,
+ addr + common->ctop_off,
+ addr + common->cbtm_off);
+}
+
+static void process_interlaced_mode(int fid, struct common_obj *common)
+{
+ /* device field id and local field id are in sync */
+ /* If this is even field */
+ if (0 == fid) {
+ if (common->cur_frm == common->next_frm)
+ return;
+
+ /* One frame is displayed. If the next frame is
+ * available, release cur_frm and move on */
+ /* Copy frame display time */
+ common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+ /* Change status of the cur_frm */
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_DONE);
+ /* Make cur_frm pointing to next_frm */
+ common->cur_frm = common->next_frm;
+
+ } else if (1 == fid) { /* odd field */
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)
+ || (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
+ return;
+ }
+ spin_unlock(&common->irqlock);
+ /* One field is displayed. Configure the next
+ * frame if it is available, else hold on to the
+ * current frame */
+ /* Get next from the buffer queue */
+ process_progressive_mode(common);
+ }
+}
+
+/*
+ * vpif_channel_isr: It changes status of the displayed buffer, takes next
+ * buffer from the queue and sets its address in VPIF registers
+ */
+static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
+{
+ struct vpif_device *dev = &vpif_obj;
+ struct channel_obj *ch;
+ struct common_obj *common;
+ int fid = -1, i;
+ int channel_id;
+
+ channel_id = *(int *)(dev_id);
+ if (!vpif_intr_status(channel_id + 2))
+ return IRQ_NONE;
+
+ ch = dev->dev[channel_id];
+ for (i = 0; i < VPIF_NUMOBJECTS; i++) {
+ common = &ch->common[i];
+ /* If streaming is started in this channel */
+
+ if (1 == ch->vpifparams.std_info.frm_fmt) {
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
+ continue;
+ }
+ spin_unlock(&common->irqlock);
+
+ /* Progressive mode */
+ if (!channel_first_int[i][channel_id]) {
+ /* Mark the status of cur_frm as
+ * done and hand it back to vb2 */
+ common->cur_frm->vb.vb2_buf.timestamp =
+ ktime_get_ns();
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_DONE);
+ /* Make cur_frm pointing to next_frm */
+ common->cur_frm = common->next_frm;
+ }
+
+ channel_first_int[i][channel_id] = 0;
+ process_progressive_mode(common);
+ } else {
+ /* Interlaced mode */
+ /* If it is first interrupt, ignore it */
+
+ if (channel_first_int[i][channel_id]) {
+ channel_first_int[i][channel_id] = 0;
+ continue;
+ }
+
+ if (0 == i) {
+ ch->field_id ^= 1;
+ /* Get field id from VPIF registers */
+ fid = vpif_channel_getfid(ch->channel_id + 2);
+ /* If fid does not match with stored field id */
+ if (fid != ch->field_id) {
+ /* Make them in sync */
+ if (0 == fid)
+ ch->field_id = fid;
+
+ return IRQ_HANDLED;
+ }
+ }
+ process_interlaced_mode(fid, common);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int vpif_update_std_info(struct channel_obj *ch)
+{
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ const struct vpif_channel_config_params *config;
+
+ int i;
+
+ for (i = 0; i < vpif_ch_params_count; i++) {
+ config = &vpif_ch_params[i];
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ }
+ }
+
+ if (i == vpif_ch_params_count) {
+ vpif_dbg(1, debug, "Format not found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpif_update_resolution(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+
+ if (!vid_ch->stdid && !vid_ch->dv_timings.bt.height)
+ return -EINVAL;
+
+ if (vid_ch->stdid) {
+ if (vpif_update_std_info(ch))
+ return -EINVAL;
+ }
+
+ common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+ common->fmt.fmt.pix.width = std_info->width;
+ common->fmt.fmt.pix.height = std_info->height;
+ vpif_dbg(1, debug, "Pixel details: Width = %d,Height = %d\n",
+ common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
+
+ /* Set height and width parameters */
+ common->height = std_info->height;
+ common->width = std_info->width;
+ common->fmt.fmt.pix.sizeimage = common->height * common->width * 2;
+
+ if (vid_ch->stdid)
+ common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ if (ch->vpifparams.std_info.frm_fmt)
+ common->fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ else
+ common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ return 0;
+}
+
+/*
+ * vpif_calculate_offsets: This function calculates buffers offset for Y and C
+ * in the top and bottom field
+ */
+static void vpif_calculate_offsets(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ enum v4l2_field field = common->fmt.fmt.pix.field;
+ struct video_obj *vid_ch = &ch->video;
+ unsigned int hpitch, sizeimage;
+
+ if (V4L2_FIELD_ANY == common->fmt.fmt.pix.field) {
+ if (ch->vpifparams.std_info.frm_fmt)
+ vid_ch->buf_field = V4L2_FIELD_NONE;
+ else
+ vid_ch->buf_field = V4L2_FIELD_INTERLACED;
+ } else {
+ vid_ch->buf_field = common->fmt.fmt.pix.field;
+ }
+
+ sizeimage = common->fmt.fmt.pix.sizeimage;
+
+ hpitch = common->fmt.fmt.pix.bytesperline;
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+ common->ytop_off = 0;
+ common->ybtm_off = hpitch;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = sizeimage / 2 + hpitch;
+ } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
+ common->ytop_off = 0;
+ common->ybtm_off = sizeimage / 4;
+ common->ctop_off = sizeimage / 2;
+ common->cbtm_off = common->ctop_off + sizeimage / 4;
+ } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
+ common->ybtm_off = 0;
+ common->ytop_off = sizeimage / 4;
+ common->cbtm_off = sizeimage / 2;
+ common->ctop_off = common->cbtm_off + sizeimage / 4;
+ }
+
+ if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+ (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+ vpifparams->video_params.storage_mode = 1;
+ } else {
+ vpifparams->video_params.storage_mode = 0;
+ }
+
+ if (ch->vpifparams.std_info.frm_fmt == 1) {
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ } else {
+ if ((field == V4L2_FIELD_ANY) ||
+ (field == V4L2_FIELD_INTERLACED))
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline * 2;
+ else
+ vpifparams->video_params.hpitch =
+ common->fmt.fmt.pix.bytesperline;
+ }
+
+ ch->vpifparams.video_params.stdid = ch->vpifparams.std_info.stdid;
+}
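+/*
+ * Worked example (illustrative, not from the original source): for an
+ * interlaced SD frame of 720x576 YUV422P with bytesperline = 720 and
+ * sizeimage = 720 * 576 * 2 = 829440, the offsets computed above are
+ * ytop_off = 0, ybtm_off = 720, ctop_off = 414720 and
+ * cbtm_off = 414720 + 720 = 415440, i.e. the two fields of each plane
+ * are line-interleaved within the same buffer.
+ */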
+
+static void vpif_config_addr(struct channel_obj *ch, int muxmode)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (VPIF_CHANNEL3_VIDEO == ch->channel_id) {
+ common->set_addr = ch3_set_videobuf_addr;
+ } else {
+ if (2 == muxmode)
+ common->set_addr = ch2_set_videobuf_addr_yc_nmux;
+ else
+ common->set_addr = ch2_set_videobuf_addr;
+ }
+}
+
+/* functions implementing ioctls */
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
+static int vpif_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ strlcpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpif_dev));
+ strlcpy(cap->card, config->card_name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vpif_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index != 0)
+ return -EINVAL;
+
+ /* Fill in the information about format */
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ strcpy(fmt->description, "YCbCr4:2:2 YC Planar");
+ fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+ fmt->flags = 0;
+ return 0;
+}
+
+static int vpif_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* Check the validity of the buffer type */
+ if (common->fmt.type != fmt->type)
+ return -EINVAL;
+
+ if (vpif_update_resolution(ch))
+ return -EINVAL;
+ *fmt = common->fmt;
+ return 0;
+}
+
+static int vpif_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ /*
+ * To suppress v4l2-compliance warnings, silently correct
+ * the pixelformat
+ */
+ if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P)
+ pixfmt->pixelformat = common->fmt.fmt.pix.pixelformat;
+
+ if (vpif_update_resolution(ch))
+ return -EINVAL;
+
+ pixfmt->colorspace = common->fmt.fmt.pix.colorspace;
+ pixfmt->field = common->fmt.fmt.pix.field;
+ pixfmt->bytesperline = common->fmt.fmt.pix.width;
+ pixfmt->width = common->fmt.fmt.pix.width;
+ pixfmt->height = common->fmt.fmt.pix.height;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2;
+
+ return 0;
+}
+
+static int vpif_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ int ret;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ ret = vpif_try_fmt_vid_out(file, priv, fmt);
+ if (ret)
+ return ret;
+
+ /* store the pix format in the channel object */
+ common->fmt.fmt.pix = *pixfmt;
+
+ /* store the format in the channel object */
+ common->fmt = *fmt;
+ return 0;
+}
+
+static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+ int ret;
+
+ if (!config->chan_config[ch->channel_id].outputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_STD)
+ return -ENODATA;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+
+ if (!(std_id & VPIF_V4L2_STD))
+ return -EINVAL;
+
+ /* Call encoder subdevice function to set the standard */
+ ch->video.stdid = std_id;
+ memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+ /* Get the information about the standard */
+ if (vpif_update_resolution(ch))
+ return -EINVAL;
+
+ common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
+
+ ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
+ s_std_output, std_id);
+ if (ret < 0) {
+ vpif_err("Failed to set output standard\n");
+ return ret;
+ }
+
+ ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
+ s_std, std_id);
+ if (ret < 0)
+ vpif_err("Failed to set standard for sub devices\n");
+ return ret;
+}
+
+static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+
+ if (!config->chan_config[ch->channel_id].outputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_STD)
+ return -ENODATA;
+
+ *std = ch->video.stdid;
+ return 0;
+}
+
+static int vpif_enum_output(struct file *file, void *fh,
+ struct v4l2_output *output)
+{
+
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ if (output->index >= chan_cfg->output_count) {
+ vpif_dbg(1, debug, "Invalid output index\n");
+ return -EINVAL;
+ }
+
+ *output = chan_cfg->outputs[output->index].output;
+ return 0;
+}
+
+/**
+ * vpif_output_to_subdev() - Maps output to sub device
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @index: Given output index from application
+ *
+ * Looks up the sub device information for a given output index.
+ * We report all the outputs to the application; the output table
+ * also has the sub device name for each output.
+ */
+static int
+vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
+ struct vpif_display_chan_config *chan_cfg, int index)
+{
+ struct vpif_subdev_info *subdev_info;
+ const char *subdev_name;
+ int i;
+
+ vpif_dbg(2, debug, "vpif_output_to_subdev\n");
+
+ if (!chan_cfg->outputs)
+ return -1;
+
+ subdev_name = chan_cfg->outputs[index].subdev_name;
+ if (!subdev_name)
+ return -1;
+
+ /* loop through the sub device list to get the sub device info */
+ for (i = 0; i < vpif_cfg->subdev_count; i++) {
+ subdev_info = &vpif_cfg->subdevinfo[i];
+ if (!strcmp(subdev_info->name, subdev_name))
+ return i;
+ }
+ return -1;
+}
+
+/**
+ * vpif_set_output() - Select an output
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given output index from application
+ *
+ * Select the given output.
+ */
+static int vpif_set_output(struct vpif_display_config *vpif_cfg,
+ struct channel_obj *ch, int index)
+{
+ struct vpif_display_chan_config *chan_cfg =
+ &vpif_cfg->chan_config[ch->channel_id];
+ struct v4l2_subdev *sd = NULL;
+ u32 input = 0, output = 0;
+ int sd_index;
+ int ret;
+
+ sd_index = vpif_output_to_subdev(vpif_cfg, chan_cfg, index);
+ if (sd_index >= 0)
+ sd = vpif_obj.sd[sd_index];
+
+ if (sd) {
+ input = chan_cfg->outputs[index].input_route;
+ output = chan_cfg->outputs[index].output_route;
+ ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ vpif_err("Failed to set output\n");
+ return ret;
+ }
+
+ }
+ ch->output_idx = index;
+ ch->sd = sd;
+ if (chan_cfg->outputs)
+ /* update tvnorms from the sub device output info */
+ ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std;
+ return 0;
+}
+
+static int vpif_s_output(struct file *file, void *priv, unsigned int i)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+
+ if (i >= chan_cfg->output_count)
+ return -EINVAL;
+
+ return vpif_set_output(config, ch, i);
+}
+
+static int vpif_g_output(struct file *file, void *priv, unsigned int *i)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+
+ *i = ch->output_idx;
+
+ return 0;
+}
+
+/**
+ * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: DV timings to enumerate
+ */
+static int
+vpif_enum_dv_timings(struct file *file, void *priv,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+ int ret;
+
+ if (!config->chan_config[ch->channel_id].outputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ timings->pad = 0;
+
+ ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ return -EINVAL;
+ return ret;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt;
+ struct vpif_display_chan_config *chan_cfg;
+ struct v4l2_output output;
+ int ret;
+
+ if (!config->chan_config[ch->channel_id].outputs)
+ return -ENODATA;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+ if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS)
+ return -ENODATA;
+
+ if (vb2_is_busy(&common->buffer_queue))
+ return -EBUSY;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
+ ret = 0;
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical front porch must be defined\n");
+ return -EINVAL;
+ }
+
+ vid_ch->dv_timings = *timings;
+
+ /* Configure video port timings */
+
+ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120",
+ VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+ vid_ch->stdid = 0;
+
+ return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_display_config *config = vpif_dev->platform_data;
+ struct video_device *vdev = video_devdata(file);
+ struct channel_obj *ch = video_get_drvdata(vdev);
+ struct vpif_display_chan_config *chan_cfg;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_output output;
+
+ if (!config->chan_config[ch->channel_id].outputs)
+ goto error;
+
+ chan_cfg = &config->chan_config[ch->channel_id];
+ output = chan_cfg->outputs[ch->output_idx].output;
+
+ if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS)
+ goto error;
+
+ *timings = vid_ch->dv_timings;
+
+ return 0;
+error:
+ return -ENODATA;
+}
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
+/* vpif display ioctl operations */
+static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
+ .vidioc_querycap = vpif_querycap,
+ .vidioc_enum_fmt_vid_out = vpif_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vpif_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vpif_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vpif_try_fmt_vid_out,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_s_std = vpif_s_std,
+ .vidioc_g_std = vpif_g_std,
+
+ .vidioc_enum_output = vpif_enum_output,
+ .vidioc_s_output = vpif_s_output,
+ .vidioc_g_output = vpif_g_output,
+
+ .vidioc_enum_dv_timings = vpif_enum_dv_timings,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+
+ .vidioc_log_status = vpif_log_status,
+};
+
+static const struct v4l2_file_operations vpif_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll
+};
+
+/* Configure the channels, buffer size, request irq */
+static int initialize_vpif(void)
+{
+ int free_channel_objects_index;
+ int err, i, j;
+
+ /* Allocate memory for the channel objects */
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ vpif_obj.dev[i] =
+ kzalloc(sizeof(struct channel_obj), GFP_KERNEL);
+ /* If memory allocation fails, return error */
+ if (!vpif_obj.dev[i]) {
+ free_channel_objects_index = i;
+ err = -ENOMEM;
+ goto vpif_init_free_channel_objects;
+ }
+ }
+
+ return 0;
+
+vpif_init_free_channel_objects:
+ for (j = 0; j < free_channel_objects_index; j++)
+ kfree(vpif_obj.dev[j]);
+ return err;
+}
+
+static void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int i;
+
+ for (i = 0; i < vpif_obj.config->subdev_count; i++)
+ if (!strcmp(vpif_obj.config->subdevinfo[i].name,
+ subdev->name)) {
+ vpif_obj.sd[i] = subdev;
+ vpif_obj.sd[i]->grp_id = 1 << i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+ struct common_obj *common;
+ struct video_device *vdev;
+ struct channel_obj *ch;
+ struct vb2_queue *q;
+ int j, err, k;
+
+ for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ /* Initialize field of the channel objects */
+ for (k = 0; k < VPIF_NUMOBJECTS; k++) {
+ common = &ch->common[k];
+ spin_lock_init(&common->irqlock);
+ mutex_init(&common->lock);
+ common->set_addr = NULL;
+ common->ytop_off = 0;
+ common->ybtm_off = 0;
+ common->ctop_off = 0;
+ common->cbtm_off = 0;
+ common->cur_frm = NULL;
+ common->next_frm = NULL;
+ memset(&common->fmt, 0, sizeof(common->fmt));
+ }
+ ch->initialized = 0;
+ if (vpif_obj.config->subdev_count)
+ ch->sd = vpif_obj.sd[0];
+ ch->channel_id = j;
+
+ memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
+
+ ch->common[VPIF_VIDEO_INDEX].fmt.type =
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ /* select output 0 */
+ err = vpif_set_output(vpif_obj.config, ch, 0);
+ if (err)
+ goto probe_out;
+
+ /* set initial format */
+ ch->video.stdid = V4L2_STD_525_60;
+ memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+ vpif_update_resolution(ch);
+
+ /* Initialize vb2 queue */
+ q = &common->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = ch;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpif_disp_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &common->lock;
+ q->dev = vpif_dev;
+ err = vb2_queue_init(q);
+ if (err) {
+ vpif_err("vpif_display: vb2_queue_init() failed\n");
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&common->dma_queue);
+
+ /* register video device */
+ vpif_dbg(1, debug, "channel=%p,channel->video_dev=%p\n",
+ ch, &ch->video_dev);
+
+ /* Initialize the video_device structure */
+ vdev = &ch->video_dev;
+ strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &vpif_fops;
+ vdev->ioctl_ops = &vpif_ioctl_ops;
+ vdev->v4l2_dev = &vpif_obj.v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_TX;
+ vdev->queue = q;
+ vdev->lock = &common->lock;
+ video_set_drvdata(&ch->video_dev, ch);
+ err = video_register_device(vdev, VFL_TYPE_GRABBER,
+ (j ? 3 : 2));
+ if (err < 0)
+ goto probe_out;
+ }
+
+ return 0;
+
+probe_out:
+ for (k = 0; k < j; k++) {
+ ch = vpif_obj.dev[k];
+ common = &ch->common[k];
+ video_unregister_device(&ch->video_dev);
+ }
+ return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+ return vpif_probe_complete();
+}
+
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
+/*
+ * vpif_probe: This function creates device entries by registering itself
+ * with the V4L2 driver and initializes the fields of each channel object
+ */
+static __init int vpif_probe(struct platform_device *pdev)
+{
+ struct vpif_subdev_info *subdevdata;
+ struct i2c_adapter *i2c_adap;
+ struct resource *res;
+ int subdev_count;
+ int res_idx = 0;
+ int i, err;
+
+ if (!pdev->dev.platform_data) {
+ dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
+ return -EINVAL;
+ }
+
+ vpif_dev = &pdev->dev;
+ err = initialize_vpif();
+
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
+ return err;
+ }
+
+ err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+ goto vpif_free;
+ }
+
+ while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
+ err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr,
+ IRQF_SHARED, VPIF_DRIVER_NAME,
+ (void *)(&vpif_obj.dev[res_idx]->
+ channel_id));
+ if (err) {
+ err = -EINVAL;
+ vpif_err("VPIF IRQ request failed\n");
+ goto vpif_unregister;
+ }
+ res_idx++;
+ }
+
+ vpif_obj.config = pdev->dev.platform_data;
+ subdev_count = vpif_obj.config->subdev_count;
+ subdevdata = vpif_obj.config->subdevinfo;
+ vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+ if (!vpif_obj.sd) {
+ err = -ENOMEM;
+ goto vpif_unregister;
+ }
+
+ if (!vpif_obj.config->asd_sizes) {
+ i2c_adap = i2c_get_adapter(vpif_obj.config->i2c_adapter_id);
+ for (i = 0; i < subdev_count; i++) {
+ vpif_obj.sd[i] =
+ v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+ i2c_adap,
+ &subdevdata[i].
+ board_info,
+ NULL);
+ if (!vpif_obj.sd[i]) {
+ vpif_err("Error registering v4l2 subdevice\n");
+ err = -ENODEV;
+ goto probe_subdev_out;
+ }
+
+ if (vpif_obj.sd[i])
+ vpif_obj.sd[i]->grp_id = 1 << i;
+ }
+ err = vpif_probe_complete();
+ if (err) {
+ goto probe_subdev_out;
+ }
+ } else {
+ vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+ vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+ vpif_obj.notifier.ops = &vpif_async_ops;
+ err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
+ if (err) {
+ vpif_err("Error registering async notifier\n");
+ err = -EINVAL;
+ goto probe_subdev_out;
+ }
+ }
+
+ return 0;
+
+probe_subdev_out:
+ kfree(vpif_obj.sd);
+vpif_unregister:
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();
+
+ return err;
+}
+
+/*
+ * vpif_remove: It unregisters channels from the V4L2 driver
+ */
+static int vpif_remove(struct platform_device *device)
+{
+ struct channel_obj *ch;
+ int i;
+
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ kfree(vpif_obj.sd);
+ /* un-register device */
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ /* Unregister video device */
+ video_unregister_device(&ch->video_dev);
+ }
+ free_vpif_objs();
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vpif_suspend(struct device *dev)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_start_streaming_called(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Disable channel */
+ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+ enable_channel2(0);
+ channel2_intr_enable(0);
+ }
+ if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel3(0);
+ channel3_intr_enable(0);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ if (!vb2_start_streaming_called(&common->buffer_queue))
+ continue;
+
+ mutex_lock(&common->lock);
+ /* Enable channel */
+ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+ enable_channel2(1);
+ channel2_intr_enable(1);
+ }
+ if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+ ycmux_mode == 2) {
+ enable_channel3(1);
+ channel3_intr_enable(1);
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
+
+static __refdata struct platform_driver vpif_driver = {
+ .driver = {
+ .name = VPIF_DRIVER_NAME,
+ .pm = &vpif_pm_ops,
+ },
+ .probe = vpif_probe,
+ .remove = vpif_remove,
+};
+
+module_platform_driver(vpif_driver);
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
new file mode 100644
index 000000000..af2765fdc
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -0,0 +1,125 @@
+/*
+ * VPIF display header file
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef VPIF_DISPLAY_H
+#define VPIF_DISPLAY_H
+
+/* Header files */
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include "vpif.h"
+
+/* Macros */
+#define VPIF_DISPLAY_VERSION "0.0.2"
+
+#define VPIF_VALID_FIELD(field) \
+ (((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \
+ (((V4L2_FIELD_INTERLACED == field) || (V4L2_FIELD_SEQ_TB == field)) || \
+ (V4L2_FIELD_SEQ_BT == field)))
+
+#define VPIF_DISPLAY_MAX_DEVICES (2)
+#define VPIF_SLICED_BUF_SIZE (256)
+#define VPIF_SLICED_MAX_SERVICES (3)
+#define VPIF_VIDEO_INDEX (0)
+#define VPIF_VBI_INDEX (1)
+#define VPIF_HBI_INDEX (2)
+
+/* Set to 1 as HBI/VBI support is yet to be added; else it would be 3 */
+#define VPIF_NUMOBJECTS (1)
+
+/* Macros */
+#define ISALIGNED(a) (0 == ((a) & 7))
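+/*
+ * Illustrative note (not in the original source): VPIF DMA address offsets
+ * must be 8-byte aligned, so e.g. ISALIGNED(0x1000) is true while
+ * ISALIGNED(0x1003) is false; vpif_buffer_prepare() rejects buffers whose
+ * Y/C top/bottom offsets fail this check.
+ */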
+
+/* enumerated data types */
+/* Enumerated data type to give id to each device per channel */
+enum vpif_channel_id {
+ VPIF_CHANNEL2_VIDEO = 0, /* Channel2 Video */
+ VPIF_CHANNEL3_VIDEO, /* Channel3 Video */
+};
+
+/* structures */
+
+struct video_obj {
+ enum v4l2_field buf_field;
+ u32 latest_only; /* indicate whether to return
+ * most recent displayed frame only */
+ v4l2_std_id stdid; /* Currently selected or default
+ * standard */
+ struct v4l2_dv_timings dv_timings;
+};
+
+struct vpif_disp_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct common_obj {
+ struct vpif_disp_buffer *cur_frm; /* Pointer pointing to current
+ * vb2_buffer */
+ struct vpif_disp_buffer *next_frm; /* Pointer pointing to next
+ * vb2_buffer */
+ struct v4l2_format fmt; /* Used to store the format */
+ struct vb2_queue buffer_queue; /* Buffer queue used in
+ * video-buf */
+
+ struct list_head dma_queue; /* Queue of filled frames */
+ spinlock_t irqlock; /* Used in video-buf */
+
+ /* channel specific parameters */
+ struct mutex lock; /* lock used to access this
+ * structure */
+ u32 ytop_off; /* offset of Y top from the
+ * starting of the buffer */
+ u32 ybtm_off; /* offset of Y bottom from the
+ * starting of the buffer */
+ u32 ctop_off; /* offset of C top from the
+ * starting of the buffer */
+ u32 cbtm_off; /* offset of C bottom from the
+ * starting of the buffer */
+ /* Function pointer to set the addresses */
+ void (*set_addr)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+ u32 height;
+ u32 width;
+};
+
+struct channel_obj {
+ /* V4l2 specific parameters */
+ struct video_device video_dev; /* Identifies video device for
+ * this channel */
+ u32 field_id; /* Indicates id of the field
+ * which is being displayed */
+ u8 initialized; /* flag to indicate whether
+ * encoder is initialized */
+ u32 output_idx; /* Current output index */
+ struct v4l2_subdev *sd; /* Current output subdev(may be NULL) */
+
+ enum vpif_channel_id channel_id;/* Identifies channel */
+ struct vpif_params vpifparams;
+ struct common_obj common[VPIF_NUMOBJECTS];
+ struct video_obj video;
+};
+
+/* vpif device structure */
+struct vpif_device {
+ struct v4l2_device v4l2_dev;
+ struct channel_obj *dev[VPIF_DISPLAY_NUM_CHANNELS];
+ struct v4l2_subdev **sd;
+ struct v4l2_async_notifier notifier;
+ struct vpif_display_config *config;
+};
+
+#endif /* VPIF_DISPLAY_H */
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
new file mode 100644
index 000000000..50fc71d0c
--- /dev/null
+++ b/drivers/media/platform/davinci/vpss.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * common vpss system module platform driver for all video drivers.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+
+#include <media/davinci/vpss.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPSS Driver");
+MODULE_AUTHOR("Texas Instruments");
+
+/* DM644x defines */
+#define DM644X_SBL_PCR_VPSS (4)
+
+#define DM355_VPSSBL_INTSEL 0x10
+#define DM355_VPSSBL_EVTSEL 0x14
+/* vpss BL register offsets */
+#define DM355_VPSSBL_CCDCMUX 0x1c
+/* vpss CLK register offsets */
+#define DM355_VPSSCLK_CLKCTRL 0x04
+/* masks and shifts */
+#define VPSS_HSSISEL_SHIFT 4
+/*
+ * VDINT0 - vpss_int0, VDINT1 - vpss_int1, H3A - vpss_int4,
+ * IPIPE_INT1_SDR - vpss_int5
+ */
+#define DM355_VPSSBL_INTSEL_DEFAULT 0xff83ff10
+/* VENCINT - vpss_int8 */
+#define DM355_VPSSBL_EVTSEL_DEFAULT 0x4
+
+#define DM365_ISP5_PCCR 0x04
+#define DM365_ISP5_PCCR_BL_CLK_ENABLE BIT(0)
+#define DM365_ISP5_PCCR_ISIF_CLK_ENABLE BIT(1)
+#define DM365_ISP5_PCCR_H3A_CLK_ENABLE BIT(2)
+#define DM365_ISP5_PCCR_RSZ_CLK_ENABLE BIT(3)
+#define DM365_ISP5_PCCR_IPIPE_CLK_ENABLE BIT(4)
+#define DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE BIT(5)
+#define DM365_ISP5_PCCR_RSV BIT(6)
+
+#define DM365_ISP5_BCR 0x08
+#define DM365_ISP5_BCR_ISIF_OUT_ENABLE BIT(1)
+
+#define DM365_ISP5_INTSEL1 0x10
+#define DM365_ISP5_INTSEL2 0x14
+#define DM365_ISP5_INTSEL3 0x18
+#define DM365_ISP5_CCDCMUX 0x20
+#define DM365_ISP5_PG_FRAME_SIZE 0x28
+#define DM365_VPBE_CLK_CTRL 0x00
+
+#define VPSS_CLK_CTRL 0x01c40044
+#define VPSS_CLK_CTRL_VENCCLKEN BIT(3)
+#define VPSS_CLK_CTRL_DACCLKEN BIT(4)
+
+/*
+ * vpss interrupts. VDINT0 - vpss_int0, VDINT1 - vpss_int1,
+ * AF - vpss_int3
+ */
+#define DM365_ISP5_INTSEL1_DEFAULT 0x0b1f0100
+/* AEW - vpss_int6, RSZ_INT_DMA - vpss_int5 */
+#define DM365_ISP5_INTSEL2_DEFAULT 0x1f0a0f1f
+/* VENC - vpss_int8 */
+#define DM365_ISP5_INTSEL3_DEFAULT 0x00000015
+
+/* masks and shifts for DM365*/
+#define DM365_CCDC_PG_VD_POL_SHIFT 0
+#define DM365_CCDC_PG_HD_POL_SHIFT 1
+
+#define CCD_SRC_SEL_MASK (BIT_MASK(5) | BIT_MASK(4))
+#define CCD_SRC_SEL_SHIFT 4
+
+/* Different SoC platforms supported by this driver */
+enum vpss_platform_type {
+ DM644X,
+ DM355,
+ DM365,
+};
+
+/*
+ * vpss operations. Depends on platform. Not all functions are available
+ * on all platforms. The API first checks if a function is available before
+ * invoking it. In probe, the function ptrs are initialized based on the
+ * vpss name, which can be "dm355_vpss", "dm644x_vpss" etc.
+ */
+struct vpss_hw_ops {
+ /* enable clock */
+ int (*enable_clock)(enum vpss_clock_sel clock_sel, int en);
+ /* select input to ccdc */
+ void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel);
+ /* clear wbl overflow bit */
+ int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel);
+ /* set sync polarity */
+ void (*set_sync_pol)(struct vpss_sync_pol);
+ /* set the PG_FRAME_SIZE register*/
+ void (*set_pg_frame_size)(struct vpss_pg_frame_size);
+ /* check and clear interrupt if occurred */
+ int (*dma_complete_interrupt)(void);
+};
+
+/* vpss configuration */
+struct vpss_oper_config {
+ __iomem void *vpss_regs_base0;
+ __iomem void *vpss_regs_base1;
+ __iomem void *vpss_regs_base2;
+ enum vpss_platform_type platform;
+ spinlock_t vpss_lock;
+ struct vpss_hw_ops hw_ops;
+};
+
+static struct vpss_oper_config oper_cfg;
+
+/* register access routines */
+static inline u32 bl_regr(u32 offset)
+{
+ return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
+}
+
+static inline void bl_regw(u32 val, u32 offset)
+{
+ __raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
+}
+
+static inline u32 vpss_regr(u32 offset)
+{
+ return __raw_readl(oper_cfg.vpss_regs_base1 + offset);
+}
+
+static inline void vpss_regw(u32 val, u32 offset)
+{
+ __raw_writel(val, oper_cfg.vpss_regs_base1 + offset);
+}
+
+/* For DM365 only */
+static inline u32 isp5_read(u32 offset)
+{
+ return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
+}
+
+/* For DM365 only */
+static inline void isp5_write(u32 val, u32 offset)
+{
+ __raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
+}
+
+static void dm365_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+ u32 temp = isp5_read(DM365_ISP5_CCDCMUX) & ~CCD_SRC_SEL_MASK;
+
+ /* if we are using pattern generator, enable it */
+ if (src_sel == VPSS_PGLPBK || src_sel == VPSS_CCDCPG)
+ temp |= 0x08;
+
+ temp |= (src_sel << CCD_SRC_SEL_SHIFT);
+ isp5_write(temp, DM365_ISP5_CCDCMUX);
+}
+
+static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+ bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX);
+}
+
+int vpss_dma_complete_interrupt(void)
+{
+ if (!oper_cfg.hw_ops.dma_complete_interrupt)
+ return 2;
+ return oper_cfg.hw_ops.dma_complete_interrupt();
+}
+EXPORT_SYMBOL(vpss_dma_complete_interrupt);
+
+int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+ if (!oper_cfg.hw_ops.select_ccdc_source)
+ return -EINVAL;
+
+ oper_cfg.hw_ops.select_ccdc_source(src_sel);
+ return 0;
+}
+EXPORT_SYMBOL(vpss_select_ccdc_source);
+
+static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
+{
+ u32 mask = 1, val;
+
+ if (wbl_sel < VPSS_PCR_AEW_WBL_0 ||
+ wbl_sel > VPSS_PCR_CCDC_WBL_O)
+ return -EINVAL;
+
+ /* writing a 0 clears the overflow */
+ mask = ~(mask << wbl_sel);
+ val = bl_regr(DM644X_SBL_PCR_VPSS) & mask;
+ bl_regw(val, DM644X_SBL_PCR_VPSS);
+ return 0;
+}
+
+void vpss_set_sync_pol(struct vpss_sync_pol sync)
+{
+ if (!oper_cfg.hw_ops.set_sync_pol)
+ return;
+
+ oper_cfg.hw_ops.set_sync_pol(sync);
+}
+EXPORT_SYMBOL(vpss_set_sync_pol);
+
+int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
+{
+ if (!oper_cfg.hw_ops.clear_wbl_overflow)
+ return -EINVAL;
+
+ return oper_cfg.hw_ops.clear_wbl_overflow(wbl_sel);
+}
+EXPORT_SYMBOL(vpss_clear_wbl_overflow);
+
+/*
+ * dm355_enable_clock - Enable VPSS Clock
+ * @clock_sel: Clock to be enabled/disabled
+ * @en: enable/disable flag
+ *
+ * This is called to enable or disable a vpss clock
+ */
+static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+ unsigned long flags;
+ u32 utemp, mask = 0x1, shift = 0;
+
+ switch (clock_sel) {
+ case VPSS_VPBE_CLOCK:
+ /* nothing since lsb */
+ break;
+ case VPSS_VENC_CLOCK_SEL:
+ shift = 2;
+ break;
+ case VPSS_CFALD_CLOCK:
+ shift = 3;
+ break;
+ case VPSS_H3A_CLOCK:
+ shift = 4;
+ break;
+ case VPSS_IPIPE_CLOCK:
+ shift = 5;
+ break;
+ case VPSS_CCDC_CLOCK:
+ shift = 6;
+ break;
+ default:
+ printk(KERN_ERR "dm355_enable_clock: Invalid selector: %d\n",
+ clock_sel);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
+ utemp = vpss_regr(DM355_VPSSCLK_CLKCTRL);
+ if (!en)
+ utemp &= ~(mask << shift);
+ else
+ utemp |= (mask << shift);
+
+ vpss_regw(utemp, DM355_VPSSCLK_CLKCTRL);
+ spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
+ return 0;
+}
+
+static int dm365_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+ unsigned long flags;
+ u32 utemp, mask = 0x1, shift = 0, offset = DM365_ISP5_PCCR;
+ u32 (*read)(u32 offset) = isp5_read;
+ void(*write)(u32 val, u32 offset) = isp5_write;
+
+ switch (clock_sel) {
+ case VPSS_BL_CLOCK:
+ break;
+ case VPSS_CCDC_CLOCK:
+ shift = 1;
+ break;
+ case VPSS_H3A_CLOCK:
+ shift = 2;
+ break;
+ case VPSS_RSZ_CLOCK:
+ shift = 3;
+ break;
+ case VPSS_IPIPE_CLOCK:
+ shift = 4;
+ break;
+ case VPSS_IPIPEIF_CLOCK:
+ shift = 5;
+ break;
+ case VPSS_PCLK_INTERNAL:
+ shift = 6;
+ break;
+ case VPSS_PSYNC_CLOCK_SEL:
+ shift = 7;
+ break;
+ case VPSS_VPBE_CLOCK:
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_VENC_CLOCK_SEL:
+ shift = 2;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_LDC_CLOCK:
+ shift = 3;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_FDIF_CLOCK:
+ shift = 4;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_OSD_CLOCK_SEL:
+ shift = 6;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ case VPSS_LDC_CLOCK_SEL:
+ shift = 7;
+ read = vpss_regr;
+ write = vpss_regw;
+ offset = DM365_VPBE_CLK_CTRL;
+ break;
+ default:
+ printk(KERN_ERR "dm365_enable_clock: Invalid selector: %d\n",
+ clock_sel);
+ return -1;
+ }
+
+ spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
+ utemp = read(offset);
+ if (!en) {
+ mask = ~mask;
+ utemp &= (mask << shift);
+ } else
+ utemp |= (mask << shift);
+
+ write(utemp, offset);
+ spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
+
+ return 0;
+}
+
+int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+ if (!oper_cfg.hw_ops.enable_clock)
+ return -EINVAL;
+
+ return oper_cfg.hw_ops.enable_clock(clock_sel, en);
+}
+EXPORT_SYMBOL(vpss_enable_clock);
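+/*
+ * Minimal usage sketch (illustrative, not part of this file): a DaVinci
+ * capture driver would typically enable the CCDC clock and route its input
+ * before starting capture, checking the return value since not every
+ * clock/function is available on every platform:
+ *
+ *	if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0)
+ *		return -EINVAL;
+ *	vpss_select_ccdc_source(VPSS_CCDCIN);
+ */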
+
+void dm365_vpss_set_sync_pol(struct vpss_sync_pol sync)
+{
+ int val = 0;
+ val = isp5_read(DM365_ISP5_CCDCMUX);
+
+ val |= (sync.ccdpg_hdpol << DM365_CCDC_PG_HD_POL_SHIFT);
+ val |= (sync.ccdpg_vdpol << DM365_CCDC_PG_VD_POL_SHIFT);
+
+ isp5_write(val, DM365_ISP5_CCDCMUX);
+}
+EXPORT_SYMBOL(dm365_vpss_set_sync_pol);
+
+void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
+{
+ if (!oper_cfg.hw_ops.set_pg_frame_size)
+ return;
+
+ oper_cfg.hw_ops.set_pg_frame_size(frame_size);
+}
+EXPORT_SYMBOL(vpss_set_pg_frame_size);
+
+void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
+{
+ int current_reg = ((frame_size.hlpfr >> 1) - 1) << 16;
+
+ current_reg |= (frame_size.pplen - 1);
+ isp5_write(current_reg, DM365_ISP5_PG_FRAME_SIZE);
+}
+EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
+
+static int vpss_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ char *platform_name;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENOENT;
+ }
+
+ platform_name = pdev->dev.platform_data;
+ if (!strcmp(platform_name, "dm355_vpss"))
+ oper_cfg.platform = DM355;
+ else if (!strcmp(platform_name, "dm365_vpss"))
+ oper_cfg.platform = DM365;
+ else if (!strcmp(platform_name, "dm644x_vpss"))
+ oper_cfg.platform = DM644X;
+ else {
+ dev_err(&pdev->dev, "vpss driver not supported on this platform\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oper_cfg.vpss_regs_base0))
+ return PTR_ERR(oper_cfg.vpss_regs_base0);
+
+ if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(oper_cfg.vpss_regs_base1))
+ return PTR_ERR(oper_cfg.vpss_regs_base1);
+ }
+
+ if (oper_cfg.platform == DM355) {
+ oper_cfg.hw_ops.enable_clock = dm355_enable_clock;
+ oper_cfg.hw_ops.select_ccdc_source = dm355_select_ccdc_source;
+ /* Setup vpss interrupts */
+ bl_regw(DM355_VPSSBL_INTSEL_DEFAULT, DM355_VPSSBL_INTSEL);
+ bl_regw(DM355_VPSSBL_EVTSEL_DEFAULT, DM355_VPSSBL_EVTSEL);
+ } else if (oper_cfg.platform == DM365) {
+ oper_cfg.hw_ops.enable_clock = dm365_enable_clock;
+ oper_cfg.hw_ops.select_ccdc_source = dm365_select_ccdc_source;
+ /* Setup vpss interrupts */
+ isp5_write((isp5_read(DM365_ISP5_PCCR) |
+ DM365_ISP5_PCCR_BL_CLK_ENABLE |
+ DM365_ISP5_PCCR_ISIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_H3A_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSZ_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPE_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSV), DM365_ISP5_PCCR);
+ isp5_write((isp5_read(DM365_ISP5_BCR) |
+ DM365_ISP5_BCR_ISIF_OUT_ENABLE), DM365_ISP5_BCR);
+ isp5_write(DM365_ISP5_INTSEL1_DEFAULT, DM365_ISP5_INTSEL1);
+ isp5_write(DM365_ISP5_INTSEL2_DEFAULT, DM365_ISP5_INTSEL2);
+ isp5_write(DM365_ISP5_INTSEL3_DEFAULT, DM365_ISP5_INTSEL3);
+ } else
+ oper_cfg.hw_ops.clear_wbl_overflow = dm644x_clear_wbl_overflow;
+
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get(&pdev->dev);
+
+ spin_lock_init(&oper_cfg.vpss_lock);
+ dev_info(&pdev->dev, "%s vpss probe success\n", platform_name);
+
+ return 0;
+}
+
+static int vpss_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int vpss_suspend(struct device *dev)
+{
+ pm_runtime_put(dev);
+ return 0;
+}
+
+static int vpss_resume(struct device *dev)
+{
+ pm_runtime_get(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops vpss_pm_ops = {
+ .suspend = vpss_suspend,
+ .resume = vpss_resume,
+};
+
+static struct platform_driver vpss_driver = {
+ .driver = {
+ .name = "vpss",
+ .pm = &vpss_pm_ops,
+ },
+ .remove = vpss_remove,
+ .probe = vpss_probe,
+};
+
+static void vpss_exit(void)
+{
+ iounmap(oper_cfg.vpss_regs_base2);
+ release_mem_region(VPSS_CLK_CTRL, 4);
+ platform_driver_unregister(&vpss_driver);
+}
+
+static int __init vpss_init(void)
+{
+ int ret;
+
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
+
+ ret = platform_driver_register(&vpss_driver);
+ if (ret)
+ goto err_pd_register;
+
+ return 0;
+
+err_pd_register:
+ iounmap(oper_cfg.vpss_regs_base2);
+err_ioremap:
+ release_mem_region(VPSS_CLK_CTRL, 4);
+ return ret;
+}
+subsys_initcall(vpss_init);
+module_exit(vpss_exit);
diff --git a/drivers/media/platform/exynos-gsc/Makefile b/drivers/media/platform/exynos-gsc/Makefile
new file mode 100644
index 000000000..6d1411c6d
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/Makefile
@@ -0,0 +1,3 @@
+exynos-gsc-objs := gsc-core.o gsc-m2m.o gsc-regs.o
+
+obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc.o
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
new file mode 100644
index 000000000..17854a379
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -0,0 +1,1368 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <media/v4l2-ioctl.h>
+
+#include "gsc-core.h"
+
+static const struct gsc_fmt gsc_formats[] = {
+ {
+ .name = "RGB565",
+ .pixelformat = V4L2_PIX_FMT_RGB565X,
+ .depth = { 16 },
+ .color = GSC_RGB,
+ .num_planes = 1,
+ .num_comp = 1,
+ }, {
+ .name = "BGRX-8-8-8-8, 32 bpp",
+ .pixelformat = V4L2_PIX_FMT_BGR32,
+ .depth = { 32 },
+ .color = GSC_RGB,
+ .num_planes = 1,
+ .num_comp = 1,
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_C,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .pixelformat = V4L2_PIX_FMT_VYUY,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_C,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 1,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
+ }, {
+ .name = "YUV 4:4:4 planar, YCbYCr",
+ .pixelformat = V4L2_PIX_FMT_YUV32,
+ .depth = { 32 },
+ .color = GSC_YUV444,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 1,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:2 non-contig, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV16M,
+ .depth = { 8, 8 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV61,
+ .depth = { 16 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:2 non-contig, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV61M,
+ .depth = { 8, 8 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 planar, YCrCb",
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .depth = { 12 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 1,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV21M,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .depth = { 8, 2, 2 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 3,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cr/Cb",
+ .pixelformat = V4L2_PIX_FMT_YVU420M,
+ .depth = { 8, 2, 2 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 3,
+ .num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 n.c. 2p, Y/CbCr tiled",
+ .pixelformat = V4L2_PIX_FMT_NV12MT_16X16,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
+ }
+};
+
+const struct gsc_fmt *get_format(int index)
+{
+ if (index >= ARRAY_SIZE(gsc_formats))
+ return NULL;
+
+ return (struct gsc_fmt *)&gsc_formats[index];
+}
+
+const struct gsc_fmt *find_fmt(u32 *pixelformat, u32 *mbus_code, u32 index)
+{
+ const struct gsc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+
+ if (index >= ARRAY_SIZE(gsc_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(gsc_formats); ++i) {
+ fmt = get_format(i);
+ if (pixelformat && fmt->pixelformat == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == i)
+ def_fmt = fmt;
+ }
+ return def_fmt;
+}
+
+void gsc_set_frame_size(struct gsc_frame *frame, int width, int height)
+{
+ frame->f_width = width;
+ frame->f_height = height;
+ frame->crop.width = width;
+ frame->crop.height = height;
+ frame->crop.left = 0;
+ frame->crop.top = 0;
+}
+
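+/*
+ * Select the pre-scaler down-scaling ratio: 1 when the polyphase scaler
+ * alone can handle the requested downscale, otherwise 2 or 4.  Fails when
+ * the overall ratio exceeds what the pre-scaler and polyphase scaler can
+ * do together (1/16).
+ */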
+int gsc_cal_prescaler_ratio(struct gsc_variant *var, u32 src, u32 dst,
+ u32 *ratio)
+{
+ if ((dst > src) || (dst >= src / var->poly_sc_down_max)) {
+ *ratio = 1;
+ return 0;
+ }
+
+ if ((src / var->poly_sc_down_max / var->pre_sc_down_max) > dst) {
+		pr_err("Exceeded maximum downscaling ratio (1/16)");
+ return -EINVAL;
+ }
+
+ *ratio = (dst > (src / 8)) ? 2 : 4;
+
+ return 0;
+}
+
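+/* The pre-scaler shift factor is log2(pre_hratio * pre_vratio). */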
+void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh)
+{
+ if (hratio == 4 && vratio == 4)
+ *sh = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *sh = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *sh = 2;
+ else if (hratio == 1 && vratio == 1)
+ *sh = 0;
+ else
+ *sh = 1;
+}
+
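+/*
+ * Trim the source crop so its width and height are multiples of the
+ * pre-scaler ratio times the required alignment, and recompute the
+ * ratio whenever the crop had to be reduced.
+ */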
+void gsc_check_src_scale_info(struct gsc_variant *var,
+ struct gsc_frame *s_frame, u32 *wratio,
+ u32 tx, u32 ty, u32 *hratio)
+{
+ int remainder = 0, walign, halign;
+
+ if (is_yuv420(s_frame->fmt->color)) {
+ walign = GSC_SC_ALIGN_4;
+ halign = GSC_SC_ALIGN_4;
+ } else if (is_yuv422(s_frame->fmt->color)) {
+ walign = GSC_SC_ALIGN_4;
+ halign = GSC_SC_ALIGN_2;
+ } else {
+ walign = GSC_SC_ALIGN_2;
+ halign = GSC_SC_ALIGN_2;
+ }
+
+ remainder = s_frame->crop.width % (*wratio * walign);
+ if (remainder) {
+ s_frame->crop.width -= remainder;
+ gsc_cal_prescaler_ratio(var, s_frame->crop.width, tx, wratio);
+ pr_info("cropped src width size is recalculated from %d to %d",
+ s_frame->crop.width + remainder, s_frame->crop.width);
+ }
+
+ remainder = s_frame->crop.height % (*hratio * halign);
+ if (remainder) {
+ s_frame->crop.height -= remainder;
+ gsc_cal_prescaler_ratio(var, s_frame->crop.height, ty, hratio);
+ pr_info("cropped src height size is recalculated from %d to %d",
+ s_frame->crop.height + remainder, s_frame->crop.height);
+ }
+}
+
+int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f)
+{
+ const struct gsc_fmt *fmt;
+
+ fmt = find_fmt(NULL, NULL, f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->pixelformat;
+
+ return 0;
+}
+
+static int get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index, u32 *ret_addr)
+{
+ if (frm->addr.y == addr) {
+ *index = 0;
+ *ret_addr = frm->addr.y;
+ } else if (frm->addr.cb == addr) {
+ *index = 1;
+ *ret_addr = frm->addr.cb;
+ } else if (frm->addr.cr == addr) {
+ *index = 2;
+ *ret_addr = frm->addr.cr;
+ } else {
+ pr_err("Plane address is wrong");
+ return -EINVAL;
+ }
+ return 0;
+}
+
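+/*
+ * Work out the two prefetch address/length windows.  For three-plane
+ * formats the planes are ordered by DMA address and the pair spanning
+ * the smaller address range is merged into a single window.
+ */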
+void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
+{
+ u32 f_chk_addr, f_chk_len, s_chk_addr, s_chk_len;
+ f_chk_addr = f_chk_len = s_chk_addr = s_chk_len = 0;
+
+ f_chk_addr = frm->addr.y;
+ f_chk_len = frm->payload[0];
+ if (frm->fmt->num_planes == 2) {
+ s_chk_addr = frm->addr.cb;
+ s_chk_len = frm->payload[1];
+ } else if (frm->fmt->num_planes == 3) {
+ u32 low_addr, low_plane, mid_addr, mid_plane;
+ u32 high_addr, high_plane;
+ u32 t_min, t_max;
+
+ t_min = min3(frm->addr.y, frm->addr.cb, frm->addr.cr);
+ if (get_plane_info(frm, t_min, &low_plane, &low_addr))
+ return;
+ t_max = max3(frm->addr.y, frm->addr.cb, frm->addr.cr);
+ if (get_plane_info(frm, t_max, &high_plane, &high_addr))
+ return;
+
+ mid_plane = 3 - (low_plane + high_plane);
+ if (mid_plane == 0)
+ mid_addr = frm->addr.y;
+ else if (mid_plane == 1)
+ mid_addr = frm->addr.cb;
+ else if (mid_plane == 2)
+ mid_addr = frm->addr.cr;
+ else
+ return;
+
+ f_chk_addr = low_addr;
+ if (mid_addr + frm->payload[mid_plane] - low_addr >
+ high_addr + frm->payload[high_plane] - mid_addr) {
+ f_chk_len = frm->payload[low_plane];
+ s_chk_addr = mid_addr;
+ s_chk_len = high_addr +
+ frm->payload[high_plane] - mid_addr;
+ } else {
+ f_chk_len = mid_addr +
+ frm->payload[mid_plane] - low_addr;
+ s_chk_addr = high_addr;
+ s_chk_len = frm->payload[high_plane];
+ }
+ }
+ pr_debug("f_addr = 0x%08x, f_len = %d, s_addr = 0x%08x, s_len = %d\n",
+ f_chk_addr, f_chk_len, s_chk_addr, s_chk_len);
+}
+
+int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
+{
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ struct gsc_variant *variant = gsc->variant;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct gsc_fmt *fmt;
+ u32 max_w, max_h, mod_x, mod_y;
+ u32 min_w, min_h, tmp_w, tmp_h;
+ int i;
+
+ pr_debug("user put w: %d, h: %d", pix_mp->width, pix_mp->height);
+
+ fmt = find_fmt(&pix_mp->pixelformat, NULL, 0);
+ if (!fmt) {
+ pr_err("pixelformat format (0x%X) invalid\n",
+ pix_mp->pixelformat);
+ return -EINVAL;
+ }
+
+ if (pix_mp->field == V4L2_FIELD_ANY)
+ pix_mp->field = V4L2_FIELD_NONE;
+ else if (pix_mp->field != V4L2_FIELD_NONE) {
+ pr_debug("Not supported field order(%d)\n", pix_mp->field);
+ return -EINVAL;
+ }
+
+ max_w = variant->pix_max->target_rot_dis_w;
+ max_h = variant->pix_max->target_rot_dis_h;
+
+ mod_x = ffs(variant->pix_align->org_w) - 1;
+ if (is_yuv420(fmt->color))
+ mod_y = ffs(variant->pix_align->org_h) - 1;
+ else
+ mod_y = ffs(variant->pix_align->org_h) - 2;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ min_w = variant->pix_min->org_w;
+ min_h = variant->pix_min->org_h;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ pix_mp->colorspace = ctx->out_colorspace;
+ }
+
+ pr_debug("mod_x: %d, mod_y: %d, max_w: %d, max_h = %d",
+ mod_x, mod_y, max_w, max_h);
+
+	/* Check whether the image size must be adjusted to fit the
+	 * hardware limits */
+ tmp_w = pix_mp->width;
+ tmp_h = pix_mp->height;
+
+ v4l_bound_align_image(&pix_mp->width, min_w, max_w, mod_x,
+ &pix_mp->height, min_h, max_h, mod_y, 0);
+ if (tmp_w != pix_mp->width || tmp_h != pix_mp->height)
+ pr_debug("Image size has been modified from %dx%d to %dx%d\n",
+ tmp_w, tmp_h, pix_mp->width, pix_mp->height);
+
+ pix_mp->num_planes = fmt->num_planes;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ ctx->out_colorspace = pix_mp->colorspace;
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ struct v4l2_plane_pix_format *plane_fmt = &pix_mp->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
+
+ if (fmt->num_comp == 1 && /* Packed */
+ (bpl == 0 || (bpl * 8 / fmt->depth[i]) < pix_mp->width))
+ bpl = pix_mp->width * fmt->depth[i] / 8;
+
+ if (fmt->num_comp > 1 && /* Planar */
+ (bpl == 0 || bpl < pix_mp->width))
+ bpl = pix_mp->width;
+
+ if (i != 0 && fmt->num_comp == 3)
+ bpl /= 2;
+
+ plane_fmt->bytesperline = bpl;
+ plane_fmt->sizeimage = max(pix_mp->width * pix_mp->height *
+ fmt->depth[i] / 8,
+ plane_fmt->sizeimage);
+ pr_debug("[%d]: bpl: %d, sizeimage: %d",
+ i, bpl, pix_mp->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
+{
+ struct gsc_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ int i;
+
+ frame = ctx_get_frame(ctx, f->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ pix_mp = &f->fmt.pix_mp;
+
+ pix_mp->width = frame->f_width;
+ pix_mp->height = frame->f_height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = frame->fmt->pixelformat;
+ pix_mp->num_planes = frame->fmt->num_planes;
+ pix_mp->colorspace = ctx->out_colorspace;
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ pix_mp->plane_fmt[i].bytesperline = (frame->f_width *
+ frame->fmt->depth[i]) / 8;
+ pix_mp->plane_fmt[i].sizeimage =
+ pix_mp->plane_fmt[i].bytesperline * frame->f_height;
+ }
+
+ return 0;
+}
+
+void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h)
+{
+ if (tmp_w != *w || tmp_h != *h) {
+ pr_info("Cropped size has been modified from %dx%d to %dx%d",
+ *w, *h, tmp_w, tmp_h);
+ *w = tmp_w;
+ *h = tmp_h;
+ }
+}
+
+int gsc_g_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct gsc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c = frame->crop;
+
+ return 0;
+}
+
+int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct gsc_frame *f;
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ struct gsc_variant *variant = gsc->variant;
+ u32 mod_x = 0, mod_y = 0, tmp_w, tmp_h;
+ u32 min_w, min_h, max_w, max_h;
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ pr_err("doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+ pr_debug("user put w: %d, h: %d", cr->c.width, cr->c.height);
+
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ f = &ctx->d_frame;
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ max_w = f->f_width;
+ max_h = f->f_height;
+ tmp_w = cr->c.width;
+ tmp_h = cr->c.height;
+
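+	/*
+	 * Source (output queue) crops have format-dependent minimums;
+	 * destination (capture queue) crops also depend on rotation.
+	 */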
+ if (V4L2_TYPE_IS_OUTPUT(cr->type)) {
+ if ((is_yuv422(f->fmt->color) && f->fmt->num_comp == 1) ||
+ is_rgb(f->fmt->color))
+ min_w = 32;
+ else
+ min_w = 64;
+ if ((is_yuv422(f->fmt->color) && f->fmt->num_comp == 3) ||
+ is_yuv420(f->fmt->color))
+ min_h = 32;
+ else
+ min_h = 16;
+ } else {
+ if (is_yuv420(f->fmt->color) || is_yuv422(f->fmt->color))
+ mod_x = ffs(variant->pix_align->target_w) - 1;
+ if (is_yuv420(f->fmt->color))
+ mod_y = ffs(variant->pix_align->target_h) - 1;
+ if (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270) {
+ max_w = f->f_height;
+ max_h = f->f_width;
+ min_w = variant->pix_min->target_rot_en_w;
+ min_h = variant->pix_min->target_rot_en_h;
+ tmp_w = cr->c.height;
+ tmp_h = cr->c.width;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+ }
+ pr_debug("mod_x: %d, mod_y: %d, min_w: %d, min_h = %d",
+ mod_x, mod_y, min_w, min_h);
+ pr_debug("tmp_w : %d, tmp_h : %d", tmp_w, tmp_h);
+
+ v4l_bound_align_image(&tmp_w, min_w, max_w, mod_x,
+ &tmp_h, min_h, max_h, mod_y, 0);
+
+ if (!V4L2_TYPE_IS_OUTPUT(cr->type) &&
+ (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270))
+ gsc_check_crop_change(tmp_h, tmp_w,
+ &cr->c.width, &cr->c.height);
+ else
+ gsc_check_crop_change(tmp_w, tmp_h,
+ &cr->c.width, &cr->c.height);
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+	/* Need to add code to align the left value to a multiple of 2 */
+ if (cr->c.left + tmp_w > max_w)
+ cr->c.left = max_w - tmp_w;
+ if (cr->c.top + tmp_h > max_h)
+ cr->c.top = max_h - tmp_h;
+
+ if ((is_yuv420(f->fmt->color) || is_yuv422(f->fmt->color)) &&
+ cr->c.left & 1)
+ cr->c.left -= 1;
+
+ pr_debug("Aligned l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height, max_w, max_h);
+
+ return 0;
+}
+
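+/*
+ * Validate that the source to destination scaling, with rotation taken
+ * into account, stays within the hardware up/down-scaling limits.
+ */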
+int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw,
+ int dh, int rot, int out_path)
+{
+ int tmp_w, tmp_h, sc_down_max;
+
+ if (out_path == GSC_DMA)
+ sc_down_max = var->sc_down_max;
+ else
+ sc_down_max = var->local_sc_down;
+
+ if (rot == 90 || rot == 270) {
+ tmp_w = dh;
+ tmp_h = dw;
+ } else {
+ tmp_w = dw;
+ tmp_h = dh;
+ }
+
+ if ((sw / tmp_w) > sc_down_max ||
+ (sh / tmp_h) > sc_down_max ||
+ (tmp_w / sw) > var->sc_up_max ||
+ (tmp_h / sh) > var->sc_up_max)
+ return -EINVAL;
+
+ return 0;
+}
+
+int gsc_set_scaler_info(struct gsc_ctx *ctx)
+{
+ struct gsc_scaler *sc = &ctx->scaler;
+ struct gsc_frame *s_frame = &ctx->s_frame;
+ struct gsc_frame *d_frame = &ctx->d_frame;
+ struct gsc_variant *variant = ctx->gsc_dev->variant;
+ struct device *dev = &ctx->gsc_dev->pdev->dev;
+ int tx, ty;
+ int ret;
+
+ ret = gsc_check_scaler_ratio(variant, s_frame->crop.width,
+ s_frame->crop.height, d_frame->crop.width, d_frame->crop.height,
+ ctx->gsc_ctrls.rotate->val, ctx->out_path);
+ if (ret) {
+ pr_err("out of scaler range");
+ return ret;
+ }
+
+ if (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270) {
+ ty = d_frame->crop.width;
+ tx = d_frame->crop.height;
+ } else {
+ tx = d_frame->crop.width;
+ ty = d_frame->crop.height;
+ }
+
+ if (tx <= 0 || ty <= 0) {
+ dev_err(dev, "Invalid target size: %dx%d", tx, ty);
+ return -EINVAL;
+ }
+
+ ret = gsc_cal_prescaler_ratio(variant, s_frame->crop.width,
+ tx, &sc->pre_hratio);
+ if (ret) {
+ pr_err("Horizontal scale ratio is out of range");
+ return ret;
+ }
+
+ ret = gsc_cal_prescaler_ratio(variant, s_frame->crop.height,
+ ty, &sc->pre_vratio);
+ if (ret) {
+ pr_err("Vertical scale ratio is out of range");
+ return ret;
+ }
+
+ gsc_check_src_scale_info(variant, s_frame, &sc->pre_hratio,
+ tx, ty, &sc->pre_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
+ sc->main_hratio = (s_frame->crop.width << 16) / tx;
+ sc->main_vratio = (s_frame->crop.height << 16) / ty;
+
+ pr_debug("scaler input/output size : sx = %d, sy = %d, tx = %d, ty = %d",
+ s_frame->crop.width, s_frame->crop.height, tx, ty);
+ pr_debug("scaler ratio info : pre_shfactor : %d, pre_h : %d",
+ sc->pre_shfactor, sc->pre_hratio);
+ pr_debug("pre_v :%d, main_h : %d, main_v : %d",
+ sc->pre_vratio, sc->main_hratio, sc->main_vratio);
+
+ return 0;
+}
+
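+/* Apply a single control value; called with gsc_dev->slock held. */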
+static int __gsc_s_ctrl(struct gsc_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ struct gsc_variant *variant = gsc->variant;
+ unsigned int flags = GSC_DST_FMT | GSC_SRC_FMT;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+
+ case V4L2_CID_ROTATE:
+ if ((ctx->state & flags) == flags) {
+ ret = gsc_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height,
+ ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->gsc_ctrls.rotate->val,
+ ctx->out_path);
+
+ if (ret)
+ return -EINVAL;
+ }
+
+ ctx->rotation = ctrl->val;
+ break;
+
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
+ }
+
+ ctx->state |= GSC_PARAMS;
+ return 0;
+}
+
+static int gsc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gsc_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ret = __gsc_s_ctrl(ctx, ctrl);
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops gsc_ctrl_ops = {
+ .s_ctrl = gsc_s_ctrl,
+};
+
+int gsc_ctrls_create(struct gsc_ctx *ctx)
+{
+ if (ctx->ctrls_rdy) {
+ pr_err("Control handler of this context was created already");
+ return 0;
+ }
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, GSC_MAX_CTRL_NUM);
+
+ ctx->gsc_ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctx->gsc_ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctx->gsc_ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ ctx->gsc_ctrls.global_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &gsc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
+
+ ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ pr_err("Failed to create G-Scaler control handlers");
+ return err;
+ }
+
+ return 0;
+}
+
+void gsc_ctrls_delete(struct gsc_ctx *ctx)
+{
+ if (ctx->ctrls_rdy) {
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ ctx->ctrls_rdy = false;
+ }
+}
+
+/* The color format (num_comp, num_planes) must be already configured. */
+int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
+ struct gsc_frame *frame, struct gsc_addr *addr)
+{
+ int ret = 0;
+ u32 pix_size;
+
+ if ((vb == NULL) || (frame == NULL))
+ return -EINVAL;
+
+ pix_size = frame->f_width * frame->f_height;
+
+ pr_debug("num_planes= %d, num_comp= %d, pix_size= %d",
+ frame->fmt->num_planes, frame->fmt->num_comp, pix_size);
+
+ addr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (frame->fmt->num_planes == 1) {
+ switch (frame->fmt->num_comp) {
+ case 1:
+ addr->cb = 0;
+ addr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ addr->cb = (dma_addr_t)(addr->y + pix_size);
+ addr->cr = 0;
+ break;
+ case 3:
+ /* decompose Y into Y/Cb/Cr */
+ addr->cb = (dma_addr_t)(addr->y + pix_size);
+ if (GSC_YUV420 == frame->fmt->color)
+ addr->cr = (dma_addr_t)(addr->cb
+ + (pix_size >> 2));
+ else /* 422 */
+ addr->cr = (dma_addr_t)(addr->cb
+ + (pix_size >> 1));
+ break;
+ default:
+			pr_err("Invalid number of color planes");
+ return -EINVAL;
+ }
+ } else {
+ if (frame->fmt->num_planes >= 2)
+ addr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
+
+ if (frame->fmt->num_planes == 3)
+ addr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
+ }
+
+ if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
+ (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
+ swap(addr->cb, addr->cr);
+
+ pr_debug("ADDR: y= %pad cb= %pad cr= %pad ret= %d",
+ &addr->y, &addr->cb, &addr->cr, ret);
+
+ return ret;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *priv)
+{
+ struct gsc_dev *gsc = priv;
+ struct gsc_ctx *ctx;
+ int gsc_irq;
+
+ gsc_irq = gsc_hw_get_irq_status(gsc);
+ gsc_hw_clear_irq(gsc, gsc_irq);
+
+ if (gsc_irq == GSC_IRQ_OVERRUN) {
+ pr_err("Local path input over-run interrupt has occurred!\n");
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&gsc->slock);
+
+ if (test_and_clear_bit(ST_M2M_PEND, &gsc->state)) {
+
+ gsc_hw_enable_control(gsc, false);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDING, &gsc->state)) {
+ set_bit(ST_M2M_SUSPENDED, &gsc->state);
+ wake_up(&gsc->irq_queue);
+ goto isr_unlock;
+ }
+ ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
+
+ if (!ctx || !ctx->m2m_ctx)
+ goto isr_unlock;
+
+ spin_unlock(&gsc->slock);
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
+
+ /* wake_up job_abort, stop_streaming */
+ if (ctx->state & GSC_CTX_STOP_REQ) {
+ ctx->state &= ~GSC_CTX_STOP_REQ;
+ wake_up(&gsc->irq_queue);
+ }
+ return IRQ_HANDLED;
+ }
+
+isr_unlock:
+ spin_unlock(&gsc->slock);
+ return IRQ_HANDLED;
+}
+
+static struct gsc_pix_max gsc_v_100_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2047,
+ .real_rot_en_h = 2047,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5250_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2016,
+ .real_rot_en_h = 2016,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5420_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2048,
+ .real_rot_en_h = 2048,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5433_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2047,
+ .real_rot_en_h = 2047,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_min gsc_v_100_min = {
+ .org_w = 64,
+ .org_h = 32,
+ .real_w = 64,
+ .real_h = 32,
+ .target_rot_dis_w = 64,
+ .target_rot_dis_h = 32,
+ .target_rot_en_w = 32,
+ .target_rot_en_h = 16,
+};
+
+static struct gsc_pix_align gsc_v_100_align = {
+ .org_h = 16,
+ .org_w = 16, /* yuv420 : 16, others : 8 */
+ .offset_h = 2, /* yuv420/422 : 2, others : 1 */
+ .real_w = 16, /* yuv420/422 : 4~16, others : 2~8 */
+ .real_h = 16, /* yuv420 : 4~16, others : 1 */
+ .target_w = 2, /* yuv420/422 : 2, others : 1 */
+ .target_h = 2, /* yuv420 : 2, others : 1 */
+};
+
+static struct gsc_variant gsc_v_100_variant = {
+ .pix_max = &gsc_v_100_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5250_variant = {
+ .pix_max = &gsc_v_5250_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5420_variant = {
+ .pix_max = &gsc_v_5420_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5433_variant = {
+ .pix_max = &gsc_v_5433_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_driverdata gsc_v_100_drvdata = {
+ .variant = {
+ [0] = &gsc_v_100_variant,
+ [1] = &gsc_v_100_variant,
+ [2] = &gsc_v_100_variant,
+ [3] = &gsc_v_100_variant,
+ },
+ .num_entities = 4,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_v_5250_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5250_variant,
+ [1] = &gsc_v_5250_variant,
+ [2] = &gsc_v_5250_variant,
+ [3] = &gsc_v_5250_variant,
+ },
+ .num_entities = 4,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_v_5420_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5420_variant,
+ [1] = &gsc_v_5420_variant,
+ },
+ .num_entities = 2,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_5433_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5433_variant,
+ [1] = &gsc_v_5433_variant,
+ [2] = &gsc_v_5433_variant,
+ },
+ .num_entities = 3,
+ .clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
+ .num_clocks = 4,
+};
+
+static const struct of_device_id exynos_gsc_match[] = {
+ {
+ .compatible = "samsung,exynos5250-gsc",
+ .data = &gsc_v_5250_drvdata,
+ },
+ {
+ .compatible = "samsung,exynos5420-gsc",
+ .data = &gsc_v_5420_drvdata,
+ },
+ {
+ .compatible = "samsung,exynos5433-gsc",
+ .data = &gsc_5433_drvdata,
+ },
+ {
+ .compatible = "samsung,exynos5-gsc",
+ .data = &gsc_v_100_drvdata,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_gsc_match);
+
+static int gsc_probe(struct platform_device *pdev)
+{
+ struct gsc_dev *gsc;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ const struct gsc_driverdata *drv_data = of_device_get_match_data(dev);
+ int ret;
+ int i;
+
+ gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
+ if (!gsc)
+ return -ENOMEM;
+
+ ret = of_alias_get_id(pdev->dev.of_node, "gsc");
+ if (ret < 0)
+ return ret;
+
+ if (drv_data == &gsc_v_100_drvdata)
+ dev_info(dev, "compatible 'exynos5-gsc' is deprecated\n");
+
+ gsc->id = ret;
+ if (gsc->id >= drv_data->num_entities) {
+ dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
+ return -EINVAL;
+ }
+
+ gsc->num_clocks = drv_data->num_clocks;
+ gsc->variant = drv_data->variant[gsc->id];
+ gsc->pdev = pdev;
+
+ init_waitqueue_head(&gsc->irq_queue);
+ spin_lock_init(&gsc->slock);
+ mutex_init(&gsc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gsc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gsc->regs))
+ return PTR_ERR(gsc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < gsc->num_clocks; i++) {
+ gsc->clock[i] = devm_clk_get(dev, drv_data->clk_names[i]);
+ if (IS_ERR(gsc->clock[i])) {
+ dev_err(dev, "failed to get clock: %s\n",
+ drv_data->clk_names[i]);
+ return PTR_ERR(gsc->clock[i]);
+ }
+ }
+
+ for (i = 0; i < gsc->num_clocks; i++) {
+ ret = clk_prepare_enable(gsc->clock[i]);
+ if (ret) {
+ dev_err(dev, "clock prepare failed for clock: %s\n",
+ drv_data->clk_names[i]);
+ while (--i >= 0)
+ clk_disable_unprepare(gsc->clock[i]);
+ return ret;
+ }
+ }
+
+ ret = devm_request_irq(dev, res->start, gsc_irq_handler,
+ 0, pdev->name, gsc);
+ if (ret) {
+ dev_err(dev, "failed to install irq (%d)\n", ret);
+ goto err_clk;
+ }
+
+ ret = v4l2_device_register(dev, &gsc->v4l2_dev);
+ if (ret)
+ goto err_clk;
+
+ ret = gsc_register_m2m_device(gsc);
+ if (ret)
+ goto err_v4l2;
+
+ platform_set_drvdata(pdev, gsc);
+
+ gsc_hw_set_sw_reset(gsc);
+ gsc_wait_reset(gsc);
+
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_v4l2:
+ v4l2_device_unregister(&gsc->v4l2_dev);
+err_clk:
+ for (i = gsc->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(gsc->clock[i]);
+ return ret;
+}
+
+static int gsc_remove(struct platform_device *pdev)
+{
+ struct gsc_dev *gsc = platform_get_drvdata(pdev);
+ int i;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ gsc_unregister_m2m_device(gsc);
+ v4l2_device_unregister(&gsc->v4l2_dev);
+
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ for (i = 0; i < gsc->num_clocks; i++)
+ clk_disable_unprepare(gsc->clock[i]);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ return 0;
+}
+
+#ifdef CONFIG_PM
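+/* Wait for a pending m2m job to finish before the device is powered down. */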
+static int gsc_m2m_suspend(struct gsc_dev *gsc)
+{
+ unsigned long flags;
+ int timeout;
+
+ spin_lock_irqsave(&gsc->slock, flags);
+ if (!gsc_m2m_pending(gsc)) {
+ spin_unlock_irqrestore(&gsc->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &gsc->state);
+ set_bit(ST_M2M_SUSPENDING, &gsc->state);
+ spin_unlock_irqrestore(&gsc->slock, flags);
+
+ timeout = wait_event_timeout(gsc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &gsc->state),
+ GSC_SHUTDOWN_TIMEOUT);
+
+ clear_bit(ST_M2M_SUSPENDING, &gsc->state);
+ return timeout == 0 ? -EAGAIN : 0;
+}
+
+static void gsc_m2m_resume(struct gsc_dev *gsc)
+{
+ struct gsc_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsc->slock, flags);
+ /* Clear for full H/W setup in first run after resume */
+ ctx = gsc->m2m.ctx;
+ gsc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&gsc->slock, flags);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+ int ret = 0;
+ int i;
+
+ pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
+
+ for (i = 0; i < gsc->num_clocks; i++) {
+ ret = clk_prepare_enable(gsc->clock[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(gsc->clock[i]);
+ return ret;
+ }
+ }
+
+ gsc_hw_set_sw_reset(gsc);
+ gsc_wait_reset(gsc);
+ gsc_m2m_resume(gsc);
+
+ return 0;
+}
+
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+ int ret = 0;
+ int i;
+
+ ret = gsc_m2m_suspend(gsc);
+ if (ret)
+ return ret;
+
+ for (i = gsc->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(gsc->clock[i]);
+
+ pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+static struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = gsc_remove,
+ .driver = {
+ .name = GSC_MODULE_NAME,
+ .pm = &gsc_pm_ops,
+ .of_match_table = exynos_gsc_match,
+ }
+};
+
+module_platform_driver(gsc_driver);
+
+MODULE_AUTHOR("Hyunwong Kim <khw0178.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung EXYNOS5 SoC series G-Scaler driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
new file mode 100644
index 000000000..715d9c9d8
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * header file for Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef GSC_CORE_H_
+#define GSC_CORE_H_
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "gsc-regs.h"
+
+#define CONFIG_VB2_GSC_DMA_CONTIG 1
+#define GSC_MODULE_NAME "exynos-gsc"
+
+#define GSC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_CLOCKS 4
+#define GSC_M2M_BUF_NUM 0
+#define GSC_MAX_CTRL_NUM 10
+#define GSC_SC_ALIGN_4 4
+#define GSC_SC_ALIGN_2 2
+#define DEFAULT_CSC_EQ 1
+#define DEFAULT_CSC_RANGE 1
+
+#define GSC_PARAMS (1 << 0)
+#define GSC_SRC_FMT (1 << 1)
+#define GSC_DST_FMT (1 << 2)
+#define GSC_CTX_M2M (1 << 3)
+#define GSC_CTX_STOP_REQ (1 << 6)
+#define GSC_CTX_ABORT (1 << 7)
+
+enum gsc_dev_flags {
+ /* for m2m node */
+ ST_M2M_OPEN,
+ ST_M2M_RUN,
+ ST_M2M_PEND,
+ ST_M2M_SUSPENDED,
+ ST_M2M_SUSPENDING,
+};
+
+enum gsc_irq {
+ GSC_IRQ_DONE,
+ GSC_IRQ_OVERRUN
+};
+
+/**
+ * enum gsc_datapath - the path of data used for G-Scaler
+ * @GSC_CAMERA: from camera
+ * @GSC_DMA: from/to DMA
+ * @GSC_MIXER: to the mixer over the local path
+ * @GSC_FIMD: to FIMD over the local path
+ * @GSC_WRITEBACK: from FIMD
+ */
+enum gsc_datapath {
+ GSC_CAMERA = 0x1,
+ GSC_DMA,
+ GSC_MIXER,
+ GSC_FIMD,
+ GSC_WRITEBACK,
+};
+
+enum gsc_color_fmt {
+ GSC_RGB = 0x1,
+ GSC_YUV420 = 0x2,
+ GSC_YUV422 = 0x4,
+ GSC_YUV444 = 0x8,
+};
+
+enum gsc_yuv_fmt {
+ GSC_LSB_Y = 0x10,
+ GSC_LSB_C,
+ GSC_CBCR = 0x20,
+ GSC_CRCB,
+};
+
+#define fh_to_ctx(__fh) container_of(__fh, struct gsc_ctx, fh)
+#define is_rgb(x) (!!((x) & 0x1))
+#define is_yuv420(x) (!!((x) & 0x2))
+#define is_yuv422(x) (!!((x) & 0x4))
+
+#define gsc_m2m_active(dev) test_bit(ST_M2M_RUN, &(dev)->state)
+#define gsc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+#define gsc_m2m_opened(dev) test_bit(ST_M2M_OPEN, &(dev)->state)
+
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct gsc_ctx, ctrl_handler)
+/**
+ * struct gsc_fmt - the driver's internal color format data
+ * @mbus_code: Media Bus pixel code, -1 if not applicable
+ * @name: format description
+ * @pixelformat: the fourcc code for this format, 0 if not applicable
+ * @color: color format class (GSC_RGB or one of the GSC_YUV* classes)
+ * @yorder: Y/C order
+ * @corder: Chrominance order control
+ * @num_planes: number of physically non-contiguous data planes
+ * @num_comp: number of physically contiguous data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @flags: flags indicating which operation mode format applies to
+ */
+struct gsc_fmt {
+ u32 mbus_code;
+ char *name;
+ u32 pixelformat;
+ u32 color;
+ u32 yorder;
+ u32 corder;
+ u16 num_planes;
+ u16 num_comp;
+ u8 depth[VIDEO_MAX_PLANES];
+ u32 flags;
+};
+
+/**
+ * struct gsc_input_buf - the driver's video buffer
+ * @vb: videobuf2 buffer
+ * @list : linked list structure for buffer queue
+ * @idx : index of G-Scaler input buffer
+ */
+struct gsc_input_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ int idx;
+};
+
+/**
+ * struct gsc_addr - the G-Scaler physical address set
+ * @y: luminance plane address
+ * @cb: Cb plane address
+ * @cr: Cr plane address
+ */
+struct gsc_addr {
+ dma_addr_t y;
+ dma_addr_t cb;
+ dma_addr_t cr;
+};
+
+/* struct gsc_ctrls - the G-Scaler control set
+ * @rotate: rotation degree
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @global_alpha: the alpha value of current frame
+ */
+struct gsc_ctrls {
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *global_alpha;
+};
+
+/**
+ * struct gsc_scaler - the configuration data for the G-Scaler internal scaler
+ * @pre_shfactor: prescaler shift factor
+ * @pre_hratio: horizontal ratio of the prescaler
+ * @pre_vratio: vertical ratio of the prescaler
+ * @main_hratio: the main scaler's horizontal ratio
+ * @main_vratio: the main scaler's vertical ratio
+ */
+struct gsc_scaler {
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ u32 main_hratio;
+ u32 main_vratio;
+};
+
+struct gsc_dev;
+
+struct gsc_ctx;
+
+/**
+ * struct gsc_frame - source/target frame properties
+ * @f_width: SRC : SRCIMG_WIDTH, DST : OUTPUTDMA_WHOLE_IMG_WIDTH
+ * @f_height: SRC : SRCIMG_HEIGHT, DST : OUTPUTDMA_WHOLE_IMG_HEIGHT
+ * @crop: cropped(source)/scaled(destination) size
+ * @payload: image size in bytes (w x h x bpp)
+ * @addr: image frame buffer physical addresses
+ * @fmt: G-Scaler color format pointer
+ * @colorspace: value indicating v4l2_colorspace
+ * @alpha: frame's alpha value
+ */
+struct gsc_frame {
+ u32 f_width;
+ u32 f_height;
+ struct v4l2_rect crop;
+ unsigned long payload[VIDEO_MAX_PLANES];
+ struct gsc_addr addr;
+ const struct gsc_fmt *fmt;
+ u32 colorspace;
+ u8 alpha;
+};
+
+/**
+ * struct gsc_m2m_device - v4l2 memory-to-memory device data
+ * @vfd: the video device node for v4l2 m2m mode
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx: hardware context data
+ * @refcnt: the reference counter
+ */
+struct gsc_m2m_device {
+ struct video_device *vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct gsc_ctx *ctx;
+ int refcnt;
+};
+
+/**
+ * struct gsc_pix_max - image pixel size limits in various IP configurations
+ *
+ * @org_scaler_bypass_w: max pixel width when the scaler is disabled
+ * @org_scaler_bypass_h: max pixel height when the scaler is disabled
+ * @org_scaler_input_w: max pixel width when the scaler is enabled
+ * @org_scaler_input_h: max pixel height when the scaler is enabled
+ * @real_rot_dis_w: max pixel src cropped width when the rotator is off
+ * @real_rot_dis_h: max pixel src cropped height when the rotator is off
+ * @real_rot_en_w: max pixel src cropped width when the rotator is on
+ * @real_rot_en_h: max pixel src cropped height when the rotator is on
+ * @target_rot_dis_w: max pixel dst scaled width when the rotator is off
+ * @target_rot_dis_h: max pixel dst scaled height when the rotator is off
+ * @target_rot_en_w: max pixel dst scaled width when the rotator is on
+ * @target_rot_en_h: max pixel dst scaled height when the rotator is on
+ */
+struct gsc_pix_max {
+ u16 org_scaler_bypass_w;
+ u16 org_scaler_bypass_h;
+ u16 org_scaler_input_w;
+ u16 org_scaler_input_h;
+ u16 real_rot_dis_w;
+ u16 real_rot_dis_h;
+ u16 real_rot_en_w;
+ u16 real_rot_en_h;
+ u16 target_rot_dis_w;
+ u16 target_rot_dis_h;
+ u16 target_rot_en_w;
+ u16 target_rot_en_h;
+};
+
+/**
+ * struct gsc_pix_min - image pixel size limits in various IP configurations
+ *
+ * @org_w: minimum source pixel width
+ * @org_h: minimum source pixel height
+ * @real_w: minimum input crop pixel width
+ * @real_h: minimum input crop pixel height
+ * @target_rot_dis_w: minimum output scaled pixel width when the rotator is off
+ * @target_rot_dis_h: minimum output scaled pixel height when the rotator is off
+ * @target_rot_en_w: minimum output scaled pixel width when the rotator is on
+ * @target_rot_en_h: minimum output scaled pixel height when the rotator is on
+ */
+struct gsc_pix_min {
+ u16 org_w;
+ u16 org_h;
+ u16 real_w;
+ u16 real_h;
+ u16 target_rot_dis_w;
+ u16 target_rot_dis_h;
+ u16 target_rot_en_w;
+ u16 target_rot_en_h;
+};
+
+struct gsc_pix_align {
+ u16 org_h;
+ u16 org_w;
+ u16 offset_h;
+ u16 real_w;
+ u16 real_h;
+ u16 target_w;
+ u16 target_h;
+};
+
+/**
+ * struct gsc_variant - G-Scaler variant information
+ */
+struct gsc_variant {
+ struct gsc_pix_max *pix_max;
+ struct gsc_pix_min *pix_min;
+ struct gsc_pix_align *pix_align;
+ u16 in_buf_cnt;
+ u16 out_buf_cnt;
+ u16 sc_up_max;
+ u16 sc_down_max;
+ u16 poly_sc_down_max;
+ u16 pre_sc_down_max;
+ u16 local_sc_down;
+};
+
+/**
+ * struct gsc_driverdata - per-device-type driver data used at init time
+ *
+ * @variant: the variant information for this driver
+ * @clk_names: names of the clocks required by this variant
+ * @num_clocks: the number of clocks in @clk_names
+ * @num_entities: the number of G-Scaler instances
+ */
+struct gsc_driverdata {
+ struct gsc_variant *variant[GSC_MAX_DEVS];
+ const char *clk_names[GSC_MAX_CLOCKS];
+ int num_clocks;
+ int num_entities;
+};
+
+/**
+ * struct gsc_dev - abstraction for G-Scaler entity
+ * @slock: the spinlock protecting this data structure
+ * @lock: the mutex protecting this data structure
+ * @pdev: pointer to the G-Scaler platform device
+ * @variant: the IP variant information
+ * @id: G-Scaler device index (0..GSC_MAX_DEVS-1)
+ * @clock: clocks required for G-Scaler operation
+ * @regs: the mapped hardware registers
+ * @irq_queue: interrupt handler waitqueue
+ * @m2m: memory-to-memory V4L2 device information
+ * @state: flags used to synchronize m2m and capture mode operation
+ * @vdev: video device for G-Scaler instance
+ */
+struct gsc_dev {
+ spinlock_t slock;
+ struct mutex lock;
+ struct platform_device *pdev;
+ struct gsc_variant *variant;
+ u16 id;
+ int num_clocks;
+ struct clk *clock[GSC_MAX_CLOCKS];
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+ struct gsc_m2m_device m2m;
+ unsigned long state;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+};
+
+/**
+ * struct gsc_ctx - the device context data
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @in_path: input mode (DMA or camera)
+ * @out_path: output mode (DMA or FIFO)
+ * @scaler: image scaler properties
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of user configuration
+ * @gsc_dev: the G-Scaler device this context applies to
+ * @m2m_ctx: memory-to-memory device context
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @gsc_ctrls: G-Scaler control set
+ * @ctrls_rdy: true if the control handler is initialized
+ */
+struct gsc_ctx {
+ struct gsc_frame s_frame;
+ struct gsc_frame d_frame;
+ enum gsc_datapath in_path;
+ enum gsc_datapath out_path;
+ struct gsc_scaler scaler;
+ u32 flags;
+ u32 state;
+ int rotation;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ struct gsc_dev *gsc_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct gsc_ctrls gsc_ctrls;
+ bool ctrls_rdy;
+ enum v4l2_colorspace out_colorspace;
+};
+
+void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm);
+int gsc_register_m2m_device(struct gsc_dev *gsc);
+void gsc_unregister_m2m_device(struct gsc_dev *gsc);
+void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state);
+
+u32 get_plane_size(struct gsc_frame *fr, unsigned int plane);
+const struct gsc_fmt *get_format(int index);
+const struct gsc_fmt *find_fmt(u32 *pixelformat, u32 *mbus_code, u32 index);
+int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f);
+int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
+void gsc_set_frame_size(struct gsc_frame *frame, int width, int height);
+int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
+void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h);
+int gsc_g_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr);
+int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr);
+int gsc_cal_prescaler_ratio(struct gsc_variant *var, u32 src, u32 dst,
+ u32 *ratio);
+void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh);
+void gsc_check_src_scale_info(struct gsc_variant *var,
+ struct gsc_frame *s_frame,
+ u32 *wratio, u32 tx, u32 ty, u32 *hratio);
+int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw,
+ int dh, int rot, int out_path);
+int gsc_set_scaler_info(struct gsc_ctx *ctx);
+int gsc_ctrls_create(struct gsc_ctx *ctx);
+void gsc_ctrls_delete(struct gsc_ctx *ctx);
+int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
+ struct gsc_frame *frame, struct gsc_addr *addr);
+
+static inline void gsc_ctx_state_lock_set(u32 state, struct gsc_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ctx->state |= state;
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+}
+
+static inline void gsc_ctx_state_lock_clear(u32 state, struct gsc_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ctx->state &= ~state;
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+}
+
+static inline int is_tiled(const struct gsc_fmt *fmt)
+{
+ return fmt->pixelformat == V4L2_PIX_FMT_NV12MT_16X16;
+}
+
+static inline void gsc_hw_enable_control(struct gsc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + GSC_ENABLE);
+
+ if (on)
+ cfg |= GSC_ENABLE_ON;
+ else
+ cfg &= ~GSC_ENABLE_ON;
+
+ writel(cfg, dev->regs + GSC_ENABLE);
+}
+
+static inline int gsc_hw_get_irq_status(struct gsc_dev *dev)
+{
+	u32 cfg = readl(dev->regs + GSC_IRQ);
+
+	if (cfg & GSC_IRQ_STATUS_OR_IRQ)
+		return GSC_IRQ_OVERRUN;
+	else
+		return GSC_IRQ_DONE;
+}
+
+static inline void gsc_hw_clear_irq(struct gsc_dev *dev, int irq)
+{
+ u32 cfg = readl(dev->regs + GSC_IRQ);
+ if (irq == GSC_IRQ_OVERRUN)
+ cfg |= GSC_IRQ_STATUS_OR_IRQ;
+ else if (irq == GSC_IRQ_DONE)
+ cfg |= GSC_IRQ_STATUS_FRM_DONE_IRQ;
+ writel(cfg, dev->regs + GSC_IRQ);
+}
+
+static inline bool gsc_ctx_state_is_set(u32 mask, struct gsc_ctx *ctx)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
+ ret = (ctx->state & mask) == mask;
+ spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
+ return ret;
+}
+
+static inline struct gsc_frame *ctx_get_frame(struct gsc_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ struct gsc_frame *frame;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == type) {
+ frame = &ctx->s_frame;
+ } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
+ frame = &ctx->d_frame;
+ } else {
+ pr_err("Wrong buffer/video queue type (%d)", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return frame;
+}
+
+void gsc_hw_set_sw_reset(struct gsc_dev *dev);
+int gsc_wait_reset(struct gsc_dev *dev);
+
+void gsc_hw_set_frm_done_irq_mask(struct gsc_dev *dev, bool mask);
+void gsc_hw_set_gsc_irq_enable(struct gsc_dev *dev, bool mask);
+void gsc_hw_set_input_buf_masking(struct gsc_dev *dev, u32 shift, bool enable);
+void gsc_hw_set_output_buf_masking(struct gsc_dev *dev, u32 shift, bool enable);
+void gsc_hw_set_input_addr(struct gsc_dev *dev, struct gsc_addr *addr,
+ int index);
+void gsc_hw_set_output_addr(struct gsc_dev *dev, struct gsc_addr *addr,
+ int index);
+void gsc_hw_set_input_path(struct gsc_ctx *ctx);
+void gsc_hw_set_in_size(struct gsc_ctx *ctx);
+void gsc_hw_set_in_image_rgb(struct gsc_ctx *ctx);
+void gsc_hw_set_in_image_format(struct gsc_ctx *ctx);
+void gsc_hw_set_output_path(struct gsc_ctx *ctx);
+void gsc_hw_set_out_size(struct gsc_ctx *ctx);
+void gsc_hw_set_out_image_rgb(struct gsc_ctx *ctx);
+void gsc_hw_set_out_image_format(struct gsc_ctx *ctx);
+void gsc_hw_set_prescaler(struct gsc_ctx *ctx);
+void gsc_hw_set_mainscaler(struct gsc_ctx *ctx);
+void gsc_hw_set_rotation(struct gsc_ctx *ctx);
+void gsc_hw_set_global_alpha(struct gsc_ctx *ctx);
+void gsc_hw_set_sfr_update(struct gsc_ctx *ctx);
+
+#endif /* GSC_CORE_H_ */
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
new file mode 100644
index 000000000..c9d2f6c53
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -0,0 +1,803 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "gsc-core.h"
+
+static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
+{
+ struct gsc_ctx *curr_ctx;
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ int ret;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
+ if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
+ return 0;
+
+ gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
+ ret = wait_event_timeout(gsc->irq_queue,
+ !gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
+ GSC_SHUTDOWN_TIMEOUT);
+
+ return ret == 0 ? -ETIMEDOUT : ret;
+}
+
+static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
+{
+ int ret;
+
+ ret = gsc_m2m_ctx_stop_req(ctx);
+ if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
+ gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct gsc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
+ return ret > 0 ? 0 : ret;
+}
+
+static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void gsc_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct gsc_ctx *ctx = q->drv_priv;
+
+ __gsc_m2m_job_abort(ctx);
+
+ __gsc_m2m_cleanup_queue(ctx);
+
+ pm_runtime_put(&ctx->gsc_dev->pdev->dev);
+}
+
+void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ if (!ctx || !ctx->m2m_ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+
+ v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
+ ctx->m2m_ctx);
+ }
+}
+
+static void gsc_m2m_job_abort(void *priv)
+{
+ __gsc_m2m_job_abort((struct gsc_ctx *)priv);
+}
+
+static int gsc_get_bufs(struct gsc_ctx *ctx)
+{
+ struct gsc_frame *s_frame, *d_frame;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ int ret;
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
+ if (ret)
+ return ret;
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
+ if (ret)
+ return ret;
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+
+ return 0;
+}
+
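+/*
+ * Run one m2m job: program the source/destination buffer addresses and,
+ * when the context or its parameters changed, reprogram the whole
+ * pipeline before enabling the hardware.
+ */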
+static void gsc_m2m_device_run(void *priv)
+{
+ struct gsc_ctx *ctx = priv;
+ struct gsc_dev *gsc;
+ unsigned long flags;
+ int ret;
+ bool is_set = false;
+
+ if (WARN(!ctx, "null hardware context\n"))
+ return;
+
+ gsc = ctx->gsc_dev;
+ spin_lock_irqsave(&gsc->slock, flags);
+
+ set_bit(ST_M2M_PEND, &gsc->state);
+
+ /* Reconfigure hardware if the context has changed. */
+ if (gsc->m2m.ctx != ctx) {
+ pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
+ gsc->m2m.ctx, ctx);
+ ctx->state |= GSC_PARAMS;
+ gsc->m2m.ctx = ctx;
+ }
+
+ is_set = ctx->state & GSC_CTX_STOP_REQ;
+ if (is_set) {
+ ctx->state &= ~GSC_CTX_STOP_REQ;
+ ctx->state |= GSC_CTX_ABORT;
+ wake_up(&gsc->irq_queue);
+ goto put_device;
+ }
+
+ ret = gsc_get_bufs(ctx);
+ if (ret) {
+ pr_err("Wrong address");
+ goto put_device;
+ }
+
+ gsc_set_prefbuf(gsc, &ctx->s_frame);
+ gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
+ gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);
+
+ if (ctx->state & GSC_PARAMS) {
+ gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
+ gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
+ gsc_hw_set_frm_done_irq_mask(gsc, false);
+ gsc_hw_set_gsc_irq_enable(gsc, true);
+
+ if (gsc_set_scaler_info(ctx)) {
+ pr_err("Scaler setup error");
+ goto put_device;
+ }
+
+ gsc_hw_set_input_path(ctx);
+ gsc_hw_set_in_size(ctx);
+ gsc_hw_set_in_image_format(ctx);
+
+ gsc_hw_set_output_path(ctx);
+ gsc_hw_set_out_size(ctx);
+ gsc_hw_set_out_image_format(ctx);
+
+ gsc_hw_set_prescaler(ctx);
+ gsc_hw_set_mainscaler(ctx);
+ gsc_hw_set_rotation(ctx);
+ gsc_hw_set_global_alpha(ctx);
+ }
+
+ /* update shadow registers */
+ gsc_hw_set_sfr_update(ctx);
+
+ ctx->state &= ~GSC_PARAMS;
+ gsc_hw_enable_control(gsc, true);
+
+ spin_unlock_irqrestore(&gsc->slock, flags);
+ return;
+
+put_device:
+ ctx->state &= ~GSC_PARAMS;
+ spin_unlock_irqrestore(&gsc->slock, flags);
+}
+
+static int gsc_m2m_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct gsc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ if (!frame->fmt)
+ return -EINVAL;
+
+ *num_planes = frame->fmt->num_planes;
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ sizes[i] = frame->payload[i];
+ return 0;
+}
+
+static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct gsc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+ }
+
+ return 0;
+}
+
+static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
+
+ if (ctx->m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
+}
+
+static const struct vb2_ops gsc_m2m_qops = {
+ .queue_setup = gsc_m2m_queue_setup,
+ .buf_prepare = gsc_m2m_buf_prepare,
+ .buf_queue = gsc_m2m_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = gsc_m2m_stop_streaming,
+ .start_streaming = gsc_m2m_start_streaming,
+};
+
+static int gsc_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+
+ strlcpy(cap->driver, GSC_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&gsc->pdev->dev));
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return gsc_enum_fmt_mplane(f);
+}
+
+static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ return gsc_g_fmt_mplane(ctx, f);
+}
+
+static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ return gsc_try_fmt_mplane(ctx, f);
+}
+
+static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *vq;
+ struct gsc_frame *frame;
+ struct v4l2_pix_format_mplane *pix;
+ int i, ret = 0;
+
+ ret = gsc_m2m_try_fmt_mplane(file, fh, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+
+ if (vb2_is_streaming(vq)) {
+ pr_err("queue (%d) busy", f->type);
+ return -EBUSY;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ pix = &f->fmt.pix_mp;
+ frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
+ frame->colorspace = pix->colorspace;
+ if (!frame->fmt)
+ return -EINVAL;
+
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ frame->payload[i] = pix->plane_fmt[i].sizeimage;
+
+ gsc_set_frame_size(frame, pix->width, pix->height);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
+ else
+ gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);
+
+ pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
+
+ return 0;
+}
+
+static int gsc_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ u32 max_cnt;
+
+ max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+ gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
+ if (reqbufs->count > max_cnt)
+ return -EINVAL;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int gsc_m2m_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
+static int gsc_m2m_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int gsc_m2m_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int gsc_m2m_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int gsc_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ /* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
+ return -EINVAL;
+ } else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int gsc_m2m_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int gsc_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct gsc_frame *frame;
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ frame = ctx_get_frame(ctx, s->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->f_width;
+ s->r.height = frame->f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = frame->crop.left;
+ s->r.top = frame->crop.top;
+ s->r.width = frame->crop.width;
+ s->r.height = frame->crop.height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int gsc_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct gsc_frame *frame;
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_crop cr;
+ struct gsc_variant *variant = ctx->gsc_dev->variant;
+ int ret;
+
+ cr.type = s->type;
+ cr.c = s->r;
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ ret = gsc_try_crop(ctx, &cr);
+ if (ret)
+ return ret;
+
+ if (s->flags & V4L2_SEL_FLAG_LE &&
+ !is_rectangle_enclosed(&cr.c, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE &&
+ !is_rectangle_enclosed(&s->r, &cr.c))
+ return -ERANGE;
+
+ s->r = cr.c;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ frame = &ctx->s_frame;
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ frame = &ctx->d_frame;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Check to see if scaling ratio is within supported range */
+ if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = gsc_check_scaler_ratio(variant, cr.c.width,
+ cr.c.height, ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->gsc_ctrls.rotate->val, ctx->out_path);
+ } else {
+ ret = gsc_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height, cr.c.width,
+ cr.c.height, ctx->gsc_ctrls.rotate->val,
+ ctx->out_path);
+ }
+
+ if (ret) {
+ pr_err("Out of scaler range");
+ return -EINVAL;
+ }
+ }
+
+ frame->crop = cr.c;
+
+ gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
+ .vidioc_querycap = gsc_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = gsc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = gsc_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = gsc_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = gsc_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = gsc_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = gsc_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = gsc_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = gsc_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = gsc_m2m_reqbufs,
+ .vidioc_expbuf = gsc_m2m_expbuf,
+ .vidioc_querybuf = gsc_m2m_querybuf,
+ .vidioc_qbuf = gsc_m2m_qbuf,
+ .vidioc_dqbuf = gsc_m2m_dqbuf,
+ .vidioc_streamon = gsc_m2m_streamon,
+ .vidioc_streamoff = gsc_m2m_streamoff,
+ .vidioc_g_selection = gsc_m2m_g_selection,
+ .vidioc_s_selection = gsc_m2m_s_selection
+};
+
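+/* Initialize the source (output) and destination (capture) vb2 queues of a context. */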
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct gsc_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &gsc_m2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->gsc_dev->lock;
+ src_vq->dev = &ctx->gsc_dev->pdev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &gsc_m2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->gsc_dev->lock;
+ dst_vq->dev = &ctx->gsc_dev->pdev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int gsc_m2m_open(struct file *file)
+{
+ struct gsc_dev *gsc = video_drvdata(file);
+ struct gsc_ctx *ctx = NULL;
+ int ret;
+
+ pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);
+
+ if (mutex_lock_interruptible(&gsc->lock))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
+ ret = gsc_ctrls_create(ctx);
+ if (ret)
+ goto error_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->gsc_dev = gsc;
+ /* Default color format */
+ ctx->s_frame.fmt = get_format(0);
+ ctx->d_frame.fmt = get_format(0);
+ /* Setup the device context for mem2mem mode. */
+ ctx->state = GSC_CTX_M2M;
+ ctx->flags = 0;
+ ctx->in_path = GSC_DMA;
+ ctx->out_path = GSC_DMA;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ pr_err("Failed to initialize m2m context");
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_ctrls;
+ }
+
+ if (gsc->m2m.refcnt++ == 0)
+ set_bit(ST_M2M_OPEN, &gsc->state);
+
+ pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);
+
+ mutex_unlock(&gsc->lock);
+ return 0;
+
+error_ctrls:
+ gsc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+error_fh:
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+unlock:
+ mutex_unlock(&gsc->lock);
+ return ret;
+}
+
+static int gsc_m2m_release(struct file *file)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+
+ pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), gsc->state, gsc->m2m.refcnt);
+
+ mutex_lock(&gsc->lock);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ gsc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--gsc->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_OPEN, &gsc->state);
+ kfree(ctx);
+
+ mutex_unlock(&gsc->lock);
+ return 0;
+}
+
+static __poll_t gsc_m2m_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ __poll_t ret;
+
+ if (mutex_lock_interruptible(&gsc->lock))
+ return EPOLLERR;
+
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&gsc->lock);
+
+ return ret;
+}
+
+static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_dev *gsc = ctx->gsc_dev;
+ int ret;
+
+ if (mutex_lock_interruptible(&gsc->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ mutex_unlock(&gsc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations gsc_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = gsc_m2m_open,
+ .release = gsc_m2m_release,
+ .poll = gsc_m2m_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = gsc_m2m_mmap,
+};
+
+static const struct v4l2_m2m_ops gsc_m2m_ops = {
+ .device_run = gsc_m2m_device_run,
+ .job_abort = gsc_m2m_job_abort,
+};
+
+int gsc_register_m2m_device(struct gsc_dev *gsc)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ if (!gsc)
+ return -ENODEV;
+
+ pdev = gsc->pdev;
+
+ gsc->vdev.fops = &gsc_m2m_fops;
+ gsc->vdev.ioctl_ops = &gsc_m2m_ioctl_ops;
+ gsc->vdev.release = video_device_release_empty;
+ gsc->vdev.lock = &gsc->lock;
+ gsc->vdev.vfl_dir = VFL_DIR_M2M;
+ gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
+ snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
+ GSC_MODULE_NAME, gsc->id);
+
+ video_set_drvdata(&gsc->vdev, gsc);
+
+ gsc->m2m.vfd = &gsc->vdev;
+ gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
+ if (IS_ERR(gsc->m2m.m2m_dev)) {
+ dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
+ return PTR_ERR(gsc->m2m.m2m_dev);
+ }
+
+ ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s(): failed to register video device\n", __func__);
+ goto err_m2m_release;
+ }
+
+ pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(gsc->m2m.m2m_dev);
+
+ return ret;
+}
+
+void gsc_unregister_m2m_device(struct gsc_dev *gsc)
+{
+ if (gsc) {
+ v4l2_m2m_release(gsc->m2m.m2m_dev);
+ video_unregister_device(&gsc->vdev);
+ }
+}
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.c b/drivers/media/platform/exynos-gsc/gsc-regs.c
new file mode 100644
index 000000000..ce12a1100
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung EXYNOS5 SoC series G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "gsc-core.h"
+
+void gsc_hw_set_sw_reset(struct gsc_dev *dev)
+{
+ writel(GSC_SW_RESET_SRESET, dev->regs + GSC_SW_RESET);
+}
+
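+/* Poll until the S/W reset completes (the register reads back zero); give up after 50 ms. */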
+int gsc_wait_reset(struct gsc_dev *dev)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(50);
+ u32 cfg;
+
+ while (time_before(jiffies, end)) {
+ cfg = readl(dev->regs + GSC_SW_RESET);
+ if (!cfg)
+ return 0;
+ usleep_range(10, 20);
+ }
+
+ return -EBUSY;
+}
+
+void gsc_hw_set_frm_done_irq_mask(struct gsc_dev *dev, bool mask)
+{
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IRQ);
+ if (mask)
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ writel(cfg, dev->regs + GSC_IRQ);
+}
+
+void gsc_hw_set_gsc_irq_enable(struct gsc_dev *dev, bool mask)
+{
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IRQ);
+ if (mask)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+ writel(cfg, dev->regs + GSC_IRQ);
+}
+
+void gsc_hw_set_input_buf_masking(struct gsc_dev *dev, u32 shift,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + GSC_IN_BASE_ADDR_Y_MASK);
+ u32 mask = 1 << shift;
+
+ cfg &= ~mask;
+ cfg |= enable << shift;
+
+ writel(cfg, dev->regs + GSC_IN_BASE_ADDR_Y_MASK);
+ writel(cfg, dev->regs + GSC_IN_BASE_ADDR_CB_MASK);
+ writel(cfg, dev->regs + GSC_IN_BASE_ADDR_CR_MASK);
+}
+
+void gsc_hw_set_output_buf_masking(struct gsc_dev *dev, u32 shift,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + GSC_OUT_BASE_ADDR_Y_MASK);
+ u32 mask = 1 << shift;
+
+ cfg &= ~mask;
+ cfg |= enable << shift;
+
+ writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_Y_MASK);
+ writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_CB_MASK);
+ writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_CR_MASK);
+}
+
+void gsc_hw_set_input_addr(struct gsc_dev *dev, struct gsc_addr *addr,
+ int index)
+{
+ pr_debug("src_buf[%d]: %pad, cb: %pad, cr: %pad", index,
+ &addr->y, &addr->cb, &addr->cr);
+ writel(addr->y, dev->regs + GSC_IN_BASE_ADDR_Y(index));
+ writel(addr->cb, dev->regs + GSC_IN_BASE_ADDR_CB(index));
+ writel(addr->cr, dev->regs + GSC_IN_BASE_ADDR_CR(index));
+}
+
+void gsc_hw_set_output_addr(struct gsc_dev *dev,
+ struct gsc_addr *addr, int index)
+{
+ pr_debug("dst_buf[%d]: %pad, cb: %pad, cr: %pad",
+ index, &addr->y, &addr->cb, &addr->cr);
+ writel(addr->y, dev->regs + GSC_OUT_BASE_ADDR_Y(index));
+ writel(addr->cb, dev->regs + GSC_OUT_BASE_ADDR_CB(index));
+ writel(addr->cr, dev->regs + GSC_OUT_BASE_ADDR_CR(index));
+}
+
+void gsc_hw_set_input_path(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+
+ u32 cfg = readl(dev->regs + GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+
+ if (ctx->in_path == GSC_DMA)
+ cfg |= GSC_IN_PATH_MEMORY;
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_in_size(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->s_frame;
+ u32 cfg;
+
+ /* Set input pixel offset */
+ cfg = GSC_SRCIMG_OFFSET_X(frame->crop.left);
+ cfg |= GSC_SRCIMG_OFFSET_Y(frame->crop.top);
+ writel(cfg, dev->regs + GSC_SRCIMG_OFFSET);
+
+ /* Set input original size */
+ cfg = GSC_SRCIMG_WIDTH(frame->f_width);
+ cfg |= GSC_SRCIMG_HEIGHT(frame->f_height);
+ writel(cfg, dev->regs + GSC_SRCIMG_SIZE);
+
+ /* Set input cropped size */
+ cfg = GSC_CROPPED_WIDTH(frame->crop.width);
+ cfg |= GSC_CROPPED_HEIGHT(frame->crop.height);
+ writel(cfg, dev->regs + GSC_CROPPED_SIZE);
+}
+
+void gsc_hw_set_in_image_rgb(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->s_frame;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IN_CON);
+ if (frame->colorspace == V4L2_COLORSPACE_REC709)
+ cfg |= GSC_IN_RGB_HD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_SD_WIDE;
+
+ if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB565X)
+ cfg |= GSC_IN_RGB565;
+ else if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB32)
+ cfg |= GSC_IN_XRGB8888;
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_in_image_format(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->s_frame;
+ u32 i, depth = 0;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE);
+ writel(cfg, dev->regs + GSC_IN_CON);
+
+ if (is_rgb(frame->fmt->color)) {
+ gsc_hw_set_in_image_rgb(ctx);
+ return;
+ }
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ depth += frame->fmt->depth[i];
+
+ switch (frame->fmt->num_comp) {
+ case 1:
+ cfg |= GSC_IN_YUV422_1P;
+ if (frame->fmt->yorder == GSC_LSB_Y)
+ cfg |= GSC_IN_YUV422_1P_ORDER_LSB_Y;
+ else
+ cfg |= GSC_IN_YUV422_1P_OEDER_LSB_C;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_IN_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_IN_CHROMA_ORDER_CRCB;
+ break;
+ case 2:
+ if (depth == 12)
+ cfg |= GSC_IN_YUV420_2P;
+ else
+ cfg |= GSC_IN_YUV422_2P;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_IN_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_IN_CHROMA_ORDER_CRCB;
+ break;
+ case 3:
+ if (depth == 12)
+ cfg |= GSC_IN_YUV420_3P;
+ else
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ }
+
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE;
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_output_path(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+
+ u32 cfg = readl(dev->regs + GSC_OUT_CON);
+ cfg &= ~GSC_OUT_PATH_MASK;
+
+ if (ctx->out_path == GSC_DMA)
+ cfg |= GSC_OUT_PATH_MEMORY;
+ else
+ cfg |= GSC_OUT_PATH_LOCAL;
+
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
+void gsc_hw_set_out_size(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ /* Set output original size */
+ if (ctx->out_path == GSC_DMA) {
+ cfg = GSC_DSTIMG_OFFSET_X(frame->crop.left);
+ cfg |= GSC_DSTIMG_OFFSET_Y(frame->crop.top);
+ writel(cfg, dev->regs + GSC_DSTIMG_OFFSET);
+
+ cfg = GSC_DSTIMG_WIDTH(frame->f_width);
+ cfg |= GSC_DSTIMG_HEIGHT(frame->f_height);
+ writel(cfg, dev->regs + GSC_DSTIMG_SIZE);
+ }
+
+ /* Set output scaled size */
+ if (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270) {
+ cfg = GSC_SCALED_WIDTH(frame->crop.height);
+ cfg |= GSC_SCALED_HEIGHT(frame->crop.width);
+ } else {
+ cfg = GSC_SCALED_WIDTH(frame->crop.width);
+ cfg |= GSC_SCALED_HEIGHT(frame->crop.height);
+ }
+ writel(cfg, dev->regs + GSC_SCALED_SIZE);
+}
+
+void gsc_hw_set_out_image_rgb(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_OUT_CON);
+ if (frame->colorspace == V4L2_COLORSPACE_REC709)
+ cfg |= GSC_OUT_RGB_HD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_SD_WIDE;
+
+ if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB565X)
+ cfg |= GSC_OUT_RGB565;
+ else if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB32)
+ cfg |= GSC_OUT_XRGB8888;
+
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
+void gsc_hw_set_out_image_format(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 i, depth = 0;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_TILE_TYPE_MASK | GSC_OUT_TILE_MODE);
+ writel(cfg, dev->regs + GSC_OUT_CON);
+
+ if (is_rgb(frame->fmt->color)) {
+ gsc_hw_set_out_image_rgb(ctx);
+ return;
+ }
+
+ if (ctx->out_path != GSC_DMA) {
+ cfg |= GSC_OUT_YUV444;
+ goto end_set;
+ }
+
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ depth += frame->fmt->depth[i];
+
+ switch (frame->fmt->num_comp) {
+ case 1:
+ cfg |= GSC_OUT_YUV422_1P;
+ if (frame->fmt->yorder == GSC_LSB_Y)
+ cfg |= GSC_OUT_YUV422_1P_ORDER_LSB_Y;
+ else
+ cfg |= GSC_OUT_YUV422_1P_OEDER_LSB_C;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_OUT_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_OUT_CHROMA_ORDER_CRCB;
+ break;
+ case 2:
+ if (depth == 12)
+ cfg |= GSC_OUT_YUV420_2P;
+ else
+ cfg |= GSC_OUT_YUV422_2P;
+ if (frame->fmt->corder == GSC_CBCR)
+ cfg |= GSC_OUT_CHROMA_ORDER_CBCR;
+ else
+ cfg |= GSC_OUT_CHROMA_ORDER_CRCB;
+ break;
+ case 3:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ }
+
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE;
+
+end_set:
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
+void gsc_hw_set_prescaler(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_scaler *sc = &ctx->scaler;
+ u32 cfg;
+
+ cfg = GSC_PRESC_SHFACTOR(sc->pre_shfactor);
+ cfg |= GSC_PRESC_H_RATIO(sc->pre_hratio);
+ cfg |= GSC_PRESC_V_RATIO(sc->pre_vratio);
+ writel(cfg, dev->regs + GSC_PRE_SCALE_RATIO);
+}
+
+void gsc_hw_set_mainscaler(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_scaler *sc = &ctx->scaler;
+ u32 cfg;
+
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ writel(cfg, dev->regs + GSC_MAIN_H_RATIO);
+
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ writel(cfg, dev->regs + GSC_MAIN_V_RATIO);
+}
+
+void gsc_hw_set_rotation(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (ctx->gsc_ctrls.rotate->val) {
+ case 270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ case 180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case 90:
+ if (ctx->gsc_ctrls.hflip->val)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (ctx->gsc_ctrls.vflip->val)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case 0:
+ if (ctx->gsc_ctrls.hflip->val)
+ cfg |= GSC_IN_ROT_XFLIP;
+ else if (ctx->gsc_ctrls.vflip->val)
+ cfg |= GSC_IN_ROT_YFLIP;
+ }
+
+ writel(cfg, dev->regs + GSC_IN_CON);
+}
+
+void gsc_hw_set_global_alpha(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ struct gsc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ if (!is_rgb(frame->fmt->color)) {
+ pr_debug("Not a RGB format");
+ return;
+ }
+
+ cfg = readl(dev->regs + GSC_OUT_CON);
+ cfg &= ~GSC_OUT_GLOBAL_ALPHA_MASK;
+
+ cfg |= GSC_OUT_GLOBAL_ALPHA(ctx->gsc_ctrls.global_alpha->val);
+ writel(cfg, dev->regs + GSC_OUT_CON);
+}
+
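+/* Trigger a shadow (SFR) register update so the new settings take effect. */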
+void gsc_hw_set_sfr_update(struct gsc_ctx *ctx)
+{
+ struct gsc_dev *dev = ctx->gsc_dev;
+ u32 cfg;
+
+ cfg = readl(dev->regs + GSC_ENABLE);
+ cfg |= GSC_ENABLE_SFR_UPDATE;
+ writel(cfg, dev->regs + GSC_ENABLE);
+}
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.h b/drivers/media/platform/exynos-gsc/gsc-regs.h
new file mode 100644
index 000000000..4678f9a6a
--- /dev/null
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef REGS_GSC_H_
+#define REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_FRM_DONE_IRQ (1 << 16)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (2 << 14)
+#define GSC_IN_RGB_SD_NARROW (1 << 14)
+#define GSC_IN_RGB_SD_WIDE (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1c
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (2 << 10)
+#define GSC_OUT_RGB_SD_WIDE (1 << 10)
+#define GSC_OUT_RGB_SD_NARROW (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2c
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4c
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7c
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xac
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xb0 + (n) * 0x4)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10c
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15c
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1ac
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1b0 + (n) * 0x4)
+
+#endif /* REGS_GSC_H_ */
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
new file mode 100644
index 000000000..c8e5ad8f8
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -0,0 +1,81 @@
+
+config VIDEO_SAMSUNG_EXYNOS4_IS
+ tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ depends on OF && COMMON_CLK
+ select V4L2_FWNODE
+ help
+ Say Y here to enable camera host interface devices for
+ Samsung S5P and EXYNOS SoC series.
+
+if VIDEO_SAMSUNG_EXYNOS4_IS
+
+config VIDEO_EXYNOS4_IS_COMMON
+ tristate
+
+config VIDEO_S5P_FIMC
+ tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver"
+ depends on I2C
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select MFD_SYSCON
+ select VIDEO_EXYNOS4_IS_COMMON
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC camera host
+ interface and video postprocessor (FIMC) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-fimc.
+
+config VIDEO_S5P_MIPI_CSIS
+ tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
+ depends on REGULATOR
+ select GENERIC_PHY
+ select V4L2_FWNODE
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
+ receiver (MIPI-CSIS) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-csis.
+
+config VIDEO_EXYNOS_FIMC_LITE
+ tristate "EXYNOS FIMC-LITE camera interface driver"
+ depends on I2C
+ depends on SOC_EXYNOS4412 || SOC_EXYNOS5250 || COMPILE_TEST
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_EXYNOS4_IS_COMMON
+ help
+ This is a V4L2 driver for Samsung EXYNOS4/5 SoC FIMC-LITE camera
+ host interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exynos-fimc-lite.
+
+config VIDEO_EXYNOS4_FIMC_IS
+ tristate "EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver"
+ depends on I2C
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ depends on OF
+ select FW_LOADER
+ help
+ This is a V4L2 driver for Samsung EXYNOS4x12 SoC series
+ FIMC-IS (Imaging Subsystem).
+
+ To compile this driver as a module, choose M here: the
+ module will be called exynos4-fimc-is.
+
+config VIDEO_EXYNOS4_ISP_DMA_CAPTURE
+ bool "EXYNOS4x12 FIMC-IS ISP Direct DMA capture support"
+ depends on VIDEO_EXYNOS4_FIMC_IS
+ select VIDEO_EXYNOS4_IS_COMMON
+ default y
+ help
+ This option enables an additional video device node exposing a V4L2
+ video capture interface for the FIMC-IS ISP raw (Bayer) capture DMA.
+
+endif # VIDEO_SAMSUNG_EXYNOS4_IS
diff --git a/drivers/media/platform/exynos4-is/Makefile b/drivers/media/platform/exynos4-is/Makefile
new file mode 100644
index 000000000..a5ab01c73
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-m2m.o fimc-capture.o media-dev.o
+exynos-fimc-lite-objs += fimc-lite-reg.o fimc-lite.o
+s5p-csis-objs := mipi-csis.o
+exynos4-is-common-objs := common.o
+
+exynos-fimc-is-objs := fimc-is.o fimc-isp.o fimc-is-sensor.o fimc-is-regs.o
+exynos-fimc-is-objs += fimc-is-param.o fimc-is-errno.o fimc-is-i2c.o
+
+ifeq ($(CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE),y)
+exynos-fimc-is-objs += fimc-isp-video.o
+endif
+
+obj-$(CONFIG_VIDEO_S5P_MIPI_CSIS) += s5p-csis.o
+obj-$(CONFIG_VIDEO_EXYNOS_FIMC_LITE) += exynos-fimc-lite.o
+obj-$(CONFIG_VIDEO_EXYNOS4_FIMC_IS) += exynos-fimc-is.o
+obj-$(CONFIG_VIDEO_S5P_FIMC) += s5p-fimc.o
+obj-$(CONFIG_VIDEO_EXYNOS4_IS_COMMON) += exynos4-is-common.o
diff --git a/drivers/media/platform/exynos4-is/common.c b/drivers/media/platform/exynos4-is/common.c
new file mode 100644
index 000000000..b90f5bb15
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/common.c
@@ -0,0 +1,52 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC Camera Subsystem driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <media/drv-intf/exynos-fimc.h>
+#include "common.h"
+
+/* Called with the media graph mutex held or entity->stream_count > 0. */
+struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity)
+{
+ struct media_pad *pad = &entity->pads[0];
+ struct v4l2_subdev *sd;
+
+ while (pad->flags & MEDIA_PAD_FL_SINK) {
+ /* source pad */
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (sd->grp_id == GRP_ID_FIMC_IS_SENSOR ||
+ sd->grp_id == GRP_ID_SENSOR)
+ return sd;
+ /* sink pad */
+ pad = &sd->entity.pads[0];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(fimc_find_remote_sensor);
+
+void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap,
+ unsigned int caps)
+{
+ strlcpy(cap->driver, dev->driver->name, sizeof(cap->driver));
+ strlcpy(cap->card, dev->driver->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev_name(dev));
+ cap->device_caps = caps;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+}
+EXPORT_SYMBOL(__fimc_vidioc_querycap);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/exynos4-is/common.h b/drivers/media/platform/exynos4-is/common.h
new file mode 100644
index 000000000..75b9c71d9
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/common.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity);
+void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap,
+ unsigned int caps);
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
new file mode 100644
index 000000000..a3cdac188
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -0,0 +1,1919 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "common.h"
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "media-dev.h"
+
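+/*
+ * Program the camera input interface, the scaler and the output DMA
+ * according to the current capture context.
+ */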
+static int fimc_capture_hw_init(struct fimc_dev *fimc)
+{
+ struct fimc_source_info *si = &fimc->vid_cap.source_config;
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ int ret;
+ unsigned long flags;
+
+ if (ctx == NULL || ctx->s_frame.fmt == NULL)
+ return -EINVAL;
+
+ if (si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK) {
+ ret = fimc_hw_camblk_cfg_writeback(fimc);
+ if (ret < 0)
+ return ret;
+ }
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_set_yuv_order(ctx);
+
+ fimc_hw_set_camera_polarity(fimc, si);
+ fimc_hw_set_camera_type(fimc, si);
+ fimc_hw_set_camera_source(fimc, si);
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+ ret = fimc_set_scaler_info(ctx);
+ if (!ret) {
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_hw_set_output_path(ctx);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->drv_data->alpha_color)
+ fimc_hw_set_rgb_alpha(ctx);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start the streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ streaming = fimc->state & (1 << ST_CAPT_ISP_STREAM);
+
+ fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT |
+ 1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM);
+ if (suspend)
+ fimc->state |= (1 << ST_CAPT_SUSPENDED);
+ else
+ fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED);
+
+ /* Release unused buffers */
+ while (!suspend && !list_empty(&cap->pending_buf_q)) {
+ buf = fimc_pending_queue_pop(cap);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ /* If suspending, move the active buffers onto the pending queue */
+ while (!list_empty(&cap->active_buf_q)) {
+ buf = fimc_active_queue_pop(cap);
+ if (suspend)
+ fimc_pending_queue_add(cap, buf);
+ else
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ fimc_hw_reset(fimc);
+ cap->buf_index = 0;
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (streaming)
+ return fimc_pipeline_call(&cap->ve, set_stream, 0);
+ else
+ return 0;
+}
+
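+/*
+ * Shut capture down: signal the interrupt handler, wait up to 200 ms for it
+ * to acknowledge and then clean up the driver state.
+ */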
+static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
+{
+ unsigned long flags;
+
+ if (!fimc_capture_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_CAPT_SHUT, &fimc->state);
+ fimc_deactivate_capture(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_CAPT_SHUT, &fimc->state),
+ (2*HZ/10)); /* 200 ms */
+
+ return fimc_capture_state_cleanup(fimc, suspend);
+}
+
+/**
+ * fimc_capture_config_update - apply the camera interface configuration
+ * @ctx: FIMC capture context
+ *
+ * To be called from within the interrupt handler with fimc.slock
+ * spinlock held. It updates the camera pixel crop, rotation and
+ * image flip in H/W.
+ */
+static int fimc_capture_config_update(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+ ret = fimc_set_scaler_info(ctx);
+ if (ret)
+ return ret;
+
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->drv_data->alpha_color)
+ fimc_hw_set_rgb_alpha(ctx);
+
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ return ret;
+}
+
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_pipeline *p = to_fimc_pipeline(cap->ve.pipe);
+ struct v4l2_subdev *csis = p->subdevs[IDX_CSIS];
+ struct fimc_frame *f = &cap->ctx->d_frame;
+ struct fimc_vid_buffer *v_buf;
+
+ if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (!list_empty(&cap->active_buf_q) &&
+ test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
+ v_buf = fimc_active_queue_pop(cap);
+
+ v_buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ v_buf->vb.sequence = cap->frame_count++;
+
+ vb2_buffer_done(&v_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ if (!list_empty(&cap->pending_buf_q)) {
+
+ v_buf = fimc_pending_queue_pop(cap);
+ fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
+ v_buf->index = cap->buf_index;
+
+ /* Move the buffer to the capture active queue */
+ fimc_active_queue_add(cap, v_buf);
+
+ dbg("next frame: %d, done frame: %d",
+ fimc_hw_get_frame_index(fimc), v_buf->index);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ }
+ /*
+ * Set up a buffer at MIPI-CSIS if the current image format
+ * requires frame embedded data capture.
+ */
+ if (f->fmt->mdataplanes && !list_empty(&cap->active_buf_q)) {
+ unsigned int plane = ffs(f->fmt->mdataplanes) - 1;
+ unsigned int size = f->payload[plane];
+ s32 index = fimc_hw_get_frame_index(fimc);
+ void *vaddr;
+
+ list_for_each_entry(v_buf, &cap->active_buf_q, list) {
+ if (v_buf->index != index)
+ continue;
+ vaddr = vb2_plane_vaddr(&v_buf->vb.vb2_buf, plane);
+ v4l2_subdev_call(csis, video, s_rx_buffer,
+ vaddr, &size);
+ break;
+ }
+ }
+
+ if (cap->active_buf_cnt == 0) {
+ if (deq_buf)
+ clear_bit(ST_CAPT_RUN, &fimc->state);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ } else {
+ set_bit(ST_CAPT_RUN, &fimc->state);
+ }
+
+ if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ fimc_capture_config_update(cap->ctx);
+done:
+ if (cap->active_buf_cnt == 1) {
+ fimc_deactivate_capture(fimc);
+ clear_bit(ST_CAPT_STREAM, &fimc->state);
+ }
+
+ dbg("frame: %d, active_buf_cnt: %d",
+ fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ int min_bufs;
+ int ret;
+
+ vid_cap->frame_count = 0;
+
+ ret = fimc_capture_hw_init(fimc);
+ if (ret) {
+ fimc_capture_state_cleanup(fimc, false);
+ return ret;
+ }
+
+ set_bit(ST_CAPT_PEND, &fimc->state);
+
+ min_bufs = fimc->vid_cap.reqbufs_count > 1 ? 2 : 1;
+
+ if (vid_cap->active_buf_cnt >= min_bufs &&
+ !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+ fimc_activate_capture(ctx);
+
+ if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+ return fimc_pipeline_call(&vid_cap->ve, set_stream, 1);
+ }
+
+ return 0;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ if (!fimc_capture_active(fimc))
+ return;
+
+ fimc_stop_capture(fimc, false);
+}
+
+int fimc_capture_suspend(struct fimc_dev *fimc)
+{
+ bool suspend = fimc_capture_busy(fimc);
+
+ int ret = fimc_stop_capture(fimc, suspend);
+ if (ret)
+ return ret;
+ return fimc_pipeline_call(&fimc->vid_cap.ve, close);
+}
+
+static void buffer_queue(struct vb2_buffer *vb);
+
+int fimc_capture_resume(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vid_cap->ve;
+ struct fimc_vid_buffer *buf;
+ int i;
+
+ if (!test_and_clear_bit(ST_CAPT_SUSPENDED, &fimc->state))
+ return 0;
+
+ INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
+ vid_cap->buf_index = 0;
+ fimc_pipeline_call(ve, open, &ve->vdev.entity, false);
+ fimc_capture_hw_init(fimc);
+
+ clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ for (i = 0; i < vid_cap->reqbufs_count; i++) {
+ if (list_empty(&vid_cap->pending_buf_q))
+ break;
+ buf = fimc_pending_queue_pop(vid_cap);
+ buffer_queue(&buf->vb.vb2_buf);
+ }
+ return 0;
+}
+
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct fimc_ctx *ctx = vq->drv_priv;
+ struct fimc_frame *frame = &ctx->d_frame;
+ struct fimc_fmt *fmt = frame->fmt;
+ unsigned long wh = frame->f_width * frame->f_height;
+ int i;
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ if (*num_planes) {
+ if (*num_planes != fmt->memplanes)
+ return -EINVAL;
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < (wh * fmt->depth[i]) / 8)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+
+ if (fimc_fmt_is_user_defined(fmt->color))
+ sizes[i] = frame->payload[i];
+ else
+ sizes[i] = max_t(u32, size, frame->payload[i]);
+ }
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct fimc_ctx *ctx = vq->drv_priv;
+ int i;
+
+ if (ctx->d_frame.fmt == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) {
+ unsigned long size = ctx->d_frame.payload[i];
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(&ctx->fimc_dev->vid_cap.ve.vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
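+/*
+ * Queue a buffer: program it into the hardware directly when a slot is
+ * available, otherwise add it to the pending queue; start the pipeline once
+ * enough buffers are queued and the vb2 queue is streaming.
+ */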
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fimc_vid_buffer *buf
+ = container_of(vbuf, struct fimc_vid_buffer, vb);
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vid_cap->ve;
+ unsigned long flags;
+ int min_bufs;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_prepare_addr(ctx, &buf->vb.vb2_buf, &ctx->d_frame, &buf->paddr);
+
+ if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
+ !test_bit(ST_CAPT_STREAM, &fimc->state) &&
+ vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) {
+ /* Setup the buffer directly for processing. */
+ int buf_id = (vid_cap->reqbufs_count == 1) ? -1 :
+ vid_cap->buf_index;
+
+ fimc_hw_set_output_addr(fimc, &buf->paddr, buf_id);
+ buf->index = vid_cap->buf_index;
+ fimc_active_queue_add(vid_cap, buf);
+
+ if (++vid_cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ vid_cap->buf_index = 0;
+ } else {
+ fimc_pending_queue_add(vid_cap, buf);
+ }
+
+ min_bufs = vid_cap->reqbufs_count > 1 ? 2 : 1;
+
+ if (vb2_is_streaming(&vid_cap->vbq) &&
+ vid_cap->active_buf_cnt >= min_bufs &&
+ !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+ int ret;
+
+ fimc_activate_capture(ctx);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+ return;
+
+ ret = fimc_pipeline_call(ve, set_stream, 1);
+ if (ret < 0)
+ v4l2_err(&ve->vdev, "stream on failed: %d\n", ret);
+ return;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static const struct vb2_ops fimc_capture_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static int fimc_capture_set_default_format(struct fimc_dev *fimc);
+
+static int fimc_capture_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vc->ve;
+ int ret = -EBUSY;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ mutex_lock(&fimc->lock);
+
+ if (fimc_m2m_active(fimc))
+ goto unlock;
+
+ set_bit(ST_CAPT_BUSY, &fimc->state);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ goto unlock;
+ }
+
+ if (v4l2_fh_is_singular_file(file)) {
+ fimc_md_graph_lock(ve);
+
+ ret = fimc_pipeline_call(ve, open, &ve->vdev.entity, true);
+
+ if (ret == 0 && vc->user_subdev_api && vc->inh_sensor_ctrls) {
+ /*
+ * Recreate controls of the video node to drop
+ * any controls inherited from the sensor subdev.
+ */
+ fimc_ctrls_delete(vc->ctx);
+
+ ret = fimc_ctrls_create(vc->ctx);
+ if (ret == 0)
+ vc->inh_sensor_ctrls = false;
+ }
+ if (ret == 0)
+ ve->vdev.entity.use_count++;
+
+ fimc_md_graph_unlock(ve);
+
+ if (ret == 0)
+ ret = fimc_capture_set_default_format(fimc);
+
+ if (ret < 0) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ v4l2_fh_release(file);
+ }
+ }
+unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_capture_release(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ bool close = v4l2_fh_is_singular_file(file);
+ int ret;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ mutex_lock(&fimc->lock);
+
+ if (close && vc->streaming) {
+ media_pipeline_stop(&vc->ve.vdev.entity);
+ vc->streaming = false;
+ }
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (close) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ fimc_pipeline_call(&vc->ve, close);
+ clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ fimc_md_graph_lock(&vc->ve);
+ vc->ve.vdev.entity.use_count--;
+ fimc_md_graph_unlock(&vc->ve);
+ }
+
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations fimc_capture_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_capture_open,
+ .release = fimc_capture_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
+ u32 *width, u32 *height,
+ u32 *code, u32 *fourcc, int pad)
+{
+ bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
+ struct fimc_frame *dst = &ctx->d_frame;
+ u32 depth, min_w, max_w, min_h, align_h = 3;
+ u32 mask = FMT_FLAGS_CAM;
+ struct fimc_fmt *ffmt;
+
+ /* Conversion from/to JPEG or User Defined format is not supported */
+ if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE &&
+ fimc_fmt_is_user_defined(ctx->s_frame.fmt->color))
+ *code = ctx->s_frame.fmt->mbus_code;
+
+ if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad == FIMC_SD_PAD_SOURCE)
+ mask |= FMT_FLAGS_M2M;
+
+ if (pad == FIMC_SD_PAD_SINK_FIFO)
+ mask = FMT_FLAGS_WRITEBACK;
+
+ ffmt = fimc_find_format(fourcc, code, mask, 0);
+ if (WARN_ON(!ffmt))
+ return NULL;
+
+ if (code)
+ *code = ffmt->mbus_code;
+ if (fourcc)
+ *fourcc = ffmt->fourcc;
+
+ if (pad != FIMC_SD_PAD_SOURCE) {
+ max_w = fimc_fmt_is_user_defined(ffmt->color) ?
+ pl->scaler_dis_w : pl->scaler_en_w;
+ /* Apply the camera input interface pixel constraints */
+ v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4,
+ height, max_t(u32, *height, 32),
+ FIMC_CAMIF_MAX_HEIGHT,
+ fimc_fmt_is_user_defined(ffmt->color) ?
+ 3 : 1,
+ 0);
+ return ffmt;
+ }
+ /* Can't scale or crop in transparent (JPEG) transfer mode */
+ if (fimc_fmt_is_user_defined(ffmt->color)) {
+ *width = ctx->s_frame.f_width;
+ *height = ctx->s_frame.f_height;
+ return ffmt;
+ }
+ /* Apply the scaler and the output DMA constraints */
+ max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
+ if (ctx->state & FIMC_COMPOSE) {
+ min_w = dst->offs_h + dst->width;
+ min_h = dst->offs_v + dst->height;
+ } else {
+ min_w = var->min_out_pixsize;
+ min_h = var->min_out_pixsize;
+ }
+ if (var->min_vsize_align == 1 && !rotation)
+ align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
+
+ depth = fimc_get_format_depth(ffmt);
+ v4l_bound_align_image(width, min_w, max_w,
+ ffs(var->min_out_pixsize) - 1,
+ height, min_h, FIMC_CAMIF_MAX_HEIGHT,
+ align_h,
+ 64/(ALIGN(depth, 8)));
+
+ dbg("pad%d: code: 0x%x, %dx%d. dst fmt: %dx%d",
+ pad, code ? *code : 0, *width, *height,
+ dst->f_width, dst->f_height);
+
+ return ffmt;
+}
+
+static void fimc_capture_try_selection(struct fimc_ctx *ctx,
+ struct v4l2_rect *r,
+ int target)
+{
+ bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
+ struct fimc_frame *sink = &ctx->s_frame;
+ u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
+ u32 align_sz = 0, align_h = 4;
+ u32 max_sc_h, max_sc_v;
+
+ /* In JPEG transparent transfer mode cropping is not supported */
+ if (fimc_fmt_is_user_defined(ctx->d_frame.fmt->color)) {
+ r->width = sink->f_width;
+ r->height = sink->f_height;
+ r->left = r->top = 0;
+ return;
+ }
+ if (target == V4L2_SEL_TGT_COMPOSE) {
+ u32 tmp_min_h = ffs(sink->width) - 3;
+ u32 tmp_min_v = ffs(sink->height) - 1;
+
+ if (ctx->rotation != 90 && ctx->rotation != 270)
+ align_h = 1;
+ max_sc_h = min(SCALER_MAX_HRATIO, 1 << tmp_min_h);
+ max_sc_v = min(SCALER_MAX_VRATIO, 1 << tmp_min_v);
+ min_sz = var->min_out_pixsize;
+ } else {
+ u32 depth = fimc_get_format_depth(sink->fmt);
+ align_sz = 64/ALIGN(depth, 8);
+ min_sz = var->min_inp_pixsize;
+ min_w = min_h = min_sz;
+ max_sc_h = max_sc_v = 1;
+ }
+ /*
+ * For the compose rectangle the following constraints must be met:
+ * - it must fit in the sink pad format rectangle (f_width/f_height);
+ * - maximum downscaling ratio is 64;
+ * - maximum crop size depends if the rotator is used or not;
+ * - the sink pad format width/height must be 4 multiple of the
+ * prescaler ratios determined by sink pad size and source pad crop,
+ * the prescaler ratio is returned by fimc_get_scaler_factor().
+ */
+ max_w = min_t(u32,
+ rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
+ rotate ? sink->f_height : sink->f_width);
+ max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
+
+ if (target == V4L2_SEL_TGT_COMPOSE) {
+ min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
+ min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
+ if (rotate) {
+ swap(max_sc_h, max_sc_v);
+ swap(min_w, min_h);
+ }
+ }
+ v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1,
+ &r->height, min_h, max_h, align_h,
+ align_sz);
+ /* Adjust left/top if crop/compose rectangle is out of bounds */
+ r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width);
+ r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height);
+ r->left = round_down(r->left, var->hor_offs_align);
+
+ dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d",
+ target, r->left, r->top, r->width, r->height,
+ sink->f_width, sink->f_height);
+}
+
+/*
+ * The video node ioctl operations
+ */
+static int fimc_cap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ __fimc_vidioc_querycap(&fimc->pdev->dev, cap, V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE);
+ return 0;
+}
+
+static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M,
+ f->index);
+ if (!fmt)
+ return -EINVAL;
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ if (fmt->mbus_code == MEDIA_BUS_FMT_JPEG_1X8)
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+ return 0;
+}
+
+static struct media_entity *fimc_pipeline_get_head(struct media_entity *me)
+{
+ struct media_pad *pad = &me->pads[0];
+
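+ /*
+ * Walk the links upstream from pad 0 until an entity whose first
+ * pad is a source pad, i.e. the head of the pipeline, is reached.
+ */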
+ while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
+ pad = media_entity_remote_pad(pad);
+ if (!pad)
+ break;
+ me = pad->entity;
+ pad = &me->pads[0];
+ }
+
+ return me;
+}
+
+/**
+ * fimc_pipeline_try_format - negotiate and/or set formats at pipeline
+ * elements
+ * @ctx: FIMC capture context
+ * @tfmt: media bus format to try/set on subdevs
+ * @fmt_id: fimc pixel format id corresponding to returned @tfmt (output)
+ * @set: true to set format on subdevs, false to try only
+ */
+static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
+ struct v4l2_mbus_framefmt *tfmt,
+ struct fimc_fmt **fmt_id,
+ bool set)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_pipeline *p = to_fimc_pipeline(fimc->vid_cap.ve.pipe);
+ struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
+ struct v4l2_subdev_format sfmt;
+ struct v4l2_mbus_framefmt *mf = &sfmt.format;
+ struct media_entity *me;
+ struct fimc_fmt *ffmt;
+ struct media_pad *pad;
+ int ret, i = 1;
+ u32 fcc;
+
+ if (WARN_ON(!sd || !tfmt))
+ return -EINVAL;
+
+ memset(&sfmt, 0, sizeof(sfmt));
+ sfmt.format = *tfmt;
+ sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY;
+
+ me = fimc_pipeline_get_head(&sd->entity);
+
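+ /* Iterate over the FIMC camera formats until the whole pipeline accepts one */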
+ while (1) {
+ ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL,
+ FMT_FLAGS_CAM, i++);
+ if (ffmt == NULL) {
+ /*
+ * There is no pixel code common to the host and the
+ * sensor; report the error to user space.
+ */
+ return -EINVAL;
+ }
+ mf->code = tfmt->code = ffmt->mbus_code;
+
+ /* set format on all pipeline subdevs */
+ while (me != &fimc->vid_cap.subdev.entity) {
+ sd = media_entity_to_v4l2_subdev(me);
+
+ sfmt.pad = 0;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
+ if (ret)
+ return ret;
+
+ if (me->pads[0].flags & MEDIA_PAD_FL_SINK) {
+ sfmt.pad = me->num_pads - 1;
+ mf->code = tfmt->code;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL,
+ &sfmt);
+ if (ret)
+ return ret;
+ }
+
+ pad = media_entity_remote_pad(&me->pads[sfmt.pad]);
+ if (!pad)
+ return -EINVAL;
+ me = pad->entity;
+ }
+
+ if (mf->code != tfmt->code)
+ continue;
+
+ fcc = ffmt->fourcc;
+ tfmt->width = mf->width;
+ tfmt->height = mf->height;
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SINK_CAM);
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SOURCE);
+ if (ffmt && ffmt->mbus_code)
+ mf->code = ffmt->mbus_code;
+ if (mf->width != tfmt->width || mf->height != tfmt->height)
+ continue;
+ tfmt->code = mf->code;
+ break;
+ }
+
+ if (fmt_id && ffmt)
+ *fmt_id = ffmt;
+ *tfmt = *mf;
+
+ return 0;
+}
+
+/**
+ * fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters
+ * @sensor: pointer to the sensor subdev
+ * @plane_fmt: provides plane sizes corresponding to the frame layout entries
+ * @num_planes: number of planes
+ * @try: true to set the frame parameters, false to query only
+ *
+ * This function is used by this driver only for compressed/blob data formats.
+ */
+static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor,
+ struct v4l2_plane_pix_format *plane_fmt,
+ unsigned int num_planes, bool try)
+{
+ struct v4l2_mbus_frame_desc fd;
+ int i, ret;
+ int pad;
+
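+ /* Initialize the frame descriptor entries with the current plane sizes */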
+ for (i = 0; i < num_planes; i++)
+ fd.entry[i].length = plane_fmt[i].sizeimage;
+
+ pad = sensor->entity.num_pads - 1;
+ if (try)
+ ret = v4l2_subdev_call(sensor, pad, set_frame_desc, pad, &fd);
+ else
+ ret = v4l2_subdev_call(sensor, pad, get_frame_desc, pad, &fd);
+
+ if (ret < 0)
+ return ret;
+
+ if (num_planes != fd.num_entries)
+ return -EINVAL;
+
+ for (i = 0; i < num_planes; i++)
+ plane_fmt[i].sizeimage = fd.entry[i].length;
+
+ if (fd.entry[0].length > FIMC_MAX_JPEG_BUF_SIZE) {
+ v4l2_err(sensor->v4l2_dev, "Unsupported buffer size: %u\n",
+ fd.entry[0].length);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ __fimc_get_format(&fimc->vid_cap.ctx->d_frame, f);
+ return 0;
+}
+
+/*
+ * Try or set format on the fimc.X.capture video node and additionally
+ * on the whole pipeline if @try is false.
+ * Locking: the caller must _not_ hold the graph mutex.
+ */
+static int __video_try_or_set_format(struct fimc_dev *fimc,
+ struct v4l2_format *f, bool try,
+ struct fimc_fmt **inp_fmt,
+ struct fimc_fmt **out_fmt)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct exynos_video_entity *ve = &vc->ve;
+ struct fimc_ctx *ctx = vc->ctx;
+ unsigned int width = 0, height = 0;
+ int ret = 0;
+
+ /* Pre-configure format at the camera input interface, for JPEG only */
+ if (fimc_jpeg_fourcc(pix->pixelformat)) {
+ fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SINK_CAM);
+ if (try) {
+ width = pix->width;
+ height = pix->height;
+ } else {
+ ctx->s_frame.f_width = pix->width;
+ ctx->s_frame.f_height = pix->height;
+ }
+ }
+
+ /* Try the format at the scaler and the DMA output */
+ *out_fmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
+ NULL, &pix->pixelformat,
+ FIMC_SD_PAD_SOURCE);
+ if (*out_fmt == NULL)
+ return -EINVAL;
+
+ /* Restore image width/height for JPEG (no resizing supported). */
+ if (try && fimc_jpeg_fourcc(pix->pixelformat)) {
+ pix->width = width;
+ pix->height = height;
+ }
+
+ /* Try to match format at the host and the sensor */
+ if (!vc->user_subdev_api) {
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_mbus_framefmt *mf;
+
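+ /* Use a scratch buffer for try only, otherwise update the camera input format */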
+ mf = try ? &mbus_fmt : &fimc->vid_cap.ci_fmt;
+
+ mf->code = (*out_fmt)->mbus_code;
+ mf->width = pix->width;
+ mf->height = pix->height;
+
+ fimc_md_graph_lock(ve);
+ ret = fimc_pipeline_try_format(ctx, mf, inp_fmt, try);
+ fimc_md_graph_unlock(ve);
+
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf->width;
+ pix->height = mf->height;
+ }
+
+ fimc_adjust_mplane_format(*out_fmt, pix->width, pix->height, pix);
+
+ if ((*out_fmt)->flags & FMT_FLAGS_COMPRESSED) {
+ struct v4l2_subdev *sensor;
+
+ fimc_md_graph_lock(ve);
+
+ sensor = __fimc_md_get_subdev(ve->pipe, IDX_SENSOR);
+ if (sensor)
+ fimc_get_sensor_frame_desc(sensor, pix->plane_fmt,
+ (*out_fmt)->memplanes, try);
+ else
+ ret = -EPIPE;
+
+ fimc_md_graph_unlock(ve);
+ }
+
+ return ret;
+}
+
+static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_fmt *out_fmt = NULL, *inp_fmt = NULL;
+
+ return __video_try_or_set_format(fimc, f, true, &inp_fmt, &out_fmt);
+}
+
+static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx,
+ enum fimc_color_fmt color)
+{
+ bool jpeg = fimc_fmt_is_user_defined(color);
+
+ ctx->scaler.enabled = !jpeg;
+ fimc_ctrls_activate(ctx, !jpeg);
+
+ if (jpeg)
+ set_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
+ else
+ clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
+}
+
+static int __fimc_capture_set_format(struct fimc_dev *fimc,
+ struct v4l2_format *f)
+{
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct fimc_ctx *ctx = vc->ctx;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_frame *ff = &ctx->d_frame;
+ struct fimc_fmt *inp_fmt = NULL;
+ int ret, i;
+
+ if (vb2_is_busy(&fimc->vid_cap.vbq))
+ return -EBUSY;
+
+ ret = __video_try_or_set_format(fimc, f, false, &inp_fmt, &ff->fmt);
+ if (ret < 0)
+ return ret;
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ for (i = 0; i < ff->fmt->memplanes; i++) {
+ ff->bytesperline[i] = pix->plane_fmt[i].bytesperline;
+ ff->payload[i] = pix->plane_fmt[i].sizeimage;
+ }
+
+ set_frame_bounds(ff, pix->width, pix->height);
+ /* Reset the composition rectangle if not yet configured */
+ if (!(ctx->state & FIMC_COMPOSE))
+ set_frame_crop(ff, 0, 0, pix->width, pix->height);
+
+ fimc_capture_mark_jpeg_xfer(ctx, ff->fmt->color);
+
+ /* Reset cropping and set format at the camera interface input */
+ if (!vc->user_subdev_api) {
+ ctx->s_frame.fmt = inp_fmt;
+ set_frame_bounds(&ctx->s_frame, pix->width, pix->height);
+ set_frame_crop(&ctx->s_frame, 0, 0, pix->width, pix->height);
+ }
+
+ return ret;
+}
+
+static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ return __fimc_capture_set_format(fimc, f);
+}
+
+static int fimc_cap_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct exynos_video_entity *ve = &fimc->vid_cap.ve;
+ struct v4l2_subdev *sd;
+
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ fimc_md_graph_lock(ve);
+ sd = __fimc_md_get_subdev(ve->pipe, IDX_SENSOR);
+ fimc_md_graph_unlock(ve);
+
+ if (sd)
+ strlcpy(i->name, sd->name, sizeof(i->name));
+
+ return 0;
+}
+
+static int fimc_cap_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return i == 0 ? i : -EINVAL;
+}
+
+static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+/**
+ * fimc_pipeline_validate - check for format inconsistencies
+ * between the source and sink pads of each link
+ * @fimc: the FIMC device this context applies to
+ *
+ * Return 0 if all formats match or -EPIPE otherwise.
+ */
+static int fimc_pipeline_validate(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct v4l2_subdev *sd = &vc->subdev;
+ struct fimc_pipeline *p = to_fimc_pipeline(vc->ve.pipe);
+ struct media_pad *sink_pad, *src_pad;
+ int i, ret;
+
+ while (1) {
+ /*
+ * Find the current entity's sink pad and the remote source pad
+ * linked to it. We stop if the current entity has no sink pad
+ * or its sink pad is not linked to any remote entity.
+ */
+ src_pad = NULL;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct media_pad *p = &sd->entity.pads[i];
+
+ if (p->flags & MEDIA_PAD_FL_SINK) {
+ sink_pad = p;
+ src_pad = media_entity_remote_pad(sink_pad);
+ if (src_pad)
+ break;
+ }
+ }
+
+ if (!src_pad || !is_media_entity_v4l2_subdev(src_pad->entity))
+ break;
+
+ /* Don't call FIMC subdev operation to avoid nested locking */
+ if (sd == &vc->subdev) {
+ struct fimc_frame *ff = &vc->ctx->s_frame;
+ sink_fmt.format.width = ff->f_width;
+ sink_fmt.format.height = ff->f_height;
+ sink_fmt.format.code = ff->fmt ? ff->fmt->mbus_code : 0;
+ } else {
+ sink_fmt.pad = sink_pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+ }
+
+ /* Retrieve format at the source pad */
+ sd = media_entity_to_v4l2_subdev(src_pad->entity);
+ src_fmt.pad = src_pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+
+ if (sd == p->subdevs[IDX_SENSOR] &&
+ fimc_user_defined_mbus_fmt(src_fmt.format.code)) {
+ struct v4l2_plane_pix_format plane_fmt[FIMC_MAX_PLANES];
+ struct fimc_frame *frame = &vc->ctx->d_frame;
+ unsigned int i;
+
+ ret = fimc_get_sensor_frame_desc(sd, plane_fmt,
+ frame->fmt->memplanes,
+ false);
+ if (ret < 0)
+ return -EPIPE;
+
+ for (i = 0; i < frame->fmt->memplanes; i++)
+ if (frame->payload[i] < plane_fmt[i].sizeimage)
+ return -EPIPE;
+ }
+ }
+ return 0;
+}
+
+static int fimc_cap_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct media_entity *entity = &vc->ve.vdev.entity;
+ struct fimc_source_info *si = NULL;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (fimc_capture_active(fimc))
+ return -EBUSY;
+
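+ /* Claim the whole media pipeline for this video node before streaming */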
+ ret = media_pipeline_start(entity, &vc->ve.pipe->mp);
+ if (ret < 0)
+ return ret;
+
+ sd = __fimc_md_get_subdev(vc->ve.pipe, IDX_SENSOR);
+ if (sd)
+ si = v4l2_get_subdev_hostdata(sd);
+
+ if (si == NULL) {
+ ret = -EPIPE;
+ goto err_p_stop;
+ }
+ /*
+ * Save configuration data related to the currently attached image
+ * sensor or other data source, e.g. FIMC-IS.
+ */
+ vc->source_config = *si;
+
+ if (vc->input == GRP_ID_FIMC_IS)
+ vc->source_config.fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
+
+ if (vc->user_subdev_api) {
+ ret = fimc_pipeline_validate(fimc);
+ if (ret < 0)
+ goto err_p_stop;
+ }
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ if (!ret) {
+ vc->streaming = true;
+ return ret;
+ }
+
+err_p_stop:
+ media_pipeline_stop(entity);
+ return ret;
+}
+
+static int fimc_cap_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ int ret;
+
+ ret = vb2_ioctl_streamoff(file, priv, type);
+ if (ret < 0)
+ return ret;
+
+ media_pipeline_stop(&vc->ve.vdev.entity);
+ vc->streaming = false;
+ return 0;
+}
+
+static int fimc_cap_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ int ret;
+
+ ret = vb2_ioctl_reqbufs(file, priv, reqbufs);
+
+ if (!ret)
+ fimc->vid_cap.reqbufs_count = reqbufs->count;
+
+ return ret;
+}
+
+static int fimc_cap_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *f = &ctx->s_frame;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
+ /* fall through */
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->o_width;
+ s->r.height = f->o_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ f = &ctx->d_frame;
+ /* fall through */
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = f->offs_h;
+ s->r.top = f->offs_v;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int fimc_cap_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct v4l2_rect rect = s->r;
+ struct fimc_frame *f;
+ unsigned long flags;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (s->target == V4L2_SEL_TGT_COMPOSE)
+ f = &ctx->d_frame;
+ else if (s->target == V4L2_SEL_TGT_CROP)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ fimc_capture_try_selection(ctx, &rect, s->target);
+
+ if (s->flags & V4L2_SEL_FLAG_LE &&
+ !enclosed_rectangle(&rect, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE &&
+ !enclosed_rectangle(&s->r, &rect))
+ return -ERANGE;
+
+ s->r = rect;
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(f, s->r.left, s->r.top, s->r.width,
+ s->r.height);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
+ .vidioc_querycap = fimc_cap_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_cap_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_cap_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_cap_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_cap_g_fmt_mplane,
+
+ .vidioc_reqbufs = fimc_cap_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+
+ .vidioc_streamon = fimc_cap_streamon,
+ .vidioc_streamoff = fimc_cap_streamoff,
+
+ .vidioc_g_selection = fimc_cap_g_selection,
+ .vidioc_s_selection = fimc_cap_s_selection,
+
+ .vidioc_enum_input = fimc_cap_enum_input,
+ .vidioc_s_input = fimc_cap_s_input,
+ .vidioc_g_input = fimc_cap_g_input,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct v4l2_subdev *sensor;
+
+ if (!is_media_entity_v4l2_subdev(remote->entity))
+ return -EINVAL;
+
+ if (WARN_ON(fimc == NULL))
+ return 0;
+
+ dbg("%s --> %s, flags: 0x%x. input: 0x%x",
+ local->entity->name, remote->entity->name, flags,
+ fimc->vid_cap.input);
+
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ fimc->vid_cap.input = 0;
+ return 0;
+ }
+
+ if (vc->input != 0)
+ return -EBUSY;
+
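+ /* Record the active input; only one enabled link is allowed at a time */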
+ vc->input = sd->grp_id;
+
+ if (vc->user_subdev_api || vc->inh_sensor_ctrls)
+ return 0;
+
+ /* Inherit V4L2 controls from the image sensor subdev. */
+ sensor = fimc_find_remote_sensor(&vc->subdev.entity);
+ if (sensor == NULL)
+ return 0;
+
+ return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler,
+ sensor->ctrl_handler, NULL);
+}
+
+static const struct media_entity_operations fimc_sd_media_ops = {
+ .link_setup = fimc_link_setup,
+};
+
+/**
+ * fimc_sensor_notify - v4l2_device notification from a sensor subdev
+ * @sd: pointer to a subdev generating the notification
+ * @notification: the notification type, must be S5P_FIMC_TX_END_NOTIFY
+ * @arg: pointer to an u32 type integer that stores the frame payload value
+ *
+ * The End Of Frame notification is sent by the sensor subdev in its still
+ * capture mode. If the sensor generates only a single VSYNC at the beginning
+ * of a frame transmission, FIMC does not issue the LastIrq (end of frame)
+ * interrupt, and this notification is used instead to complete the frame
+ * capture and return a buffer to user space. Subdev drivers should issue
+ * this notification from their last 'end of frame capture' interrupt.
+ */
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct fimc_source_info *si;
+ struct fimc_vid_buffer *buf;
+ struct fimc_md *fmd;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+
+ if (sd == NULL)
+ return;
+
+ si = v4l2_get_subdev_hostdata(sd);
+ fmd = entity_to_fimc_mdev(&sd->entity);
+
+ spin_lock_irqsave(&fmd->slock, flags);
+
+ fimc = si ? source_to_sensor_info(si)->host : NULL;
+
+ if (fimc && arg && notification == S5P_FIMC_TX_END_NOTIFY &&
+ test_bit(ST_CAPT_PEND, &fimc->state)) {
+ unsigned long irq_flags;
+ spin_lock_irqsave(&fimc->slock, irq_flags);
+ if (!list_empty(&fimc->vid_cap.active_buf_q)) {
+ buf = list_entry(fimc->vid_cap.active_buf_q.next,
+ struct fimc_vid_buffer, list);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0,
+ *((u32 *)arg));
+ }
+ fimc_capture_irq_handler(fimc, 1);
+ fimc_deactivate_capture(fimc);
+ spin_unlock_irqrestore(&fimc->slock, irq_flags);
+ }
+ spin_unlock_irqrestore(&fmd->slock, flags);
+}
+
+static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *ff = &ctx->s_frame;
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mf = &fmt->format;
+ mutex_lock(&fimc->lock);
+
+ switch (fmt->pad) {
+ case FIMC_SD_PAD_SOURCE:
+ if (!WARN_ON(ff->fmt == NULL))
+ mf->code = ff->fmt->mbus_code;
+ /* Sink pad's crop rectangle size */
+ mf->width = ff->width;
+ mf->height = ff->height;
+ break;
+ case FIMC_SD_PAD_SINK_FIFO:
+ *mf = fimc->vid_cap.wb_fmt;
+ break;
+ case FIMC_SD_PAD_SINK_CAM:
+ default:
+ *mf = fimc->vid_cap.ci_fmt;
+ break;
+ }
+
+ mutex_unlock(&fimc->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ return 0;
+}
+
+static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct fimc_ctx *ctx = vc->ctx;
+ struct fimc_frame *ff;
+ struct fimc_fmt *ffmt;
+
+ dbg("pad%d: code: 0x%x, %dx%d",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ if (fmt->pad == FIMC_SD_PAD_SOURCE && vb2_is_busy(&vc->vbq))
+ return -EBUSY;
+
+ mutex_lock(&fimc->lock);
+ ffmt = fimc_capture_try_format(ctx, &mf->width, &mf->height,
+ &mf->code, NULL, fmt->pad);
+ mutex_unlock(&fimc->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *mf = fmt->format;
+ return 0;
+ }
+ /* There must be a bug in the driver if this happens */
+ if (WARN_ON(ffmt == NULL))
+ return -EINVAL;
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ fimc_capture_mark_jpeg_xfer(ctx, ffmt->color);
+ if (fmt->pad == FIMC_SD_PAD_SOURCE) {
+ ff = &ctx->d_frame;
+ /* Sink pad's crop rectangle size */
+ mf->width = ctx->s_frame.width;
+ mf->height = ctx->s_frame.height;
+ } else {
+ ff = &ctx->s_frame;
+ }
+
+ mutex_lock(&fimc->lock);
+ set_frame_bounds(ff, mf->width, mf->height);
+
+ if (fmt->pad == FIMC_SD_PAD_SINK_FIFO)
+ vc->wb_fmt = *mf;
+ else if (fmt->pad == FIMC_SD_PAD_SINK_CAM)
+ vc->ci_fmt = *mf;
+
+ ff->fmt = ffmt;
+
+ /* Reset the crop rectangle if required. */
+ if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_COMPOSE)))
+ set_frame_crop(ff, 0, 0, mf->width, mf->height);
+
+ if (fmt->pad != FIMC_SD_PAD_SOURCE)
+ ctx->state &= ~FIMC_COMPOSE;
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
+
+ if (sel->pad == FIMC_SD_PAD_SOURCE)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
+ /* fall through */
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ r->width = f->o_width;
+ r->height = f->o_height;
+ r->left = 0;
+ r->top = 0;
+ mutex_unlock(&fimc->lock);
+ return 0;
+
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *try_sel;
+ } else {
+ r->left = f->offs_h;
+ r->top = f->offs_v;
+ r->width = f->width;
+ r->height = f->height;
+ }
+
+ dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ sel->pad, r->left, r->top, r->width, r->height,
+ f->f_width, f->f_height);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
+ unsigned long flags;
+
+ if (sel->pad == FIMC_SD_PAD_SOURCE)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+ fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *try_sel = sel->r;
+ } else {
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(f, r->left, r->top, r->width, r->height);
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ if (sel->target == V4L2_SEL_TGT_COMPOSE)
+ ctx->state |= FIMC_COMPOSE;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+
+ dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
+ r->width, r->height);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
+ .enum_mbus_code = fimc_subdev_enum_mbus_code,
+ .get_selection = fimc_subdev_get_selection,
+ .set_selection = fimc_subdev_set_selection,
+ .get_fmt = fimc_subdev_get_fmt,
+ .set_fmt = fimc_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_ops fimc_subdev_ops = {
+ .pad = &fimc_subdev_pad_ops,
+};
+
+/* Set default format at the sensor and host interface */
+static int fimc_capture_set_default_format(struct fimc_dev *fimc)
+{
+ struct v4l2_format fmt = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ .fmt.pix_mp = {
+ .width = FIMC_DEFAULT_WIDTH,
+ .height = FIMC_DEFAULT_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
+ };
+
+ return __fimc_capture_set_format(fimc, &fmt);
+}
+
+/* fimc->lock must be already initialized */
+static int fimc_register_capture_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vfd = &fimc->vid_cap.ve.vdev;
+ struct vb2_queue *q = &fimc->vid_cap.vbq;
+ struct fimc_ctx *ctx;
+ struct fimc_vid_cap *vid_cap;
+ struct fimc_fmt *fmt;
+ int ret = -ENOMEM;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->fimc_dev = fimc;
+ ctx->in_path = FIMC_IO_CAMERA;
+ ctx->out_path = FIMC_IO_DMA;
+ ctx->state = FIMC_CTX_CAP;
+ ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+ ctx->d_frame.fmt = ctx->s_frame.fmt;
+
+ memset(vfd, 0, sizeof(*vfd));
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.capture", fimc->id);
+
+ vfd->fops = &fimc_capture_fops;
+ vfd->ioctl_ops = &fimc_capture_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->queue = q;
+ vfd->lock = &fimc->lock;
+
+ video_set_drvdata(vfd, fimc);
+ vid_cap = &fimc->vid_cap;
+ vid_cap->active_buf_cnt = 0;
+ vid_cap->reqbufs_count = 0;
+ vid_cap->ctx = ctx;
+
+ INIT_LIST_HEAD(&vid_cap->pending_buf_q);
+ INIT_LIST_HEAD(&vid_cap->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = ctx;
+ q->ops = &fimc_capture_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct fimc_vid_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &fimc->lock;
+ q->dev = &fimc->pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_free_ctx;
+
+ /* Default format configuration */
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+ vid_cap->ci_fmt.width = FIMC_DEFAULT_WIDTH;
+ vid_cap->ci_fmt.height = FIMC_DEFAULT_HEIGHT;
+ vid_cap->ci_fmt.code = fmt->mbus_code;
+
+ ctx->s_frame.width = FIMC_DEFAULT_WIDTH;
+ ctx->s_frame.height = FIMC_DEFAULT_HEIGHT;
+ ctx->s_frame.fmt = fmt;
+
+ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_WRITEBACK, 0);
+ vid_cap->wb_fmt = vid_cap->ci_fmt;
+ vid_cap->wb_fmt.code = fmt->mbus_code;
+
+ vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ vfd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ ret = media_entity_pads_init(&vfd->entity, 1, &vid_cap->vd_pad);
+ if (ret)
+ goto err_free_ctx;
+
+ ret = fimc_ctrls_create(ctx);
+ if (ret)
+ goto err_me_cleanup;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_ctrl_free;
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ vfd->ctrl_handler = &ctx->ctrls.handler;
+ return 0;
+
+err_ctrl_free:
+ fimc_ctrls_delete(ctx);
+err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+err_free_ctx:
+ kfree(ctx);
+ return ret;
+}
+
+static int fimc_capture_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (fimc == NULL)
+ return -ENXIO;
+
+ ret = fimc_register_m2m_device(fimc, sd->v4l2_dev);
+ if (ret)
+ return ret;
+
+ fimc->vid_cap.ve.pipe = v4l2_get_subdev_hostdata(sd);
+
+ ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
+ if (ret) {
+ fimc_unregister_m2m_device(fimc);
+ fimc->vid_cap.ve.pipe = NULL;
+ }
+
+ return ret;
+}
+
+static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ struct video_device *vdev;
+
+ if (fimc == NULL)
+ return;
+
+ mutex_lock(&fimc->lock);
+
+ fimc_unregister_m2m_device(fimc);
+ vdev = &fimc->vid_cap.ve.vdev;
+
+ if (video_is_registered(vdev)) {
+ video_unregister_device(vdev);
+ media_entity_cleanup(&vdev->entity);
+ fimc_ctrls_delete(fimc->vid_cap.ctx);
+ fimc->vid_cap.ve.pipe = NULL;
+ }
+ kfree(fimc->vid_cap.ctx);
+ fimc->vid_cap.ctx = NULL;
+
+ mutex_unlock(&fimc->lock);
+}
+
+static const struct v4l2_subdev_internal_ops fimc_capture_sd_internal_ops = {
+ .registered = fimc_capture_subdev_registered,
+ .unregistered = fimc_capture_subdev_unregistered,
+};
+
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->id);
+
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK_CAM].flags = MEDIA_PAD_FL_SINK;
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK_FIFO].flags = MEDIA_PAD_FL_SINK;
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->vid_cap.sd_pads);
+ if (ret)
+ return ret;
+
+ sd->entity.ops = &fimc_sd_media_ops;
+ sd->internal_ops = &fimc_capture_sd_internal_ops;
+ v4l2_set_subdevdata(sd, fimc);
+ return 0;
+}
+
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_set_subdevdata(sd, NULL);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
new file mode 100644
index 000000000..d8d8c9902
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -0,0 +1,1261 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (CAMIF) driver
+ *
+ * Copyright (C) 2010-2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/mfd/syscon.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "media-dev.h"
+
+static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
+ "sclk_fimc", "fimc"
+};
+
+static struct fimc_fmt fimc_formats[] = {
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = { 16 },
+ .color = FIMC_FMT_RGB565,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = { 32 },
+ .color = FIMC_FMT_RGB666,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "BGRA8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .depth = { 32 },
+ .color = FIMC_FMT_RGB888,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M | FMT_HAS_ALPHA,
+ }, {
+ .name = "ARGB1555",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .depth = { 16 },
+ .color = FIMC_FMT_RGB555,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
+ }, {
+ .name = "ARGB4444",
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .depth = { 16 },
+ .color = FIMC_FMT_RGB444,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
+ }, {
+ .name = "YUV 4:4:4",
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .flags = FMT_FLAGS_WRITEBACK,
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CBYCRY422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CRYCBY422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = { 12 },
+ .color = FIMC_FMT_YCBCR420,
+ .memplanes = 1,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = { 12 },
+ .color = FIMC_FMT_YCBCR420,
+ .memplanes = 1,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .color = FIMC_FMT_YCBCR420,
+ .depth = { 8, 4 },
+ .memplanes = 2,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .color = FIMC_FMT_YCBCR420,
+ .depth = { 8, 2, 2 },
+ .memplanes = 3,
+ .colplanes = 3,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "YUV 4:2:0 non-contig. 2p, tiled",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .color = FIMC_FMT_YCBCR420,
+ .depth = { 8, 4 },
+ .memplanes = 2,
+ .colplanes = 2,
+ .flags = FMT_FLAGS_M2M,
+ }, {
+ .name = "JPEG encoded data",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .color = FIMC_FMT_JPEG,
+ .depth = { 8 },
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
+ .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
+ }, {
+ .name = "S5C73MX interleaved UYVY/JPEG",
+ .fourcc = V4L2_PIX_FMT_S5C_UYVY_JPG,
+ .color = FIMC_FMT_YUYV_JPEG,
+ .depth = { 8 },
+ .memplanes = 2,
+ .colplanes = 1,
+ .mdataplanes = 0x2, /* plane 1 holds frame meta data */
+ .mbus_code = MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
+ .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
+ },
+};
+
+struct fimc_fmt *fimc_get_format(unsigned int index)
+{
+ if (index >= ARRAY_SIZE(fimc_formats))
+ return NULL;
+
+ return &fimc_formats[index];
+}
+
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+ int dw, int dh, int rotation)
+{
+ if (rotation == 90 || rotation == 270)
+ swap(dw, dh);
+
+ if (!ctx->scaler.enabled)
+ return (sw == dw && sh == dh) ? 0 : -EINVAL;
+
+ if ((sw >= SCALER_MAX_HRATIO * dw) || (sh >= SCALER_MAX_VRATIO * dh))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+ u32 sh = 6;
+
+ if (src >= 64 * tar)
+ return -EINVAL;
+
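+ /*
+ * Pick the largest power-of-two pre-scaler ratio (up to 32) that
+ * keeps the pre-scaled size at or above the target size.
+ */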
+ while (sh--) {
+ u32 tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
+ }
+ *shift = 0, *ratio = 1;
+ return 0;
+}
+
+int fimc_set_scaler_info(struct fimc_ctx *ctx)
+{
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
+ struct device *dev = &ctx->fimc_dev->pdev->dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ struct fimc_frame *s_frame = &ctx->s_frame;
+ struct fimc_frame *d_frame = &ctx->d_frame;
+ int tx, ty, sx, sy;
+ int ret;
+
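+ /* The rotator swaps the output width and height for 90 and 270 degree rotation */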
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ ty = d_frame->width;
+ tx = d_frame->height;
+ } else {
+ tx = d_frame->width;
+ ty = d_frame->height;
+ }
+ if (tx <= 0 || ty <= 0) {
+ dev_err(dev, "Invalid target size: %dx%d\n", tx, ty);
+ return -EINVAL;
+ }
+
+ sx = s_frame->width;
+ sy = s_frame->height;
+ if (sx <= 0 || sy <= 0) {
+ dev_err(dev, "Invalid source size: %dx%d\n", sx, sy);
+ return -EINVAL;
+ }
+ sc->real_width = sx;
+ sc->real_height = sy;
+
+ ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
+ if (ret)
+ return ret;
+
+ ret = fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor);
+ if (ret)
+ return ret;
+
+ sc->pre_dst_width = sx / sc->pre_hratio;
+ sc->pre_dst_height = sy / sc->pre_vratio;
+
+ if (variant->has_mainscaler_ext) {
+ sc->main_hratio = (sx << 14) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 14) / (ty << sc->vfactor);
+ } else {
+ sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+
+ }
+
+ sc->scaleup_h = (tx >= sx) ? 1 : 0;
+ sc->scaleup_v = (ty >= sy) ? 1 : 0;
+
+ /* Use copy mode only when the input and output size and format are identical */
+ if (s_frame->fmt->color == d_frame->fmt->color
+ && s_frame->width == d_frame->width
+ && s_frame->height == d_frame->height)
+ sc->copy_mode = 1;
+ else
+ sc->copy_mode = 0;
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *priv)
+{
+ struct fimc_dev *fimc = priv;
+ struct fimc_ctx *ctx;
+
+ fimc_hw_clear_irq(fimc);
+
+ spin_lock(&fimc->slock);
+
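+ /* A finished mem-to-mem job takes precedence over a pending capture frame */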
+ if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
+ if (test_and_clear_bit(ST_M2M_SUSPENDING, &fimc->state)) {
+ set_bit(ST_M2M_SUSPENDED, &fimc->state);
+ wake_up(&fimc->irq_queue);
+ goto out;
+ }
+ ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
+ if (ctx != NULL) {
+ spin_unlock(&fimc->slock);
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
+
+ if (ctx->state & FIMC_CTX_SHUT) {
+ ctx->state &= ~FIMC_CTX_SHUT;
+ wake_up(&fimc->irq_queue);
+ }
+ return IRQ_HANDLED;
+ }
+ } else if (test_bit(ST_CAPT_PEND, &fimc->state)) {
+ int last_buf = test_bit(ST_CAPT_JPEG, &fimc->state) &&
+ fimc->vid_cap.reqbufs_count == 1;
+ fimc_capture_irq_handler(fimc, !last_buf);
+ }
+out:
+ spin_unlock(&fimc->slock);
+ return IRQ_HANDLED;
+}
+
+/* The color format (colplanes, memplanes) must be already configured. */
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
+ struct fimc_frame *frame, struct fimc_addr *paddr)
+{
+ int ret = 0;
+ u32 pix_size;
+
+ if (vb == NULL || frame == NULL)
+ return -EINVAL;
+
+ pix_size = frame->width * frame->height;
+
+ dbg("memplanes= %d, colplanes= %d, pix_size= %d",
+ frame->fmt->memplanes, frame->fmt->colplanes, pix_size);
+
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
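+ /* With a single memory plane the Cb/Cr addresses are derived from the Y plane */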
+ if (frame->fmt->memplanes == 1) {
+ switch (frame->fmt->colplanes) {
+ case 1:
+ paddr->cb = 0;
+ paddr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ paddr->cb = (u32)(paddr->y + pix_size);
+ paddr->cr = 0;
+ break;
+ case 3:
+ paddr->cb = (u32)(paddr->y + pix_size);
+ /* decompose Y into Y/Cb/Cr */
+ if (FIMC_FMT_YCBCR420 == frame->fmt->color)
+ paddr->cr = (u32)(paddr->cb
+ + (pix_size >> 2));
+ else /* 422 */
+ paddr->cr = (u32)(paddr->cb
+ + (pix_size >> 1));
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (!frame->fmt->mdataplanes) {
+ if (frame->fmt->memplanes >= 2)
+ paddr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
+
+ if (frame->fmt->memplanes == 3)
+ paddr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
+ }
+
+ dbg("PHYS_ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
+ paddr->y, paddr->cb, paddr->cr, ret);
+
+ return ret;
+}
+
+/* Set order for 1 and 2 plane YCBCR 4:2:2 formats. */
+void fimc_set_yuv_order(struct fimc_ctx *ctx)
+{
+ /* The only mode supported by the SoC. */
+ ctx->in_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+ ctx->out_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+
+ /* Set order for 1 plane input formats. */
+ switch (ctx->s_frame.fmt->color) {
+ case FIMC_FMT_YCRYCB422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case FIMC_FMT_CBYCRY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case FIMC_FMT_CRYCBY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case FIMC_FMT_YCBYCR422:
+ default:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCBYCR;
+ break;
+ }
+ dbg("ctx->in_order_1p= %d", ctx->in_order_1p);
+
+ switch (ctx->d_frame.fmt->color) {
+ case FIMC_FMT_YCRYCB422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case FIMC_FMT_CBYCRY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case FIMC_FMT_CRYCBY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case FIMC_FMT_YCBYCR422:
+ default:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ }
+ dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
+}
+
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
+{
+ bool pix_hoff = ctx->fimc_dev->drv_data->dma_pix_hoff;
+ u32 i, depth = 0;
+
+ for (i = 0; i < f->fmt->memplanes; i++)
+ depth += f->fmt->depth[i];
+
+ f->dma_offset.y_h = f->offs_h;
+ if (!pix_hoff)
+ f->dma_offset.y_h *= (depth >> 3);
+
+ f->dma_offset.y_v = f->offs_v;
+
+ f->dma_offset.cb_h = f->offs_h;
+ f->dma_offset.cb_v = f->offs_v;
+
+ f->dma_offset.cr_h = f->offs_h;
+ f->dma_offset.cr_v = f->offs_v;
+
+ if (!pix_hoff) {
+ if (f->fmt->colplanes == 3) {
+ f->dma_offset.cb_h >>= 1;
+ f->dma_offset.cr_h >>= 1;
+ }
+ if (f->fmt->color == FIMC_FMT_YCBCR420) {
+ f->dma_offset.cb_v >>= 1;
+ f->dma_offset.cr_v >>= 1;
+ }
+ }
+
+ dbg("in_offset: color= %d, y_h= %d, y_v= %d",
+ f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
+}
+
+static int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx)
+{
+ struct fimc_effect *effect = &ctx->effect;
+
+ switch (colorfx) {
+ case V4L2_COLORFX_NONE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+ break;
+ case V4L2_COLORFX_BW:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 128;
+ effect->pat_cr = 128;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 115;
+ effect->pat_cr = 145;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_NEGATIVE;
+ break;
+ case V4L2_COLORFX_EMBOSS:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_EMBOSSING;
+ break;
+ case V4L2_COLORFX_ART_FREEZE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARTFREEZE;
+ break;
+ case V4L2_COLORFX_SILHOUETTE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_SILHOUETTE;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = ctx->ctrls.colorfx_cbcr->val >> 8;
+ effect->pat_cr = ctx->ctrls.colorfx_cbcr->val & 0xff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * V4L2 controls handling
+ */
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct fimc_ctx, ctrls.handler)
+
+static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *variant = fimc->variant;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+
+ case V4L2_CID_ROTATE:
+ if (fimc_capture_pending(fimc)) {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctrl->val);
+ if (ret)
+ return -EINVAL;
+ }
+ if ((ctrl->val == 90 || ctrl->val == 270) &&
+ !variant->has_out_rot)
+ return -EINVAL;
+
+ ctx->rotation = ctrl->val;
+ break;
+
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
+
+ case V4L2_CID_COLORFX:
+ ret = fimc_set_color_effect(ctx, ctrl->val);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ ctx->state |= FIMC_PARAMS;
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ return 0;
+}
+
+static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+ ret = __fimc_s_ctrl(ctx, ctrl);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
+ .s_ctrl = fimc_s_ctrl,
+};
+
+int fimc_ctrls_create(struct fimc_ctx *ctx)
+{
+ unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
+
+ if (ctx->ctrls.ready)
+ return 0;
+
+ v4l2_ctrl_handler_init(handler, 6);
+
+ ctrls->rotate = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctrls->hflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctx->fimc_dev->drv_data->alpha_color)
+ ctrls->alpha = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, max_alpha, 1, 0);
+ else
+ ctrls->alpha = NULL;
+
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x983f, V4L2_COLORFX_NONE);
+
+ ctrls->colorfx_cbcr = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+
+ if (!handler->error) {
+ v4l2_ctrl_cluster(2, &ctrls->colorfx);
+ ctrls->ready = true;
+ }
+
+ return handler->error;
+}
+
+void fimc_ctrls_delete(struct fimc_ctx *ctx)
+{
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+ if (ctrls->ready) {
+ v4l2_ctrl_handler_free(&ctrls->handler);
+ ctrls->ready = false;
+ ctrls->alpha = NULL;
+ }
+}
+
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
+{
+ unsigned int has_alpha = ctx->d_frame.fmt->flags & FMT_HAS_ALPHA;
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+ if (!ctrls->ready)
+ return;
+
+ mutex_lock(ctrls->handler.lock);
+ v4l2_ctrl_activate(ctrls->rotate, active);
+ v4l2_ctrl_activate(ctrls->hflip, active);
+ v4l2_ctrl_activate(ctrls->vflip, active);
+ v4l2_ctrl_activate(ctrls->colorfx, active);
+ if (ctrls->alpha)
+ v4l2_ctrl_activate(ctrls->alpha, active && has_alpha);
+
+ if (active) {
+ fimc_set_color_effect(ctx, ctrls->colorfx->cur.val);
+ ctx->rotation = ctrls->rotate->val;
+ ctx->hflip = ctrls->hflip->val;
+ ctx->vflip = ctrls->vflip->val;
+ } else {
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+ ctx->rotation = 0;
+ ctx->hflip = 0;
+ ctx->vflip = 0;
+ }
+ mutex_unlock(ctrls->handler.lock);
+}
+
+/* Update maximum value of the alpha color control */
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_ctrl *ctrl = ctx->ctrls.alpha;
+
+ if (ctrl == NULL || !fimc->drv_data->alpha_color)
+ return;
+
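+ /* Clamp the current value to the new maximum derived from the format's alpha mask */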
+ v4l2_ctrl_lock(ctrl);
+ ctrl->maximum = fimc_get_alpha_mask(ctx->d_frame.fmt);
+
+ if (ctrl->cur.val > ctrl->maximum)
+ ctrl->cur.val = ctrl->maximum;
+
+ v4l2_ctrl_unlock(ctrl);
+}
+
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ int i;
+
+ pixm->width = frame->o_width;
+ pixm->height = frame->o_height;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->pixelformat = frame->fmt->fourcc;
+ pixm->colorspace = V4L2_COLORSPACE_JPEG;
+ pixm->num_planes = frame->fmt->memplanes;
+
+ for (i = 0; i < pixm->num_planes; ++i) {
+ pixm->plane_fmt[i].bytesperline = frame->bytesperline[i];
+ pixm->plane_fmt[i].sizeimage = frame->payload[i];
+ }
+}
+
+/**
+ * fimc_adjust_mplane_format - adjust bytesperline/sizeimage for each plane
+ * @fmt: fimc pixel format description (input)
+ * @width: requested pixel width
+ * @height: requested pixel height
+ * @pix: multi-plane format to adjust
+ */
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+ struct v4l2_pix_format_mplane *pix)
+{
+ u32 bytesperline = 0;
+ int i;
+
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->field = V4L2_FIELD_NONE;
+ pix->num_planes = fmt->memplanes;
+ pix->pixelformat = fmt->fourcc;
+ pix->height = height;
+ pix->width = width;
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
+ u32 sizeimage;
+
+ if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
+ bpl = pix->width; /* Planar */
+
+ if (fmt->colplanes == 1 && /* Packed */
+ (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
+ bpl = (pix->width * fmt->depth[0]) / 8;
+ /*
+ * Currently bytesperline is the same for each plane, except for
+ * the V4L2_PIX_FMT_YUV420M format. This calculation may need to
+ * be changed when other multi-planar formats are added to the
+ * fimc_formats[] array.
+ */
+ if (i == 0)
+ bytesperline = bpl;
+ else if (i == 1 && fmt->memplanes == 3)
+ bytesperline /= 2;
+
+ plane_fmt->bytesperline = bytesperline;
+ sizeimage = pix->width * pix->height * fmt->depth[i] / 8;
+
+ /* Ensure full last row for tiled formats */
+ if (tiled_fmt(fmt)) {
+ /* 64 * 32 * plane_fmt->bytesperline / 64 */
+ u32 row_size = plane_fmt->bytesperline * 32;
+
+ sizeimage = roundup(sizeimage, row_size);
+ }
+
+ plane_fmt->sizeimage = max(sizeimage, plane_fmt->sizeimage);
+ }
+}
+
+/**
+ * fimc_find_format - lookup fimc color format by fourcc or media bus format
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @mask: the color flags to match
+ * @index: offset in the fimc_formats array, ignored if negative
+ */
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
+ unsigned int mask, int index)
+{
+ struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
+ fmt = &fimc_formats[i];
+ if (!(fmt->flags & mask))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+static void fimc_clk_put(struct fimc_dev *fimc)
+{
+ int i;
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
+ if (IS_ERR(fimc->clock[i]))
+ continue;
+ clk_unprepare(fimc->clock[i]);
+ clk_put(fimc->clock[i]);
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_clk_get(struct fimc_dev *fimc)
+{
+ int i, ret;
+
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++)
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
+ fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
+ if (IS_ERR(fimc->clock[i])) {
+ ret = PTR_ERR(fimc->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(fimc->clock[i]);
+ if (ret < 0) {
+ clk_put(fimc->clock[i]);
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ fimc_clk_put(fimc);
+ dev_err(&fimc->pdev->dev, "failed to get clock: %s\n",
+ fimc_clocks[i]);
+ return -ENXIO;
+}
+
+#ifdef CONFIG_PM
+static int fimc_m2m_suspend(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+ int timeout;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!fimc_m2m_pending(fimc)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &fimc->state);
+ set_bit(ST_M2M_SUSPENDING, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ timeout = wait_event_timeout(fimc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ clear_bit(ST_M2M_SUSPENDING, &fimc->state);
+ return timeout == 0 ? -EAGAIN : 0;
+}
+
+static int fimc_m2m_resume(struct fimc_dev *fimc)
+{
+ struct fimc_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ /* Clear for full H/W setup in first run after resume */
+ ctx = fimc->m2m.ctx;
+ fimc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct of_device_id fimc_of_match[];
+
+static int fimc_parse_dt(struct fimc_dev *fimc, u32 *clk_freq)
+{
+ struct device *dev = &fimc->pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *of_id;
+ struct fimc_variant *v;
+ struct fimc_pix_limit *lim;
+ u32 args[FIMC_PIX_LIMITS_MAX];
+ int ret;
+
+ if (of_property_read_bool(node, "samsung,lcd-wb"))
+ return -ENODEV;
+
+ v = devm_kzalloc(dev, sizeof(*v) + sizeof(*lim), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ of_id = of_match_node(fimc_of_match, node);
+ if (!of_id)
+ return -EINVAL;
+ fimc->drv_data = of_id->data;
+ ret = of_property_read_u32_array(node, "samsung,pix-limits",
+ args, FIMC_PIX_LIMITS_MAX);
+ if (ret < 0)
+ return ret;
+
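+ /* The pixel limit structure was allocated together with the variant, right after it */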
+ lim = (struct fimc_pix_limit *)&v[1];
+
+ lim->scaler_en_w = args[0];
+ lim->scaler_dis_w = args[1];
+ lim->out_rot_en_w = args[2];
+ lim->out_rot_dis_w = args[3];
+ v->pix_limit = lim;
+
+ ret = of_property_read_u32_array(node, "samsung,min-pix-sizes",
+ args, 2);
+ v->min_inp_pixsize = ret ? FIMC_DEF_MIN_SIZE : args[0];
+ v->min_out_pixsize = ret ? FIMC_DEF_MIN_SIZE : args[1];
+ ret = of_property_read_u32_array(node, "samsung,min-pix-alignment",
+ args, 2);
+ v->min_vsize_align = ret ? FIMC_DEF_HEIGHT_ALIGN : args[0];
+ v->hor_offs_align = ret ? FIMC_DEF_HOR_OFFS_ALIGN : args[1];
+
+ ret = of_property_read_u32(node, "samsung,rotators", &args[1]);
+ v->has_inp_rot = ret ? 1 : args[1] & 0x01;
+ v->has_out_rot = ret ? 1 : args[1] & 0x10;
+ v->has_mainscaler_ext = of_property_read_bool(node,
+ "samsung,mainscaler-ext");
+
+ v->has_isp_wb = of_property_read_bool(node, "samsung,isp-wb");
+ v->has_cam_if = of_property_read_bool(node, "samsung,cam-if");
+ of_property_read_u32(node, "clock-frequency", clk_freq);
+ fimc->id = of_alias_get_id(node, "fimc");
+
+ fimc->variant = v;
+ return 0;
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 lclk_freq = 0;
+ struct fimc_dev *fimc;
+ struct resource *res;
+ int ret = 0;
+
+ fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL);
+ if (!fimc)
+ return -ENOMEM;
+
+ fimc->pdev = pdev;
+
+ if (dev->of_node) {
+ ret = fimc_parse_dt(fimc, &lclk_freq);
+ if (ret < 0)
+ return ret;
+ } else {
+ fimc->drv_data = fimc_get_drvdata(pdev);
+ fimc->id = pdev->id;
+ }
+ if (!fimc->drv_data || fimc->id >= fimc->drv_data->num_entities ||
+ fimc->id < 0) {
+ dev_err(dev, "Invalid driver data or device id (%d)\n",
+ fimc->id);
+ return -EINVAL;
+ }
+ if (!dev->of_node)
+ fimc->variant = fimc->drv_data->variant[fimc->id];
+
+ init_waitqueue_head(&fimc->irq_queue);
+ spin_lock_init(&fimc->slock);
+ mutex_init(&fimc->lock);
+
+ fimc->sysreg = fimc_get_sysreg_regmap(dev->of_node);
+ if (IS_ERR(fimc->sysreg))
+ return PTR_ERR(fimc->sysreg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fimc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fimc->regs))
+ return PTR_ERR(fimc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "Failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ ret = fimc_clk_get(fimc);
+ if (ret)
+ return ret;
+
+ if (lclk_freq == 0)
+ lclk_freq = fimc->drv_data->lclk_frequency;
+
+ ret = clk_set_rate(fimc->clock[CLK_BUS], lclk_freq);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(fimc->clock[CLK_BUS]);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(dev, res->start, fimc_irq_handler,
+ 0, dev_name(dev), fimc);
+ if (ret < 0) {
+ dev_err(dev, "failed to install irq (%d)\n", ret);
+ goto err_sclk;
+ }
+
+ ret = fimc_initialize_capture_subdev(fimc);
+ if (ret < 0)
+ goto err_sclk;
+
+ platform_set_drvdata(pdev, fimc);
+ pm_runtime_enable(dev);
+
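+ /* Without runtime PM the gate clock must stay enabled permanently */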
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_enable(fimc->clock[CLK_GATE]);
+ if (ret < 0)
+ goto err_sd;
+ }
+
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ dev_dbg(dev, "FIMC.%d registered successfully\n", fimc->id);
+ return 0;
+
+err_sd:
+ fimc_unregister_capture_subdev(fimc);
+err_sclk:
+ clk_disable(fimc->clock[CLK_BUS]);
+ fimc_clk_put(fimc);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+ /* Enable clocks and perform basic initialization */
+ clk_enable(fimc->clock[CLK_GATE]);
+ fimc_hw_reset(fimc);
+
+ /* Resume the capture or mem-to-mem device */
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_resume(fimc);
+
+ return fimc_m2m_resume(fimc);
+}
+
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fimc_capture_busy(fimc))
+ ret = fimc_capture_suspend(fimc);
+ else
+ ret = fimc_m2m_suspend(fimc);
+ if (!ret)
+ clk_disable(fimc->clock[CLK_GATE]);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+ /* Do not resume if the device was idle before system suspend */
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+ (!fimc_m2m_active(fimc) && !fimc_capture_busy(fimc))) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ fimc_hw_reset(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_resume(fimc);
+
+ return fimc_m2m_resume(fimc);
+}
+
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_dev *fimc = dev_get_drvdata(dev);
+
+ dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+ if (test_and_set_bit(ST_LPM, &fimc->state))
+ return 0;
+ if (fimc_capture_busy(fimc))
+ return fimc_capture_suspend(fimc);
+
+ return fimc_m2m_suspend(fimc);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int fimc_remove(struct platform_device *pdev)
+{
+ struct fimc_dev *fimc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ clk_disable(fimc->clock[CLK_GATE]);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ fimc_unregister_capture_subdev(fimc);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+
+ clk_disable(fimc->clock[CLK_BUS]);
+ fimc_clk_put(fimc);
+
+ dev_info(&pdev->dev, "driver unloaded\n");
+ return 0;
+}
+
+/* Image pixel limits, similar across several FIMC HW revisions. */
+static const struct fimc_pix_limit s5p_pix_limit[4] = {
+ [0] = {
+ .scaler_en_w = 3264,
+ .scaler_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [1] = {
+ .scaler_en_w = 4224,
+ .scaler_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [2] = {
+ .scaler_en_w = 1920,
+ .scaler_dis_w = 8192,
+ .out_rot_en_w = 1280,
+ .out_rot_dis_w = 1920,
+ },
+};
+
+static const struct fimc_variant fimc0_variant_s5pv210 = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .has_cam_if = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .min_vsize_align = 16,
+ .pix_limit = &s5p_pix_limit[1],
+};
+
+static const struct fimc_variant fimc1_variant_s5pv210 = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .has_cam_if = 1,
+ .has_mainscaler_ext = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .min_vsize_align = 1,
+ .pix_limit = &s5p_pix_limit[2],
+};
+
+static const struct fimc_variant fimc2_variant_s5pv210 = {
+ .has_cam_if = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .min_vsize_align = 16,
+ .pix_limit = &s5p_pix_limit[2],
+};
+
+/* S5PV210, S5PC110 */
+static const struct fimc_drvdata fimc_drvdata_s5pv210 = {
+ .variant = {
+ [0] = &fimc0_variant_s5pv210,
+ [1] = &fimc1_variant_s5pv210,
+ [2] = &fimc2_variant_s5pv210,
+ },
+ .num_entities = 3,
+ .lclk_frequency = 166000000UL,
+ .out_buf_count = 4,
+ .dma_pix_hoff = 1,
+};
+
+/* EXYNOS4210, S5PV310, S5PC210 */
+static const struct fimc_drvdata fimc_drvdata_exynos4210 = {
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
+ .dma_pix_hoff = 1,
+ .cistatus2 = 1,
+ .alpha_color = 1,
+ .out_buf_count = 32,
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static const struct fimc_drvdata fimc_drvdata_exynos4x12 = {
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
+ .dma_pix_hoff = 1,
+ .cistatus2 = 1,
+ .alpha_color = 1,
+ .out_buf_count = 32,
+};
+
+static const struct of_device_id fimc_of_match[] = {
+ {
+ .compatible = "samsung,s5pv210-fimc",
+ .data = &fimc_drvdata_s5pv210,
+ }, {
+ .compatible = "samsung,exynos4210-fimc",
+ .data = &fimc_drvdata_exynos4210,
+ }, {
+ .compatible = "samsung,exynos4212-fimc",
+ .data = &fimc_drvdata_exynos4x12,
+ },
+ { /* sentinel */ },
+};
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+static struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = fimc_remove,
+ .driver = {
+ .of_match_table = fimc_of_match,
+ .name = FIMC_DRIVER_NAME,
+ .pm = &fimc_pm_ops,
+ }
+};
+
+int __init fimc_register_driver(void)
+{
+ return platform_driver_register(&fimc_driver);
+}
+
+void __exit fimc_unregister_driver(void)
+{
+ platform_driver_unregister(&fimc_driver);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
new file mode 100644
index 000000000..82d514df9
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -0,0 +1,725 @@
+/*
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_CORE_H_
+#define FIMC_CORE_H_
+
+/*#define DEBUG*/
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/io.h>
+#include <linux/sizes.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+#define dbg(fmt, args...) \
+ pr_debug("%s:%d: " fmt "\n", __func__, __LINE__, ##args)
+
+/* Time to wait for next frame VSYNC interrupt while stopping operation. */
+#define FIMC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
+#define MAX_FIMC_CLOCKS 2
+#define FIMC_DRIVER_NAME "exynos4-fimc"
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_OUT_BUFS 4
+#define SCALER_MAX_HRATIO 64
+#define SCALER_MAX_VRATIO 64
+#define DMA_MIN_SIZE 8
+#define FIMC_CAMIF_MAX_HEIGHT 0x2000
+#define FIMC_MAX_JPEG_BUF_SIZE (10 * SZ_1M)
+#define FIMC_MAX_PLANES 3
+#define FIMC_PIX_LIMITS_MAX 4
+#define FIMC_DEF_MIN_SIZE 16
+#define FIMC_DEF_HEIGHT_ALIGN 2
+#define FIMC_DEF_HOR_OFFS_ALIGN 1
+#define FIMC_DEFAULT_WIDTH 640
+#define FIMC_DEFAULT_HEIGHT 480
+
+/* indices to the clocks array */
+enum {
+ CLK_BUS,
+ CLK_GATE,
+};
+
+enum fimc_dev_flags {
+ ST_LPM,
+ /* m2m node */
+ ST_M2M_RUN,
+ ST_M2M_PEND,
+ ST_M2M_SUSPENDING,
+ ST_M2M_SUSPENDED,
+ /* capture node */
+ ST_CAPT_PEND,
+ ST_CAPT_RUN,
+ ST_CAPT_STREAM,
+ ST_CAPT_ISP_STREAM,
+ ST_CAPT_SUSPENDED,
+ ST_CAPT_SHUT,
+ ST_CAPT_BUSY,
+ ST_CAPT_APPLY_CFG,
+ ST_CAPT_JPEG,
+};
+
+#define fimc_m2m_active(dev) test_bit(ST_M2M_RUN, &(dev)->state)
+#define fimc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+
+#define fimc_capture_running(dev) test_bit(ST_CAPT_RUN, &(dev)->state)
+#define fimc_capture_pending(dev) test_bit(ST_CAPT_PEND, &(dev)->state)
+#define fimc_capture_busy(dev) test_bit(ST_CAPT_BUSY, &(dev)->state)
+
+enum fimc_datapath {
+ FIMC_IO_NONE,
+ FIMC_IO_CAMERA,
+ FIMC_IO_DMA,
+ FIMC_IO_LCDFIFO,
+ FIMC_IO_WRITEBACK,
+ FIMC_IO_ISP,
+};
+
+enum fimc_color_fmt {
+ FIMC_FMT_RGB444 = 0x10,
+ FIMC_FMT_RGB555,
+ FIMC_FMT_RGB565,
+ FIMC_FMT_RGB666,
+ FIMC_FMT_RGB888,
+ FIMC_FMT_RGB30_LOCAL,
+ FIMC_FMT_YCBCR420 = 0x20,
+ FIMC_FMT_YCBYCR422,
+ FIMC_FMT_YCRYCB422,
+ FIMC_FMT_CBYCRY422,
+ FIMC_FMT_CRYCBY422,
+ FIMC_FMT_YCBCR444_LOCAL,
+ FIMC_FMT_RAW8 = 0x40,
+ FIMC_FMT_RAW10,
+ FIMC_FMT_RAW12,
+ FIMC_FMT_JPEG = 0x80,
+ FIMC_FMT_YUYV_JPEG = 0x100,
+};
+
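+/* 0x180 matches FIMC_FMT_JPEG and FIMC_FMT_YUYV_JPEG, 0x10 the RGB formats */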
+#define fimc_fmt_is_user_defined(x) (!!((x) & 0x180))
+#define fimc_fmt_is_rgb(x) (!!((x) & 0x10))
+
+#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \
+ __strt == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+
+/* The hardware context state. */
+#define FIMC_PARAMS (1 << 0)
+#define FIMC_COMPOSE (1 << 1)
+#define FIMC_CTX_M2M (1 << 16)
+#define FIMC_CTX_CAP (1 << 17)
+#define FIMC_CTX_SHUT (1 << 18)
+
+/* Image conversion flags */
+#define FIMC_IN_DMA_ACCESS_TILED (1 << 0)
+#define FIMC_IN_DMA_ACCESS_LINEAR (0 << 0)
+#define FIMC_OUT_DMA_ACCESS_TILED (1 << 1)
+#define FIMC_OUT_DMA_ACCESS_LINEAR (0 << 1)
+#define FIMC_SCAN_MODE_PROGRESSIVE (0 << 2)
+#define FIMC_SCAN_MODE_INTERLACED (1 << 2)
+/*
+ * YCbCr data dynamic range for RGB-YUV color conversion.
+ * Y/Cb/Cr: (0 ~ 255)
+ */
+#define FIMC_COLOR_RANGE_WIDE (0 << 3)
+/* Y (16 ~ 235), Cb/Cr (16 ~ 240) */
+#define FIMC_COLOR_RANGE_NARROW (1 << 3)
+
+/**
+ * struct fimc_dma_offset - pixel offset information for DMA
+ * @y_h: y value horizontal offset
+ * @y_v: y value vertical offset
+ * @cb_h: cb value horizontal offset
+ * @cb_v: cb value vertical offset
+ * @cr_h: cr value horizontal offset
+ * @cr_v: cr value vertical offset
+ */
+struct fimc_dma_offset {
+ int y_h;
+ int y_v;
+ int cb_h;
+ int cb_v;
+ int cr_h;
+ int cr_v;
+};
+
+/**
+ * struct fimc_effect - color effect information
+ * @type: effect type
+ * @pat_cb: cb value when type is "arbitrary"
+ * @pat_cr: cr value when type is "arbitrary"
+ */
+struct fimc_effect {
+ u32 type;
+ u8 pat_cb;
+ u8 pat_cr;
+};
+
+/**
+ * struct fimc_scaler - the configuration data for FIMC internal scaler
+ * @scaleup_h: flag indicating scaling up horizontally
+ * @scaleup_v: flag indicating scaling up vertically
+ * @copy_mode: flag indicating transparent DMA transfer (no scaling
+ * and color format conversion)
+ * @enabled: flag indicating if the scaler is used
+ * @hfactor: horizontal shift factor
+ * @vfactor: vertical shift factor
+ * @pre_hratio: horizontal ratio of the prescaler
+ * @pre_vratio: vertical ratio of the prescaler
+ * @pre_dst_width: the prescaler's destination width
+ * @pre_dst_height: the prescaler's destination height
+ * @main_hratio: the main scaler's horizontal ratio
+ * @main_vratio: the main scaler's vertical ratio
+ * @real_width: source pixel (width - offset)
+ * @real_height: source pixel (height - offset)
+ */
+struct fimc_scaler {
+ unsigned int scaleup_h:1;
+ unsigned int scaleup_v:1;
+ unsigned int copy_mode:1;
+ unsigned int enabled:1;
+ u32 hfactor;
+ u32 vfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 main_hratio;
+ u32 main_vratio;
+ u32 real_width;
+ u32 real_height;
+};
+
+/**
+ * struct fimc_addr - the FIMC physical address set for DMA
+ * @y: luminance plane physical address
+ * @cb: Cb plane physical address
+ * @cr: Cr plane physical address
+ */
+struct fimc_addr {
+ u32 y;
+ u32 cb;
+ u32 cr;
+};
+
+/**
+ * struct fimc_vid_buffer - the driver's video buffer
+ * @vb: v4l videobuf buffer
+ * @list: linked list structure for buffer queue
+ * @paddr: precalculated physical address set
+ * @index: buffer index for the output DMA engine
+ */
+struct fimc_vid_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ struct fimc_addr paddr;
+ int index;
+};
+
+/**
+ * struct fimc_frame - source/target frame properties
+ * @f_width: image full width (virtual screen size)
+ * @f_height: image full height (virtual screen size)
+ * @o_width: original image width as set by S_FMT
+ * @o_height: original image height as set by S_FMT
+ * @offs_h: image horizontal pixel offset
+ * @offs_v: image vertical pixel offset
+ * @width: image pixel width
+ * @height: image pixel height
+ * @payload: image size in bytes (w x h x bpp)
+ * @bytesperline: bytesperline value for each plane
+ * @paddr: image frame buffer physical addresses
+ * @dma_offset: DMA offset in bytes
+ * @fmt: fimc color format pointer
+ */
+struct fimc_frame {
+ u32 f_width;
+ u32 f_height;
+ u32 o_width;
+ u32 o_height;
+ u32 offs_h;
+ u32 offs_v;
+ u32 width;
+ u32 height;
+ unsigned int payload[VIDEO_MAX_PLANES];
+ unsigned int bytesperline[VIDEO_MAX_PLANES];
+ struct fimc_addr paddr;
+ struct fimc_dma_offset dma_offset;
+ struct fimc_fmt *fmt;
+ u8 alpha;
+};
+
+/**
+ * struct fimc_m2m_device - v4l2 memory-to-memory device data
+ * @vfd: the video device node for v4l2 m2m mode
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx: hardware context data
+ * @refcnt: the reference counter
+ */
+struct fimc_m2m_device {
+ struct video_device vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct fimc_ctx *ctx;
+ int refcnt;
+};
+
+#define FIMC_SD_PAD_SINK_CAM 0
+#define FIMC_SD_PAD_SINK_FIFO 1
+#define FIMC_SD_PAD_SOURCE 2
+#define FIMC_SD_PADS_NUM 3
+
+/**
+ * struct fimc_vid_cap - camera capture device information
+ * @ctx: hardware context data
+ * @subdev: subdev exposing the FIMC processing block
+ * @ve: exynos video device entity structure
+ * @vd_pad: fimc video capture node pad
+ * @sd_pads: fimc video processing block pads
+ * @ci_fmt: image format at the FIMC camera input (and the scaler output)
+ * @wb_fmt: image format at the FIMC ISP Writeback input
+ * @source_config: external image source related configuration structure
+ * @pending_buf_q: the pending buffer queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vbq: the capture video buffer queue
+ * @active_buf_cnt: number of video buffers scheduled in hardware
+ * @buf_index: index for managing the output DMA buffers
+ * @frame_count: the frame counter for statistics
+ * @reqbufs_count: the number of buffers requested in REQBUFS ioctl
+ * @input_index: input (camera sensor) index
+ * @input: capture input type, grp_id of the attached subdev
+ * @user_subdev_api: true if subdevs are not configured by the host driver
+ * @inh_sensor_ctrls: a flag indicating v4l2 controls are inherited from
+ * an image sensor subdev
+ */
+struct fimc_vid_cap {
+ struct fimc_ctx *ctx;
+ struct v4l2_subdev subdev;
+ struct exynos_video_entity ve;
+ struct media_pad vd_pad;
+ struct media_pad sd_pads[FIMC_SD_PADS_NUM];
+ struct v4l2_mbus_framefmt ci_fmt;
+ struct v4l2_mbus_framefmt wb_fmt;
+ struct fimc_source_info source_config;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vbq;
+ int active_buf_cnt;
+ int buf_index;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+ bool streaming;
+ int input_index;
+ u32 input;
+ bool user_subdev_api;
+ bool inh_sensor_ctrls;
+};
+
+/**
+ * struct fimc_pix_limit - image pixel size limits in various IP configurations
+ *
+ * @scaler_en_w: max input pixel width when the scaler is enabled
+ * @scaler_dis_w: max input pixel width when the scaler is disabled
+ * @in_rot_en_h: max input height when the input rotator is on
+ * @in_rot_dis_w: max input width when the input rotator is off
+ * @out_rot_en_w: max output width with the output rotator on
+ * @out_rot_dis_w: max output width with the output rotator off
+ */
+struct fimc_pix_limit {
+ u16 scaler_en_w;
+ u16 scaler_dis_w;
+ u16 in_rot_en_h;
+ u16 in_rot_dis_w;
+ u16 out_rot_en_w;
+ u16 out_rot_dis_w;
+};
+
+/**
+ * struct fimc_variant - FIMC device variant information
+ * @has_inp_rot: set if has input rotator
+ * @has_out_rot: set if has output rotator
+ * @has_mainscaler_ext: 1 if extended mainscaler ratios in CIEXTEN register
+ * are present in this IP revision
+ * @has_cam_if: set if this instance has a camera input interface
+ * @has_isp_wb: set if this instance has ISP writeback input
+ * @pix_limit: pixel size constraints for the scaler
+ * @min_inp_pixsize: minimum input pixel size
+ * @min_out_pixsize: minimum output pixel size
+ * @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
+ */
+struct fimc_variant {
+ unsigned int has_inp_rot:1;
+ unsigned int has_out_rot:1;
+ unsigned int has_mainscaler_ext:1;
+ unsigned int has_cam_if:1;
+ unsigned int has_isp_wb:1;
+ const struct fimc_pix_limit *pix_limit;
+ u16 min_inp_pixsize;
+ u16 min_out_pixsize;
+ u16 hor_offs_align;
+ u16 min_vsize_align;
+};
+
+/**
+ * struct fimc_drvdata - per device type driver data
+ * @variant: variant information for this device
+ * @num_entities: number of fimc instances available in a SoC
+ * @lclk_frequency: local bus clock frequency
+ * @cistatus2: 1 if the FIMC IPs have CISTATUS2 register
+ * @dma_pix_hoff: the horizontal DMA offset unit: 1 - pixels, 0 - bytes
+ * @alpha_color: 1 if alpha color component is supported
+ * @out_buf_count: maximum number of output DMA buffers supported
+ */
+struct fimc_drvdata {
+ const struct fimc_variant *variant[FIMC_MAX_DEVS];
+ int num_entities;
+ unsigned long lclk_frequency;
+ /* Fields common to all FIMC IP instances */
+ u8 cistatus2;
+ u8 dma_pix_hoff;
+ u8 alpha_color;
+ u8 out_buf_count;
+};
+
+#define fimc_get_drvdata(_pdev) \
+ ((struct fimc_drvdata *) platform_get_device_id(_pdev)->driver_data)
+
+struct fimc_ctx;
+
+/**
+ * struct fimc_dev - abstraction for FIMC entity
+ * @slock: the spinlock protecting this data structure
+ * @lock: the mutex protecting this data structure
+ * @pdev: pointer to the FIMC platform device
+ * @pdata: pointer to the device platform data
+ * @sysreg: pointer to the SYSREG regmap
+ * @variant: the IP variant information
+ * @id: FIMC device index (0..FIMC_MAX_DEVS)
+ * @clock: clocks required for FIMC operation
+ * @regs: the mapped hardware registers
+ * @irq_queue: interrupt handler waitqueue
+ * @v4l2_dev: root v4l2_device
+ * @m2m: memory-to-memory V4L2 device information
+ * @vid_cap: camera capture device information
+ * @state: flags used to synchronize m2m and capture mode operation
+ * @pipeline: fimc video capture pipeline data structure
+ */
+struct fimc_dev {
+ spinlock_t slock;
+ struct mutex lock;
+ struct platform_device *pdev;
+ struct s5p_platform_fimc *pdata;
+ struct regmap *sysreg;
+ const struct fimc_variant *variant;
+ const struct fimc_drvdata *drv_data;
+ int id;
+ struct clk *clock[MAX_FIMC_CLOCKS];
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+ struct v4l2_device *v4l2_dev;
+ struct fimc_m2m_device m2m;
+ struct fimc_vid_cap vid_cap;
+ unsigned long state;
+};
+
+/**
+ * struct fimc_ctrls - v4l2 controls structure
+ * @handler: the control handler
+ * @colorfx: image effect control
+ * @colorfx_cbcr: Cb/Cr coefficients control
+ * @rotate: image rotation control
+ * @hflip: horizontal flip control
+ * @vflip: vertical flip control
+ * @alpha: RGB alpha control
+ * @ready: true if @handler is initialized
+ */
+struct fimc_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *colorfx_cbcr;
+ };
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *alpha;
+ bool ready;
+};
+
+/**
+ * struct fimc_ctx - the device context data
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @out_order_1p: output 1-plane YCBCR order
+ * @out_order_2p: output 2-plane YCBCR order
+ * @in_order_1p: input 1-plane YCBCR order
+ * @in_order_2p: input 2-plane YCBCR order
+ * @in_path: input mode (DMA or camera)
+ * @out_path: output mode (DMA or FIFO)
+ * @scaler: image scaler properties
+ * @effect: image effect
+ * @rotation: image clockwise rotation in degrees
+ * @hflip: indicates image horizontal flip if set
+ * @vflip: indicates image vertical flip if set
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of user configuration
+ * @fimc_dev: the FIMC device this context applies to
+ * @fh: v4l2 file handle
+ * @ctrls: v4l2 controls structure
+ */
+struct fimc_ctx {
+ struct fimc_frame s_frame;
+ struct fimc_frame d_frame;
+ u32 out_order_1p;
+ u32 out_order_2p;
+ u32 in_order_1p;
+ u32 in_order_2p;
+ enum fimc_datapath in_path;
+ enum fimc_datapath out_path;
+ struct fimc_scaler scaler;
+ struct fimc_effect effect;
+ int rotation;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ u32 flags;
+ u32 state;
+ struct fimc_dev *fimc_dev;
+ struct v4l2_fh fh;
+ struct fimc_ctrls ctrls;
+};
+
+#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
+
+static inline void set_frame_bounds(struct fimc_frame *f, u32 width, u32 height)
+{
+ f->o_width = width;
+ f->o_height = height;
+ f->f_width = width;
+ f->f_height = height;
+}
+
+static inline void set_frame_crop(struct fimc_frame *f,
+ u32 left, u32 top, u32 width, u32 height)
+{
+ f->offs_h = left;
+ f->offs_v = top;
+ f->width = width;
+ f->height = height;
+}
+
+static inline u32 fimc_get_format_depth(struct fimc_fmt *ff)
+{
+ u32 i, depth = 0;
+
+ if (ff != NULL)
+ for (i = 0; i < ff->colplanes; i++)
+ depth += ff->depth[i];
+ return depth;
+}
+
+static inline bool fimc_capture_active(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ ret = !!(fimc->state & (1 << ST_CAPT_RUN) ||
+ fimc->state & (1 << ST_CAPT_PEND));
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+static inline void fimc_ctx_state_set(u32 state, struct fimc_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+ ctx->state |= state;
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+}
+
+static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+ ret = (ctx->state & mask) == mask;
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+ return ret;
+}
+
+static inline int tiled_fmt(struct fimc_fmt *fmt)
+{
+ return fmt->fourcc == V4L2_PIX_FMT_NV12MT;
+}
+
+static inline bool fimc_jpeg_fourcc(u32 pixelformat)
+{
+ return (pixelformat == V4L2_PIX_FMT_JPEG ||
+ pixelformat == V4L2_PIX_FMT_S5C_UYVY_JPG);
+}
+
+static inline bool fimc_user_defined_mbus_fmt(u32 code)
+{
+ return (code == MEDIA_BUS_FMT_JPEG_1X8 ||
+ code == MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8);
+}
+
+/* Return the alpha component bit mask */
+static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
+{
+ switch (fmt->color) {
+ case FIMC_FMT_RGB444: return 0x0f;
+ case FIMC_FMT_RGB555: return 0x01;
+ case FIMC_FMT_RGB888: return 0xff;
+ default: return 0;
+ };
+}
+
+static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ struct fimc_frame *frame;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == type) {
+ if (fimc_ctx_state_is_set(FIMC_CTX_M2M, ctx))
+ frame = &ctx->s_frame;
+ else
+ return ERR_PTR(-EINVAL);
+ } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
+ frame = &ctx->d_frame;
+ } else {
+ v4l2_err(ctx->fimc_dev->v4l2_dev,
+ "Wrong buffer/video queue type (%d)\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return frame;
+}
+
+/* -----------------------------------------------------*/
+/* fimc-core.c */
+int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int fimc_ctrls_create(struct fimc_ctx *ctx);
+void fimc_ctrls_delete(struct fimc_ctx *ctx);
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f);
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+ struct v4l2_pix_format_mplane *pix);
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
+ unsigned int mask, int index);
+struct fimc_fmt *fimc_get_format(unsigned int index);
+
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+ int dw, int dh, int rotation);
+int fimc_set_scaler_info(struct fimc_ctx *ctx);
+int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
+ struct fimc_frame *frame, struct fimc_addr *paddr);
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
+void fimc_set_yuv_order(struct fimc_ctx *ctx);
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev);
+void fimc_unregister_m2m_device(struct fimc_dev *fimc);
+int fimc_register_driver(void);
+void fimc_unregister_driver(void);
+
+#ifdef CONFIG_MFD_SYSCON
+static inline struct regmap *fimc_get_sysreg_regmap(struct device_node *node)
+{
+ return syscon_regmap_lookup_by_phandle(node, "samsung,sysreg");
+}
+#else
+#define fimc_get_sysreg_regmap(node) (NULL)
+#endif
+
+/* -----------------------------------------------------*/
+/* fimc-m2m.c */
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state);
+
+/* -----------------------------------------------------*/
+/* fimc-capture.c */
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc);
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc);
+int fimc_capture_ctrls_create(struct fimc_dev *fimc);
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg);
+int fimc_capture_suspend(struct fimc_dev *fimc);
+int fimc_capture_resume(struct fimc_dev *fimc);
+
+/*
+ * Buffer list manipulation functions. Must be called with fimc.slock held.
+ */
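+
+/*
+ * A minimal usage sketch, assuming local 'cap', 'buf' and 'flags' variables:
+ *
+ *   spin_lock_irqsave(&fimc->slock, flags);
+ *   if (!list_empty(&cap->active_buf_q))
+ *           buf = fimc_active_queue_pop(cap);
+ *   spin_unlock_irqrestore(&fimc->slock, flags);
+ */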
+
+/**
+ * fimc_active_queue_add - add buffer to the capture active buffers queue
+ * @buf: buffer to add to the active buffers list
+ */
+static inline void fimc_active_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ list_add_tail(&buf->list, &vid_cap->active_buf_q);
+ vid_cap->active_buf_cnt++;
+}
+
+/**
+ * fimc_active_queue_pop - pop buffer from the capture active buffers queue
+ *
+ * The caller must ensure the active_buf_q list is not empty.
+ */
+static inline struct fimc_vid_buffer *fimc_active_queue_pop(
+ struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->active_buf_q.next,
+ struct fimc_vid_buffer, list);
+ list_del(&buf->list);
+ vid_cap->active_buf_cnt--;
+ return buf;
+}
+
+/**
+ * fimc_pending_queue_add - add buffer to the capture pending buffers queue
+ * @buf: buffer to add to the pending buffers list
+ */
+static inline void fimc_pending_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ list_add_tail(&buf->list, &vid_cap->pending_buf_q);
+}
+
+/**
+ * fimc_pending_queue_pop - pop buffer from the capture pending buffers queue
+ *
+ * The caller must ensure the pending_buf_q list is not empty.
+ */
+static inline struct fimc_vid_buffer *fimc_pending_queue_pop(
+ struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->pending_buf_q.next,
+ struct fimc_vid_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* FIMC_CORE_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-command.h b/drivers/media/platform/exynos4-is/fimc-is-command.h
new file mode 100644
index 000000000..0d1f52e39
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-command.h
@@ -0,0 +1,137 @@
+/*
+ * Samsung Exynos4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * FIMC-IS command set definitions
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_IS_CMD_H_
+#define FIMC_IS_CMD_H_
+
+#define FIMC_IS_COMMAND_VER 110 /* FIMC-IS command set version 1.10 */
+
+/* Enumeration of commands between the FIMC-IS and the host processor. */
+
+/* HOST to FIMC-IS */
+#define HIC_PREVIEW_STILL 0x0001
+#define HIC_PREVIEW_VIDEO 0x0002
+#define HIC_CAPTURE_STILL 0x0003
+#define HIC_CAPTURE_VIDEO 0x0004
+#define HIC_STREAM_ON 0x0005
+#define HIC_STREAM_OFF 0x0006
+#define HIC_SET_PARAMETER 0x0007
+#define HIC_GET_PARAMETER 0x0008
+#define HIC_SET_TUNE 0x0009
+#define HIC_GET_STATUS 0x000b
+/* Sensor part */
+#define HIC_OPEN_SENSOR 0x000c
+#define HIC_CLOSE_SENSOR 0x000d
+#define HIC_SIMMIAN_INIT 0x000e
+#define HIC_SIMMIAN_WRITE 0x000f
+#define HIC_SIMMIAN_READ 0x0010
+#define HIC_POWER_DOWN 0x0011
+#define HIC_GET_SET_FILE_ADDR 0x0012
+#define HIC_LOAD_SET_FILE 0x0013
+#define HIC_MSG_CONFIG 0x0014
+#define HIC_MSG_TEST 0x0015
+/* FIMC-IS to HOST */
+#define IHC_GET_SENSOR_NUM 0x1000
+#define IHC_SET_SHOT_MARK 0x1001
+/* parameter1: frame number */
+/* parameter2: confidence level (smile 0~100) */
+/* parameter3: confidence level (blink 0~100) */
+#define IHC_SET_FACE_MARK 0x1002
+/* parameter1: coordinate count */
+/* parameter2: coordinate buffer address */
+#define IHC_FRAME_DONE 0x1003
+/* parameter1: frame start number */
+/* parameter2: frame count */
+#define IHC_AA_DONE 0x1004
+#define IHC_NOT_READY 0x1005
+
+#define IH_REPLY_DONE 0x2000
+#define IH_REPLY_NOT_DONE 0x2001
+
+enum fimc_is_scenario {
+ IS_SC_PREVIEW_STILL,
+ IS_SC_PREVIEW_VIDEO,
+ IS_SC_CAPTURE_STILL,
+ IS_SC_CAPTURE_VIDEO,
+ IS_SC_MAX
+};
+
+enum fimc_is_sub_scenario {
+ IS_SC_SUB_DEFAULT,
+ IS_SC_SUB_PS_VTCALL,
+ IS_SC_SUB_CS_VTCALL,
+ IS_SC_SUB_PV_VTCALL,
+ IS_SC_SUB_CV_VTCALL,
+};
+
+struct is_common_regs {
+ u32 hicmd;
+ u32 hic_sensorid;
+ u32 hic_param[4];
+ u32 reserved1[4];
+
+ u32 ihcmd;
+ u32 ihc_sensorid;
+ u32 ihc_param[4];
+ u32 reserved2[4];
+
+ u32 isp_sensor_id;
+ u32 isp_param[2];
+ u32 reserved3[1];
+
+ u32 scc_sensor_id;
+ u32 scc_param[2];
+ u32 reserved4[1];
+
+ u32 dnr_sensor_id;
+ u32 dnr_param[2];
+ u32 reserved5[1];
+
+ u32 scp_sensor_id;
+ u32 scp_param[2];
+ u32 reserved6[29];
+} __packed;
+
+struct is_mcuctl_reg {
+ u32 mcuctl;
+ u32 bboar;
+
+ u32 intgr0;
+ u32 intcr0;
+ u32 intmr0;
+ u32 intsr0;
+ u32 intmsr0;
+
+ u32 intgr1;
+ u32 intcr1;
+ u32 intmr1;
+ u32 intsr1;
+ u32 intmsr1;
+
+ u32 intcr2;
+ u32 intmr2;
+ u32 intsr2;
+ u32 intmsr2;
+
+ u32 gpoctrl;
+ u32 cpoenctlr;
+ u32 gpictlr;
+
+ u32 reserved[0xd];
+
+ struct is_common_regs common;
+} __packed;
+
+#endif /* FIMC_IS_CMD_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.c b/drivers/media/platform/exynos4-is/fimc-is-errno.c
new file mode 100644
index 000000000..e050e63fe
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.c
@@ -0,0 +1,272 @@
+/*
+ * Samsung Exynos4 SoC series FIMC-IS slave interface driver
+ *
+ * Error log interface functions
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "fimc-is-errno.h"
+
+const char *fimc_is_param_strerr(unsigned int error)
+{
+ switch (error) {
+ case ERROR_COMMON_CMD:
+ return "ERROR_COMMON_CMD: Invalid Command";
+ case ERROR_COMMON_PARAMETER:
+ return "ERROR_COMMON_PARAMETER: Invalid Parameter";
+ case ERROR_COMMON_SETFILE_LOAD:
+ return "ERROR_COMMON_SETFILE_LOAD: Illegal Setfile Loading";
+ case ERROR_COMMON_SETFILE_ADJUST:
+ return "ERROR_COMMON_SETFILE_ADJUST: Setfile isn't adjusted";
+ case ERROR_COMMON_SETFILE_INDEX:
+ return "ERROR_COMMON_SETFILE_INDEX: Invalid setfile index";
+ case ERROR_COMMON_INPUT_PATH:
+ return "ERROR_COMMON_INPUT_PATH: Input path can be changed in ready state";
+ case ERROR_COMMON_INPUT_INIT:
+ return "ERROR_COMMON_INPUT_INIT: IP can not start if input path is not set";
+ case ERROR_COMMON_OUTPUT_PATH:
+ return "ERROR_COMMON_OUTPUT_PATH: Output path can be changed in ready state (stop)";
+ case ERROR_COMMON_OUTPUT_INIT:
+ return "ERROR_COMMON_OUTPUT_INIT: IP can not start if output path is not set";
+ case ERROR_CONTROL_BYPASS:
+ return "ERROR_CONTROL_BYPASS";
+ case ERROR_OTF_INPUT_FORMAT:
+ return "ERROR_OTF_INPUT_FORMAT: Invalid format (DRC: YUV444, FD: YUV444, 422, 420)";
+ case ERROR_OTF_INPUT_WIDTH:
+ return "ERROR_OTF_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
+ case ERROR_OTF_INPUT_HEIGHT:
+ return "ERROR_OTF_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
+ case ERROR_OTF_INPUT_BIT_WIDTH:
+ return "ERROR_OTF_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
+ case ERROR_DMA_INPUT_WIDTH:
+ return "ERROR_DMA_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
+ case ERROR_DMA_INPUT_HEIGHT:
+ return "ERROR_DMA_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
+ case ERROR_DMA_INPUT_FORMAT:
+ return "ERROR_DMA_INPUT_FORMAT: Invalid format (DRC: YUV444 or YUV422, FD: YUV444,422,420)";
+ case ERROR_DMA_INPUT_BIT_WIDTH:
+ return "ERROR_DMA_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
+ case ERROR_DMA_INPUT_ORDER:
+ return "ERROR_DMA_INPUT_ORDER: Invalid order(DRC: YYCbCr,YCbYCr,FD:NO,YYCbCr,YCbYCr,CbCr,CrCb)";
+ case ERROR_DMA_INPUT_PLANE:
+ return "ERROR_DMA_INPUT_PLANE: Invalid plane (DRC: 3, FD: 1, 2, 3)";
+ case ERROR_OTF_OUTPUT_WIDTH:
+ return "ERROR_OTF_OUTPUT_WIDTH: Invalid width (DRC: 128~8192)";
+ case ERROR_OTF_OUTPUT_HEIGHT:
+ return "ERROR_OTF_OUTPUT_HEIGHT: Invalid height (DRC: 64~8192)";
+ case ERROR_OTF_OUTPUT_FORMAT:
+ return "ERROR_OTF_OUTPUT_FORMAT: Invalid format (DRC: YUV444)";
+ case ERROR_OTF_OUTPUT_BIT_WIDTH:
+ return "ERROR_OTF_OUTPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
+ case ERROR_DMA_OUTPUT_WIDTH:
+ return "ERROR_DMA_OUTPUT_WIDTH";
+ case ERROR_DMA_OUTPUT_HEIGHT:
+ return "ERROR_DMA_OUTPUT_HEIGHT";
+ case ERROR_DMA_OUTPUT_FORMAT:
+ return "ERROR_DMA_OUTPUT_FORMAT";
+ case ERROR_DMA_OUTPUT_BIT_WIDTH:
+ return "ERROR_DMA_OUTPUT_BIT_WIDTH";
+ case ERROR_DMA_OUTPUT_PLANE:
+ return "ERROR_DMA_OUTPUT_PLANE";
+ case ERROR_DMA_OUTPUT_ORDER:
+ return "ERROR_DMA_OUTPUT_ORDER";
+
+ /* Sensor Error(100~199) */
+ case ERROR_SENSOR_I2C_FAIL:
+ return "ERROR_SENSOR_I2C_FAIL";
+ case ERROR_SENSOR_INVALID_FRAMERATE:
+ return "ERROR_SENSOR_INVALID_FRAMERATE";
+ case ERROR_SENSOR_INVALID_EXPOSURETIME:
+ return "ERROR_SENSOR_INVALID_EXPOSURETIME";
+ case ERROR_SENSOR_INVALID_SIZE:
+ return "ERROR_SENSOR_INVALID_SIZE";
+ case ERROR_SENSOR_INVALID_SETTING:
+ return "ERROR_SENSOR_INVALID_SETTING";
+ case ERROR_SENSOR_ACTURATOR_INIT_FAIL:
+ return "ERROR_SENSOR_ACTURATOR_INIT_FAIL";
+ case ERROR_SENSOR_INVALID_AF_POS:
+ return "ERROR_SENSOR_INVALID_AF_POS";
+ case ERROR_SENSOR_UNSUPPORT_FUNC:
+ return "ERROR_SENSOR_UNSUPPORT_FUNC";
+ case ERROR_SENSOR_UNSUPPORT_PERI:
+ return "ERROR_SENSOR_UNSUPPORT_PERI";
+ case ERROR_SENSOR_UNSUPPORT_AF:
+ return "ERROR_SENSOR_UNSUPPORT_AF";
+
+ /* ISP Error (200~299) */
+ case ERROR_ISP_AF_BUSY:
+ return "ERROR_ISP_AF_BUSY";
+ case ERROR_ISP_AF_INVALID_COMMAND:
+ return "ERROR_ISP_AF_INVALID_COMMAND";
+ case ERROR_ISP_AF_INVALID_MODE:
+ return "ERROR_ISP_AF_INVALID_MODE";
+
+ /* DRC Error (300~399) */
+ /* FD Error (400~499) */
+ case ERROR_FD_CONFIG_MAX_NUMBER_STATE:
+ return "ERROR_FD_CONFIG_MAX_NUMBER_STATE";
+ case ERROR_FD_CONFIG_MAX_NUMBER_INVALID:
+ return "ERROR_FD_CONFIG_MAX_NUMBER_INVALID";
+ case ERROR_FD_CONFIG_YAW_ANGLE_STATE:
+ return "ERROR_FD_CONFIG_YAW_ANGLE_STATE";
+ case ERROR_FD_CONFIG_YAW_ANGLE_INVALID:
+ return "ERROR_FD_CONFIG_YAW_ANGLE_INVALID";
+ case ERROR_FD_CONFIG_ROLL_ANGLE_STATE:
+ return "ERROR_FD_CONFIG_ROLL_ANGLE_STATE";
+ case ERROR_FD_CONFIG_ROLL_ANGLE_INVALID:
+ return "ERROR_FD_CONFIG_ROLL_ANGLE_INVALID";
+ case ERROR_FD_CONFIG_SMILE_MODE_INVALID:
+ return "ERROR_FD_CONFIG_SMILE_MODE_INVALID";
+ case ERROR_FD_CONFIG_BLINK_MODE_INVALID:
+ return "ERROR_FD_CONFIG_BLINK_MODE_INVALID";
+ case ERROR_FD_CONFIG_EYES_DETECT_INVALID:
+ return "ERROR_FD_CONFIG_EYES_DETECT_INVALID";
+ case ERROR_FD_CONFIG_MOUTH_DETECT_INVALID:
+ return "ERROR_FD_CONFIG_MOUTH_DETECT_INVALID";
+ case ERROR_FD_CONFIG_ORIENTATION_STATE:
+ return "ERROR_FD_CONFIG_ORIENTATION_STATE";
+ case ERROR_FD_CONFIG_ORIENTATION_INVALID:
+ return "ERROR_FD_CONFIG_ORIENTATION_INVALID";
+ case ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID:
+ return "ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID";
+ case ERROR_FD_RESULT:
+ return "ERROR_FD_RESULT";
+ case ERROR_FD_MODE:
+ return "ERROR_FD_MODE";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *fimc_is_strerr(unsigned int error)
+{
+ error &= ~IS_ERROR_TIME_OUT_FLAG;
+
+ switch (error) {
+ /* General */
+ case IS_ERROR_INVALID_COMMAND:
+ return "IS_ERROR_INVALID_COMMAND";
+ case IS_ERROR_REQUEST_FAIL:
+ return "IS_ERROR_REQUEST_FAIL";
+ case IS_ERROR_INVALID_SCENARIO:
+ return "IS_ERROR_INVALID_SCENARIO";
+ case IS_ERROR_INVALID_SENSORID:
+ return "IS_ERROR_INVALID_SENSORID";
+ case IS_ERROR_INVALID_MODE_CHANGE:
+ return "IS_ERROR_INVALID_MODE_CHANGE";
+ case IS_ERROR_INVALID_MAGIC_NUMBER:
+ return "IS_ERROR_INVALID_MAGIC_NUMBER";
+ case IS_ERROR_INVALID_SETFILE_HDR:
+ return "IS_ERROR_INVALID_SETFILE_HDR";
+ case IS_ERROR_BUSY:
+ return "IS_ERROR_BUSY";
+ case IS_ERROR_SET_PARAMETER:
+ return "IS_ERROR_SET_PARAMETER";
+ case IS_ERROR_INVALID_PATH:
+ return "IS_ERROR_INVALID_PATH";
+ case IS_ERROR_OPEN_SENSOR_FAIL:
+ return "IS_ERROR_OPEN_SENSOR_FAIL";
+ case IS_ERROR_ENTRY_MSG_THREAD_DOWN:
+ return "IS_ERROR_ENTRY_MSG_THREAD_DOWN";
+ case IS_ERROR_ISP_FRAME_END_NOT_DONE:
+ return "IS_ERROR_ISP_FRAME_END_NOT_DONE";
+ case IS_ERROR_DRC_FRAME_END_NOT_DONE:
+ return "IS_ERROR_DRC_FRAME_END_NOT_DONE";
+ case IS_ERROR_SCALERC_FRAME_END_NOT_DONE:
+ return "IS_ERROR_SCALERC_FRAME_END_NOT_DONE";
+ case IS_ERROR_ODC_FRAME_END_NOT_DONE:
+ return "IS_ERROR_ODC_FRAME_END_NOT_DONE";
+ case IS_ERROR_DIS_FRAME_END_NOT_DONE:
+ return "IS_ERROR_DIS_FRAME_END_NOT_DONE";
+ case IS_ERROR_TDNR_FRAME_END_NOT_DONE:
+ return "IS_ERROR_TDNR_FRAME_END_NOT_DONE";
+ case IS_ERROR_SCALERP_FRAME_END_NOT_DONE:
+ return "IS_ERROR_SCALERP_FRAME_END_NOT_DONE";
+ case IS_ERROR_WAIT_STREAM_OFF_NOT_DONE:
+ return "IS_ERROR_WAIT_STREAM_OFF_NOT_DONE";
+ case IS_ERROR_NO_MSG_IS_RECEIVED:
+ return "IS_ERROR_NO_MSG_IS_RECEIVED";
+ case IS_ERROR_SENSOR_MSG_FAIL:
+ return "IS_ERROR_SENSOR_MSG_FAIL";
+ case IS_ERROR_ISP_MSG_FAIL:
+ return "IS_ERROR_ISP_MSG_FAIL";
+ case IS_ERROR_DRC_MSG_FAIL:
+ return "IS_ERROR_DRC_MSG_FAIL";
+ case IS_ERROR_LHFD_MSG_FAIL:
+ return "IS_ERROR_LHFD_MSG_FAIL";
+ case IS_ERROR_UNKNOWN:
+ return "IS_ERROR_UNKNOWN";
+
+ /* Sensor */
+ case IS_ERROR_SENSOR_PWRDN_FAIL:
+ return "IS_ERROR_SENSOR_PWRDN_FAIL";
+
+ /* ISP */
+ case IS_ERROR_ISP_PWRDN_FAIL:
+ return "IS_ERROR_ISP_PWRDN_FAIL";
+ case IS_ERROR_ISP_MULTIPLE_INPUT:
+ return "IS_ERROR_ISP_MULTIPLE_INPUT";
+ case IS_ERROR_ISP_ABSENT_INPUT:
+ return "IS_ERROR_ISP_ABSENT_INPUT";
+ case IS_ERROR_ISP_ABSENT_OUTPUT:
+ return "IS_ERROR_ISP_ABSENT_OUTPUT";
+ case IS_ERROR_ISP_NONADJACENT_OUTPUT:
+ return "IS_ERROR_ISP_NONADJACENT_OUTPUT";
+ case IS_ERROR_ISP_FORMAT_MISMATCH:
+ return "IS_ERROR_ISP_FORMAT_MISMATCH";
+ case IS_ERROR_ISP_WIDTH_MISMATCH:
+ return "IS_ERROR_ISP_WIDTH_MISMATCH";
+ case IS_ERROR_ISP_HEIGHT_MISMATCH:
+ return "IS_ERROR_ISP_HEIGHT_MISMATCH";
+ case IS_ERROR_ISP_BITWIDTH_MISMATCH:
+ return "IS_ERROR_ISP_BITWIDTH_MISMATCH";
+ case IS_ERROR_ISP_FRAME_END_TIME_OUT:
+ return "IS_ERROR_ISP_FRAME_END_TIME_OUT";
+
+ /* DRC */
+ case IS_ERROR_DRC_PWRDN_FAIL:
+ return "IS_ERROR_DRC_PWRDN_FAIL";
+ case IS_ERROR_DRC_MULTIPLE_INPUT:
+ return "IS_ERROR_DRC_MULTIPLE_INPUT";
+ case IS_ERROR_DRC_ABSENT_INPUT:
+ return "IS_ERROR_DRC_ABSENT_INPUT";
+ case IS_ERROR_DRC_NONADJACENT_INPUT:
+ return "IS_ERROR_DRC_NONADJACENT_INPUT";
+ case IS_ERROR_DRC_ABSENT_OUTPUT:
+ return "IS_ERROR_DRC_ABSENT_OUTPUT";
+ case IS_ERROR_DRC_NONADJACENT_OUTPUT:
+ return "IS_ERROR_DRC_NONADJACENT_OUTPUT";
+ case IS_ERROR_DRC_FORMAT_MISMATCH:
+ return "IS_ERROR_DRC_FORMAT_MISMATCH";
+ case IS_ERROR_DRC_WIDTH_MISMATCH:
+ return "IS_ERROR_DRC_WIDTH_MISMATCH";
+ case IS_ERROR_DRC_HEIGHT_MISMATCH:
+ return "IS_ERROR_DRC_HEIGHT_MISMATCH";
+ case IS_ERROR_DRC_BITWIDTH_MISMATCH:
+ return "IS_ERROR_DRC_BITWIDTH_MISMATCH";
+ case IS_ERROR_DRC_FRAME_END_TIME_OUT:
+ return "IS_ERROR_DRC_FRAME_END_TIME_OUT";
+
+ /* FD */
+ case IS_ERROR_FD_PWRDN_FAIL:
+ return "IS_ERROR_FD_PWRDN_FAIL";
+ case IS_ERROR_FD_MULTIPLE_INPUT:
+ return "IS_ERROR_FD_MULTIPLE_INPUT";
+ case IS_ERROR_FD_ABSENT_INPUT:
+ return "IS_ERROR_FD_ABSENT_INPUT";
+ case IS_ERROR_FD_NONADJACENT_INPUT:
+ return "IS_ERROR_FD_NONADJACENT_INPUT";
+ case IS_ERROR_LHFD_FRAME_END_TIME_OUT:
+ return "IS_ERROR_LHFD_FRAME_END_TIME_OUT";
+ default:
+ return "Unknown";
+ }
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.h b/drivers/media/platform/exynos4-is/fimc-is-errno.h
new file mode 100644
index 000000000..ef981e745
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.h
@@ -0,0 +1,248 @@
+/*
+ * Samsung Exynos4 SoC series FIMC-IS slave interface driver
+ *
+ * FIMC-IS error code definition
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef FIMC_IS_ERR_H_
+#define FIMC_IS_ERR_H_
+
+#define IS_ERROR_VER 011 /* IS ERROR VERSION 0.11 */
+
+enum {
+ IS_ERROR_NONE,
+
+ /* General 1 ~ 99 */
+ IS_ERROR_INVALID_COMMAND,
+ IS_ERROR_REQUEST_FAIL,
+ IS_ERROR_INVALID_SCENARIO,
+ IS_ERROR_INVALID_SENSORID,
+ IS_ERROR_INVALID_MODE_CHANGE,
+ IS_ERROR_INVALID_MAGIC_NUMBER,
+ IS_ERROR_INVALID_SETFILE_HDR,
+ IS_ERROR_BUSY,
+ IS_ERROR_SET_PARAMETER,
+ IS_ERROR_INVALID_PATH,
+ IS_ERROR_OPEN_SENSOR_FAIL,
+ IS_ERROR_ENTRY_MSG_THREAD_DOWN,
+ IS_ERROR_ISP_FRAME_END_NOT_DONE,
+ IS_ERROR_DRC_FRAME_END_NOT_DONE,
+ IS_ERROR_SCALERC_FRAME_END_NOT_DONE,
+ IS_ERROR_ODC_FRAME_END_NOT_DONE,
+ IS_ERROR_DIS_FRAME_END_NOT_DONE,
+ IS_ERROR_TDNR_FRAME_END_NOT_DONE,
+ IS_ERROR_SCALERP_FRAME_END_NOT_DONE,
+ IS_ERROR_WAIT_STREAM_OFF_NOT_DONE,
+ IS_ERROR_NO_MSG_IS_RECEIVED,
+ IS_ERROR_SENSOR_MSG_FAIL,
+ IS_ERROR_ISP_MSG_FAIL,
+ IS_ERROR_DRC_MSG_FAIL,
+ IS_ERROR_SCALERC_MSG_FAIL,
+ IS_ERROR_ODC_MSG_FAIL,
+ IS_ERROR_DIS_MSG_FAIL,
+ IS_ERROR_TDNR_MSG_FAIL,
+ IS_ERROR_SCALERP_MSG_FAIL,
+ IS_ERROR_LHFD_MSG_FAIL,
+ IS_ERROR_LHFD_INTERNAL_STOP,
+
+ /* Sensor 100 ~ 199 */
+ IS_ERROR_SENSOR_PWRDN_FAIL = 100,
+ IS_ERROR_SENSOR_STREAM_ON_FAIL,
+ IS_ERROR_SENSOR_STREAM_OFF_FAIL,
+
+ /* ISP 200 ~ 299 */
+ IS_ERROR_ISP_PWRDN_FAIL = 200,
+ IS_ERROR_ISP_MULTIPLE_INPUT,
+ IS_ERROR_ISP_ABSENT_INPUT,
+ IS_ERROR_ISP_ABSENT_OUTPUT,
+ IS_ERROR_ISP_NONADJACENT_OUTPUT,
+ IS_ERROR_ISP_FORMAT_MISMATCH,
+ IS_ERROR_ISP_WIDTH_MISMATCH,
+ IS_ERROR_ISP_HEIGHT_MISMATCH,
+ IS_ERROR_ISP_BITWIDTH_MISMATCH,
+ IS_ERROR_ISP_FRAME_END_TIME_OUT,
+
+ /* DRC 300 ~ 399 */
+ IS_ERROR_DRC_PWRDN_FAIL = 300,
+ IS_ERROR_DRC_MULTIPLE_INPUT,
+ IS_ERROR_DRC_ABSENT_INPUT,
+ IS_ERROR_DRC_NONADJACENT_INPUT,
+ IS_ERROR_DRC_ABSENT_OUTPUT,
+ IS_ERROR_DRC_NONADJACENT_OUTPUT,
+ IS_ERROR_DRC_FORMAT_MISMATCH,
+ IS_ERROR_DRC_WIDTH_MISMATCH,
+ IS_ERROR_DRC_HEIGHT_MISMATCH,
+ IS_ERROR_DRC_BITWIDTH_MISMATCH,
+ IS_ERROR_DRC_FRAME_END_TIME_OUT,
+
+ /* SCALERC 400 ~ 499 */
+ IS_ERROR_SCALERC_PWRDN_FAIL = 400,
+
+ /* ODC 500 ~ 599 */
+ IS_ERROR_ODC_PWRDN_FAIL = 500,
+
+ /* DIS 600 ~ 699 */
+ IS_ERROR_DIS_PWRDN_FAIL = 600,
+
+ /* TDNR 700 ~ 799 */
+ IS_ERROR_TDNR_PWRDN_FAIL = 700,
+
+ /* SCALERC 800 ~ 899 */
+ IS_ERROR_SCALERP_PWRDN_FAIL = 800,
+
+ /* FD 900 ~ 999 */
+ IS_ERROR_FD_PWRDN_FAIL = 900,
+ IS_ERROR_FD_MULTIPLE_INPUT,
+ IS_ERROR_FD_ABSENT_INPUT,
+ IS_ERROR_FD_NONADJACENT_INPUT,
+ IS_ERROR_LHFD_FRAME_END_TIME_OUT,
+
+ IS_ERROR_UNKNOWN = 1000,
+};
+
+#define IS_ERROR_TIME_OUT_FLAG 0x80000000
+
+/* Set parameter error enum */
+enum fimc_is_error {
+ /* Common error (0~99) */
+ ERROR_COMMON_NONE = 0,
+ ERROR_COMMON_CMD = 1, /* Invalid command */
+ ERROR_COMMON_PARAMETER = 2, /* Invalid parameter */
+ /* setfile is not loaded before adjusting */
+ ERROR_COMMON_SETFILE_LOAD = 3,
+ /* setfile is not adjusted before running. */
+ ERROR_COMMON_SETFILE_ADJUST = 4,
+ /* Index of setfile is not valid (0~MAX_SETFILE_NUM-1) */
+ ERROR_COMMON_SETFILE_INDEX = 5,
+ /* Input path can be changed in ready state (stop) */
+ ERROR_COMMON_INPUT_PATH = 6,
+ /* IP can not start if input path is not set */
+ ERROR_COMMON_INPUT_INIT = 7,
+ /* Output path can be changed in ready state (stop) */
+ ERROR_COMMON_OUTPUT_PATH = 8,
+ /* IP can not start if output path is not set */
+ ERROR_COMMON_OUTPUT_INIT = 9,
+
+ ERROR_CONTROL_NONE = ERROR_COMMON_NONE,
+ ERROR_CONTROL_BYPASS = 11, /* Enable or Disable */
+
+ ERROR_OTF_INPUT_NONE = ERROR_COMMON_NONE,
+ ERROR_OTF_INPUT_CMD = 21,
+ /* invalid format (DRC: YUV444, FD: YUV444, 422, 420) */
+ ERROR_OTF_INPUT_FORMAT = 22,
+ /* invalid width (DRC: 128~8192, FD: 32~8190) */
+ ERROR_OTF_INPUT_WIDTH = 23,
+ /* invalid height (DRC: 64~8192, FD: 16~8190) */
+ ERROR_OTF_INPUT_HEIGHT = 24,
+ /* invalid bit-width (DRC: 8~12bits, FD: 8bit) */
+ ERROR_OTF_INPUT_BIT_WIDTH = 25,
+ /* invalid FrameTime for ISP */
+ ERROR_OTF_INPUT_USER_FRAMETIIME = 26,
+
+ ERROR_DMA_INPUT_NONE = ERROR_COMMON_NONE,
+ /* invalid width (DRC: 128~8192, FD: 32~8190) */
+ ERROR_DMA_INPUT_WIDTH = 31,
+ /* invalid height (DRC: 64~8192, FD: 16~8190) */
+ ERROR_DMA_INPUT_HEIGHT = 32,
+ /* invalid format (DRC: YUV444 or YUV422, FD: YUV444, 422, 420) */
+ ERROR_DMA_INPUT_FORMAT = 33,
+ /* invalid bit-width (DRC: 8~12bit, FD: 8bit) */
+ ERROR_DMA_INPUT_BIT_WIDTH = 34,
+ /* invalid order (DRC: YYCbCr or YCbYCr, FD: NO, YYCbCr, YCbYCr, CbCr, CrCb) */
+ ERROR_DMA_INPUT_ORDER = 35,
+ /* invalid plane (DRC: 3, FD: 1, 2, 3) */
+ ERROR_DMA_INPUT_PLANE = 36,
+
+ ERROR_OTF_OUTPUT_NONE = ERROR_COMMON_NONE,
+ /* invalid width (DRC: 128~8192) */
+ ERROR_OTF_OUTPUT_WIDTH = 41,
+ /* invalid height (DRC: 64~8192) */
+ ERROR_OTF_OUTPUT_HEIGHT = 42,
+ /* invalid format (DRC: YUV444) */
+ ERROR_OTF_OUTPUT_FORMAT = 43,
+ /* invalid bit-width (DRC: 8~12bits) */
+ ERROR_OTF_OUTPUT_BIT_WIDTH = 44,
+
+ ERROR_DMA_OUTPUT_NONE = ERROR_COMMON_NONE,
+ ERROR_DMA_OUTPUT_WIDTH = 51, /* invalid width */
+ ERROR_DMA_OUTPUT_HEIGHT = 52, /* invalid height */
+ ERROR_DMA_OUTPUT_FORMAT = 53, /* invalid format */
+ ERROR_DMA_OUTPUT_BIT_WIDTH = 54, /* invalid bit-width */
+ ERROR_DMA_OUTPUT_PLANE = 55, /* invalid plane */
+ ERROR_DMA_OUTPUT_ORDER = 56, /* invalid order */
+
+ ERROR_GLOBAL_SHOTMODE_NONE = ERROR_COMMON_NONE,
+
+ /* SENSOR Error(100~199) */
+ ERROR_SENSOR_NONE = ERROR_COMMON_NONE,
+ ERROR_SENSOR_I2C_FAIL = 101,
+ ERROR_SENSOR_INVALID_FRAMERATE,
+ ERROR_SENSOR_INVALID_EXPOSURETIME,
+ ERROR_SENSOR_INVALID_SIZE,
+ ERROR_SENSOR_INVALID_SETTING,
+ ERROR_SENSOR_ACTURATOR_INIT_FAIL,
+ ERROR_SENSOR_INVALID_AF_POS,
+ ERROR_SENSOR_UNSUPPORT_FUNC,
+ ERROR_SENSOR_UNSUPPORT_PERI,
+ ERROR_SENSOR_UNSUPPORT_AF,
+
+ /* ISP Error (200~299) */
+ ERROR_ISP_AF_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_AF_BUSY = 201,
+ ERROR_ISP_AF_INVALID_COMMAND = 202,
+ ERROR_ISP_AF_INVALID_MODE = 203,
+ ERROR_ISP_FLASH_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_AWB_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_IMAGE_EFFECT_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_ISO_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_ADJUST_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_METERING_NONE = ERROR_COMMON_NONE,
+ ERROR_ISP_AFC_NONE = ERROR_COMMON_NONE,
+
+ /* DRC Error (300~399) */
+
+ /* FD Error (400~499) */
+ ERROR_FD_NONE = ERROR_COMMON_NONE,
+ /* Invalid max number (1~16) */
+ ERROR_FD_CONFIG_MAX_NUMBER_STATE = 401,
+ ERROR_FD_CONFIG_MAX_NUMBER_INVALID = 402,
+ ERROR_FD_CONFIG_YAW_ANGLE_STATE = 403,
+ ERROR_FD_CONFIG_YAW_ANGLE_INVALID = 404,
+ ERROR_FD_CONFIG_ROLL_ANGLE_STATE = 405,
+ ERROR_FD_CONFIG_ROLL_ANGLE_INVALID = 406,
+ ERROR_FD_CONFIG_SMILE_MODE_INVALID = 407,
+ ERROR_FD_CONFIG_BLINK_MODE_INVALID = 408,
+ ERROR_FD_CONFIG_EYES_DETECT_INVALID = 409,
+ ERROR_FD_CONFIG_MOUTH_DETECT_INVALID = 410,
+ ERROR_FD_CONFIG_ORIENTATION_STATE = 411,
+ ERROR_FD_CONFIG_ORIENTATION_INVALID = 412,
+ ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID = 413,
+ /* PARAM_FdResultStr can be only applied in ready-state or stream off */
+ ERROR_FD_RESULT = 414,
+ /* PARAM_FdModeStr can be only applied in ready-state or stream off */
+ ERROR_FD_MODE = 415,
+ /* Scaler Error (500 ~ 599) */
+ ERROR_SCALER_NO_NONE = ERROR_COMMON_NONE,
+ ERROR_SCALER_DMA_OUTSEL = 501,
+ ERROR_SCALER_H_RATIO = 502,
+ ERROR_SCALER_V_RATIO = 503,
+
+ ERROR_SCALER_IMAGE_EFFECT = 510,
+
+ ERROR_SCALER_ROTATE = 520,
+ ERROR_SCALER_FLIP = 521,
+};
+
+const char *fimc_is_strerr(unsigned int error);
+const char *fimc_is_param_strerr(unsigned int error);
+
+#endif /* FIMC_IS_ERR_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
new file mode 100644
index 000000000..70dd4852b
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -0,0 +1,162 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include "fimc-is-i2c.h"
+
+struct fimc_is_i2c {
+ struct i2c_adapter adapter;
+ struct clk *clock;
+};
+
+/*
+ * An empty algorithm is used as the actual I2C bus controller driver
+ * is implemented in the FIMC-IS subsystem firmware and the host CPU
+ * doesn't access the I2C bus controller.
+ */
+static u32 is_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm fimc_is_i2c_algorithm = {
+ .functionality = is_i2c_func,
+};
+
+static int fimc_is_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct fimc_is_i2c *isp_i2c;
+ struct i2c_adapter *i2c_adap;
+ int ret;
+
+ isp_i2c = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c), GFP_KERNEL);
+ if (!isp_i2c)
+ return -ENOMEM;
+
+ isp_i2c->clock = devm_clk_get(&pdev->dev, "i2c_isp");
+ if (IS_ERR(isp_i2c->clock)) {
+ dev_err(&pdev->dev, "failed to get the clock\n");
+ return PTR_ERR(isp_i2c->clock);
+ }
+
+ i2c_adap = &isp_i2c->adapter;
+ i2c_adap->dev.of_node = node;
+ i2c_adap->dev.parent = &pdev->dev;
+ strlcpy(i2c_adap->name, "exynos4x12-isp-i2c", sizeof(i2c_adap->name));
+ i2c_adap->owner = THIS_MODULE;
+ i2c_adap->algo = &fimc_is_i2c_algorithm;
+ i2c_adap->class = I2C_CLASS_SPD;
+
+ platform_set_drvdata(pdev, isp_i2c);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = i2c_add_adapter(i2c_adap);
+ if (ret < 0)
+ goto err_pm_dis;
+ /*
+ * Client drivers of this adapter don't do any I2C transfers as that
+ * is handled by the ISP firmware. But we rely on the runtime PM
+ * state propagation from the clients up to the adapter driver so
+ * clear the ignore_children flag here. PM runtime calls are not
+ * used in the probe() handlers of this adapter's clients, so there
+ * are no issues with clearing the flag right after registering the I2C
+ * adapter.
+ */
+ pm_suspend_ignore_children(&i2c_adap->dev, false);
+ return 0;
+
+err_pm_dis:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int fimc_is_i2c_remove(struct platform_device *pdev)
+{
+ struct fimc_is_i2c *isp_i2c = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ i2c_del_adapter(&isp_i2c->adapter);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int fimc_is_i2c_runtime_suspend(struct device *dev)
+{
+ struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isp_i2c->clock);
+ return 0;
+}
+
+static int fimc_is_i2c_runtime_resume(struct device *dev)
+{
+ struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(isp_i2c->clock);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_is_i2c_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_is_i2c_runtime_suspend(dev);
+}
+
+static int fimc_is_i2c_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_is_i2c_runtime_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops fimc_is_i2c_pm_ops = {
+ SET_RUNTIME_PM_OPS(fimc_is_i2c_runtime_suspend,
+ fimc_is_i2c_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_is_i2c_suspend, fimc_is_i2c_resume)
+};
+
+static const struct of_device_id fimc_is_i2c_of_match[] = {
+ { .compatible = FIMC_IS_I2C_COMPATIBLE },
+ { },
+};
+
+static struct platform_driver fimc_is_i2c_driver = {
+ .probe = fimc_is_i2c_probe,
+ .remove = fimc_is_i2c_remove,
+ .driver = {
+ .of_match_table = fimc_is_i2c_of_match,
+ .name = "fimc-isp-i2c",
+ .pm = &fimc_is_i2c_pm_ops,
+ }
+};
+
+int fimc_is_register_i2c_driver(void)
+{
+ return platform_driver_register(&fimc_is_i2c_driver);
+}
+
+void fimc_is_unregister_i2c_driver(void)
+{
+ platform_driver_unregister(&fimc_is_i2c_driver);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.h b/drivers/media/platform/exynos4-is/fimc-is-i2c.h
new file mode 100644
index 000000000..0d38d6bb9
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.h
@@ -0,0 +1,15 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define FIMC_IS_I2C_COMPATIBLE "samsung,exynos4212-i2c-isp"
+
+int fimc_is_register_i2c_driver(void);
+void fimc_is_unregister_i2c_driver(void);
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.c b/drivers/media/platform/exynos4-is/fimc-is-param.c
new file mode 100644
index 000000000..72b9b436c
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.c
@@ -0,0 +1,896 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include "fimc-is.h"
+#include "fimc-is-command.h"
+#include "fimc-is-errno.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+#include "fimc-is-sensor.h"
+
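+/*
+ * Copy a single, fixed size (FIMC_IS_PARAM_MAX_SIZE bytes) parameter block.
+ * Callers pass an entry of the shared IS parameter region as @dst and the
+ * driver's cached chain configuration as @src.
+ */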
+static void __hw_param_copy(void *dst, void *src)
+{
+ memcpy(dst, src, FIMC_IS_PARAM_MAX_SIZE);
+}
+
+static void __fimc_is_hw_update_param_global_shotmode(struct fimc_is *is)
+{
+ struct param_global_shotmode *dst, *src;
+
+ dst = &is->is_p_region->parameter.global.shotmode;
+ src = &is->config[is->config_index].global.shotmode;
+ __hw_param_copy(dst, src);
+}
+
+static void __fimc_is_hw_update_param_sensor_framerate(struct fimc_is *is)
+{
+ struct param_sensor_framerate *dst, *src;
+
+ dst = &is->is_p_region->parameter.sensor.frame_rate;
+ src = &is->config[is->config_index].sensor.frame_rate;
+ __hw_param_copy(dst, src);
+}
+
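+/*
+ * Copy the parameter block selected by @offset from the current chain
+ * configuration into the shared IS parameter region. Only the ISP, DRC
+ * and FD blocks are handled here; any other offset returns -EINVAL.
+ */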
+int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset)
+{
+ struct is_param_region *par = &is->is_p_region->parameter;
+ struct chain_config *cfg = &is->config[is->config_index];
+
+ switch (offset) {
+ case PARAM_ISP_CONTROL:
+ __hw_param_copy(&par->isp.control, &cfg->isp.control);
+ break;
+
+ case PARAM_ISP_OTF_INPUT:
+ __hw_param_copy(&par->isp.otf_input, &cfg->isp.otf_input);
+ break;
+
+ case PARAM_ISP_DMA1_INPUT:
+ __hw_param_copy(&par->isp.dma1_input, &cfg->isp.dma1_input);
+ break;
+
+ case PARAM_ISP_DMA2_INPUT:
+ __hw_param_copy(&par->isp.dma2_input, &cfg->isp.dma2_input);
+ break;
+
+ case PARAM_ISP_AA:
+ __hw_param_copy(&par->isp.aa, &cfg->isp.aa);
+ break;
+
+ case PARAM_ISP_FLASH:
+ __hw_param_copy(&par->isp.flash, &cfg->isp.flash);
+ break;
+
+ case PARAM_ISP_AWB:
+ __hw_param_copy(&par->isp.awb, &cfg->isp.awb);
+ break;
+
+ case PARAM_ISP_IMAGE_EFFECT:
+ __hw_param_copy(&par->isp.effect, &cfg->isp.effect);
+ break;
+
+ case PARAM_ISP_ISO:
+ __hw_param_copy(&par->isp.iso, &cfg->isp.iso);
+ break;
+
+ case PARAM_ISP_ADJUST:
+ __hw_param_copy(&par->isp.adjust, &cfg->isp.adjust);
+ break;
+
+ case PARAM_ISP_METERING:
+ __hw_param_copy(&par->isp.metering, &cfg->isp.metering);
+ break;
+
+ case PARAM_ISP_AFC:
+ __hw_param_copy(&par->isp.afc, &cfg->isp.afc);
+ break;
+
+ case PARAM_ISP_OTF_OUTPUT:
+ __hw_param_copy(&par->isp.otf_output, &cfg->isp.otf_output);
+ break;
+
+ case PARAM_ISP_DMA1_OUTPUT:
+ __hw_param_copy(&par->isp.dma1_output, &cfg->isp.dma1_output);
+ break;
+
+ case PARAM_ISP_DMA2_OUTPUT:
+ __hw_param_copy(&par->isp.dma2_output, &cfg->isp.dma2_output);
+ break;
+
+ case PARAM_DRC_CONTROL:
+ __hw_param_copy(&par->drc.control, &cfg->drc.control);
+ break;
+
+ case PARAM_DRC_OTF_INPUT:
+ __hw_param_copy(&par->drc.otf_input, &cfg->drc.otf_input);
+ break;
+
+ case PARAM_DRC_DMA_INPUT:
+ __hw_param_copy(&par->drc.dma_input, &cfg->drc.dma_input);
+ break;
+
+ case PARAM_DRC_OTF_OUTPUT:
+ __hw_param_copy(&par->drc.otf_output, &cfg->drc.otf_output);
+ break;
+
+ case PARAM_FD_CONTROL:
+ __hw_param_copy(&par->fd.control, &cfg->fd.control);
+ break;
+
+ case PARAM_FD_OTF_INPUT:
+ __hw_param_copy(&par->fd.otf_input, &cfg->fd.otf_input);
+ break;
+
+ case PARAM_FD_DMA_INPUT:
+ __hw_param_copy(&par->fd.dma_input, &cfg->fd.dma_input);
+ break;
+
+ case PARAM_FD_CONFIG:
+ __hw_param_copy(&par->fd.config, &cfg->fd.config);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
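+/*
+ * Count the parameters flagged as pending in both 32-bit words of the
+ * p_region_index bitmap; is->slock guards against concurrent updates.
+ */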
+unsigned int __get_pending_param_count(struct fimc_is *is)
+{
+ struct chain_config *config = &is->config[is->config_index];
+ unsigned long flags;
+ unsigned int count;
+
+ spin_lock_irqsave(&is->slock, flags);
+ count = hweight32(config->p_region_index[0]);
+ count += hweight32(config->p_region_index[1]);
+ spin_unlock_irqrestore(&is->slock, flags);
+
+ return count;
+}
+
+int __is_hw_update_params(struct fimc_is *is)
+{
+ unsigned long *p_index;
+ int i, id, ret = 0;
+
+ id = is->config_index;
+ p_index = &is->config[id].p_region_index[0];
+
+ if (test_bit(PARAM_GLOBAL_SHOTMODE, p_index))
+ __fimc_is_hw_update_param_global_shotmode(is);
+
+ if (test_bit(PARAM_SENSOR_FRAME_RATE, p_index))
+ __fimc_is_hw_update_param_sensor_framerate(is);
+
+ for (i = PARAM_ISP_CONTROL; i < PARAM_DRC_CONTROL; i++) {
+ if (test_bit(i, p_index))
+ ret = __fimc_is_hw_update_param(is, i);
+ }
+
+ for (i = PARAM_DRC_CONTROL; i < PARAM_SCALERC_CONTROL; i++) {
+ if (test_bit(i, p_index))
+ ret = __fimc_is_hw_update_param(is, i);
+ }
+
+ for (i = PARAM_FD_CONTROL; i <= PARAM_FD_CONFIG; i++) {
+ if (test_bit(i, p_index))
+ ret = __fimc_is_hw_update_param(is, i);
+ }
+
+ return ret;
+}
+
+void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
+{
+ struct isp_param *isp;
+
+ isp = &is->config[is->config_index].isp;
+ mf->width = isp->otf_input.width;
+ mf->height = isp->otf_input.height;
+}
+
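+/*
+ * Propagate a new OTF frame size to the ISP, DRC and FD blocks; the
+ * related parameters are marked pending only if not already queued.
+ */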
+void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+ struct drc_param *drc;
+ struct fd_param *fd;
+
+ isp = &is->config[index].isp;
+ drc = &is->config[index].drc;
+ fd = &is->config[index].fd;
+
+ /* Update isp size info (OTF only) */
+ isp->otf_input.width = mf->width;
+ isp->otf_input.height = mf->height;
+ isp->otf_output.width = mf->width;
+ isp->otf_output.height = mf->height;
+ /* Update drc size info (OTF only) */
+ drc->otf_input.width = mf->width;
+ drc->otf_input.height = mf->height;
+ drc->otf_output.width = mf->width;
+ drc->otf_output.height = mf->height;
+ /* Update fd size info (OTF only) */
+ fd->otf_input.width = mf->width;
+ fd->otf_input.height = mf->height;
+
+ if (test_bit(PARAM_ISP_OTF_INPUT,
+ &is->config[index].p_region_index[0]))
+ return;
+
+ /* Update field */
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_INPUT);
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_OUTPUT);
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_INPUT);
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_OUTPUT);
+ fimc_is_set_param_bit(is, PARAM_FD_OTF_INPUT);
+}
+
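+/*
+ * Maximum frame rate of the attached sensor: only the S5K6A3 (30 fps)
+ * is distinguished here, any other sensor falls back to 15 fps.
+ */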
+int fimc_is_hw_get_sensor_max_framerate(struct fimc_is *is)
+{
+ switch (is->sensor->drvdata->id) {
+ case FIMC_IS_SENSOR_ID_S5K6A3:
+ return 30;
+ default:
+ return 15;
+ }
+}
+
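+/*
+ * Program the sensor frame rate; @fps == 0 selects the sensor's maximum
+ * rate together with a default 66666 us maximum frame time.
+ */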
+void __is_set_sensor(struct fimc_is *is, int fps)
+{
+ unsigned int index = is->config_index;
+ struct sensor_param *sensor;
+ struct isp_param *isp;
+
+ sensor = &is->config[index].sensor;
+ isp = &is->config[index].isp;
+
+ if (fps == 0) {
+ sensor->frame_rate.frame_rate =
+ fimc_is_hw_get_sensor_max_framerate(is);
+ isp->otf_input.frametime_min = 0;
+ isp->otf_input.frametime_max = 66666;
+ } else {
+ sensor->frame_rate.frame_rate = fps;
+ isp->otf_input.frametime_min = 0;
+ isp->otf_input.frametime_max = (u32)1000000 / fps;
+ }
+
+ fimc_is_set_param_bit(is, PARAM_SENSOR_FRAME_RATE);
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_INPUT);
+}
+
+static void __maybe_unused __is_set_init_isp_aa(struct fimc_is *is)
+{
+ struct isp_param *isp;
+
+ isp = &is->config[is->config_index].isp;
+
+ isp->aa.cmd = ISP_AA_COMMAND_START;
+ isp->aa.target = ISP_AA_TARGET_AF | ISP_AA_TARGET_AE |
+ ISP_AA_TARGET_AWB;
+ isp->aa.mode = 0;
+ isp->aa.scene = 0;
+ isp->aa.sleep = 0;
+ isp->aa.face = 0;
+ isp->aa.touch_x = 0;
+ isp->aa.touch_y = 0;
+ isp->aa.manual_af_setting = 0;
+ isp->aa.err = ISP_AF_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+}
+
+void __is_set_isp_flash(struct fimc_is *is, u32 cmd, u32 redeye)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp = &is->config[index].isp;
+
+ isp->flash.cmd = cmd;
+ isp->flash.redeye = redeye;
+ isp->flash.err = ISP_FLASH_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_FLASH);
+}
+
+void __is_set_isp_awb(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->awb.cmd = cmd;
+ isp->awb.illumination = val;
+ isp->awb.err = ISP_AWB_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_AWB);
+}
+
+void __is_set_isp_effect(struct fimc_is *is, u32 cmd)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->effect.cmd = cmd;
+ isp->effect.err = ISP_IMAGE_EFFECT_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_IMAGE_EFFECT);
+}
+
+void __is_set_isp_iso(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->iso.cmd = cmd;
+ isp->iso.value = val;
+ isp->iso.err = ISP_ISO_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_ISO);
+}
+
+void __is_set_isp_adjust(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ unsigned long *p_index;
+ struct isp_param *isp;
+
+ p_index = &is->config[index].p_region_index[0];
+ isp = &is->config[index].isp;
+
+ switch (cmd) {
+ case ISP_ADJUST_COMMAND_MANUAL_CONTRAST:
+ isp->adjust.contrast = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_SATURATION:
+ isp->adjust.saturation = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_SHARPNESS:
+ isp->adjust.sharpness = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_EXPOSURE:
+ isp->adjust.exposure = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS:
+ isp->adjust.brightness = val;
+ break;
+ case ISP_ADJUST_COMMAND_MANUAL_HUE:
+ isp->adjust.hue = val;
+ break;
+ case ISP_ADJUST_COMMAND_AUTO:
+ isp->adjust.contrast = 0;
+ isp->adjust.saturation = 0;
+ isp->adjust.sharpness = 0;
+ isp->adjust.exposure = 0;
+ isp->adjust.brightness = 0;
+ isp->adjust.hue = 0;
+ break;
+ }
+
+ if (!test_bit(PARAM_ISP_ADJUST, p_index)) {
+ isp->adjust.cmd = cmd;
+ isp->adjust.err = ISP_ADJUST_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_ADJUST);
+ } else {
+ isp->adjust.cmd |= cmd;
+ }
+}
+
+void __is_set_isp_metering(struct fimc_is *is, u32 id, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[0];
+ isp = &is->config[index].isp;
+
+ switch (id) {
+ case IS_METERING_CONFIG_CMD:
+ isp->metering.cmd = val;
+ break;
+ case IS_METERING_CONFIG_WIN_POS_X:
+ isp->metering.win_pos_x = val;
+ break;
+ case IS_METERING_CONFIG_WIN_POS_Y:
+ isp->metering.win_pos_y = val;
+ break;
+ case IS_METERING_CONFIG_WIN_WIDTH:
+ isp->metering.win_width = val;
+ break;
+ case IS_METERING_CONFIG_WIN_HEIGHT:
+ isp->metering.win_height = val;
+ break;
+ default:
+ return;
+ }
+
+ if (!test_bit(PARAM_ISP_METERING, p_index)) {
+ isp->metering.err = ISP_METERING_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_METERING);
+ }
+}
+
+void __is_set_isp_afc(struct fimc_is *is, u32 cmd, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct isp_param *isp;
+
+ isp = &is->config[index].isp;
+
+ isp->afc.cmd = cmd;
+ isp->afc.manual = val;
+ isp->afc.err = ISP_AFC_ERROR_NONE;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_AFC);
+}
+
+void __is_set_drc_control(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct drc_param *drc;
+
+ drc = &is->config[index].drc;
+
+ drc->control.bypass = val;
+
+ fimc_is_set_param_bit(is, PARAM_DRC_CONTROL);
+}
+
+void __is_set_fd_control(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->control.cmd = val;
+
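+ /*
+ * The FD parameter bits live in the second bitmap word, hence the
+ * "- 32" when testing p_region_index[1] directly.
+ */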
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index))
+ fimc_is_set_param_bit(is, PARAM_FD_CONTROL);
+}
+
+void __is_set_fd_config_maxface(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.max_number = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_MAXIMUM_NUMBER;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_MAXIMUM_NUMBER;
+ }
+}
+
+void __is_set_fd_config_rollangle(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.roll_angle = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_ROLL_ANGLE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_ROLL_ANGLE;
+ }
+}
+
+void __is_set_fd_config_yawangle(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.yaw_angle = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_YAW_ANGLE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_YAW_ANGLE;
+ }
+}
+
+void __is_set_fd_config_smilemode(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.smile_mode = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_SMILE_MODE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_SMILE_MODE;
+ }
+}
+
+void __is_set_fd_config_blinkmode(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.blink_mode = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_BLINK_MODE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_BLINK_MODE;
+ }
+}
+
+void __is_set_fd_config_eyedetect(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.eye_detect = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_EYES_DETECT;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_EYES_DETECT;
+ }
+}
+
+void __is_set_fd_config_mouthdetect(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.mouth_detect = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_MOUTH_DETECT;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_MOUTH_DETECT;
+ }
+}
+
+void __is_set_fd_config_orientation(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.orientation = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_ORIENTATION;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_ORIENTATION;
+ }
+}
+
+void __is_set_fd_config_orientation_val(struct fimc_is *is, u32 val)
+{
+ unsigned int index = is->config_index;
+ struct fd_param *fd;
+ unsigned long *p_index;
+
+ p_index = &is->config[index].p_region_index[1];
+ fd = &is->config[index].fd;
+
+ fd->config.orientation_value = val;
+
+ if (!test_bit((PARAM_FD_CONFIG - 32), p_index)) {
+ fd->config.cmd = FD_CONFIG_COMMAND_ORIENTATION_VALUE;
+ fd->config.err = ERROR_FD_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_CONFIG);
+ } else {
+ fd->config.cmd |= FD_CONFIG_COMMAND_ORIENTATION_VALUE;
+ }
+}
+
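+/*
+ * Populate the initial parameter set for all sub-blocks. Entries already
+ * queued by earlier configuration (bit set in p_region_index) keep their
+ * values wherever the test_bit() checks below apply.
+ */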
+void fimc_is_set_initial_params(struct fimc_is *is)
+{
+ struct global_param *global;
+ struct isp_param *isp;
+ struct drc_param *drc;
+ struct fd_param *fd;
+ unsigned long *p_index;
+ unsigned int index;
+
+ index = is->config_index;
+ global = &is->config[index].global;
+ isp = &is->config[index].isp;
+ drc = &is->config[index].drc;
+ fd = &is->config[index].fd;
+ p_index = &is->config[index].p_region_index[0];
+
+ /* Global */
+ global->shotmode.cmd = 1;
+ fimc_is_set_param_bit(is, PARAM_GLOBAL_SHOTMODE);
+
+ /* ISP */
+ isp->control.cmd = CONTROL_COMMAND_START;
+ isp->control.bypass = CONTROL_BYPASS_DISABLE;
+ isp->control.err = CONTROL_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_CONTROL);
+
+ isp->otf_input.cmd = OTF_INPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_ISP_OTF_INPUT, p_index)) {
+ isp->otf_input.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ isp->otf_input.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_INPUT);
+ }
+ if (is->sensor->test_pattern)
+ isp->otf_input.format = OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER;
+ else
+ isp->otf_input.format = OTF_INPUT_FORMAT_BAYER;
+ isp->otf_input.bitwidth = 10;
+ isp->otf_input.order = OTF_INPUT_ORDER_BAYER_GR_BG;
+ isp->otf_input.crop_offset_x = 0;
+ isp->otf_input.crop_offset_y = 0;
+ isp->otf_input.err = OTF_INPUT_ERROR_NONE;
+
+ isp->dma1_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ isp->dma1_input.width = 0;
+ isp->dma1_input.height = 0;
+ isp->dma1_input.format = 0;
+ isp->dma1_input.bitwidth = 0;
+ isp->dma1_input.plane = 0;
+ isp->dma1_input.order = 0;
+ isp->dma1_input.buffer_number = 0;
+ isp->dma1_input.width = 0;
+ isp->dma1_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA1_INPUT);
+
+ isp->dma2_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ isp->dma2_input.width = 0;
+ isp->dma2_input.height = 0;
+ isp->dma2_input.format = 0;
+ isp->dma2_input.bitwidth = 0;
+ isp->dma2_input.plane = 0;
+ isp->dma2_input.order = 0;
+ isp->dma2_input.buffer_number = 0;
+ isp->dma2_input.width = 0;
+ isp->dma2_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_INPUT);
+
+ isp->aa.cmd = ISP_AA_COMMAND_START;
+ isp->aa.target = ISP_AA_TARGET_AE | ISP_AA_TARGET_AWB;
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+
+ if (!test_bit(PARAM_ISP_FLASH, p_index))
+ __is_set_isp_flash(is, ISP_FLASH_COMMAND_DISABLE,
+ ISP_FLASH_REDEYE_DISABLE);
+
+ if (!test_bit(PARAM_ISP_AWB, p_index))
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_AUTO, 0);
+
+ if (!test_bit(PARAM_ISP_IMAGE_EFFECT, p_index))
+ __is_set_isp_effect(is, ISP_IMAGE_EFFECT_DISABLE);
+
+ if (!test_bit(PARAM_ISP_ISO, p_index))
+ __is_set_isp_iso(is, ISP_ISO_COMMAND_AUTO, 0);
+
+ if (!test_bit(PARAM_ISP_ADJUST, p_index)) {
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_CONTRAST, 0);
+ __is_set_isp_adjust(is,
+ ISP_ADJUST_COMMAND_MANUAL_SATURATION, 0);
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_SHARPNESS, 0);
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_EXPOSURE, 0);
+ __is_set_isp_adjust(is,
+ ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS, 0);
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_HUE, 0);
+ }
+
+ if (!test_bit(PARAM_ISP_METERING, p_index)) {
+ __is_set_isp_metering(is, 0, ISP_METERING_COMMAND_CENTER);
+ __is_set_isp_metering(is, 1, 0);
+ __is_set_isp_metering(is, 2, 0);
+ __is_set_isp_metering(is, 3, 0);
+ __is_set_isp_metering(is, 4, 0);
+ }
+
+ if (!test_bit(PARAM_ISP_AFC, p_index))
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_AUTO, 0);
+
+ isp->otf_output.cmd = OTF_OUTPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_ISP_OTF_OUTPUT, p_index)) {
+ isp->otf_output.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ isp->otf_output.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_ISP_OTF_OUTPUT);
+ }
+ isp->otf_output.format = OTF_OUTPUT_FORMAT_YUV444;
+ isp->otf_output.bitwidth = 12;
+ isp->otf_output.order = 0;
+ isp->otf_output.err = OTF_OUTPUT_ERROR_NONE;
+
+ if (!test_bit(PARAM_ISP_DMA1_OUTPUT, p_index)) {
+ isp->dma1_output.cmd = DMA_OUTPUT_COMMAND_DISABLE;
+ isp->dma1_output.width = 0;
+ isp->dma1_output.height = 0;
+ isp->dma1_output.format = 0;
+ isp->dma1_output.bitwidth = 0;
+ isp->dma1_output.plane = 0;
+ isp->dma1_output.order = 0;
+ isp->dma1_output.buffer_number = 0;
+ isp->dma1_output.buffer_address = 0;
+ isp->dma1_output.notify_dma_done = 0;
+ isp->dma1_output.dma_out_mask = 0;
+ isp->dma1_output.err = DMA_OUTPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA1_OUTPUT);
+ }
+
+ if (!test_bit(PARAM_ISP_DMA2_OUTPUT, p_index)) {
+ isp->dma2_output.cmd = DMA_OUTPUT_COMMAND_DISABLE;
+ isp->dma2_output.width = 0;
+ isp->dma2_output.height = 0;
+ isp->dma2_output.format = 0;
+ isp->dma2_output.bitwidth = 0;
+ isp->dma2_output.plane = 0;
+ isp->dma2_output.order = 0;
+ isp->dma2_output.buffer_number = 0;
+ isp->dma2_output.buffer_address = 0;
+ isp->dma2_output.notify_dma_done = 0;
+ isp->dma2_output.dma_out_mask = 0;
+ isp->dma2_output.err = DMA_OUTPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ }
+
+ /* Sensor */
+ if (!test_bit(PARAM_SENSOR_FRAME_RATE, p_index)) {
+ if (is->config_index == 0)
+ __is_set_sensor(is, 0);
+ }
+
+ /* DRC */
+ drc->control.cmd = CONTROL_COMMAND_START;
+ __is_set_drc_control(is, CONTROL_BYPASS_ENABLE);
+
+ drc->otf_input.cmd = OTF_INPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_DRC_OTF_INPUT, p_index)) {
+ drc->otf_input.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ drc->otf_input.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_INPUT);
+ }
+ drc->otf_input.format = OTF_INPUT_FORMAT_YUV444;
+ drc->otf_input.bitwidth = 12;
+ drc->otf_input.order = 0;
+ drc->otf_input.err = OTF_INPUT_ERROR_NONE;
+
+ drc->dma_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ drc->dma_input.width = 0;
+ drc->dma_input.height = 0;
+ drc->dma_input.format = 0;
+ drc->dma_input.bitwidth = 0;
+ drc->dma_input.plane = 0;
+ drc->dma_input.order = 0;
+ drc->dma_input.buffer_number = 0;
+ drc->dma_input.width = 0;
+ drc->dma_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_DRC_DMA_INPUT);
+
+ drc->otf_output.cmd = OTF_OUTPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_DRC_OTF_OUTPUT, p_index)) {
+ drc->otf_output.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ drc->otf_output.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_DRC_OTF_OUTPUT);
+ }
+ drc->otf_output.format = OTF_OUTPUT_FORMAT_YUV444;
+ drc->otf_output.bitwidth = 8;
+ drc->otf_output.order = 0;
+ drc->otf_output.err = OTF_OUTPUT_ERROR_NONE;
+
+ /* FD */
+ __is_set_fd_control(is, CONTROL_COMMAND_STOP);
+ fd->control.bypass = CONTROL_BYPASS_DISABLE;
+
+ fd->otf_input.cmd = OTF_INPUT_COMMAND_ENABLE;
+ if (!test_bit(PARAM_FD_OTF_INPUT, p_index)) {
+ fd->otf_input.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ fd->otf_input.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ fimc_is_set_param_bit(is, PARAM_FD_OTF_INPUT);
+ }
+
+ fd->otf_input.format = OTF_INPUT_FORMAT_YUV444;
+ fd->otf_input.bitwidth = 8;
+ fd->otf_input.order = 0;
+ fd->otf_input.err = OTF_INPUT_ERROR_NONE;
+
+ fd->dma_input.cmd = DMA_INPUT_COMMAND_DISABLE;
+ fd->dma_input.width = 0;
+ fd->dma_input.height = 0;
+ fd->dma_input.format = 0;
+ fd->dma_input.bitwidth = 0;
+ fd->dma_input.plane = 0;
+ fd->dma_input.order = 0;
+ fd->dma_input.buffer_number = 0;
+ fd->dma_input.width = 0;
+ fd->dma_input.err = DMA_INPUT_ERROR_NONE;
+ fimc_is_set_param_bit(is, PARAM_FD_DMA_INPUT);
+
+ __is_set_fd_config_maxface(is, 5);
+ __is_set_fd_config_rollangle(is, FD_CONFIG_ROLL_ANGLE_FULL);
+ __is_set_fd_config_yawangle(is, FD_CONFIG_YAW_ANGLE_45_90);
+ __is_set_fd_config_smilemode(is, FD_CONFIG_SMILE_MODE_DISABLE);
+ __is_set_fd_config_blinkmode(is, FD_CONFIG_BLINK_MODE_DISABLE);
+ __is_set_fd_config_eyedetect(is, FD_CONFIG_EYES_DETECT_ENABLE);
+ __is_set_fd_config_mouthdetect(is, FD_CONFIG_MOUTH_DETECT_DISABLE);
+ __is_set_fd_config_orientation(is, FD_CONFIG_ORIENTATION_DISABLE);
+ __is_set_fd_config_orientation_val(is, 0);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.h b/drivers/media/platform/exynos4-is/fimc-is-param.h
new file mode 100644
index 000000000..8e31f7642
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.h
@@ -0,0 +1,1025 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_PARAM_H_
+#define FIMC_IS_PARAM_H_
+
+#include <linux/compiler.h>
+
+#define FIMC_IS_CONFIG_TIMEOUT 3000 /* ms */
+#define IS_DEFAULT_WIDTH 1280
+#define IS_DEFAULT_HEIGHT 720
+
+#define DEFAULT_PREVIEW_STILL_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_PREVIEW_STILL_HEIGHT IS_DEFAULT_HEIGHT
+#define DEFAULT_CAPTURE_STILL_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_CAPTURE_STILL_HEIGHT IS_DEFAULT_HEIGHT
+#define DEFAULT_PREVIEW_VIDEO_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_PREVIEW_VIDEO_HEIGHT IS_DEFAULT_HEIGHT
+#define DEFAULT_CAPTURE_VIDEO_WIDTH IS_DEFAULT_WIDTH
+#define DEFAULT_CAPTURE_VIDEO_HEIGHT IS_DEFAULT_HEIGHT
+
+#define DEFAULT_PREVIEW_STILL_FRAMERATE 30
+#define DEFAULT_CAPTURE_STILL_FRAMERATE 15
+#define DEFAULT_PREVIEW_VIDEO_FRAMERATE 30
+#define DEFAULT_CAPTURE_VIDEO_FRAMERATE 30
+
+#define FIMC_IS_REGION_VER 124 /* IS REGION VERSION 1.24 */
+#define FIMC_IS_PARAM_SIZE (FIMC_IS_REGION_SIZE + 1)
+#define FIMC_IS_MAGIC_NUMBER 0x01020304
+#define FIMC_IS_PARAM_MAX_SIZE 64 /* in bytes */
+#define FIMC_IS_PARAM_MAX_ENTRIES (FIMC_IS_PARAM_MAX_SIZE / 4)
+
+/* The parameter bitmask bit definitions. */
+enum is_param_bit {
+ PARAM_GLOBAL_SHOTMODE,
+ PARAM_SENSOR_CONTROL,
+ PARAM_SENSOR_OTF_OUTPUT,
+ PARAM_SENSOR_FRAME_RATE,
+ PARAM_BUFFER_CONTROL,
+ PARAM_BUFFER_OTF_INPUT,
+ PARAM_BUFFER_OTF_OUTPUT,
+ PARAM_ISP_CONTROL,
+ PARAM_ISP_OTF_INPUT,
+ PARAM_ISP_DMA1_INPUT,
+ /* 10 */
+ PARAM_ISP_DMA2_INPUT,
+ PARAM_ISP_AA,
+ PARAM_ISP_FLASH,
+ PARAM_ISP_AWB,
+ PARAM_ISP_IMAGE_EFFECT,
+ PARAM_ISP_ISO,
+ PARAM_ISP_ADJUST,
+ PARAM_ISP_METERING,
+ PARAM_ISP_AFC,
+ PARAM_ISP_OTF_OUTPUT,
+ /* 20 */
+ PARAM_ISP_DMA1_OUTPUT,
+ PARAM_ISP_DMA2_OUTPUT,
+ PARAM_DRC_CONTROL,
+ PARAM_DRC_OTF_INPUT,
+ PARAM_DRC_DMA_INPUT,
+ PARAM_DRC_OTF_OUTPUT,
+ PARAM_SCALERC_CONTROL,
+ PARAM_SCALERC_OTF_INPUT,
+ PARAM_SCALERC_IMAGE_EFFECT,
+ PARAM_SCALERC_INPUT_CROP,
+ /* 30 */
+ PARAM_SCALERC_OUTPUT_CROP,
+ PARAM_SCALERC_OTF_OUTPUT,
+ PARAM_SCALERC_DMA_OUTPUT,
+ PARAM_ODC_CONTROL,
+ PARAM_ODC_OTF_INPUT,
+ PARAM_ODC_OTF_OUTPUT,
+ PARAM_DIS_CONTROL,
+ PARAM_DIS_OTF_INPUT,
+ PARAM_DIS_OTF_OUTPUT,
+ PARAM_TDNR_CONTROL,
+ /* 40 */
+ PARAM_TDNR_OTF_INPUT,
+ PARAM_TDNR_1ST_FRAME,
+ PARAM_TDNR_OTF_OUTPUT,
+ PARAM_TDNR_DMA_OUTPUT,
+ PARAM_SCALERP_CONTROL,
+ PARAM_SCALERP_OTF_INPUT,
+ PARAM_SCALERP_IMAGE_EFFECT,
+ PARAM_SCALERP_INPUT_CROP,
+ PARAM_SCALERP_OUTPUT_CROP,
+ PARAM_SCALERP_ROTATION,
+ /* 50 */
+ PARAM_SCALERP_FLIP,
+ PARAM_SCALERP_OTF_OUTPUT,
+ PARAM_SCALERP_DMA_OUTPUT,
+ PARAM_FD_CONTROL,
+ PARAM_FD_OTF_INPUT,
+ PARAM_FD_DMA_INPUT,
+ PARAM_FD_CONFIG,
+};
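+
+/*
+ * Bits 0..31 of the above indices are kept in p_region_index[0], the
+ * remaining ones (PARAM_SCALERC_DMA_OUTPUT and up) in p_region_index[1],
+ * which is why some code tests (bit - 32) against the second word.
+ */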
+
+/* Interrupt map */
+#define FIMC_IS_INT_GENERAL 0
+#define FIMC_IS_INT_FRAME_DONE_ISP 1
+
+/* Input */
+
+#define CONTROL_COMMAND_STOP 0
+#define CONTROL_COMMAND_START 1
+
+#define CONTROL_BYPASS_DISABLE 0
+#define CONTROL_BYPASS_ENABLE 1
+
+#define CONTROL_ERROR_NONE 0
+
+/* OTF (On-The-Fly) input interface commands */
+#define OTF_INPUT_COMMAND_DISABLE 0
+#define OTF_INPUT_COMMAND_ENABLE 1
+
+/* OTF input interface color formats */
+enum oft_input_fmt {
+ OTF_INPUT_FORMAT_BAYER = 0, /* 1 channel */
+ OTF_INPUT_FORMAT_YUV444 = 1, /* 3 channels */
+ OTF_INPUT_FORMAT_YUV422 = 2, /* 3 channels */
+ OTF_INPUT_FORMAT_YUV420 = 3, /* 3 channels */
+ OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER = 10,
+ OTF_INPUT_FORMAT_BAYER_DMA = 11,
+};
+
+#define OTF_INPUT_ORDER_BAYER_GR_BG 0
+
+/* OTF input error codes */
+#define OTF_INPUT_ERROR_NONE 0 /* Input setting is done */
+
+/* DMA input commands */
+#define DMA_INPUT_COMMAND_DISABLE 0
+#define DMA_INPUT_COMMAND_ENABLE 1
+
+/* DMA input color formats */
+enum dma_input_fmt {
+ DMA_INPUT_FORMAT_BAYER = 0,
+ DMA_INPUT_FORMAT_YUV444 = 1,
+ DMA_INPUT_FORMAT_YUV422 = 2,
+ DMA_INPUT_FORMAT_YUV420 = 3,
+};
+
+enum dma_input_order {
+ /* (for DMA_INPUT_PLANE_3) */
+ DMA_INPUT_ORDER_NO = 0,
+ /* (only valid at DMA_INPUT_PLANE_2) */
+ DMA_INPUT_ORDER_CBCR = 1,
+ /* (only valid at DMA_INPUT_PLANE_2) */
+ DMA_INPUT_ORDER_CRCB = 2,
+ /* (only valid at DMA_INPUT_PLANE_1 & DMA_INPUT_FORMAT_YUV444) */
+ DMA_INPUT_ORDER_YCBCR = 3,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_YYCBCR = 4,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_YCBYCR = 5,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_YCRYCB = 6,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_CBYCRY = 7,
+ /* (only valid at DMA_INPUT_FORMAT_YUV422 & DMA_INPUT_PLANE_1) */
+ DMA_INPUT_ORDER_CRYCBY = 8,
+ /* (only valid at DMA_INPUT_FORMAT_BAYER) */
+ DMA_INPUT_ORDER_GR_BG = 9
+};
+
+#define DMA_INPUT_ERROR_NONE 0 /* DMA input setting
+ is done */
+/*
+ * Data output parameter definitions
+ */
+#define OTF_OUTPUT_CROP_DISABLE 0
+#define OTF_OUTPUT_CROP_ENABLE 1
+
+#define OTF_OUTPUT_COMMAND_DISABLE 0
+#define OTF_OUTPUT_COMMAND_ENABLE 1
+
+enum otf_output_fmt {
+ OTF_OUTPUT_FORMAT_YUV444 = 1,
+ OTF_OUTPUT_FORMAT_YUV422 = 2,
+ OTF_OUTPUT_FORMAT_YUV420 = 3,
+ OTF_OUTPUT_FORMAT_RGB = 4,
+};
+
+#define OTF_OUTPUT_ORDER_BAYER_GR_BG 0
+
+#define OTF_OUTPUT_ERROR_NONE 0 /* Output Setting is done */
+
+#define DMA_OUTPUT_COMMAND_DISABLE 0
+#define DMA_OUTPUT_COMMAND_ENABLE 1
+
+enum dma_output_fmt {
+ DMA_OUTPUT_FORMAT_BAYER = 0,
+ DMA_OUTPUT_FORMAT_YUV444 = 1,
+ DMA_OUTPUT_FORMAT_YUV422 = 2,
+ DMA_OUTPUT_FORMAT_YUV420 = 3,
+ DMA_OUTPUT_FORMAT_RGB = 4,
+};
+
+enum dma_output_order {
+ DMA_OUTPUT_ORDER_NO = 0,
+ /* for DMA_OUTPUT_PLANE_3 */
+ DMA_OUTPUT_ORDER_CBCR = 1,
+ /* only valid at DMA_OUTPUT_PLANE_2 */
+ DMA_OUTPUT_ORDER_CRCB = 2,
+ /* only valid at DMA_OUTPUT_PLANE_2 */
+ DMA_OUTPUT_ORDER_YYCBCR = 3,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCBYCR = 4,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCRYCB = 5,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CBYCRY = 6,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CRYCBY = 7,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV422 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCBCR = 8,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CRYCB = 9,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CRCBY = 10,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CBYCR = 11,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_YCRCB = 12,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_CBCRY = 13,
+ /* only valid at DMA_OUTPUT_FORMAT_YUV444 & DMA_OUTPUT_PLANE_1 */
+ DMA_OUTPUT_ORDER_BGR = 14,
+ /* only valid at DMA_OUTPUT_FORMAT_RGB */
+ DMA_OUTPUT_ORDER_GB_BG = 15
+ /* only valid at DMA_OUTPUT_FORMAT_BAYER */
+};
+
+/* enum dma_output_notify_dma_done */
+#define DMA_OUTPUT_NOTIFY_DMA_DONE_DISABLE 0
+#define DMA_OUTPUT_NOTIFY_DMA_DONE_ENABLE 1
+
+/* DMA output error codes */
+#define DMA_OUTPUT_ERROR_NONE 0 /* DMA output setting
+ is done */
+
+/* ---------------------- Global ----------------------------------- */
+#define GLOBAL_SHOTMODE_ERROR_NONE 0 /* shot-mode setting
+ is done */
+/* 3A lock commands */
+#define ISP_AA_COMMAND_START 0
+#define ISP_AA_COMMAND_STOP 1
+
+/* 3A lock target */
+#define ISP_AA_TARGET_AF 1
+#define ISP_AA_TARGET_AE 2
+#define ISP_AA_TARGET_AWB 4
+
+enum isp_af_mode {
+ ISP_AF_MODE_MANUAL = 0,
+ ISP_AF_MODE_SINGLE = 1,
+ ISP_AF_MODE_CONTINUOUS = 2,
+ ISP_AF_MODE_TOUCH = 3,
+ ISP_AF_MODE_SLEEP = 4,
+ ISP_AF_MODE_INIT = 5,
+ ISP_AF_MODE_SET_CENTER_WINDOW = 6,
+ ISP_AF_MODE_SET_TOUCH_WINDOW = 7
+};
+
+/* Face AF commands */
+#define ISP_AF_FACE_DISABLE 0
+#define ISP_AF_FACE_ENABLE 1
+
+/* AF range */
+#define ISP_AF_RANGE_NORMAL 0
+#define ISP_AF_RANGE_MACRO 1
+
+/* AF sleep */
+#define ISP_AF_SLEEP_OFF 0
+#define ISP_AF_SLEEP_ON 1
+
+/* Continuous AF commands */
+#define ISP_AF_CONTINUOUS_DISABLE 0
+#define ISP_AF_CONTINUOUS_ENABLE 1
+
+/* ISP AF error codes */
+#define ISP_AF_ERROR_NONE 0 /* AF mode change is done */
+#define ISP_AF_ERROR_NONE_LOCK_DONE 1 /* AF lock is done */
+
+/* Flash commands */
+#define ISP_FLASH_COMMAND_DISABLE 0
+#define ISP_FLASH_COMMAND_MANUAL_ON 1 /* (forced flash) */
+#define ISP_FLASH_COMMAND_AUTO 2
+#define ISP_FLASH_COMMAND_TORCH 3 /* 3 sec */
+
+/* Flash red-eye commands */
+#define ISP_FLASH_REDEYE_DISABLE 0
+#define ISP_FLASH_REDEYE_ENABLE 1
+
+/* Flash error codes */
+#define ISP_FLASH_ERROR_NONE 0 /* Flash setting is done */
+
+/* -------------------------- AWB ------------------------------------ */
+enum isp_awb_command {
+ ISP_AWB_COMMAND_AUTO = 0,
+ ISP_AWB_COMMAND_ILLUMINATION = 1,
+ ISP_AWB_COMMAND_MANUAL = 2
+};
+
+enum isp_awb_illumination {
+ ISP_AWB_ILLUMINATION_DAYLIGHT = 0,
+ ISP_AWB_ILLUMINATION_CLOUDY = 1,
+ ISP_AWB_ILLUMINATION_TUNGSTEN = 2,
+ ISP_AWB_ILLUMINATION_FLUORESCENT = 3
+};
+
+/* ISP AWB error codes */
+#define ISP_AWB_ERROR_NONE 0 /* AWB setting is done */
+
+/* -------------------------- Effect ----------------------------------- */
+enum isp_imageeffect_command {
+ ISP_IMAGE_EFFECT_DISABLE = 0,
+ ISP_IMAGE_EFFECT_MONOCHROME = 1,
+ ISP_IMAGE_EFFECT_NEGATIVE_MONO = 2,
+ ISP_IMAGE_EFFECT_NEGATIVE_COLOR = 3,
+ ISP_IMAGE_EFFECT_SEPIA = 4
+};
+
+/* Image effect error codes */
+#define ISP_IMAGE_EFFECT_ERROR_NONE 0 /* Image effect setting
+ is done */
+/* ISO commands */
+#define ISP_ISO_COMMAND_AUTO 0
+#define ISP_ISO_COMMAND_MANUAL 1
+
+/* ISO error codes */
+#define ISP_ISO_ERROR_NONE 0 /* ISO setting is done */
+
+/* ISP adjust commands */
+#define ISP_ADJUST_COMMAND_AUTO (0 << 0)
+#define ISP_ADJUST_COMMAND_MANUAL_CONTRAST (1 << 0)
+#define ISP_ADJUST_COMMAND_MANUAL_SATURATION (1 << 1)
+#define ISP_ADJUST_COMMAND_MANUAL_SHARPNESS (1 << 2)
+#define ISP_ADJUST_COMMAND_MANUAL_EXPOSURE (1 << 3)
+#define ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS (1 << 4)
+#define ISP_ADJUST_COMMAND_MANUAL_HUE (1 << 5)
+#define ISP_ADJUST_COMMAND_MANUAL_ALL 0x7f
+
+/* ISP adjustment error codes */
+#define ISP_ADJUST_ERROR_NONE 0 /* Adjust setting is done */
+
+/*
+ * Exposure metering
+ */
+enum isp_metering_command {
+ ISP_METERING_COMMAND_AVERAGE = 0,
+ ISP_METERING_COMMAND_SPOT = 1,
+ ISP_METERING_COMMAND_MATRIX = 2,
+ ISP_METERING_COMMAND_CENTER = 3
+};
+
+/* ISP metering error codes */
+#define ISP_METERING_ERROR_NONE 0 /* Metering setting is done */
+
+/*
+ * AFC
+ */
+enum isp_afc_command {
+ ISP_AFC_COMMAND_DISABLE = 0,
+ ISP_AFC_COMMAND_AUTO = 1,
+ ISP_AFC_COMMAND_MANUAL = 2,
+};
+
+#define ISP_AFC_MANUAL_50HZ 50
+#define ISP_AFC_MANUAL_60HZ 60
+
+/* ------------------------ SCENE MODE--------------------------------- */
+enum isp_scene_mode {
+ ISP_SCENE_NONE = 0,
+ ISP_SCENE_PORTRAIT = 1,
+ ISP_SCENE_LANDSCAPE = 2,
+ ISP_SCENE_SPORTS = 3,
+ ISP_SCENE_PARTYINDOOR = 4,
+ ISP_SCENE_BEACHSNOW = 5,
+ ISP_SCENE_SUNSET = 6,
+ ISP_SCENE_DAWN = 7,
+ ISP_SCENE_FALL = 8,
+ ISP_SCENE_NIGHT = 9,
+ ISP_SCENE_AGAINSTLIGHTWLIGHT = 10,
+ ISP_SCENE_AGAINSTLIGHTWOLIGHT = 11,
+ ISP_SCENE_FIRE = 12,
+ ISP_SCENE_TEXT = 13,
+ ISP_SCENE_CANDLE = 14
+};
+
+/* AFC error codes */
+#define ISP_AFC_ERROR_NONE 0 /* AFC setting is done */
+
+/* ---------------------------- FD ------------------------------------- */
+enum fd_config_command {
+ FD_CONFIG_COMMAND_MAXIMUM_NUMBER = 0x1,
+ FD_CONFIG_COMMAND_ROLL_ANGLE = 0x2,
+ FD_CONFIG_COMMAND_YAW_ANGLE = 0x4,
+ FD_CONFIG_COMMAND_SMILE_MODE = 0x8,
+ FD_CONFIG_COMMAND_BLINK_MODE = 0x10,
+ FD_CONFIG_COMMAND_EYES_DETECT = 0x20,
+ FD_CONFIG_COMMAND_MOUTH_DETECT = 0x40,
+ FD_CONFIG_COMMAND_ORIENTATION = 0x80,
+ FD_CONFIG_COMMAND_ORIENTATION_VALUE = 0x100
+};
+
+enum fd_config_roll_angle {
+ FD_CONFIG_ROLL_ANGLE_BASIC = 0,
+ FD_CONFIG_ROLL_ANGLE_PRECISE_BASIC = 1,
+ FD_CONFIG_ROLL_ANGLE_SIDES = 2,
+ FD_CONFIG_ROLL_ANGLE_PRECISE_SIDES = 3,
+ FD_CONFIG_ROLL_ANGLE_FULL = 4,
+ FD_CONFIG_ROLL_ANGLE_PRECISE_FULL = 5,
+};
+
+enum fd_config_yaw_angle {
+ FD_CONFIG_YAW_ANGLE_0 = 0,
+ FD_CONFIG_YAW_ANGLE_45 = 1,
+ FD_CONFIG_YAW_ANGLE_90 = 2,
+ FD_CONFIG_YAW_ANGLE_45_90 = 3,
+};
+
+/* Smile mode configuration */
+#define FD_CONFIG_SMILE_MODE_DISABLE 0
+#define FD_CONFIG_SMILE_MODE_ENABLE 1
+
+/* Blink mode configuration */
+#define FD_CONFIG_BLINK_MODE_DISABLE 0
+#define FD_CONFIG_BLINK_MODE_ENABLE 1
+
+/* Eyes detection configuration */
+#define FD_CONFIG_EYES_DETECT_DISABLE 0
+#define FD_CONFIG_EYES_DETECT_ENABLE 1
+
+/* Mouth detection configuration */
+#define FD_CONFIG_MOUTH_DETECT_DISABLE 0
+#define FD_CONFIG_MOUTH_DETECT_ENABLE 1
+
+#define FD_CONFIG_ORIENTATION_DISABLE 0
+#define FD_CONFIG_ORIENTATION_ENABLE 1
+
+struct param_control {
+ u32 cmd;
+ u32 bypass;
+ u32 buffer_address;
+ u32 buffer_size;
+ u32 skip_frames; /* only valid at ISP */
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 6];
+ u32 err;
+};
+
+struct param_otf_input {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 order;
+ u32 crop_offset_x;
+ u32 crop_offset_y;
+ u32 crop_width;
+ u32 crop_height;
+ u32 frametime_min;
+ u32 frametime_max;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 13];
+ u32 err;
+};
+
+struct param_dma_input {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 plane;
+ u32 order;
+ u32 buffer_number;
+ u32 buffer_address;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 10];
+ u32 err;
+};
+
+struct param_otf_output {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 order;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 7];
+ u32 err;
+};
+
+struct param_dma_output {
+ u32 cmd;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 bitwidth;
+ u32 plane;
+ u32 order;
+ u32 buffer_number;
+ u32 buffer_address;
+ u32 notify_dma_done;
+ u32 dma_out_mask;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 12];
+ u32 err;
+};
+
+struct param_global_shotmode {
+ u32 cmd;
+ u32 skip_frames;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_sensor_framerate {
+ u32 frame_rate;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_isp_aa {
+ u32 cmd;
+ u32 target;
+ u32 mode;
+ u32 scene;
+ u32 sleep;
+ u32 face;
+ u32 touch_x;
+ u32 touch_y;
+ u32 manual_af_setting;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 10];
+ u32 err;
+};
+
+struct param_isp_flash {
+ u32 cmd;
+ u32 redeye;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_isp_awb {
+ u32 cmd;
+ u32 illumination;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_isp_imageeffect {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_isp_iso {
+ u32 cmd;
+ u32 value;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_isp_adjust {
+ u32 cmd;
+ s32 contrast;
+ s32 saturation;
+ s32 sharpness;
+ s32 exposure;
+ s32 brightness;
+ s32 hue;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 8];
+ u32 err;
+};
+
+struct param_isp_metering {
+ u32 cmd;
+ u32 win_pos_x;
+ u32 win_pos_y;
+ u32 win_width;
+ u32 win_height;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 6];
+ u32 err;
+};
+
+struct param_isp_afc {
+ u32 cmd;
+ u32 manual;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 3];
+ u32 err;
+};
+
+struct param_scaler_imageeffect {
+ u32 cmd;
+ u32 arbitrary_cb;
+ u32 arbitrary_cr;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 4];
+ u32 err;
+};
+
+struct param_scaler_input_crop {
+ u32 cmd;
+ u32 crop_offset_x;
+ u32 crop_offset_y;
+ u32 crop_width;
+ u32 crop_height;
+ u32 in_width;
+ u32 in_height;
+ u32 out_width;
+ u32 out_height;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 10];
+ u32 err;
+};
+
+struct param_scaler_output_crop {
+ u32 cmd;
+ u32 crop_offset_x;
+ u32 crop_offset_y;
+ u32 crop_width;
+ u32 crop_height;
+ u32 out_format;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 7];
+ u32 err;
+};
+
+struct param_scaler_rotation {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_scaler_flip {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_3dnr_1stframe {
+ u32 cmd;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 2];
+ u32 err;
+};
+
+struct param_fd_config {
+ u32 cmd;
+ u32 max_number;
+ u32 roll_angle;
+ u32 yaw_angle;
+ u32 smile_mode;
+ u32 blink_mode;
+ u32 eye_detect;
+ u32 mouth_detect;
+ u32 orientation;
+ u32 orientation_value;
+ u32 reserved[FIMC_IS_PARAM_MAX_ENTRIES - 11];
+ u32 err;
+};
+
+struct global_param {
+ struct param_global_shotmode shotmode;
+};
+
+struct sensor_param {
+ struct param_control control;
+ struct param_otf_output otf_output;
+ struct param_sensor_framerate frame_rate;
+} __packed;
+
+struct buffer_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct isp_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_dma_input dma1_input;
+ struct param_dma_input dma2_input;
+ struct param_isp_aa aa;
+ struct param_isp_flash flash;
+ struct param_isp_awb awb;
+ struct param_isp_imageeffect effect;
+ struct param_isp_iso iso;
+ struct param_isp_adjust adjust;
+ struct param_isp_metering metering;
+ struct param_isp_afc afc;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma1_output;
+ struct param_dma_output dma2_output;
+} __packed;
+
+struct drc_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_dma_input dma_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct scalerc_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_scaler_imageeffect effect;
+ struct param_scaler_input_crop input_crop;
+ struct param_scaler_output_crop output_crop;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma_output;
+} __packed;
+
+struct odc_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct dis_param {
+ struct param_control control;
+ struct param_otf_output otf_input;
+ struct param_otf_output otf_output;
+} __packed;
+
+struct tdnr_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_3dnr_1stframe frame;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma_output;
+} __packed;
+
+struct scalerp_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_scaler_imageeffect effect;
+ struct param_scaler_input_crop input_crop;
+ struct param_scaler_output_crop output_crop;
+ struct param_scaler_rotation rotation;
+ struct param_scaler_flip flip;
+ struct param_otf_output otf_output;
+ struct param_dma_output dma_output;
+} __packed;
+
+struct fd_param {
+ struct param_control control;
+ struct param_otf_input otf_input;
+ struct param_dma_input dma_input;
+ struct param_fd_config config;
+} __packed;
+
+struct is_param_region {
+ struct global_param global;
+ struct sensor_param sensor;
+ struct buffer_param buf;
+ struct isp_param isp;
+ struct drc_param drc;
+ struct scalerc_param scalerc;
+ struct odc_param odc;
+ struct dis_param dis;
+ struct tdnr_param tdnr;
+ struct scalerp_param scalerp;
+ struct fd_param fd;
+} __packed;
+
+#define NUMBER_OF_GAMMA_CURVE_POINTS 32
+
+struct is_tune_sensor {
+ u32 exposure;
+ u32 analog_gain;
+ u32 frame_rate;
+ u32 actuator_position;
+};
+
+struct is_tune_gammacurve {
+ u32 num_pts_x[NUMBER_OF_GAMMA_CURVE_POINTS];
+ u32 num_pts_y_r[NUMBER_OF_GAMMA_CURVE_POINTS];
+ u32 num_pts_y_g[NUMBER_OF_GAMMA_CURVE_POINTS];
+ u32 num_pts_y_b[NUMBER_OF_GAMMA_CURVE_POINTS];
+};
+
+struct is_tune_isp {
+ /* Brightness level: range 0...100, default 7. */
+ u32 brightness_level;
+ /* Contrast level: range -127...127, default 0. */
+ s32 contrast_level;
+ /* Saturation level: range -127...127, default 0. */
+ s32 saturation_level;
+ s32 gamma_level;
+ struct is_tune_gammacurve gamma_curve[4];
+ /* Hue: range -127...127, default 0. */
+ s32 hue;
+ /* Sharpness blur: range -127...127, default 0. */
+ s32 sharpness_blur;
+ /* Despeckle: range -127...127, default 0. */
+ s32 despeckle;
+ /* Edge color suppression: range -127...127, default 0. */
+ s32 edge_color_supression;
+ /* Noise reduction: range -127...127, default 0. */
+ s32 noise_reduction;
+ /* (32 * 4 + 9) * 4 = 548 bytes */
+} __packed;
+
+struct is_tune_region {
+ struct is_tune_sensor sensor;
+ struct is_tune_isp isp;
+} __packed;
+
+struct rational {
+ u32 num;
+ u32 den;
+};
+
+struct srational {
+ s32 num;
+ s32 den;
+};
+
+#define FLASH_FIRED_SHIFT 0
+#define FLASH_NOT_FIRED 0
+#define FLASH_FIRED 1
+
+#define FLASH_STROBE_SHIFT 1
+#define FLASH_STROBE_NO_DETECTION 0
+#define FLASH_STROBE_RESERVED 1
+#define FLASH_STROBE_RETURN_LIGHT_NOT_DETECTED 2
+#define FLASH_STROBE_RETURN_LIGHT_DETECTED 3
+
+#define FLASH_MODE_SHIFT 3
+#define FLASH_MODE_UNKNOWN 0
+#define FLASH_MODE_COMPULSORY_FLASH_FIRING 1
+#define FLASH_MODE_COMPULSORY_FLASH_SUPPRESSION 2
+#define FLASH_MODE_AUTO_MODE 3
+
+#define FLASH_FUNCTION_SHIFT 5
+#define FLASH_FUNCTION_PRESENT 0
+#define FLASH_FUNCTION_NONE 1
+
+#define FLASH_RED_EYE_SHIFT 6
+#define FLASH_RED_EYE_DISABLED 0
+#define FLASH_RED_EYE_SUPPORTED 1
+
+enum apex_aperture_value {
+ F1_0 = 0,
+ F1_4 = 1,
+ F2_0 = 2,
+ F2_8 = 3,
+ F4_0 = 4,
+ F5_6 = 5,
+ F8_9 = 6,
+ F11_0 = 7,
+ F16_0 = 8,
+ F22_0 = 9,
+ F32_0 = 10,
+};
+
+struct exif_attribute {
+ struct rational exposure_time;
+ struct srational shutter_speed;
+ u32 iso_speed_rating;
+ u32 flash;
+ struct srational brightness;
+} __packed;
+
+struct is_frame_header {
+ u32 valid;
+ u32 bad_mark;
+ u32 captured;
+ u32 frame_number;
+ struct exif_attribute exif;
+} __packed;
+
+struct is_fd_rect {
+ u32 offset_x;
+ u32 offset_y;
+ u32 width;
+ u32 height;
+};
+
+struct is_face_marker {
+ u32 frame_number;
+ struct is_fd_rect face;
+ struct is_fd_rect left_eye;
+ struct is_fd_rect right_eye;
+ struct is_fd_rect mouth;
+ u32 roll_angle;
+ u32 yaw_angle;
+ u32 confidence;
+ s32 smile_level;
+ s32 blink_level;
+} __packed;
+
+#define MAX_FRAME_COUNT 8
+#define MAX_FRAME_COUNT_PREVIEW 4
+#define MAX_FRAME_COUNT_CAPTURE 1
+#define MAX_FACE_COUNT 16
+#define MAX_SHARED_COUNT 500
+
+struct is_region {
+ struct is_param_region parameter;
+ struct is_tune_region tune;
+ struct is_frame_header header[MAX_FRAME_COUNT];
+ struct is_face_marker face[MAX_FACE_COUNT];
+ u32 shared[MAX_SHARED_COUNT];
+} __packed;
+
+/* Offset to the ISP DMA2 output buffer address array. */
+#define DMA2_OUTPUT_ADDR_ARRAY_OFFS \
+ (offsetof(struct is_region, shared) + 32 * sizeof(u32))
+
+struct is_debug_frame_descriptor {
+ u32 sensor_frame_time;
+ u32 sensor_exposure_time;
+ s32 sensor_analog_gain;
+ /* monitor for AA */
+ u32 req_lei;
+
+ u32 next_next_lei_exp;
+ u32 next_next_lei_a_gain;
+ u32 next_next_lei_d_gain;
+ u32 next_next_lei_statlei;
+ u32 next_next_lei_lei;
+
+ u32 dummy0;
+};
+
+#define MAX_FRAMEDESCRIPTOR_CONTEXT_NUM (30*20) /* 600 frames */
+#define MAX_VERSION_DISPLAY_BUF 32
+
+struct is_share_region {
+ u32 frame_time;
+ u32 exposure_time;
+ s32 analog_gain;
+
+ u32 r_gain;
+ u32 g_gain;
+ u32 b_gain;
+
+ u32 af_position;
+ u32 af_status;
+ /* 0 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_NOMESSAGE */
+ /* 1 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_REACHED */
+ /* 2 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_UNABLETOREACH */
+ /* 3 : SIRC_ISP_CAMERA_AUTOFOCUSMESSAGE_LOST */
+ /* default : unknown */
+ u32 af_scene_type;
+
+ u32 frame_descp_onoff_control;
+ u32 frame_descp_update_done;
+ u32 frame_descp_idx;
+ u32 frame_descp_max_idx;
+ struct is_debug_frame_descriptor
+ dbg_frame_descp_ctx[MAX_FRAMEDESCRIPTOR_CONTEXT_NUM];
+
+ u32 chip_id;
+ u32 chip_rev_no;
+ u8 isp_fw_ver_no[MAX_VERSION_DISPLAY_BUF];
+ u8 isp_fw_ver_date[MAX_VERSION_DISPLAY_BUF];
+ u8 sirc_sdk_ver_no[MAX_VERSION_DISPLAY_BUF];
+ u8 sirc_sdk_rev_no[MAX_VERSION_DISPLAY_BUF];
+ u8 sirc_sdk_rev_date[MAX_VERSION_DISPLAY_BUF];
+} __packed;
+
+struct is_debug_control {
+ u32 write_point; /* 0~ 500KB boundary */
+ u32 assert_flag; /* 0: Not invoked, 1: Invoked */
+ u32 pabort_flag; /* 0: Not invoked, 1: Invoked */
+ u32 dabort_flag; /* 0: Not invoked, 1: Invoked */
+};
+
+struct sensor_open_extended {
+ u32 actuator_type;
+ u32 mclk;
+ u32 mipi_lane_num;
+ u32 mipi_speed;
+ /* Skip setfile loading when fast_open_sensor is not 0 */
+ u32 fast_open_sensor;
+ /* Activating sensor self calibration mode (6A3) */
+ u32 self_calibration_mode;
+ /* This field is used to adjust the I2C clock based on ACLK200 */
+ /* This value differs for rev 0.2 */
+ u32 i2c_sclk;
+};
+
+struct fimc_is;
+
+int fimc_is_hw_get_sensor_max_framerate(struct fimc_is *is);
+int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset);
+void fimc_is_set_initial_params(struct fimc_is *is);
+unsigned int __get_pending_param_count(struct fimc_is *is);
+
+int __is_hw_update_params(struct fimc_is *is);
+void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
+void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
+void __is_set_sensor(struct fimc_is *is, int fps);
+void __is_set_isp_aa_ae(struct fimc_is *is);
+void __is_set_isp_flash(struct fimc_is *is, u32 cmd, u32 redeye);
+void __is_set_isp_awb(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_isp_effect(struct fimc_is *is, u32 cmd);
+void __is_set_isp_iso(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_isp_adjust(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_isp_metering(struct fimc_is *is, u32 id, u32 val);
+void __is_set_isp_afc(struct fimc_is *is, u32 cmd, u32 val);
+void __is_set_drc_control(struct fimc_is *is, u32 val);
+void __is_set_fd_control(struct fimc_is *is, u32 val);
+void __is_set_fd_config_maxface(struct fimc_is *is, u32 val);
+void __is_set_fd_config_rollangle(struct fimc_is *is, u32 val);
+void __is_set_fd_config_yawangle(struct fimc_is *is, u32 val);
+void __is_set_fd_config_smilemode(struct fimc_is *is, u32 val);
+void __is_set_fd_config_blinkmode(struct fimc_is *is, u32 val);
+void __is_set_fd_config_eyedetect(struct fimc_is *is, u32 val);
+void __is_set_fd_config_mouthdetect(struct fimc_is *is, u32 val);
+void __is_set_fd_config_orientation(struct fimc_is *is, u32 val);
+void __is_set_fd_config_orientation_val(struct fimc_is *is, u32 val);
+void __is_set_isp_aa_af_mode(struct fimc_is *is, int cmd);
+void __is_set_isp_aa_af_start_stop(struct fimc_is *is, int cmd);
+
+#endif
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
new file mode 100644
index 000000000..e0e291066
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -0,0 +1,233 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+
+#include "fimc-is.h"
+#include "fimc-is-command.h"
+#include "fimc-is-regs.h"
+#include "fimc-is-sensor.h"
+
+void fimc_is_fw_clear_irq1(struct fimc_is *is, unsigned int nr)
+{
+ mcuctl_write(1UL << nr, is, MCUCTL_REG_INTCR1);
+}
+
+void fimc_is_fw_clear_irq2(struct fimc_is *is)
+{
+ u32 cfg = mcuctl_read(is, MCUCTL_REG_INTSR2);
+ mcuctl_write(cfg, is, MCUCTL_REG_INTCR2);
+}
+
+void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is)
+{
+ mcuctl_write(INTGR0_INTGD(0), is, MCUCTL_REG_INTGR0);
+}
+
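+/*
+ * Poll INTMSR0 for up to ~2 ms until doorbell 0 from the host is no
+ * longer flagged, i.e. the previous command has been picked up.
+ */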
+int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is)
+{
+ unsigned int timeout = 2000;
+ u32 cfg, status;
+
+ do {
+ cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
+ status = INTMSR0_GET_INTMSD(0, cfg);
+
+ if (--timeout == 0) {
+ dev_warn(&is->pdev->dev, "%s timeout\n",
+ __func__);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ } while (status != 0);
+
+ return 0;
+}
+
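+/*
+ * Issue the SET_PARAMETER host command: the pending parameter count and
+ * both p_region_index bitmap words are passed through the ISSR shared
+ * registers before the host-to-IS doorbell is raised.
+ */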
+int fimc_is_hw_set_param(struct fimc_is *is)
+{
+ struct chain_config *config = &is->config[is->config_index];
+ unsigned int param_count = __get_pending_param_count(is);
+
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+
+ mcuctl_write(HIC_SET_PARAMETER, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->config_index, is, MCUCTL_REG_ISSR(2));
+
+ mcuctl_write(param_count, is, MCUCTL_REG_ISSR(3));
+ mcuctl_write(config->p_region_index[0], is, MCUCTL_REG_ISSR(4));
+ mcuctl_write(config->p_region_index[1], is, MCUCTL_REG_ISSR(5));
+
+ fimc_is_hw_set_intgr0_gd0(is);
+ return 0;
+}
+
+static int __maybe_unused fimc_is_hw_set_tune(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+
+ mcuctl_write(HIC_SET_TUNE, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->h2i_cmd.entry_id, is, MCUCTL_REG_ISSR(2));
+
+ fimc_is_hw_set_intgr0_gd0(is);
+ return 0;
+}
+
+#define FIMC_IS_MAX_PARAMS 4
+
+int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num_args)
+{
+ int i;
+
+ if (num_args > FIMC_IS_MAX_PARAMS)
+ return -EINVAL;
+
+ is->i2h_cmd.num_args = num_args;
+
+ for (i = 0; i < FIMC_IS_MAX_PARAMS; i++) {
+ if (i < num_args)
+ is->i2h_cmd.args[i] = mcuctl_read(is,
+ MCUCTL_REG_ISSR(12 + i));
+ else
+ is->i2h_cmd.args[i] = 0;
+ }
+ return 0;
+}
+
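+/*
+ * Hand the ISP DMA2 output buffer mask to the firmware through ISSR(23);
+ * a single-buffer mask is rejected as the firmware seems to require at
+ * least two buffers.
+ */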
+void fimc_is_hw_set_isp_buf_mask(struct fimc_is *is, unsigned int mask)
+{
+ if (hweight32(mask) == 1) {
+ dev_err(&is->pdev->dev, "%s(): not enough buffers (mask %#x)\n",
+ __func__, mask);
+ return;
+ }
+
+ if (mcuctl_read(is, MCUCTL_REG_ISSR(23)) != 0)
+ dev_dbg(&is->pdev->dev, "non-zero DMA buffer mask\n");
+
+ mcuctl_write(mask, is, MCUCTL_REG_ISSR(23));
+}
+
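+/*
+ * Reply to the firmware's GET_SENSOR_NUM request with the number of
+ * sensors handled by this driver (FIMC_IS_SENSORS_NUM).
+ */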
+void fimc_is_hw_set_sensor_num(struct fimc_is *is)
+{
+ pr_debug("setting sensor index to: %d\n", is->sensor_index);
+
+ mcuctl_write(IH_REPLY_DONE, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(IHC_GET_SENSOR_NUM, is, MCUCTL_REG_ISSR(2));
+ mcuctl_write(FIMC_IS_SENSORS_NUM, is, MCUCTL_REG_ISSR(3));
+}
+
+void fimc_is_hw_close_sensor(struct fimc_is *is, unsigned int index)
+{
+ if (is->sensor_index != index)
+ return;
+
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_CLOSE_SENSOR, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(2));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_get_setfile_addr(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_GET_SET_FILE_ADDR, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_load_setfile(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_LOAD_SET_FILE, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
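+/*
+ * Request a scenario change matching config_index (preview/capture,
+ * still/video), together with the current setfile sub-index.
+ */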
+int fimc_is_hw_change_mode(struct fimc_is *is)
+{
+ static const u8 cmd[] = {
+ HIC_PREVIEW_STILL, HIC_PREVIEW_VIDEO,
+ HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
+ };
+
+ if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
+ return -EINVAL;
+
+ mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(is->setfile.sub_index, is, MCUCTL_REG_ISSR(2));
+ fimc_is_hw_set_intgr0_gd0(is);
+ return 0;
+}
+
+void fimc_is_hw_stream_on(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_STREAM_ON, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(0, is, MCUCTL_REG_ISSR(2));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_stream_off(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_STREAM_OFF, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
+void fimc_is_hw_subip_power_off(struct fimc_is *is)
+{
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ mcuctl_write(HIC_POWER_DOWN, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ fimc_is_hw_set_intgr0_gd0(is);
+}
+
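+/*
+ * Send the SET_PARAMETER command and wait for the firmware to acknowledge
+ * it, optionally refreshing the shared parameter region from the cached
+ * configuration first.
+ */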
+int fimc_is_itf_s_param(struct fimc_is *is, bool update)
+{
+ int ret;
+
+ if (update)
+ __is_hw_update_params(is);
+
+ fimc_is_mem_barrier();
+
+ clear_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
+ fimc_is_hw_set_param(is);
+ ret = fimc_is_wait_event(is, IS_ST_BLOCK_CMD_CLEARED, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0)
+ dev_err(&is->pdev->dev, "%s() timeout\n", __func__);
+
+ return ret;
+}
+
+int fimc_is_itf_mode_change(struct fimc_is *is)
+{
+ int ret;
+
+ clear_bit(IS_ST_CHANGE_MODE, &is->state);
+ fimc_is_hw_change_mode(is);
+ ret = fimc_is_wait_event(is, IS_ST_CHANGE_MODE, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0)
+ dev_err(&is->pdev->dev, "%s(): mode change (%d) timeout\n",
+ __func__, is->config_index);
+ return ret;
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.h b/drivers/media/platform/exynos4-is/fimc-is-regs.h
new file mode 100644
index 000000000..141e5ddad
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.h
@@ -0,0 +1,164 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_REG_H_
+#define FIMC_IS_REG_H_
+
+/* WDT_ISP register */
+#define REG_WDT_ISP 0x00170000
+
+/* MCUCTL registers base offset */
+#define MCUCTL_BASE 0x00180000
+
+/* MCU Controller Register */
+#define MCUCTL_REG_MCUCTRL (MCUCTL_BASE + 0x00)
+#define MCUCTRL_MSWRST (1 << 0)
+
+/* Boot Base Offset Address Register */
+#define MCUCTL_REG_BBOAR (MCUCTL_BASE + 0x04)
+
+/* Interrupt Generation Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTGR0 (MCUCTL_BASE + 0x08)
+/* __n = 0...9 */
+#define INTGR0_INTGC(__n) (1 << ((__n) + 16))
+/* __n = 0...5 */
+#define INTGR0_INTGD(__n) (1 << (__n))
+
+/* Interrupt Clear Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTCR0 (MCUCTL_BASE + 0x0c)
+/* __n = 0...9 */
+#define INTCR0_INTGC(__n) (1 << ((__n) + 16))
+/* __n = 0...5 */
+#define INTCR0_INTCD(__n) (1 << ((__n) + 16))
+
+/* Interrupt Mask Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTMR0 (MCUCTL_BASE + 0x10)
+/* __n = 0...9 */
+#define INTMR0_INTMC(__n) (1 << ((__n) + 16))
+/* __n = 0...5 */
+#define INTMR0_INTMD(__n) (1 << (__n))
+
+/* Interrupt Status Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTSR0 (MCUCTL_BASE + 0x14)
+/* __n (bit number) = 0...4 */
+#define INTSR0_GET_INTSD(x, __n) (((x) >> (__n)) & 0x1)
+/* __n (bit number) = 0...9 */
+#define INTSR0_GET_INTSC(x, __n) (((x) >> ((__n) + 16)) & 0x1)
+
+/* Interrupt Mask Status Register 0 from Host CPU to VIC */
+#define MCUCTL_REG_INTMSR0 (MCUCTL_BASE + 0x18)
+/* __n (bit number) = 0...4 */
+#define INTMSR0_GET_INTMSD(x, __n) (((x) >> (__n)) & 0x1)
+/* __n (bit number) = 0...9 */
+#define INTMSR0_GET_INTMSC(x, __n) (((x) >> ((__n) + 16)) & 0x1)
+
+/* Interrupt Generation Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTGR1 (MCUCTL_BASE + 0x1c)
+/* __n = 0...9 */
+#define INTGR1_INTGC(__n) (1 << (__n))
+
+/* Interrupt Clear Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTCR1 (MCUCTL_BASE + 0x20)
+/* __n = 0...9 */
+#define INTCR1_INTCC(__n) (1 << (__n))
+
+/* Interrupt Mask Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTMR1 (MCUCTL_BASE + 0x24)
+/* __n = 0...9 */
+#define INTMR1_INTMC(__n) (1 << (__n))
+
+/* Interrupt Status Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTSR1 (MCUCTL_BASE + 0x28)
+/* Interrupt Mask Status Register 1 from ISP CPU to Host IC */
+#define MCUCTL_REG_INTMSR1 (MCUCTL_BASE + 0x2c)
+
+/* Interrupt Clear Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTCR2 (MCUCTL_BASE + 0x30)
+/* __n = 0...5 */
+#define INTCR2_INTCC(__n) (1 << ((__n) + 16))
+
+/* Interrupt Mask Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTMR2 (MCUCTL_BASE + 0x34)
+/* __n = 0...25 */
+#define INTMR2_INTMCIS(__n) (1 << (__n))
+
+/* Interrupt Status Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTSR2 (MCUCTL_BASE + 0x38)
+/* Interrupt Mask Status Register 2 from ISP BLK's interrupts to Host IC */
+#define MCUCTL_REG_INTMSR2 (MCUCTL_BASE + 0x3c)
+
+/* General Purpose Output Control Register (0~17) */
+#define MCUCTL_REG_GPOCTLR (MCUCTL_BASE + 0x40)
+/* __n = 0...17 */
+#define GPOCTLR_GPOG(__n) (1 << (__n))
+
+/* General Purpose Pad Output Enable Register (0~17) */
+#define MCUCTL_REG_GPOENCTLR (MCUCTL_BASE + 0x44)
+/* __n = 0...17 */
+#define GPOENCTLR_GPOEN(__n) (1 << (__n))
+
+/* General Purpose Input Control Register (0~17) */
+#define MCUCTL_REG_GPICTLR (MCUCTL_BASE + 0x48)
+
+/* Shared registers between ISP CPU and the host CPU - ISSRxx */
+
+/* ISSR(0): Command Host -> IS */
+/* ISSR(1): Sensor ID for Command, ISSR2...5 = Parameter 1...4 */
+
+/* ISSR(10): Reply IS -> Host */
+/* ISSR(11): Sensor ID for Reply, ISSR12...15 = Parameter 1...4 */
+
+/* ISSR(20): ISP_FRAME_DONE : SENSOR ID */
+/* ISSR(21): ISP_FRAME_DONE : PARAMETER 1 */
+
+/* ISSR(24): SCALERC_FRAME_DONE : SENSOR ID */
+/* ISSR(25): SCALERC_FRAME_DONE : PARAMETER 1 */
+
+/* ISSR(28): 3DNR_FRAME_DONE : SENSOR ID */
+/* ISSR(29): 3DNR_FRAME_DONE : PARAMETER 1 */
+
+/* ISSR(32): SCALERP_FRAME_DONE : SENSOR ID */
+/* ISSR(33): SCALERP_FRAME_DONE : PARAMETER 1 */
+
+/* __n = 0...63 */
+#define MCUCTL_REG_ISSR(__n) (MCUCTL_BASE + 0x80 + ((__n) * 4))
+
+/* PMU ISP register offsets */
+#define REG_CMU_RESET_ISP_SYS_PWR_REG 0x1174
+#define REG_CMU_SYSCLK_ISP_SYS_PWR_REG 0x13b8
+#define REG_PMU_ISP_ARM_SYS 0x1050
+#define REG_PMU_ISP_ARM_CONFIGURATION 0x2280
+#define REG_PMU_ISP_ARM_STATUS 0x2284
+#define REG_PMU_ISP_ARM_OPTION 0x2288
+
+void fimc_is_fw_clear_irq1(struct fimc_is *is, unsigned int bit);
+void fimc_is_fw_clear_irq2(struct fimc_is *is);
+int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num);
+
+void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is);
+int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is);
+void fimc_is_hw_set_sensor_num(struct fimc_is *is);
+void fimc_is_hw_set_isp_buf_mask(struct fimc_is *is, unsigned int mask);
+void fimc_is_hw_stream_on(struct fimc_is *is);
+void fimc_is_hw_stream_off(struct fimc_is *is);
+int fimc_is_hw_set_param(struct fimc_is *is);
+int fimc_is_hw_change_mode(struct fimc_is *is);
+
+void fimc_is_hw_close_sensor(struct fimc_is *is, unsigned int index);
+void fimc_is_hw_get_setfile_addr(struct fimc_is *is);
+void fimc_is_hw_load_setfile(struct fimc_is *is);
+void fimc_is_hw_subip_power_off(struct fimc_is *is);
+
+int fimc_is_itf_s_param(struct fimc_is *is, bool update);
+int fimc_is_itf_mode_change(struct fimc_is *is);
+
+#endif /* FIMC_IS_REG_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.c b/drivers/media/platform/exynos4-is/fimc-is-sensor.c
new file mode 100644
index 000000000..10e82e21b
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-sensor.c
@@ -0,0 +1,34 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "fimc-is-sensor.h"
+
+static const struct sensor_drv_data s5k6a3_drvdata = {
+ .id = FIMC_IS_SENSOR_ID_S5K6A3,
+ .open_timeout = S5K6A3_OPEN_TIMEOUT,
+};
+
+static const struct of_device_id fimc_is_sensor_of_ids[] = {
+ {
+ .compatible = "samsung,s5k6a3",
+ .data = &s5k6a3_drvdata,
+ },
+ { }
+};
+
+const struct sensor_drv_data *fimc_is_sensor_get_drvdata(
+ struct device_node *node)
+{
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(fimc_is_sensor_of_ids, node);
+ return of_id ? of_id->data : NULL;
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-sensor.h b/drivers/media/platform/exynos4-is/fimc-is-sensor.h
new file mode 100644
index 000000000..173ccffa4
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is-sensor.h
@@ -0,0 +1,56 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_SENSOR_H_
+#define FIMC_IS_SENSOR_H_
+
+#include <linux/of.h>
+#include <linux/types.h>
+
+#define S5K6A3_OPEN_TIMEOUT 2000 /* ms */
+#define S5K6A3_SENSOR_WIDTH 1392
+#define S5K6A3_SENSOR_HEIGHT 1392
+
+enum fimc_is_sensor_id {
+ FIMC_IS_SENSOR_ID_S5K3H2 = 1,
+ FIMC_IS_SENSOR_ID_S5K6A3,
+ FIMC_IS_SENSOR_ID_S5K4E5,
+ FIMC_IS_SENSOR_ID_S5K3H7,
+ FIMC_IS_SENSOR_ID_CUSTOM,
+ FIMC_IS_SENSOR_ID_END
+};
+
+#define IS_SENSOR_CTRL_BUS_I2C0 0
+#define IS_SENSOR_CTRL_BUS_I2C1 1
+
+struct sensor_drv_data {
+ enum fimc_is_sensor_id id;
+ /* sensor open timeout in ms */
+ unsigned short open_timeout;
+};
+
+/**
+ * struct fimc_is_sensor - fimc-is sensor data structure
+ * @drvdata: a pointer to the sensor's parameters data structure
+ * @i2c_bus: ISP I2C bus index (0...1)
+ * @test_pattern: true to enable video test pattern
+ */
+struct fimc_is_sensor {
+ const struct sensor_drv_data *drvdata;
+ unsigned int i2c_bus;
+ u8 test_pattern;
+};
+
+const struct sensor_drv_data *fimc_is_sensor_get_drvdata(
+ struct device_node *node);
+
+#endif /* FIMC_IS_SENSOR_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
new file mode 100644
index 000000000..0f3f82bd4
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -0,0 +1,1009 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-contiguous.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "media-dev.h"
+#include "fimc-is.h"
+#include "fimc-is-command.h"
+#include "fimc-is-errno.h"
+#include "fimc-is-i2c.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+
+
+static char *fimc_is_clocks[ISS_CLKS_MAX] = {
+ [ISS_CLK_PPMUISPX] = "ppmuispx",
+ [ISS_CLK_PPMUISPMX] = "ppmuispmx",
+ [ISS_CLK_LITE0] = "lite0",
+ [ISS_CLK_LITE1] = "lite1",
+ [ISS_CLK_MPLL] = "mpll",
+ [ISS_CLK_ISP] = "isp",
+ [ISS_CLK_DRC] = "drc",
+ [ISS_CLK_FD] = "fd",
+ [ISS_CLK_MCUISP] = "mcuisp",
+ [ISS_CLK_GICISP] = "gicisp",
+ [ISS_CLK_PWM_ISP] = "pwm_isp",
+ [ISS_CLK_MCUCTL_ISP] = "mcuctl_isp",
+ [ISS_CLK_UART] = "uart",
+ [ISS_CLK_ISP_DIV0] = "ispdiv0",
+ [ISS_CLK_ISP_DIV1] = "ispdiv1",
+ [ISS_CLK_MCUISP_DIV0] = "mcuispdiv0",
+ [ISS_CLK_MCUISP_DIV1] = "mcuispdiv1",
+ [ISS_CLK_ACLK200] = "aclk200",
+ [ISS_CLK_ACLK200_DIV] = "div_aclk200",
+ [ISS_CLK_ACLK400MCUISP] = "aclk400mcuisp",
+ [ISS_CLK_ACLK400MCUISP_DIV] = "div_aclk400mcuisp",
+};
+
+static void fimc_is_put_clocks(struct fimc_is *is)
+{
+ int i;
+
+ for (i = 0; i < ISS_CLKS_MAX; i++) {
+ if (IS_ERR(is->clocks[i]))
+ continue;
+ clk_put(is->clocks[i]);
+ is->clocks[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_is_get_clocks(struct fimc_is *is)
+{
+ int i, ret;
+
+ for (i = 0; i < ISS_CLKS_MAX; i++)
+ is->clocks[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ISS_CLKS_MAX; i++) {
+ is->clocks[i] = clk_get(&is->pdev->dev, fimc_is_clocks[i]);
+ if (IS_ERR(is->clocks[i])) {
+ ret = PTR_ERR(is->clocks[i]);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ fimc_is_put_clocks(is);
+ dev_err(&is->pdev->dev, "failed to get clock: %s\n",
+ fimc_is_clocks[i]);
+ return ret;
+}
+
+static int fimc_is_setup_clocks(struct fimc_is *is)
+{
+ int ret;
+
+ ret = clk_set_parent(is->clocks[ISS_CLK_ACLK200],
+ is->clocks[ISS_CLK_ACLK200_DIV]);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_parent(is->clocks[ISS_CLK_ACLK400MCUISP],
+ is->clocks[ISS_CLK_ACLK400MCUISP_DIV]);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV0], ACLK_AXI_FREQUENCY);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV1], ACLK_AXI_FREQUENCY);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV0],
+ ATCLK_MCUISP_FREQUENCY);
+ if (ret < 0)
+ return ret;
+
+ return clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV1],
+ ATCLK_MCUISP_FREQUENCY);
+}
+
+static int fimc_is_enable_clocks(struct fimc_is *is)
+{
+ int i, ret;
+
+ for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
+ if (IS_ERR(is->clocks[i]))
+ continue;
+ ret = clk_prepare_enable(is->clocks[i]);
+ if (ret < 0) {
+ dev_err(&is->pdev->dev, "clock %s enable failed\n",
+ fimc_is_clocks[i]);
+ for (--i; i >= 0; i--)
+ clk_disable_unprepare(is->clocks[i]);
+ return ret;
+ }
+ pr_debug("enabled clock: %s\n", fimc_is_clocks[i]);
+ }
+ return 0;
+}
+
+static void fimc_is_disable_clocks(struct fimc_is *is)
+{
+ int i;
+
+ for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
+ if (!IS_ERR(is->clocks[i])) {
+ clk_disable_unprepare(is->clocks[i]);
+ pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
+ }
+ }
+}
+
+static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
+ struct device_node *node)
+{
+ struct fimc_is_sensor *sensor = &is->sensor[index];
+ struct device_node *ep, *port;
+ u32 tmp = 0;
+ int ret;
+
+ sensor->drvdata = fimc_is_sensor_get_drvdata(node);
+ if (!sensor->drvdata) {
+ dev_err(&is->pdev->dev, "no driver data found for: %pOF\n",
+ node);
+ return -EINVAL;
+ }
+
+ ep = of_graph_get_next_endpoint(node, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ port = of_graph_get_remote_port(ep);
+ of_node_put(ep);
+ if (!port)
+ return -ENXIO;
+
+ /* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */
+ ret = of_property_read_u32(port, "reg", &tmp);
+ if (ret < 0) {
+ dev_err(&is->pdev->dev, "reg property not found at: %pOF\n",
+ port);
+ of_node_put(port);
+ return ret;
+ }
+
+ of_node_put(port);
+ sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0;
+ return 0;
+}
+
+static int fimc_is_register_subdevs(struct fimc_is *is)
+{
+ struct device_node *i2c_bus, *child;
+ int ret, index = 0;
+
+ ret = fimc_isp_subdev_create(&is->isp);
+ if (ret < 0)
+ return ret;
+
+ for_each_compatible_node(i2c_bus, NULL, FIMC_IS_I2C_COMPATIBLE) {
+ for_each_available_child_of_node(i2c_bus, child) {
+ ret = fimc_is_parse_sensor_config(is, index, child);
+
+ if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) {
+ of_node_put(child);
+ return ret;
+ }
+ index++;
+ }
+ }
+ return 0;
+}
+
+static int fimc_is_unregister_subdevs(struct fimc_is *is)
+{
+ fimc_isp_subdev_destroy(&is->isp);
+ return 0;
+}
+
+static int fimc_is_load_setfile(struct fimc_is *is, char *file_name)
+{
+ const struct firmware *fw;
+ void *buf;
+ int ret;
+
+ ret = request_firmware(&fw, file_name, &is->pdev->dev);
+ if (ret < 0) {
+ dev_err(&is->pdev->dev, "firmware request failed (%d)\n", ret);
+ return ret;
+ }
+ buf = is->memory.vaddr + is->setfile.base;
+ memcpy(buf, fw->data, fw->size);
+ fimc_is_mem_barrier();
+ is->setfile.size = fw->size;
+
+ pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf);
+
+ memcpy(is->fw.setfile_info,
+ fw->data + fw->size - FIMC_IS_SETFILE_INFO_LEN,
+ FIMC_IS_SETFILE_INFO_LEN - 1);
+
+ is->fw.setfile_info[FIMC_IS_SETFILE_INFO_LEN - 1] = '\0';
+ is->setfile.state = 1;
+
+ pr_debug("FIMC-IS setfile loaded: base: %#x, size: %zu B\n",
+ is->setfile.base, fw->size);
+
+ release_firmware(fw);
+ return ret;
+}
+
+int fimc_is_cpu_set_power(struct fimc_is *is, int on)
+{
+ unsigned int timeout = FIMC_IS_POWER_ON_TIMEOUT;
+
+ if (on) {
+ /* Disable watchdog */
+ mcuctl_write(0, is, REG_WDT_ISP);
+
+ /* Cortex-A5 start address setting */
+ mcuctl_write(is->memory.paddr, is, MCUCTL_REG_BBOAR);
+
+ /* Enable and start Cortex-A5 */
+ pmuisp_write(0x18000, is, REG_PMU_ISP_ARM_OPTION);
+ pmuisp_write(0x1, is, REG_PMU_ISP_ARM_CONFIGURATION);
+ } else {
+ /* A5 power off */
+ pmuisp_write(0x10000, is, REG_PMU_ISP_ARM_OPTION);
+ pmuisp_write(0x0, is, REG_PMU_ISP_ARM_CONFIGURATION);
+
+ while (pmuisp_read(is, REG_PMU_ISP_ARM_STATUS) & 1) {
+ if (timeout == 0)
+ return -ETIME;
+ timeout--;
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+/* Wait until @bit of @is->state is set to @state in the interrupt handler. */
+int fimc_is_wait_event(struct fimc_is *is, unsigned long bit,
+ unsigned int state, unsigned int timeout)
+{
+
+ int ret = wait_event_timeout(is->irq_queue,
+ !state ^ test_bit(bit, &is->state),
+ timeout);
+ if (ret == 0) {
+ dev_WARN(&is->pdev->dev, "%s() timed out\n", __func__);
+ return -ETIME;
+ }
+ return 0;
+}
+
+int fimc_is_start_firmware(struct fimc_is *is)
+{
+ struct device *dev = &is->pdev->dev;
+ int ret;
+
+ if (is->fw.f_w == NULL) {
+ dev_err(dev, "firmware is not loaded\n");
+ return -EINVAL;
+ }
+
+ memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
+ wmb();
+
+ ret = fimc_is_cpu_set_power(is, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_is_wait_event(is, IS_ST_A5_PWR_ON, 1,
+ msecs_to_jiffies(FIMC_IS_FW_LOAD_TIMEOUT));
+ if (ret < 0)
+ dev_err(dev, "FIMC-IS CPU power on failed\n");
+
+ return ret;
+}
+
+/* Allocate working memory for the FIMC-IS CPU. */
+static int fimc_is_alloc_cpu_memory(struct fimc_is *is)
+{
+ struct device *dev = &is->pdev->dev;
+
+ is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE,
+ &is->memory.paddr, GFP_KERNEL);
+ if (is->memory.vaddr == NULL)
+ return -ENOMEM;
+
+ is->memory.size = FIMC_IS_CPU_MEM_SIZE;
+ memset(is->memory.vaddr, 0, is->memory.size);
+
+ dev_info(dev, "FIMC-IS CPU memory base: %#x\n", (u32)is->memory.paddr);
+
+ if (((u32)is->memory.paddr) & FIMC_IS_FW_ADDR_MASK) {
+ dev_err(dev, "invalid firmware memory alignment: %#x\n",
+ (u32)is->memory.paddr);
+ dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
+ is->memory.paddr);
+ return -EIO;
+ }
+
+ is->is_p_region = (struct is_region *)(is->memory.vaddr +
+ FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE);
+
+ is->is_dma_p_region = is->memory.paddr +
+ FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE;
+
+ is->is_shared_region = (struct is_share_region *)(is->memory.vaddr +
+ FIMC_IS_SHARED_REGION_OFFSET);
+ return 0;
+}
+
+static void fimc_is_free_cpu_memory(struct fimc_is *is)
+{
+ struct device *dev = &is->pdev->dev;
+
+ if (is->memory.vaddr == NULL)
+ return;
+
+ dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
+ is->memory.paddr);
+}
+
+static void fimc_is_load_firmware(const struct firmware *fw, void *context)
+{
+ struct fimc_is *is = context;
+ struct device *dev = &is->pdev->dev;
+ void *buf;
+ int ret;
+
+ if (fw == NULL) {
+ dev_err(dev, "firmware request failed\n");
+ return;
+ }
+ mutex_lock(&is->lock);
+
+ if (fw->size < FIMC_IS_FW_SIZE_MIN || fw->size > FIMC_IS_FW_SIZE_MAX) {
+ dev_err(dev, "wrong firmware size: %zu\n", fw->size);
+ goto done;
+ }
+
+ is->fw.size = fw->size;
+
+ ret = fimc_is_alloc_cpu_memory(is);
+ if (ret < 0) {
+ dev_err(dev, "failed to allocate FIMC-IS CPU memory\n");
+ goto done;
+ }
+
+ memcpy(is->memory.vaddr, fw->data, fw->size);
+ wmb();
+
+ /* Read firmware description. */
+ buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_DESC_LEN);
+ memcpy(&is->fw.info, buf, FIMC_IS_FW_INFO_LEN);
+ is->fw.info[FIMC_IS_FW_INFO_LEN] = 0;
+
+ buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_VER_LEN);
+ memcpy(&is->fw.version, buf, FIMC_IS_FW_VER_LEN);
+ is->fw.version[FIMC_IS_FW_VER_LEN - 1] = 0;
+
+ is->fw.state = 1;
+
+ dev_info(dev, "loaded firmware: %s, rev. %s\n",
+ is->fw.info, is->fw.version);
+ dev_dbg(dev, "FW size: %zu, paddr: %pad\n", fw->size, &is->memory.paddr);
+
+ is->is_shared_region->chip_id = 0xe4412;
+ is->is_shared_region->chip_rev_no = 1;
+
+ fimc_is_mem_barrier();
+
+ /*
+ * FIXME: The firmware is not released here, as it needs to be
+ * kept around for copying to the IS working memory each time
+ * the Cortex-A5 is restarted.
+ */
+ release_firmware(is->fw.f_w);
+ is->fw.f_w = fw;
+done:
+ mutex_unlock(&is->lock);
+}
+
+static int fimc_is_request_firmware(struct fimc_is *is, const char *fw_name)
+{
+ return request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_HOTPLUG, fw_name, &is->pdev->dev,
+ GFP_KERNEL, is, fimc_is_load_firmware);
+}
+
+/* General IS interrupt handler */
+static void fimc_is_general_irq_handler(struct fimc_is *is)
+{
+ is->i2h_cmd.cmd = mcuctl_read(is, MCUCTL_REG_ISSR(10));
+
+ switch (is->i2h_cmd.cmd) {
+ case IHC_GET_SENSOR_NUM:
+ fimc_is_hw_get_params(is, 1);
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+ fimc_is_hw_set_sensor_num(is);
+ pr_debug("ISP FW version: %#x\n", is->i2h_cmd.args[0]);
+ break;
+ case IHC_SET_FACE_MARK:
+ case IHC_FRAME_DONE:
+ fimc_is_hw_get_params(is, 2);
+ break;
+ case IHC_SET_SHOT_MARK:
+ case IHC_AA_DONE:
+ case IH_REPLY_DONE:
+ fimc_is_hw_get_params(is, 3);
+ break;
+ case IH_REPLY_NOT_DONE:
+ fimc_is_hw_get_params(is, 4);
+ break;
+ case IHC_NOT_READY:
+ break;
+ default:
+ pr_info("unknown command: %#x\n", is->i2h_cmd.cmd);
+ }
+
+ fimc_is_fw_clear_irq1(is, FIMC_IS_INT_GENERAL);
+
+ switch (is->i2h_cmd.cmd) {
+ case IHC_GET_SENSOR_NUM:
+ fimc_is_hw_set_intgr0_gd0(is);
+ set_bit(IS_ST_A5_PWR_ON, &is->state);
+ break;
+
+ case IHC_SET_SHOT_MARK:
+ break;
+
+ case IHC_SET_FACE_MARK:
+ is->fd_header.count = is->i2h_cmd.args[0];
+ is->fd_header.index = is->i2h_cmd.args[1];
+ is->fd_header.offset = 0;
+ break;
+
+ case IHC_FRAME_DONE:
+ break;
+
+ case IHC_AA_DONE:
+ pr_debug("AA_DONE - %d, %d, %d\n", is->i2h_cmd.args[0],
+ is->i2h_cmd.args[1], is->i2h_cmd.args[2]);
+ break;
+
+ case IH_REPLY_DONE:
+ pr_debug("ISR_DONE: args[0]: %#x\n", is->i2h_cmd.args[0]);
+
+ switch (is->i2h_cmd.args[0]) {
+ case HIC_PREVIEW_STILL...HIC_CAPTURE_VIDEO:
+ /* Get CAC margin */
+ set_bit(IS_ST_CHANGE_MODE, &is->state);
+ is->isp.cac_margin_x = is->i2h_cmd.args[1];
+ is->isp.cac_margin_y = is->i2h_cmd.args[2];
+ pr_debug("CAC margin (x,y): (%d,%d)\n",
+ is->isp.cac_margin_x, is->isp.cac_margin_y);
+ break;
+
+ case HIC_STREAM_ON:
+ clear_bit(IS_ST_STREAM_OFF, &is->state);
+ set_bit(IS_ST_STREAM_ON, &is->state);
+ break;
+
+ case HIC_STREAM_OFF:
+ clear_bit(IS_ST_STREAM_ON, &is->state);
+ set_bit(IS_ST_STREAM_OFF, &is->state);
+ break;
+
+ case HIC_SET_PARAMETER:
+ is->config[is->config_index].p_region_index[0] = 0;
+ is->config[is->config_index].p_region_index[1] = 0;
+ set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
+ pr_debug("HIC_SET_PARAMETER\n");
+ break;
+
+ case HIC_GET_PARAMETER:
+ break;
+
+ case HIC_SET_TUNE:
+ break;
+
+ case HIC_GET_STATUS:
+ break;
+
+ case HIC_OPEN_SENSOR:
+ set_bit(IS_ST_OPEN_SENSOR, &is->state);
+ pr_debug("data lanes: %d, settle line: %d\n",
+ is->i2h_cmd.args[2], is->i2h_cmd.args[1]);
+ break;
+
+ case HIC_CLOSE_SENSOR:
+ clear_bit(IS_ST_OPEN_SENSOR, &is->state);
+ is->sensor_index = 0;
+ break;
+
+ case HIC_MSG_TEST:
+ pr_debug("config MSG level completed\n");
+ break;
+
+ case HIC_POWER_DOWN:
+ clear_bit(IS_ST_PWR_SUBIP_ON, &is->state);
+ break;
+
+ case HIC_GET_SET_FILE_ADDR:
+ is->setfile.base = is->i2h_cmd.args[1];
+ set_bit(IS_ST_SETFILE_LOADED, &is->state);
+ break;
+
+ case HIC_LOAD_SET_FILE:
+ set_bit(IS_ST_SETFILE_LOADED, &is->state);
+ break;
+ }
+ break;
+
+ case IH_REPLY_NOT_DONE:
+ pr_err("ISR_NDONE: %d: %#x, %s\n", is->i2h_cmd.args[0],
+ is->i2h_cmd.args[1],
+ fimc_is_strerr(is->i2h_cmd.args[1]));
+
+ if (is->i2h_cmd.args[1] & IS_ERROR_TIME_OUT_FLAG)
+ pr_err("IS_ERROR_TIME_OUT\n");
+
+ switch (is->i2h_cmd.args[1]) {
+ case IS_ERROR_SET_PARAMETER:
+ fimc_is_mem_barrier();
+ }
+
+ switch (is->i2h_cmd.args[0]) {
+ case HIC_SET_PARAMETER:
+ is->config[is->config_index].p_region_index[0] = 0;
+ is->config[is->config_index].p_region_index[1] = 0;
+ set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
+ break;
+ }
+ break;
+
+ case IHC_NOT_READY:
+ pr_err("IS control sequence error: Not Ready\n");
+ break;
+ }
+
+ wake_up(&is->irq_queue);
+}
+
+static irqreturn_t fimc_is_irq_handler(int irq, void *priv)
+{
+ struct fimc_is *is = priv;
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&is->slock, flags);
+ status = mcuctl_read(is, MCUCTL_REG_INTSR1);
+
+ if (status & (1UL << FIMC_IS_INT_GENERAL))
+ fimc_is_general_irq_handler(is);
+
+ if (status & (1UL << FIMC_IS_INT_FRAME_DONE_ISP))
+ fimc_isp_irq_handler(is);
+
+ spin_unlock_irqrestore(&is->slock, flags);
+ return IRQ_HANDLED;
+}
+
+static int fimc_is_hw_open_sensor(struct fimc_is *is,
+ struct fimc_is_sensor *sensor)
+{
+ struct sensor_open_extended *soe = (void *)&is->is_p_region->shared;
+
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
+
+ soe->self_calibration_mode = 1;
+ soe->actuator_type = 0;
+ soe->mipi_lane_num = 0;
+ soe->mclk = 0;
+ soe->mipi_speed = 0;
+ soe->fast_open_sensor = 0;
+ soe->i2c_sclk = 88000000;
+
+ fimc_is_mem_barrier();
+
+ /*
+ * Some user space applications hang here without this
+ * empirically chosen delay.
+ */
+ udelay(100);
+
+ mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
+ mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
+ mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
+ mcuctl_write(sensor->i2c_bus, is, MCUCTL_REG_ISSR(3));
+ mcuctl_write(is->is_dma_p_region, is, MCUCTL_REG_ISSR(4));
+
+ fimc_is_hw_set_intgr0_gd0(is);
+
+ return fimc_is_wait_event(is, IS_ST_OPEN_SENSOR, 1,
+ sensor->drvdata->open_timeout);
+}
+
+
+int fimc_is_hw_initialize(struct fimc_is *is)
+{
+ const int config_ids[] = {
+ IS_SC_PREVIEW_STILL, IS_SC_PREVIEW_VIDEO,
+ IS_SC_CAPTURE_STILL, IS_SC_CAPTURE_VIDEO
+ };
+ struct device *dev = &is->pdev->dev;
+ u32 prev_id;
+ int i, ret;
+
+ /* Sensor initialization. Only one sensor is currently supported. */
+ ret = fimc_is_hw_open_sensor(is, &is->sensor[0]);
+ if (ret < 0)
+ return ret;
+
+ /* Get the setfile address. */
+ fimc_is_hw_get_setfile_addr(is);
+
+ ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev, "get setfile address timed out\n");
+ return ret;
+ }
+ pr_debug("setfile.base: %#x\n", is->setfile.base);
+
+ /* Load the setfile. */
+ fimc_is_load_setfile(is, FIMC_IS_SETFILE_6A3);
+ clear_bit(IS_ST_SETFILE_LOADED, &is->state);
+ fimc_is_hw_load_setfile(is);
+ ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev, "loading setfile timed out\n");
+ return ret;
+ }
+
+ pr_debug("setfile: base: %#x, size: %d\n",
+ is->setfile.base, is->setfile.size);
+ pr_info("FIMC-IS Setfile info: %s\n", is->fw.setfile_info);
+
+ /* Check magic number. */
+ if (is->is_p_region->shared[MAX_SHARED_COUNT - 1] !=
+ FIMC_IS_MAGIC_NUMBER) {
+ dev_err(dev, "magic number error!\n");
+ return -EIO;
+ }
+
+ pr_debug("shared region: %pad, parameter region: %pad\n",
+ &is->memory.paddr + FIMC_IS_SHARED_REGION_OFFSET,
+ &is->is_dma_p_region);
+
+ is->setfile.sub_index = 0;
+
+ /* Stream off. */
+ fimc_is_hw_stream_off(is);
+ ret = fimc_is_wait_event(is, IS_ST_STREAM_OFF, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev, "stream off timeout\n");
+ return ret;
+ }
+
+ /* Preserve previous mode. */
+ prev_id = is->config_index;
+
+ /* Set initial parameter values. */
+ for (i = 0; i < ARRAY_SIZE(config_ids); i++) {
+ is->config_index = config_ids[i];
+ fimc_is_set_initial_params(is);
+ ret = fimc_is_itf_s_param(is, true);
+ if (ret < 0) {
+ is->config_index = prev_id;
+ return ret;
+ }
+ }
+ is->config_index = prev_id;
+
+ set_bit(IS_ST_INIT_DONE, &is->state);
+ dev_info(dev, "initialization sequence completed (%d)\n",
+ is->config_index);
+ return 0;
+}
+
+static int fimc_is_log_show(struct seq_file *s, void *data)
+{
+ struct fimc_is *is = s->private;
+ const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET;
+
+ if (is->memory.vaddr == NULL) {
+ dev_err(&is->pdev->dev, "firmware memory is not initialized\n");
+ return -EIO;
+ }
+
+ seq_printf(s, "%s\n", buf);
+ return 0;
+}
+
+static int fimc_is_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fimc_is_log_show, inode->i_private);
+}
+
+static const struct file_operations fimc_is_debugfs_fops = {
+ .open = fimc_is_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void fimc_is_debugfs_remove(struct fimc_is *is)
+{
+ debugfs_remove_recursive(is->debugfs_entry);
+ is->debugfs_entry = NULL;
+}
+
+static int fimc_is_debugfs_create(struct fimc_is *is)
+{
+ struct dentry *dentry;
+
+ is->debugfs_entry = debugfs_create_dir("fimc_is", NULL);
+
+ dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry,
+ is, &fimc_is_debugfs_fops);
+ if (!dentry)
+ fimc_is_debugfs_remove(is);
+
+ return is->debugfs_entry == NULL ? -EIO : 0;
+}
+
+static int fimc_is_runtime_resume(struct device *dev);
+static int fimc_is_runtime_suspend(struct device *dev);
+
+static int fimc_is_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_is *is;
+ struct resource res;
+ struct device_node *node;
+ int ret;
+
+ is = devm_kzalloc(&pdev->dev, sizeof(*is), GFP_KERNEL);
+ if (!is)
+ return -ENOMEM;
+
+ is->pdev = pdev;
+ is->isp.pdev = pdev;
+
+ init_waitqueue_head(&is->irq_queue);
+ spin_lock_init(&is->slock);
+ mutex_init(&is->lock);
+
+ ret = of_address_to_resource(dev->of_node, 0, &res);
+ if (ret < 0)
+ return ret;
+
+ is->regs = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(is->regs))
+ return PTR_ERR(is->regs);
+
+ node = of_get_child_by_name(dev->of_node, "pmu");
+ if (!node)
+ return -ENODEV;
+
+ is->pmu_regs = of_iomap(node, 0);
+ of_node_put(node);
+ if (!is->pmu_regs)
+ return -ENOMEM;
+
+ is->irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!is->irq) {
+ dev_err(dev, "no irq found\n");
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+
+ ret = fimc_is_get_clocks(is);
+ if (ret < 0)
+ goto err_iounmap;
+
+ platform_set_drvdata(pdev, is);
+
+ ret = request_irq(is->irq, fimc_is_irq_handler, 0, dev_name(dev), is);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_clk;
+ }
+ pm_runtime_enable(dev);
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = fimc_is_runtime_resume(dev);
+ if (ret < 0)
+ goto err_irq;
+ }
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm;
+
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ ret = devm_of_platform_populate(dev);
+ if (ret < 0)
+ goto err_pm;
+
+ /*
+ * Register FIMC-IS V4L2 subdevs to this driver. The video nodes
+ * will be created within the subdev's registered() callback.
+ */
+ ret = fimc_is_register_subdevs(is);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = fimc_is_debugfs_create(is);
+ if (ret < 0)
+ goto err_sd;
+
+ ret = fimc_is_request_firmware(is, FIMC_IS_FW_FILENAME);
+ if (ret < 0)
+ goto err_dfs;
+
+ pm_runtime_put_sync(dev);
+
+ dev_dbg(dev, "FIMC-IS registered successfully\n");
+ return 0;
+
+err_dfs:
+ fimc_is_debugfs_remove(is);
+err_sd:
+ fimc_is_unregister_subdevs(is);
+err_pm:
+ if (!pm_runtime_enabled(dev))
+ fimc_is_runtime_suspend(dev);
+err_irq:
+ free_irq(is->irq, is);
+err_clk:
+ fimc_is_put_clocks(is);
+err_iounmap:
+ iounmap(is->pmu_regs);
+ return ret;
+}
+
+static int fimc_is_runtime_resume(struct device *dev)
+{
+ struct fimc_is *is = dev_get_drvdata(dev);
+ int ret;
+
+ ret = fimc_is_setup_clocks(is);
+ if (ret)
+ return ret;
+
+ return fimc_is_enable_clocks(is);
+}
+
+static int fimc_is_runtime_suspend(struct device *dev)
+{
+ struct fimc_is *is = dev_get_drvdata(dev);
+
+ fimc_is_disable_clocks(is);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_is_resume(struct device *dev)
+{
+ /* TODO: */
+ return 0;
+}
+
+static int fimc_is_suspend(struct device *dev)
+{
+ struct fimc_is *is = dev_get_drvdata(dev);
+
+ /* TODO: */
+ if (test_bit(IS_ST_A5_PWR_ON, &is->state))
+ return -EBUSY;
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int fimc_is_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_is *is = dev_get_drvdata(dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ if (!pm_runtime_status_suspended(dev))
+ fimc_is_runtime_suspend(dev);
+ free_irq(is->irq, is);
+ fimc_is_unregister_subdevs(is);
+ vb2_dma_contig_clear_max_seg_size(dev);
+ fimc_is_put_clocks(is);
+ iounmap(is->pmu_regs);
+ fimc_is_debugfs_remove(is);
+ release_firmware(is->fw.f_w);
+ fimc_is_free_cpu_memory(is);
+
+ return 0;
+}
+
+static const struct of_device_id fimc_is_of_match[] = {
+ { .compatible = "samsung,exynos4212-fimc-is" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, fimc_is_of_match);
+
+static const struct dev_pm_ops fimc_is_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_is_suspend, fimc_is_resume)
+ SET_RUNTIME_PM_OPS(fimc_is_runtime_suspend, fimc_is_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver fimc_is_driver = {
+ .probe = fimc_is_probe,
+ .remove = fimc_is_remove,
+ .driver = {
+ .of_match_table = fimc_is_of_match,
+ .name = FIMC_IS_DRV_NAME,
+ .pm = &fimc_is_pm_ops,
+ }
+};
+
+static int fimc_is_module_init(void)
+{
+ int ret;
+
+ ret = fimc_is_register_i2c_driver();
+ if (ret < 0)
+ return ret;
+
+ ret = platform_driver_register(&fimc_is_driver);
+
+ if (ret < 0)
+ fimc_is_unregister_i2c_driver();
+
+ return ret;
+}
+
+static void fimc_is_module_exit(void)
+{
+ fimc_is_unregister_i2c_driver();
+ platform_driver_unregister(&fimc_is_driver);
+}
+
+module_init(fimc_is_module_init);
+module_exit(fimc_is_module_exit);
+
+MODULE_ALIAS("platform:" FIMC_IS_DRV_NAME);
+MODULE_AUTHOR("Younghwan Joo <yhwan.joo@samsung.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
new file mode 100644
index 000000000..ee05da034
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -0,0 +1,345 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Younghwan Joo <yhwan.joo@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_IS_H_
+#define FIMC_IS_H_
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+
+#include "fimc-isp.h"
+#include "fimc-is-command.h"
+#include "fimc-is-sensor.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+
+#define FIMC_IS_DRV_NAME "exynos4-fimc-is"
+
+#define FIMC_IS_FW_FILENAME "exynos4_fimc_is_fw.bin"
+#define FIMC_IS_SETFILE_6A3 "exynos4_s5k6a3_setfile.bin"
+
+#define FIMC_IS_FW_LOAD_TIMEOUT 1000 /* ms */
+#define FIMC_IS_POWER_ON_TIMEOUT 1000 /* us */
+
+#define FIMC_IS_SENSORS_NUM 2
+
+/* Memory definitions */
+#define FIMC_IS_CPU_MEM_SIZE (0xa00000)
+#define FIMC_IS_CPU_BASE_MASK ((1 << 26) - 1)
+#define FIMC_IS_REGION_SIZE 0x5000
+
+#define FIMC_IS_DEBUG_REGION_OFFSET 0x0084b000
+#define FIMC_IS_SHARED_REGION_OFFSET 0x008c0000
+#define FIMC_IS_FW_INFO_LEN 31
+#define FIMC_IS_FW_VER_LEN 7
+#define FIMC_IS_FW_DESC_LEN (FIMC_IS_FW_INFO_LEN + \
+ FIMC_IS_FW_VER_LEN)
+#define FIMC_IS_SETFILE_INFO_LEN 39
+
+#define FIMC_IS_EXTRA_MEM_SIZE (FIMC_IS_EXTRA_FW_SIZE + \
+ FIMC_IS_EXTRA_SETFILE_SIZE + 0x1000)
+#define FIMC_IS_EXTRA_FW_SIZE 0x180000
+#define FIMC_IS_EXTRA_SETFILE_SIZE 0x4b000
+
+/* TODO: revisit */
+#define FIMC_IS_FW_ADDR_MASK ((1 << 26) - 1)
+#define FIMC_IS_FW_SIZE_MAX (SZ_4M)
+#define FIMC_IS_FW_SIZE_MIN (SZ_32K)
+
+#define ATCLK_MCUISP_FREQUENCY 100000000UL
+#define ACLK_AXI_FREQUENCY 100000000UL
+
+enum {
+ ISS_CLK_PPMUISPX,
+ ISS_CLK_PPMUISPMX,
+ ISS_CLK_LITE0,
+ ISS_CLK_LITE1,
+ ISS_CLK_MPLL,
+ ISS_CLK_ISP,
+ ISS_CLK_DRC,
+ ISS_CLK_FD,
+ ISS_CLK_MCUISP,
+ ISS_CLK_GICISP,
+ ISS_CLK_PWM_ISP,
+ ISS_CLK_MCUCTL_ISP,
+ ISS_CLK_UART,
+ ISS_GATE_CLKS_MAX,
+ ISS_CLK_ISP_DIV0 = ISS_GATE_CLKS_MAX,
+ ISS_CLK_ISP_DIV1,
+ ISS_CLK_MCUISP_DIV0,
+ ISS_CLK_MCUISP_DIV1,
+ ISS_CLK_ACLK200,
+ ISS_CLK_ACLK200_DIV,
+ ISS_CLK_ACLK400MCUISP,
+ ISS_CLK_ACLK400MCUISP_DIV,
+ ISS_CLKS_MAX
+};
+
+/* The driver's internal state flags */
+enum {
+ IS_ST_IDLE,
+ IS_ST_PWR_ON,
+ IS_ST_A5_PWR_ON,
+ IS_ST_FW_LOADED,
+ IS_ST_OPEN_SENSOR,
+ IS_ST_SETFILE_LOADED,
+ IS_ST_INIT_DONE,
+ IS_ST_STREAM_ON,
+ IS_ST_STREAM_OFF,
+ IS_ST_CHANGE_MODE,
+ IS_ST_BLOCK_CMD_CLEARED,
+ IS_ST_SET_ZOOM,
+ IS_ST_PWR_SUBIP_ON,
+ IS_ST_END,
+};
+
+enum af_state {
+ FIMC_IS_AF_IDLE = 0,
+ FIMC_IS_AF_SETCONFIG = 1,
+ FIMC_IS_AF_RUNNING = 2,
+ FIMC_IS_AF_LOCK = 3,
+ FIMC_IS_AF_ABORT = 4,
+ FIMC_IS_AF_FAILED = 5,
+};
+
+enum af_lock_state {
+ FIMC_IS_AF_UNLOCKED = 0,
+ FIMC_IS_AF_LOCKED = 2
+};
+
+enum ae_lock_state {
+ FIMC_IS_AE_UNLOCKED = 0,
+ FIMC_IS_AE_LOCKED = 1
+};
+
+enum awb_lock_state {
+ FIMC_IS_AWB_UNLOCKED = 0,
+ FIMC_IS_AWB_LOCKED = 1
+};
+
+enum {
+ IS_METERING_CONFIG_CMD,
+ IS_METERING_CONFIG_WIN_POS_X,
+ IS_METERING_CONFIG_WIN_POS_Y,
+ IS_METERING_CONFIG_WIN_WIDTH,
+ IS_METERING_CONFIG_WIN_HEIGHT,
+ IS_METERING_CONFIG_MAX
+};
+
+struct is_setfile {
+ const struct firmware *info;
+ int state;
+ u32 sub_index;
+ u32 base;
+ size_t size;
+};
+
+struct is_fd_result_header {
+ u32 offset;
+ u32 count;
+ u32 index;
+ u32 curr_index;
+ u32 width;
+ u32 height;
+};
+
+struct is_af_info {
+ u16 mode;
+ u32 af_state;
+ u32 af_lock_state;
+ u32 ae_lock_state;
+ u32 awb_lock_state;
+ u16 pos_x;
+ u16 pos_y;
+ u16 prev_pos_x;
+ u16 prev_pos_y;
+ u16 use_af;
+};
+
+struct fimc_is_firmware {
+ const struct firmware *f_w;
+
+ dma_addr_t paddr;
+ void *vaddr;
+ unsigned int size;
+
+ char info[FIMC_IS_FW_INFO_LEN + 1];
+ char version[FIMC_IS_FW_VER_LEN + 1];
+ char setfile_info[FIMC_IS_SETFILE_INFO_LEN + 1];
+ u8 state;
+};
+
+struct fimc_is_memory {
+ /* physical base address */
+ dma_addr_t paddr;
+ /* virtual base address */
+ void *vaddr;
+ /* total length */
+ unsigned int size;
+};
+
+#define FIMC_IS_I2H_MAX_ARGS 12
+
+struct i2h_cmd {
+ u32 cmd;
+ u32 sensor_id;
+ u16 num_args;
+ u32 args[FIMC_IS_I2H_MAX_ARGS];
+};
+
+struct h2i_cmd {
+ u16 cmd_type;
+ u32 entry_id;
+};
+
+#define FIMC_IS_DEBUG_MSG 0x3f
+#define FIMC_IS_DEBUG_LEVEL 3
+
+struct fimc_is_setfile {
+ const struct firmware *info;
+ unsigned int state;
+ unsigned int size;
+ u32 sub_index;
+ u32 base;
+};
+
+struct chain_config {
+ struct global_param global;
+ struct sensor_param sensor;
+ struct isp_param isp;
+ struct drc_param drc;
+ struct fd_param fd;
+
+ unsigned long p_region_index[2];
+};
+
+/**
+ * struct fimc_is - fimc-is data structure
+ * @pdev: pointer to FIMC-IS platform device
+ * @pctrl: pointer to pinctrl structure for this device
+ * @v4l2_dev: pointer to the top level v4l2_device
+ * @lock: mutex serializing video device and the subdev operations
+ * @slock: spinlock protecting this data structure and the hw registers
+ * @clocks: FIMC-IS clocks
+ * @regs: MCUCTL mmapped registers region
+ * @pmu_regs: PMU ISP mmapped registers region
+ * @irq_queue: interrupt handling waitqueue
+ * @lpm: low power mode flag
+ * @state: internal driver's state flags
+ */
+struct fimc_is {
+ struct platform_device *pdev;
+ struct pinctrl *pctrl;
+ struct v4l2_device *v4l2_dev;
+
+ struct fimc_is_firmware fw;
+ struct fimc_is_memory memory;
+ struct firmware *f_w;
+
+ struct fimc_isp isp;
+ struct fimc_is_sensor sensor[FIMC_IS_SENSORS_NUM];
+ struct fimc_is_setfile setfile;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ struct clk *clocks[ISS_CLKS_MAX];
+ void __iomem *regs;
+ void __iomem *pmu_regs;
+ int irq;
+ wait_queue_head_t irq_queue;
+ u8 lpm;
+
+ unsigned long state;
+ unsigned int sensor_index;
+
+ struct i2h_cmd i2h_cmd;
+ struct h2i_cmd h2i_cmd;
+ struct is_fd_result_header fd_header;
+
+ struct chain_config config[IS_SC_MAX];
+ unsigned config_index;
+
+ struct is_region *is_p_region;
+ dma_addr_t is_dma_p_region;
+ struct is_share_region *is_shared_region;
+ struct is_af_info af;
+
+ struct dentry *debugfs_entry;
+};
+
+static inline struct fimc_is *fimc_isp_to_is(struct fimc_isp *isp)
+{
+ return container_of(isp, struct fimc_is, isp);
+}
+
+static inline struct chain_config *__get_curr_is_config(struct fimc_is *is)
+{
+ return &is->config[is->config_index];
+}
+
+static inline void fimc_is_mem_barrier(void)
+{
+ mb();
+}
+
+static inline void fimc_is_set_param_bit(struct fimc_is *is, int num)
+{
+ struct chain_config *cfg = &is->config[is->config_index];
+
+ set_bit(num, &cfg->p_region_index[0]);
+}
+
+static inline void fimc_is_set_param_ctrl_cmd(struct fimc_is *is, int cmd)
+{
+ is->is_p_region->parameter.isp.control.cmd = cmd;
+}
+
+static inline void mcuctl_write(u32 v, struct fimc_is *is, unsigned int offset)
+{
+ writel(v, is->regs + offset);
+}
+
+static inline u32 mcuctl_read(struct fimc_is *is, unsigned int offset)
+{
+ return readl(is->regs + offset);
+}
+
+static inline void pmuisp_write(u32 v, struct fimc_is *is, unsigned int offset)
+{
+ writel(v, is->pmu_regs + offset);
+}
+
+static inline u32 pmuisp_read(struct fimc_is *is, unsigned int offset)
+{
+ return readl(is->pmu_regs + offset);
+}
+
+int fimc_is_wait_event(struct fimc_is *is, unsigned long bit,
+ unsigned int state, unsigned int timeout);
+int fimc_is_cpu_set_power(struct fimc_is *is, int on);
+int fimc_is_start_firmware(struct fimc_is *is);
+int fimc_is_hw_initialize(struct fimc_is *is);
+void fimc_is_log_dump(const char *level, const void *buf, size_t len);
+
+#endif /* FIMC_IS_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
new file mode 100644
index 000000000..c9ef74ee4
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -0,0 +1,661 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * FIMC-IS ISP video input and video output DMA interface driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * The hardware handling code is derived from a driver written by
+ * Younghwan Joo <yhwan.joo@samsung.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+#include "common.h"
+#include "media-dev.h"
+#include "fimc-is.h"
+#include "fimc-isp-video.h"
+#include "fimc-is-param.h"
+
+static int isp_video_capture_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
+ const struct fimc_fmt *fmt = isp->video_capture.format;
+ unsigned int wh, i;
+
+ wh = vid_fmt->width * vid_fmt->height;
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ *num_buffers = clamp_t(u32, *num_buffers, FIMC_ISP_REQ_BUFS_MIN,
+ FIMC_ISP_REQ_BUFS_MAX);
+ if (*num_planes) {
+ if (*num_planes != fmt->memplanes)
+ return -EINVAL;
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < (wh * fmt->depth[i]) / 8)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++)
+ sizes[i] = (wh * fmt->depth[i]) / 8;
+
+ return 0;
+}
+
+static inline struct param_dma_output *__get_isp_dma2(struct fimc_is *is)
+{
+ return &__get_curr_is_config(is)->isp.dma2_output;
+}
+
+static int isp_video_capture_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(q);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct param_dma_output *dma = __get_isp_dma2(is);
+ struct fimc_is_video *video = &isp->video_capture;
+ int ret;
+
+ if (!test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state) ||
+ test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
+ return 0;
+
+
+ dma->cmd = DMA_OUTPUT_COMMAND_ENABLE;
+ dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_ENABLE;
+ dma->buffer_address = is->is_dma_p_region +
+ DMA2_OUTPUT_ADDR_ARRAY_OFFS;
+ dma->buffer_number = video->reqbufs_count;
+ dma->dma_out_mask = video->buf_mask;
+
+ isp_dbg(2, &video->ve.vdev,
+ "buf_count: %d, planes: %d, dma addr table: %#x\n",
+ video->buf_count, video->format->memplanes,
+ dma->buffer_address);
+
+ fimc_is_mem_barrier();
+
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_pipeline_call(&video->ve, set_stream, 1);
+ if (ret < 0)
+ return ret;
+
+ set_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
+ return ret;
+}
+
+static void isp_video_capture_stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(q);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct param_dma_output *dma = __get_isp_dma2(is);
+ int ret;
+
+ ret = fimc_pipeline_call(&isp->video_capture.ve, set_stream, 0);
+ if (ret < 0)
+ return;
+
+ dma->cmd = DMA_OUTPUT_COMMAND_DISABLE;
+ dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_DISABLE;
+ dma->buffer_number = 0;
+ dma->buffer_address = 0;
+ dma->dma_out_mask = 0;
+
+ fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
+ __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ dev_warn(&is->pdev->dev, "%s: DMA stop failed\n", __func__);
+
+ fimc_is_hw_set_isp_buf_mask(is, 0);
+
+ clear_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
+ clear_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
+
+ isp->video_capture.buf_count = 0;
+}
+
+static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_is_video *video = &isp->video_capture;
+ int i;
+
+ if (video->format == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < video->format->memplanes; i++) {
+ unsigned long size = video->pixfmt.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(&video->ve.vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ /* Check if we get one of the already known buffers. */
+ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
+ dma_addr_t dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ int i;
+
+ for (i = 0; i < video->buf_count; i++)
+ if (video->buffers[i]->dma_addr[0] == dma_addr)
+ return 0;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_is_video *video = &isp->video_capture;
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct isp_video_buf *ivb = to_isp_video_buf(vbuf);
+ unsigned long flags;
+ unsigned int i;
+
+ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
+ spin_lock_irqsave(&is->slock, flags);
+ video->buf_mask |= BIT(ivb->index);
+ spin_unlock_irqrestore(&is->slock, flags);
+ } else {
+ unsigned int num_planes = video->format->memplanes;
+
+ ivb->index = video->buf_count;
+ video->buffers[ivb->index] = ivb;
+
+ for (i = 0; i < num_planes; i++) {
+ int buf_index = ivb->index * num_planes + i;
+
+ ivb->dma_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ is->is_p_region->shared[32 + buf_index] =
+ ivb->dma_addr[i];
+
+ isp_dbg(2, &video->ve.vdev,
+ "dma_buf %d (%d/%d/%d) addr: %pad\n",
+ buf_index, ivb->index, i, vb->index,
+ &ivb->dma_addr[i]);
+ }
+
+ if (++video->buf_count < video->reqbufs_count)
+ return;
+
+ video->buf_mask = (1UL << video->buf_count) - 1;
+ set_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
+ }
+
+ if (!test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
+ isp_video_capture_start_streaming(vb->vb2_queue, 0);
+}
+
+/*
+ * FIMC-IS ISP input and output DMA interface interrupt handler.
+ * Locking: called with is->slock spinlock held.
+ */
+void fimc_isp_video_irq_handler(struct fimc_is *is)
+{
+ struct fimc_is_video *video = &is->isp.video_capture;
+ struct vb2_v4l2_buffer *vbuf;
+ int buf_index;
+
+ /* TODO: Ensure the DMA is really stopped in stop_streaming callback */
+ if (!test_bit(ST_ISP_VID_CAP_STREAMING, &is->isp.state))
+ return;
+
+ buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
+ vbuf = &video->buffers[buf_index]->vb;
+
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+
+ video->buf_mask &= ~BIT(buf_index);
+ fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
+}
+
+static const struct vb2_ops isp_video_capture_qops = {
+ .queue_setup = isp_video_capture_queue_setup,
+ .buf_prepare = isp_video_capture_buffer_prepare,
+ .buf_queue = isp_video_capture_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = isp_video_capture_start_streaming,
+ .stop_streaming = isp_video_capture_stop_streaming,
+};
+
+static int isp_video_open(struct file *file)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct exynos_video_entity *ve = &isp->video_capture.ve;
+ struct media_entity *me = &ve->vdev.entity;
+ int ret;
+
+ if (mutex_lock_interruptible(&isp->video_lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = pm_runtime_get_sync(&isp->pdev->dev);
+ if (ret < 0)
+ goto rel_fh;
+
+ if (v4l2_fh_is_singular_file(file)) {
+ mutex_lock(&me->graph_obj.mdev->graph_mutex);
+
+ ret = fimc_pipeline_call(ve, open, me, true);
+
+ /* Mark the video pipeline as in use. */
+ if (ret == 0)
+ me->use_count++;
+
+ mutex_unlock(&me->graph_obj.mdev->graph_mutex);
+ }
+ if (!ret)
+ goto unlock;
+rel_fh:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&isp->video_lock);
+ return ret;
+}
+
+static int isp_video_release(struct file *file)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is_video *ivc = &isp->video_capture;
+ struct media_entity *entity = &ivc->ve.vdev.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ bool is_singular_file;
+
+ mutex_lock(&isp->video_lock);
+
+ is_singular_file = v4l2_fh_is_singular_file(file);
+
+ if (is_singular_file && ivc->streaming) {
+ media_pipeline_stop(entity);
+ ivc->streaming = 0;
+ }
+
+ _vb2_fop_release(file, NULL);
+
+ if (is_singular_file) {
+ fimc_pipeline_call(&ivc->ve, close);
+
+ mutex_lock(&mdev->graph_mutex);
+ entity->use_count--;
+ mutex_unlock(&mdev->graph_mutex);
+ }
+
+ pm_runtime_put(&isp->pdev->dev);
+ mutex_unlock(&isp->video_lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations isp_video_fops = {
+ .owner = THIS_MODULE,
+ .open = isp_video_open,
+ .release = isp_video_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * Video node ioctl operations
+ */
+static int isp_video_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ __fimc_vidioc_querycap(&isp->pdev->dev, cap, V4L2_CAP_STREAMING);
+ return 0;
+}
+
+static int isp_video_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct fimc_fmt *fmt;
+
+ if (f->index >= FIMC_ISP_NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = fimc_isp_find_format(NULL, NULL, f->index);
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int isp_video_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ f->fmt.pix_mp = isp->video_capture.pixfmt;
+ return 0;
+}
+
+static void __isp_video_try_fmt(struct fimc_isp *isp,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct fimc_fmt **fmt)
+{
+ const struct fimc_fmt *__fmt;
+
+ __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+ if (fmt)
+ *fmt = __fmt;
+
+ pixm->colorspace = V4L2_COLORSPACE_SRGB;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->num_planes = __fmt->memplanes;
+ pixm->pixelformat = __fmt->fourcc;
+ /*
+ * TODO: double check with the documentation that these
+ * width/height constraints are correct.
+ */
+ v4l_bound_align_image(&pixm->width, FIMC_ISP_SOURCE_WIDTH_MIN,
+ FIMC_ISP_SOURCE_WIDTH_MAX, 3,
+ &pixm->height, FIMC_ISP_SOURCE_HEIGHT_MIN,
+ FIMC_ISP_SOURCE_HEIGHT_MAX, 0, 0);
+}
+
+static int isp_video_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+
+ __isp_video_try_fmt(isp, &f->fmt.pix_mp, NULL);
+ return 0;
+}
+
+static int isp_video_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct fimc_fmt *ifmt = NULL;
+ struct param_dma_output *dma = __get_isp_dma2(is);
+
+ __isp_video_try_fmt(isp, pixm, &ifmt);
+
+ if (WARN_ON(ifmt == NULL))
+ return -EINVAL;
+
+ dma->format = DMA_OUTPUT_FORMAT_BAYER;
+ dma->order = DMA_OUTPUT_ORDER_GB_BG;
+ dma->plane = ifmt->memplanes;
+ dma->bitwidth = ifmt->depth[0];
+ dma->width = pixm->width;
+ dma->height = pixm->height;
+
+ fimc_is_mem_barrier();
+
+ isp->video_capture.format = ifmt;
+ isp->video_capture.pixfmt = *pixm;
+
+ return 0;
+}
+
+/*
+ * Check for source/sink format differences at each link.
+ * Return 0 if the formats match or -EPIPE otherwise.
+ */
+static int isp_video_pipeline_validate(struct fimc_isp *isp)
+{
+ struct v4l2_subdev *sd = &isp->subdev;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static int isp_video_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct exynos_video_entity *ve = &isp->video_capture.ve;
+ struct media_entity *me = &ve->vdev.entity;
+ int ret;
+
+ ret = media_pipeline_start(me, &ve->pipe->mp);
+ if (ret < 0)
+ return ret;
+
+ ret = isp_video_pipeline_validate(isp);
+ if (ret < 0)
+ goto p_stop;
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ if (ret < 0)
+ goto p_stop;
+
+ isp->video_capture.streaming = 1;
+ return 0;
+p_stop:
+ media_pipeline_stop(me);
+ return ret;
+}
+
+static int isp_video_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ struct fimc_is_video *video = &isp->video_capture;
+ int ret;
+
+ ret = vb2_ioctl_streamoff(file, priv, type);
+ if (ret < 0)
+ return ret;
+
+ media_pipeline_stop(&video->ve.vdev.entity);
+ video->streaming = 0;
+ return 0;
+}
+
+static int isp_video_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct fimc_isp *isp = video_drvdata(file);
+ int ret;
+
+ ret = vb2_ioctl_reqbufs(file, priv, rb);
+ if (ret < 0)
+ return ret;
+
+ if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) {
+ rb->count = 0;
+ vb2_ioctl_reqbufs(file, priv, rb);
+ ret = -ENOMEM;
+ }
+
+ isp->video_capture.reqbufs_count = rb->count;
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
+ .vidioc_querycap = isp_video_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = isp_video_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = isp_video_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = isp_video_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = isp_video_g_fmt_mplane,
+ .vidioc_reqbufs = isp_video_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = isp_video_streamon,
+ .vidioc_streamoff = isp_video_streamoff,
+};
+
+int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type)
+{
+ struct vb2_queue *q = &isp->video_capture.vb_queue;
+ struct fimc_is_video *iv;
+ struct video_device *vdev;
+ int ret;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ iv = &isp->video_capture;
+ else
+ return -ENOSYS;
+
+ mutex_init(&isp->video_lock);
+ INIT_LIST_HEAD(&iv->pending_buf_q);
+ INIT_LIST_HEAD(&iv->active_buf_q);
+ iv->format = fimc_isp_find_format(NULL, NULL, 0);
+ iv->pixfmt.width = IS_DEFAULT_WIDTH;
+ iv->pixfmt.height = IS_DEFAULT_HEIGHT;
+ iv->pixfmt.pixelformat = iv->format->fourcc;
+ iv->pixfmt.colorspace = V4L2_COLORSPACE_SRGB;
+ iv->reqbufs_count = 0;
+
+ memset(q, 0, sizeof(*q));
+ q->type = type;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &isp_video_capture_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct isp_video_buf);
+ q->drv_priv = isp;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &isp->video_lock;
+ q->dev = &isp->pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ return ret;
+
+ vdev = &iv->ve.vdev;
+ memset(vdev, 0, sizeof(*vdev));
+ snprintf(vdev->name, sizeof(vdev->name), "fimc-is-isp.%s",
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ?
+ "capture" : "output");
+ vdev->queue = q;
+ vdev->fops = &isp_video_fops;
+ vdev->ioctl_ops = &isp_video_ioctl_ops;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->minor = -1;
+ vdev->release = video_device_release_empty;
+ vdev->lock = &isp->video_lock;
+
+ iv->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &iv->pad);
+ if (ret < 0)
+ return ret;
+
+ video_set_drvdata(vdev, isp);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ media_entity_cleanup(&vdev->entity);
+ return ret;
+ }
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+
+ return 0;
+}
+
+void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type)
+{
+ struct exynos_video_entity *ve;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ve = &isp->video_capture.ve;
+ else
+ return;
+
+ mutex_lock(&isp->video_lock);
+
+ if (video_is_registered(&ve->vdev)) {
+ video_unregister_device(&ve->vdev);
+ media_entity_cleanup(&ve->vdev.entity);
+ ve->pipe = NULL;
+ }
+
+ mutex_unlock(&isp->video_lock);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h
new file mode 100644
index 000000000..67ef85249
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h
@@ -0,0 +1,44 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_ISP_VIDEO__
+#define FIMC_ISP_VIDEO__
+
+#include <media/videobuf2-v4l2.h>
+#include "fimc-isp.h"
+
+#ifdef CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE
+int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type);
+
+void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type);
+
+void fimc_isp_video_irq_handler(struct fimc_is *is);
+#else
+static inline void fimc_isp_video_irq_handler(struct fimc_is *is)
+{
+}
+
+static inline int fimc_isp_video_device_register(struct fimc_isp *isp,
+ struct v4l2_device *v4l2_dev,
+ enum v4l2_buf_type type)
+{
+ return 0;
+}
+
+static inline void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+ enum v4l2_buf_type type)
+{
+}
+#endif /* !CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE */
+
+#endif /* FIMC_ISP_VIDEO__ */
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
new file mode 100644
index 000000000..1dbebdc1c
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -0,0 +1,789 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+
+#include "media-dev.h"
+#include "fimc-isp-video.h"
+#include "fimc-is-command.h"
+#include "fimc-is-param.h"
+#include "fimc-is-regs.h"
+#include "fimc-is.h"
+
+int fimc_isp_debug;
+module_param_named(debug_isp, fimc_isp_debug, int, S_IRUGO | S_IWUSR);
+
+static const struct fimc_fmt fimc_isp_formats[FIMC_ISP_NUM_FORMATS] = {
+ {
+ .name = "RAW8 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .depth = { 8 },
+ .color = FIMC_FMT_RAW8,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ }, {
+ .name = "RAW10 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .depth = { 10 },
+ .color = FIMC_FMT_RAW10,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ }, {
+ .name = "RAW12 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .depth = { 12 },
+ .color = FIMC_FMT_RAW12,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ },
+};
+
+/**
+ * fimc_isp_find_format - lookup color format by fourcc or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @index: index to the fimc_isp_formats array, ignored if negative
+ */
+const struct fimc_fmt *fimc_isp_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, int index)
+{
+ const struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_isp_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_isp_formats); ++i) {
+ fmt = &fimc_isp_formats[i];
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+void fimc_isp_irq_handler(struct fimc_is *is)
+{
+ is->i2h_cmd.args[0] = mcuctl_read(is, MCUCTL_REG_ISSR(20));
+ is->i2h_cmd.args[1] = mcuctl_read(is, MCUCTL_REG_ISSR(21));
+
+ fimc_is_fw_clear_irq1(is, FIMC_IS_INT_FRAME_DONE_ISP);
+ fimc_isp_video_irq_handler(is);
+
+ wake_up(&is->irq_queue);
+}
+
+/* Capture subdev media entity operations */
+static int fimc_is_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ return 0;
+}
+
+static const struct media_entity_operations fimc_is_subdev_media_ops = {
+ .link_setup = fimc_is_link_setup,
+};
+
+static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_isp_find_format(NULL, NULL, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *mf = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ return 0;
+ }
+
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+
+ mutex_lock(&isp->subdev_lock);
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ /* ISP OTF input image format */
+ *mf = isp->sink_fmt;
+ } else {
+ /* ISP OTF output image format */
+ *mf = isp->src_fmt;
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->code = MEDIA_BUS_FMT_YUV10_1X30;
+ }
+ }
+
+ mutex_unlock(&isp->subdev_lock);
+
+ isp_dbg(1, sd, "%s: pad%d: fmt: 0x%x, %dx%d\n", __func__,
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ return 0;
+}
+
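+/*
+ * Adjust the requested subdev format: the sink pad size is only clamped
+ * to the ISP limits, while the source pad sizes are derived from the
+ * sink format with the CAC margins subtracted.
+ */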
+static void __isp_subdev_try_format(struct fimc_isp *isp,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct v4l2_mbus_framefmt *format;
+
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, FIMC_ISP_SINK_WIDTH_MIN,
+ FIMC_ISP_SINK_WIDTH_MAX, 0,
+ &mf->height, FIMC_ISP_SINK_HEIGHT_MIN,
+ FIMC_ISP_SINK_HEIGHT_MAX, 0, 0);
+ mf->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ } else {
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ format = v4l2_subdev_get_try_format(&isp->subdev, cfg,
+ FIMC_ISP_SD_PAD_SINK);
+ else
+ format = &isp->sink_fmt;
+
+ /* Allow changing format only on sink pad */
+ mf->width = format->width - FIMC_ISP_CAC_MARGIN_WIDTH;
+ mf->height = format->height - FIMC_ISP_CAC_MARGIN_HEIGHT;
+
+ if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
+ mf->code = MEDIA_BUS_FMT_YUV10_1X30;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ } else {
+ mf->code = format->code;
+ }
+ }
+}
+
+static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ int ret = 0;
+
+ isp_dbg(1, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
+ __func__, fmt->pad, mf->code, mf->width, mf->height);
+
+ mutex_lock(&isp->subdev_lock);
+ __isp_subdev_try_format(isp, cfg, fmt);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *mf = fmt->format;
+
+ /* Propagate format to the source pads */
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ struct v4l2_subdev_format format = *fmt;
+ unsigned int pad;
+
+ for (pad = FIMC_ISP_SD_PAD_SRC_FIFO;
+ pad < FIMC_ISP_SD_PADS_NUM; pad++) {
+ format.pad = pad;
+ __isp_subdev_try_format(isp, cfg, &format);
+ mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ *mf = format.format;
+ }
+ }
+ } else {
+ if (sd->entity.stream_count == 0) {
+ if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
+ struct v4l2_subdev_format format = *fmt;
+
+ isp->sink_fmt = *mf;
+
+ format.pad = FIMC_ISP_SD_PAD_SRC_DMA;
+ __isp_subdev_try_format(isp, cfg, &format);
+
+ isp->src_fmt = format.format;
+ __is_set_frame_size(is, &isp->src_fmt);
+ } else {
+ isp->src_fmt = *mf;
+ }
+ } else {
+ ret = -EBUSY;
+ }
+ }
+
+ mutex_unlock(&isp->subdev_lock);
+ return ret;
+}
+
+static int fimc_isp_subdev_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ int ret;
+
+ isp_dbg(1, sd, "%s: on: %d\n", __func__, on);
+
+ if (!test_bit(IS_ST_INIT_DONE, &is->state))
+ return -EBUSY;
+
+ fimc_is_mem_barrier();
+
+ if (on) {
+ if (__get_pending_param_count(is)) {
+ ret = fimc_is_itf_s_param(is, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ isp_dbg(1, sd, "changing mode to %d\n", is->config_index);
+
+ ret = fimc_is_itf_mode_change(is);
+ if (ret)
+ return -EINVAL;
+
+ clear_bit(IS_ST_STREAM_ON, &is->state);
+ fimc_is_hw_stream_on(is);
+ ret = fimc_is_wait_event(is, IS_ST_STREAM_ON, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "stream on timeout\n");
+ return ret;
+ }
+ } else {
+ clear_bit(IS_ST_STREAM_OFF, &is->state);
+ fimc_is_hw_stream_off(is);
+ ret = fimc_is_wait_event(is, IS_ST_STREAM_OFF, 1,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "stream off timeout\n");
+ return ret;
+ }
+ is->setfile.sub_index = 0;
+ }
+
+ return 0;
+}
+
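+/*
+ * Subdev s_power handler. Power-up takes a runtime PM reference, boots the
+ * FIMC-IS firmware and initializes the hardware. Power-down closes the
+ * sensor, powers off the sub-IP blocks and the IS CPU and resets the
+ * driver state before dropping the runtime PM reference.
+ */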
+static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ int ret = 0;
+
+ pr_debug("on: %d\n", on);
+
+ if (on) {
+ ret = pm_runtime_get_sync(&is->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put(&is->pdev->dev);
+ return ret;
+ }
+ set_bit(IS_ST_PWR_ON, &is->state);
+
+ ret = fimc_is_start_firmware(is);
+ if (ret < 0) {
+ v4l2_err(sd, "firmware booting failed\n");
+ pm_runtime_put(&is->pdev->dev);
+ return ret;
+ }
+ set_bit(IS_ST_PWR_SUBIP_ON, &is->state);
+
+ ret = fimc_is_hw_initialize(is);
+ } else {
+ /* Close sensor */
+ if (!test_bit(IS_ST_PWR_ON, &is->state)) {
+ fimc_is_hw_close_sensor(is, 0);
+
+ ret = fimc_is_wait_event(is, IS_ST_OPEN_SENSOR, 0,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "sensor close timeout\n");
+ return ret;
+ }
+ }
+
+ /* SUB IP power off */
+ if (test_bit(IS_ST_PWR_SUBIP_ON, &is->state)) {
+ fimc_is_hw_subip_power_off(is);
+ ret = fimc_is_wait_event(is, IS_ST_PWR_SUBIP_ON, 0,
+ FIMC_IS_CONFIG_TIMEOUT);
+ if (ret < 0) {
+ v4l2_err(sd, "sub-IP power off timeout\n");
+ return ret;
+ }
+ }
+
+ fimc_is_cpu_set_power(is, 0);
+ pm_runtime_put_sync(&is->pdev->dev);
+
+ clear_bit(IS_ST_PWR_ON, &is->state);
+ clear_bit(IS_ST_INIT_DONE, &is->state);
+ is->state = 0;
+ is->config[is->config_index].p_region_index[0] = 0;
+ is->config[is->config_index].p_region_index[1] = 0;
+ set_bit(IS_ST_IDLE, &is->state);
+ wmb();
+ }
+
+ return ret;
+}
+
+static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_mbus_framefmt fmt = {
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .code = fimc_isp_formats[0].mbus_code,
+ .width = DEFAULT_PREVIEW_STILL_WIDTH + FIMC_ISP_CAC_MARGIN_WIDTH,
+ .height = DEFAULT_PREVIEW_STILL_HEIGHT + FIMC_ISP_CAC_MARGIN_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ };
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SINK);
+ *format = fmt;
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_FIFO);
+ fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ *format = fmt;
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_DMA);
+ *format = fmt;
+
+ return 0;
+}
+
+static int fimc_isp_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+ int ret;
+
+ /* Use pipeline object allocated by the media device. */
+ isp->video_capture.ve.pipe = v4l2_get_subdev_hostdata(sd);
+
+ ret = fimc_isp_video_device_register(isp, sd->v4l2_dev,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret < 0)
+ isp->video_capture.ve.pipe = NULL;
+
+ return ret;
+}
+
+static void fimc_isp_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_isp *isp = v4l2_get_subdevdata(sd);
+
+ fimc_isp_video_device_unregister(isp,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+}
+
+static const struct v4l2_subdev_internal_ops fimc_is_subdev_internal_ops = {
+ .registered = fimc_isp_subdev_registered,
+ .unregistered = fimc_isp_subdev_unregistered,
+ .open = fimc_isp_subdev_open,
+};
+
+static const struct v4l2_subdev_pad_ops fimc_is_subdev_pad_ops = {
+ .enum_mbus_code = fimc_is_subdev_enum_mbus_code,
+ .get_fmt = fimc_isp_subdev_get_fmt,
+ .set_fmt = fimc_isp_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops fimc_is_subdev_video_ops = {
+ .s_stream = fimc_isp_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops fimc_is_core_ops = {
+ .s_power = fimc_isp_subdev_s_power,
+};
+
+static const struct v4l2_subdev_ops fimc_is_subdev_ops = {
+ .core = &fimc_is_core_ops,
+ .video = &fimc_is_subdev_video_ops,
+ .pad = &fimc_is_subdev_pad_ops,
+};
+
+static int __ctrl_set_white_balance(struct fimc_is *is, int value)
+{
+ switch (value) {
+ case V4L2_WHITE_BALANCE_AUTO:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_AUTO, 0);
+ break;
+ case V4L2_WHITE_BALANCE_DAYLIGHT:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_DAYLIGHT);
+ break;
+ case V4L2_WHITE_BALANCE_CLOUDY:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_CLOUDY);
+ break;
+ case V4L2_WHITE_BALANCE_INCANDESCENT:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_TUNGSTEN);
+ break;
+ case V4L2_WHITE_BALANCE_FLUORESCENT:
+ __is_set_isp_awb(is, ISP_AWB_COMMAND_ILLUMINATION,
+ ISP_AWB_ILLUMINATION_FLUORESCENT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
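+/*
+ * Apply the V4L2_CID_3A_LOCK control: the exposure and white balance locks
+ * are applied with two separate parameter updates sent to the firmware.
+ */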
+static int __ctrl_set_aewb_lock(struct fimc_is *is,
+ struct v4l2_ctrl *ctrl)
+{
+ bool awb_lock = ctrl->val & V4L2_LOCK_WHITE_BALANCE;
+ bool ae_lock = ctrl->val & V4L2_LOCK_EXPOSURE;
+ struct isp_param *isp = &is->is_p_region->parameter.isp;
+ int cmd, ret;
+
+ cmd = ae_lock ? ISP_AA_COMMAND_STOP : ISP_AA_COMMAND_START;
+ isp->aa.cmd = cmd;
+ isp->aa.target = ISP_AA_TARGET_AE;
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+ is->af.ae_lock_state = ae_lock;
+ wmb();
+
+ ret = fimc_is_itf_s_param(is, false);
+ if (ret < 0)
+ return ret;
+
+ cmd = awb_lock ? ISP_AA_COMMAND_STOP : ISP_AA_COMMAND_START;
+ isp->aa.cmd = cmd;
+ isp->aa.target = ISP_AA_TARGET_AE;
+ fimc_is_set_param_bit(is, PARAM_ISP_AA);
+ is->af.awb_lock_state = awb_lock;
+ wmb();
+
+ return fimc_is_itf_s_param(is, false);
+}
+
+/* Supported manual ISO values */
+static const s64 iso_qmenu[] = {
+ 50, 100, 200, 400, 800,
+};
+
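+/* Select automatic ISO sensitivity or one of the manual values from iso_qmenu[] */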
+static int __ctrl_set_iso(struct fimc_is *is, int value)
+{
+ unsigned int idx, iso;
+
+ if (value == V4L2_ISO_SENSITIVITY_AUTO) {
+ __is_set_isp_iso(is, ISP_ISO_COMMAND_AUTO, 0);
+ return 0;
+ }
+ idx = is->isp.ctrls.iso->val;
+ if (idx >= ARRAY_SIZE(iso_qmenu))
+ return -EINVAL;
+
+ iso = iso_qmenu[idx];
+ __is_set_isp_iso(is, ISP_ISO_COMMAND_MANUAL, iso);
+ return 0;
+}
+
+static int __ctrl_set_metering(struct fimc_is *is, unsigned int value)
+{
+ unsigned int val;
+
+ switch (value) {
+ case V4L2_EXPOSURE_METERING_AVERAGE:
+ val = ISP_METERING_COMMAND_AVERAGE;
+ break;
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ val = ISP_METERING_COMMAND_CENTER;
+ break;
+ case V4L2_EXPOSURE_METERING_SPOT:
+ val = ISP_METERING_COMMAND_SPOT;
+ break;
+ case V4L2_EXPOSURE_METERING_MATRIX:
+ val = ISP_METERING_COMMAND_MATRIX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val);
+ return 0;
+}
+
+static int __ctrl_set_afc(struct fimc_is *is, int value)
+{
+ switch (value) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_DISABLE, 0);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_MANUAL, 50);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_MANUAL, 60);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_AUTO:
+ __is_set_isp_afc(is, ISP_AFC_COMMAND_AUTO, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __ctrl_set_image_effect(struct fimc_is *is, int value)
+{
+ static const u8 effects[][2] = {
+ { V4L2_COLORFX_NONE, ISP_IMAGE_EFFECT_DISABLE },
+ { V4L2_COLORFX_BW, ISP_IMAGE_EFFECT_MONOCHROME },
+ { V4L2_COLORFX_SEPIA, ISP_IMAGE_EFFECT_SEPIA },
+ { V4L2_COLORFX_NEGATIVE, ISP_IMAGE_EFFECT_NEGATIVE_MONO },
+ { 16 /* TODO */, ISP_IMAGE_EFFECT_NEGATIVE_COLOR },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(effects); i++) {
+ if (effects[i][0] != value)
+ continue;
+
+ __is_set_isp_effect(is, effects[i][1]);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int fimc_is_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_isp *isp = ctrl_to_fimc_isp(ctrl);
+ struct fimc_is *is = fimc_isp_to_is(isp);
+ bool set_param = true;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_CONTRAST,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_SATURATION,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_SHARPNESS,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_EXPOSURE,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_HUE:
+ __is_set_isp_adjust(is, ISP_ADJUST_COMMAND_MANUAL_HUE,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_METERING:
+ ret = __ctrl_set_metering(is, ctrl->val);
+ break;
+
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ ret = __ctrl_set_white_balance(is, ctrl->val);
+ break;
+
+ case V4L2_CID_3A_LOCK:
+ ret = __ctrl_set_aewb_lock(is, ctrl);
+ set_param = false;
+ break;
+
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ ret = __ctrl_set_iso(is, ctrl->val);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ ret = __ctrl_set_afc(is, ctrl->val);
+ break;
+
+ case V4L2_CID_COLORFX:
+ __ctrl_set_image_effect(is, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0) {
+ v4l2_err(&isp->subdev, "Failed to set control: %s (%d)\n",
+ ctrl->name, ctrl->val);
+ return ret;
+ }
+
+ if (set_param && test_bit(IS_ST_STREAM_ON, &is->state))
+ return fimc_is_itf_s_param(is, true);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_isp_ctrl_ops = {
+ .s_ctrl = fimc_is_s_ctrl,
+};
+
+static void __isp_subdev_set_default_format(struct fimc_isp *isp)
+{
+ struct fimc_is *is = fimc_isp_to_is(isp);
+
+ isp->sink_fmt.width = DEFAULT_PREVIEW_STILL_WIDTH +
+ FIMC_ISP_CAC_MARGIN_WIDTH;
+ isp->sink_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT +
+ FIMC_ISP_CAC_MARGIN_HEIGHT;
+ isp->sink_fmt.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ isp->src_fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
+ isp->src_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
+ isp->src_fmt.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ __is_set_frame_size(is, &isp->src_fmt);
+}
+
+int fimc_isp_subdev_create(struct fimc_isp *isp)
+{
+ const struct v4l2_ctrl_ops *ops = &fimc_isp_ctrl_ops;
+ struct v4l2_ctrl_handler *handler = &isp->ctrls.handler;
+ struct v4l2_subdev *sd = &isp->subdev;
+ struct fimc_isp_ctrls *ctrls = &isp->ctrls;
+ int ret;
+
+ mutex_init(&isp->subdev_lock);
+
+ v4l2_subdev_init(sd, &fimc_is_subdev_ops);
+
+ sd->owner = THIS_MODULE;
+ sd->grp_id = GRP_ID_FIMC_IS;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC-IS-ISP");
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ isp->subdev_pads[FIMC_ISP_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_FIFO].flags = MEDIA_PAD_FL_SOURCE;
+ isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_DMA].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, FIMC_ISP_SD_PADS_NUM,
+ isp->subdev_pads);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 20);
+
+ ctrls->saturation = v4l2_ctrl_new_std(handler, ops, V4L2_CID_SATURATION,
+ -2, 2, 1, 0);
+ ctrls->brightness = v4l2_ctrl_new_std(handler, ops, V4L2_CID_BRIGHTNESS,
+ -4, 4, 1, 0);
+ ctrls->contrast = v4l2_ctrl_new_std(handler, ops, V4L2_CID_CONTRAST,
+ -2, 2, 1, 0);
+ ctrls->sharpness = v4l2_ctrl_new_std(handler, ops, V4L2_CID_SHARPNESS,
+ -2, 2, 1, 0);
+ ctrls->hue = v4l2_ctrl_new_std(handler, ops, V4L2_CID_HUE,
+ -2, 2, 1, 0);
+
+ ctrls->auto_wb = v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ 8, ~0x14e, V4L2_WHITE_BALANCE_AUTO);
+
+ ctrls->exposure = v4l2_ctrl_new_std(handler, ops,
+ V4L2_CID_EXPOSURE_ABSOLUTE,
+ -4, 4, 1, 0);
+
+ ctrls->exp_metering = v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_EXPOSURE_METERING, 3,
+ ~0xf, V4L2_EXPOSURE_METERING_AVERAGE);
+
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+ /* ISO sensitivity */
+ ctrls->auto_iso = v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_ISO_SENSITIVITY_AUTO, 1, 0,
+ V4L2_ISO_SENSITIVITY_AUTO);
+
+ ctrls->iso = v4l2_ctrl_new_int_menu(handler, ops,
+ V4L2_CID_ISO_SENSITIVITY, ARRAY_SIZE(iso_qmenu) - 1,
+ ARRAY_SIZE(iso_qmenu)/2 - 1, iso_qmenu);
+
+ ctrls->aewb_lock = v4l2_ctrl_new_std(handler, ops,
+ V4L2_CID_3A_LOCK, 0, 0x3, 0, 0);
+
+ /* TODO: Add support for NEGATIVE_COLOR option */
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_SET_CBCR + 1, ~0x1000f, V4L2_COLORFX_NONE);
+
+ if (handler->error) {
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_iso,
+ V4L2_ISO_SENSITIVITY_MANUAL, false);
+
+ sd->ctrl_handler = handler;
+ sd->internal_ops = &fimc_is_subdev_internal_ops;
+ sd->entity.ops = &fimc_is_subdev_media_ops;
+ v4l2_set_subdevdata(sd, isp);
+
+ __isp_subdev_set_default_format(isp);
+
+ return 0;
+}
+
+void fimc_isp_subdev_destroy(struct fimc_isp *isp)
+{
+ struct v4l2_subdev *sd = &isp->subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&isp->ctrls.handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h
new file mode 100644
index 000000000..3cdd52491
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-isp.h
@@ -0,0 +1,193 @@
+/*
+ * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Younghwan Joo <yhwan.joo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FIMC_ISP_H_
+#define FIMC_ISP_H_
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+extern int fimc_isp_debug;
+
+#define isp_dbg(level, dev, fmt, arg...) \
+ v4l2_dbg(level, fimc_isp_debug, dev, fmt, ## arg)
+
+/* FIXME: revisit these constraints */
+#define FIMC_ISP_SINK_WIDTH_MIN (16 + 8)
+#define FIMC_ISP_SINK_HEIGHT_MIN (12 + 8)
+#define FIMC_ISP_SOURCE_WIDTH_MIN 8
+#define FIMC_ISP_SOURCE_HEIGHT_MIN 8
+#define FIMC_ISP_CAC_MARGIN_WIDTH 16
+#define FIMC_ISP_CAC_MARGIN_HEIGHT 12
+
+#define FIMC_ISP_SINK_WIDTH_MAX (4000 - 16)
+#define FIMC_ISP_SINK_HEIGHT_MAX (4000 + 12)
+#define FIMC_ISP_SOURCE_WIDTH_MAX 4000
+#define FIMC_ISP_SOURCE_HEIGHT_MAX 4000
+
+#define FIMC_ISP_NUM_FORMATS 3
+#define FIMC_ISP_REQ_BUFS_MIN 2
+#define FIMC_ISP_REQ_BUFS_MAX 32
+
+#define FIMC_ISP_SD_PAD_SINK 0
+#define FIMC_ISP_SD_PAD_SRC_FIFO 1
+#define FIMC_ISP_SD_PAD_SRC_DMA 2
+#define FIMC_ISP_SD_PADS_NUM 3
+#define FIMC_ISP_MAX_PLANES 1
+
+/**
+ * struct fimc_isp_frame - source/target frame properties
+ * @width: full image width
+ * @height: full image height
+ * @rect: crop/composition rectangle
+ */
+struct fimc_isp_frame {
+ u16 width;
+ u16 height;
+ struct v4l2_rect rect;
+};
+
+struct fimc_isp_ctrls {
+ struct v4l2_ctrl_handler handler;
+
+ /* Auto white balance */
+ struct v4l2_ctrl *auto_wb;
+ /* Auto ISO control cluster */
+ struct {
+ struct v4l2_ctrl *auto_iso;
+ struct v4l2_ctrl *iso;
+ };
+ /* Adjust - contrast */
+ struct v4l2_ctrl *contrast;
+ /* Adjust - saturation */
+ struct v4l2_ctrl *saturation;
+ /* Adjust - sharpness */
+ struct v4l2_ctrl *sharpness;
+ /* Adjust - brightness */
+ struct v4l2_ctrl *brightness;
+ /* Adjust - hue */
+ struct v4l2_ctrl *hue;
+
+ /* Auto/manual exposure */
+ struct v4l2_ctrl *auto_exp;
+ /* Manual exposure value */
+ struct v4l2_ctrl *exposure;
+ /* AE/AWB lock/unlock */
+ struct v4l2_ctrl *aewb_lock;
+ /* Exposure metering mode */
+ struct v4l2_ctrl *exp_metering;
+ /* AFC */
+ struct v4l2_ctrl *afc;
+ /* ISP image effect */
+ struct v4l2_ctrl *colorfx;
+};
+
+struct isp_video_buf {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t dma_addr[FIMC_ISP_MAX_PLANES];
+ unsigned int index;
+};
+
+#define to_isp_video_buf(_b) container_of(_b, struct isp_video_buf, vb)
+
+#define FIMC_ISP_MAX_BUFS 4
+
+/**
+ * struct fimc_is_video - fimc-is video device structure
+ * @ve: exynos video entity structure
+ * @type: video device type (CAPTURE/OUTPUT)
+ * @pad: video device media (sink) pad
+ * @pending_buf_q: pending buffers queue head
+ * @active_buf_q: a queue head of buffers scheduled in hardware
+ * @vb_queue: vb2 buffer queue
+ * @reqbufs_count: number of buffers requested with REQBUFS ioctl
+ * @frame_count: counter of frames dequeued to user space
+ * @streaming: streaming state flag
+ * @format: current pixel format
+ * @pixfmt: current multi-planar pixel format settings
+ */
+struct fimc_is_video {
+ struct exynos_video_entity ve;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vb_queue;
+ unsigned int reqbufs_count;
+ unsigned int buf_count;
+ unsigned int buf_mask;
+ unsigned int frame_count;
+ int streaming;
+ struct isp_video_buf *buffers[FIMC_ISP_MAX_BUFS];
+ const struct fimc_fmt *format;
+ struct v4l2_pix_format_mplane pixfmt;
+};
+
+/* struct fimc_isp:state bit definitions */
+#define ST_ISP_VID_CAP_BUF_PREP 0
+#define ST_ISP_VID_CAP_STREAMING 1
+
+/**
+ * struct fimc_isp - FIMC-IS ISP data structure
+ * @pdev: pointer to FIMC-IS platform device
+ * @subdev: ISP v4l2_subdev
+ * @subdev_pads: the ISP subdev media pads
+ * @src_fmt: the ISP output (source pad) mediabus frame format
+ * @sink_fmt: the ISP input (sink pad) mediabus frame format
+ * @test_pattern: test pattern controls
+ * @ctrls: v4l2 controls structure
+ * @video_lock: mutex serializing video device and the subdev operations
+ * @subdev_lock: mutex serializing the subdev get/set format operations
+ * @cac_margin_x: horizontal CAC margin in pixels
+ * @cac_margin_y: vertical CAC margin in pixels
+ * @state: driver state flags
+ * @video_capture: the ISP block video capture device
+ */
+struct fimc_isp {
+ struct platform_device *pdev;
+ struct v4l2_subdev subdev;
+ struct media_pad subdev_pads[FIMC_ISP_SD_PADS_NUM];
+ struct v4l2_mbus_framefmt src_fmt;
+ struct v4l2_mbus_framefmt sink_fmt;
+ struct v4l2_ctrl *test_pattern;
+ struct fimc_isp_ctrls ctrls;
+
+ struct mutex video_lock;
+ struct mutex subdev_lock;
+
+ unsigned int cac_margin_x;
+ unsigned int cac_margin_y;
+
+ unsigned long state;
+
+ struct fimc_is_video video_capture;
+};
+
+#define ctrl_to_fimc_isp(_ctrl) \
+ container_of(ctrl->handler, struct fimc_isp, ctrls.handler)
+
+struct fimc_is;
+
+int fimc_isp_subdev_create(struct fimc_isp *isp);
+void fimc_isp_subdev_destroy(struct fimc_isp *isp);
+void fimc_isp_irq_handler(struct fimc_is *is);
+int fimc_is_create_controls(struct fimc_isp *isp);
+int fimc_is_delete_controls(struct fimc_isp *isp);
+const struct fimc_fmt *fimc_isp_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, int index);
+#endif /* FIMC_ISP_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
new file mode 100644
index 000000000..16565a0b4
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
@@ -0,0 +1,349 @@
+/*
+ * Register interface file for EXYNOS FIMC-LITE (camera interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+#include "fimc-lite-reg.h"
+#include "fimc-lite.h"
+#include "fimc-core.h"
+
+#define FLITE_RESET_TIMEOUT 50 /* in ms */
+
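+/* Request a software reset, wait for the ready flag, then assert the reset bit */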
+void flite_hw_reset(struct fimc_lite *dev)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(FLITE_RESET_TIMEOUT);
+ u32 cfg;
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_SWRST_REQ;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ while (time_is_after_jiffies(end)) {
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (cfg & FLITE_REG_CIGCTRL_SWRST_RDY)
+ break;
+ usleep_range(1000, 5000);
+ }
+
+ cfg |= FLITE_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_clear_pending_irq(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS);
+ cfg &= ~FLITE_REG_CISTATUS_IRQ_CAM;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS);
+}
+
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev)
+{
+ u32 intsrc = readl(dev->regs + FLITE_REG_CISTATUS);
+ return intsrc & FLITE_REG_CISTATUS_IRQ_MASK;
+}
+
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS2);
+ cfg &= ~FLITE_REG_CISTATUS2_LASTCAPEND;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS2);
+}
+
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev)
+{
+ u32 cfg, intsrc;
+
+ /* Select interrupts to be enabled for each output mode */
+ if (atomic_read(&dev->out_path) == FIMC_IO_DMA) {
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN |
+ FLITE_REG_CIGCTRL_IRQ_STARTEN |
+ FLITE_REG_CIGCTRL_IRQ_ENDEN;
+ } else {
+ /* An output to the FIMC-IS */
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN;
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK;
+ cfg &= ~intsrc;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_capture_start(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg |= FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+void flite_hw_capture_stop(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg &= ~FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+/*
+ * Test pattern (color bars) enable/disable. External sensor
+ * pixel clock must be active for the test pattern to work.
+ */
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (on)
+ cfg |= FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ else
+ cfg &= ~FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
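+/* Mapping of media bus codes to the CISRCSIZE 4:2:2 order and CIGCTRL format bits */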
+static const u32 src_pixfmt_map[8][3] = {
+ { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
+ { MEDIA_BUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
+};
+
+/* Set camera input pixel format and resolution */
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 pixelcode = f->fmt->mbus_code;
+ int i = ARRAY_SIZE(src_pixfmt_map);
+ u32 cfg;
+
+ while (--i) {
+ if (src_pixfmt_map[i][0] == pixelcode)
+ break;
+ }
+
+ if (i == 0 && src_pixfmt_map[i][0] != pixelcode) {
+ v4l2_err(&dev->ve.vdev,
+ "Unsupported pixel code, falling back to %#08x\n",
+ src_pixfmt_map[i][0]);
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg &= ~FLITE_REG_CIGCTRL_FMT_MASK;
+ cfg |= src_pixfmt_map[i][2];
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ cfg = readl(dev->regs + FLITE_REG_CISRCSIZE);
+ cfg &= ~(FLITE_REG_CISRCSIZE_ORDER422_MASK |
+ FLITE_REG_CISRCSIZE_SIZE_CAM_MASK);
+ cfg |= (f->f_width << 16) | f->f_height;
+ cfg |= src_pixfmt_map[i][1];
+ writel(cfg, dev->regs + FLITE_REG_CISRCSIZE);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 hoff2, voff2;
+ u32 cfg;
+
+ cfg = readl(dev->regs + FLITE_REG_CIWDOFST);
+ cfg &= ~FLITE_REG_CIWDOFST_OFST_MASK;
+ cfg |= (f->rect.left << 16) | f->rect.top;
+ cfg |= FLITE_REG_CIWDOFST_WINOFSEN;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST);
+
+ hoff2 = f->f_width - f->rect.width - f->rect.left;
+ voff2 = f->f_height - f->rect.height - f->rect.top;
+
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST2);
+}
+
+/* Select camera port (A, B) */
+static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGENERAL);
+ if (id == 0)
+ cfg &= ~FLITE_REG_CIGENERAL_CAM_B;
+ else
+ cfg |= FLITE_REG_CIGENERAL_CAM_B;
+ writel(cfg, dev->regs + FLITE_REG_CIGENERAL);
+}
+
+/* Select serial or parallel bus, camera port (A,B) and set signals polarity */
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct fimc_source_info *si)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ unsigned int flags = si->flags;
+
+ if (si->sensor_bus_type != FIMC_BUS_TYPE_MIPI_CSI2) {
+ cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI |
+ FLITE_REG_CIGCTRL_INVPOLPCLK |
+ FLITE_REG_CIGCTRL_INVPOLVSYNC |
+ FLITE_REG_CIGCTRL_INVPOLHREF);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLVSYNC;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLHREF;
+ } else {
+ cfg |= FLITE_REG_CIGCTRL_SELCAM_MIPI;
+ }
+
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_camera_port(dev, si->mux_id);
+}
+
+static void flite_hw_set_pack12(struct fimc_lite *dev, int on)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
+
+ cfg &= ~FLITE_REG_CIODMAFMT_PACK12;
+
+ if (on)
+ cfg |= FLITE_REG_CIODMAFMT_PACK12;
+
+ writel(cfg, dev->regs + FLITE_REG_CIODMAFMT);
+}
+
+static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
+{
+ static const u32 pixcode[4][2] = {
+ { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
+ { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
+ { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
+ { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
+ };
+ u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
+ int i = ARRAY_SIZE(pixcode);
+
+ while (--i)
+ if (pixcode[i][0] == f->fmt->mbus_code)
+ break;
+ cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
+ writel(cfg | pixcode[i][1], dev->regs + FLITE_REG_CIODMAFMT);
+}
+
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 cfg;
+
+ /* Maximum output pixel size */
+ cfg = readl(dev->regs + FLITE_REG_CIOCAN);
+ cfg &= ~FLITE_REG_CIOCAN_MASK;
+ cfg |= (f->f_height << 16) | f->f_width;
+ writel(cfg, dev->regs + FLITE_REG_CIOCAN);
+
+ /* DMA offsets */
+ cfg = readl(dev->regs + FLITE_REG_CIOOFF);
+ cfg &= ~FLITE_REG_CIOOFF_MASK;
+ cfg |= (f->rect.top << 16) | f->rect.left;
+ writel(cfg, dev->regs + FLITE_REG_CIOOFF);
+}
+
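+/*
+ * Program the output DMA start address for the buffer and enable its slot
+ * in the frame count sequence register; devices with a single DMA buffer
+ * always use slot 0.
+ */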
+void flite_hw_set_dma_buffer(struct fimc_lite *dev, struct flite_buffer *buf)
+{
+ unsigned int index;
+ u32 cfg;
+
+ if (dev->dd->max_dma_bufs == 1)
+ index = 0;
+ else
+ index = buf->index;
+
+ if (index == 0)
+ writel(buf->paddr, dev->regs + FLITE_REG_CIOSA);
+ else
+ writel(buf->paddr, dev->regs + FLITE_REG_CIOSAN(index - 1));
+
+ cfg = readl(dev->regs + FLITE_REG_CIFCNTSEQ);
+ cfg |= BIT(index);
+ writel(cfg, dev->regs + FLITE_REG_CIFCNTSEQ);
+}
+
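+/* Disable the given buffer slot in the frame count sequence register */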
+void flite_hw_mask_dma_buffer(struct fimc_lite *dev, u32 index)
+{
+ u32 cfg;
+
+ if (dev->dd->max_dma_bufs == 1)
+ index = 0;
+
+ cfg = readl(dev->regs + FLITE_REG_CIFCNTSEQ);
+ cfg &= ~BIT(index);
+ writel(cfg, dev->regs + FLITE_REG_CIFCNTSEQ);
+}
+
+/* Enable/disable output DMA, set output pixel size and offsets (composition) */
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+
+ if (!enable) {
+ cfg |= FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+ return;
+ }
+
+ cfg &= ~FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_out_order(dev, f);
+ flite_hw_set_dma_window(dev, f);
+ flite_hw_set_pack12(dev, 0);
+}
+
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CISRCSIZE" },
+ { 0x04, "CIGCTRL" },
+ { 0x08, "CIIMGCPT" },
+ { 0x0c, "CICPTSEQ" },
+ { 0x10, "CIWDOFST" },
+ { 0x14, "CIWDOFST2" },
+ { 0x18, "CIODMAFMT" },
+ { 0x20, "CIOCAN" },
+ { 0x24, "CIOOFF" },
+ { 0x30, "CIOSA" },
+ { 0x40, "CISTATUS" },
+ { 0x44, "CISTATUS2" },
+ { 0xf0, "CITHOLD" },
+ { 0xfc, "CIGENERAL" },
+ };
+ u32 i;
+
+ v4l2_info(&dev->subdev, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(dev->regs + registers[i].offset);
+ v4l2_info(&dev->subdev, "%9s: 0x%08x\n",
+ registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.h b/drivers/media/platform/exynos4-is/fimc-lite-reg.h
new file mode 100644
index 000000000..10a7d7bbc
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_REG_H_
+#define FIMC_LITE_REG_H_
+
+#include "fimc-lite.h"
+
+/* Camera Source size */
+#define FLITE_REG_CISRCSIZE 0x00
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR (0 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB (1 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY (2 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY (3 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_MASK (0x3 << 14)
+#define FLITE_REG_CISRCSIZE_SIZE_CAM_MASK (0x3fff << 16 | 0x3fff)
+
+/* Global control */
+#define FLITE_REG_CIGCTRL 0x04
+#define FLITE_REG_CIGCTRL_YUV422_1P (0x1e << 24)
+#define FLITE_REG_CIGCTRL_RAW8 (0x2a << 24)
+#define FLITE_REG_CIGCTRL_RAW10 (0x2b << 24)
+#define FLITE_REG_CIGCTRL_RAW12 (0x2c << 24)
+#define FLITE_REG_CIGCTRL_RAW14 (0x2d << 24)
+/* User defined formats. x = 0...15 */
+#define FLITE_REG_CIGCTRL_USER(x) ((0x30 + x - 1) << 24)
+#define FLITE_REG_CIGCTRL_FMT_MASK (0x3f << 24)
+#define FLITE_REG_CIGCTRL_SHADOWMASK_DISABLE (1 << 21)
+#define FLITE_REG_CIGCTRL_ODMA_DISABLE (1 << 20)
+#define FLITE_REG_CIGCTRL_SWRST_REQ (1 << 19)
+#define FLITE_REG_CIGCTRL_SWRST_RDY (1 << 18)
+#define FLITE_REG_CIGCTRL_SWRST (1 << 17)
+#define FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR (1 << 15)
+#define FLITE_REG_CIGCTRL_INVPOLPCLK (1 << 14)
+#define FLITE_REG_CIGCTRL_INVPOLVSYNC (1 << 13)
+#define FLITE_REG_CIGCTRL_INVPOLHREF (1 << 12)
+/* Interrupts mask bits (1 disables an interrupt) */
+#define FLITE_REG_CIGCTRL_IRQ_LASTEN (1 << 8)
+#define FLITE_REG_CIGCTRL_IRQ_ENDEN (1 << 7)
+#define FLITE_REG_CIGCTRL_IRQ_STARTEN (1 << 6)
+#define FLITE_REG_CIGCTRL_IRQ_OVFEN (1 << 5)
+#define FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK (0xf << 5)
+#define FLITE_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+
+/* Image Capture Enable */
+#define FLITE_REG_CIIMGCPT 0x08
+#define FLITE_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FLITE_REG_CIIMGCPT_CPT_FREN (1 << 25)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FRCNT (1 << 18)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FREN (0 << 18)
+
+/* Capture Sequence */
+#define FLITE_REG_CICPTSEQ 0x0c
+
+/* Camera Window Offset */
+#define FLITE_REG_CIWDOFST 0x10
+#define FLITE_REG_CIWDOFST_WINOFSEN (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVIY (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FLITE_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FLITE_REG_CIWDOFST_OFST_MASK ((0x1fff << 16) | 0x1fff)
+
+/* Camera Window Offset2 */
+#define FLITE_REG_CIWDOFST2 0x14
+
+/* Camera Output DMA Format */
+#define FLITE_REG_CIODMAFMT 0x18
+#define FLITE_REG_CIODMAFMT_RAW_CON (1 << 15)
+#define FLITE_REG_CIODMAFMT_PACK12 (1 << 14)
+#define FLITE_REG_CIODMAFMT_YCBYCR (0 << 4)
+#define FLITE_REG_CIODMAFMT_YCRYCB (1 << 4)
+#define FLITE_REG_CIODMAFMT_CBYCRY (2 << 4)
+#define FLITE_REG_CIODMAFMT_CRYCBY (3 << 4)
+#define FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK (0x3 << 4)
+
+/* Camera Output Canvas */
+#define FLITE_REG_CIOCAN 0x20
+#define FLITE_REG_CIOCAN_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Offset */
+#define FLITE_REG_CIOOFF 0x24
+#define FLITE_REG_CIOOFF_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Start Address */
+#define FLITE_REG_CIOSA 0x30
+
+/* Camera Status */
+#define FLITE_REG_CISTATUS 0x40
+#define FLITE_REG_CISTATUS_MIPI_VVALID (1 << 22)
+#define FLITE_REG_CISTATUS_MIPI_HVALID (1 << 21)
+#define FLITE_REG_CISTATUS_MIPI_DVALID (1 << 20)
+#define FLITE_REG_CISTATUS_ITU_VSYNC (1 << 14)
+#define FLITE_REG_CISTATUS_ITU_HREFF (1 << 13)
+#define FLITE_REG_CISTATUS_OVFIY (1 << 10)
+#define FLITE_REG_CISTATUS_OVFICB (1 << 9)
+#define FLITE_REG_CISTATUS_OVFICR (1 << 8)
+#define FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW (1 << 7)
+#define FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND (1 << 6)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART (1 << 5)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMEND (1 << 4)
+#define FLITE_REG_CISTATUS_IRQ_CAM (1 << 0)
+#define FLITE_REG_CISTATUS_IRQ_MASK (0xf << 4)
+
+/* Camera Status2 */
+#define FLITE_REG_CISTATUS2 0x44
+#define FLITE_REG_CISTATUS2_LASTCAPEND (1 << 1)
+#define FLITE_REG_CISTATUS2_FRMEND (1 << 0)
+
+/* Qos Threshold */
+#define FLITE_REG_CITHOLD 0xf0
+#define FLITE_REG_CITHOLD_W_QOS_EN (1 << 30)
+
+/* Camera General Purpose */
+#define FLITE_REG_CIGENERAL 0xfc
+/* b0: 1 - camera B, 0 - camera A */
+#define FLITE_REG_CIGENERAL_CAM_B (1 << 0)
+
+#define FLITE_REG_CIFCNTSEQ 0x100
+#define FLITE_REG_CIOSAN(x) (0x200 + (4 * (x)))
+
+/* ----------------------------------------------------------------------------
+ * Function declarations
+ */
+void flite_hw_reset(struct fimc_lite *dev);
+void flite_hw_clear_pending_irq(struct fimc_lite *dev);
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev);
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev);
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
+void flite_hw_capture_start(struct fimc_lite *dev);
+void flite_hw_capture_stop(struct fimc_lite *dev);
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct fimc_source_info *s_info);
+void flite_hw_set_camera_polarity(struct fimc_lite *dev,
+ struct fimc_source_info *cam);
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
+
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable);
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on);
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label);
+void flite_hw_set_dma_buffer(struct fimc_lite *dev, struct flite_buffer *buf);
+void flite_hw_mask_dma_buffer(struct fimc_lite *dev, u32 index);
+
+static inline void flite_hw_set_dma_buf_mask(struct fimc_lite *dev, u32 mask)
+{
+ writel(mask, dev->regs + FLITE_REG_CIFCNTSEQ);
+}
+
+#endif /* FIMC_LITE_REG_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
new file mode 100644
index 000000000..10fe7d2e8
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -0,0 +1,1692 @@
+/*
+ * Samsung EXYNOS FIMC-LITE (camera host interface) driver
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+#include "common.h"
+#include "fimc-core.h"
+#include "fimc-lite.h"
+#include "fimc-lite-reg.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static const struct fimc_fmt fimc_lite_formats[] = {
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_CBYCRY422,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_CRYCBY422,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .flags = FMT_FLAGS_YUV,
+ }, {
+ .name = "RAW8 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = { 8 },
+ .color = FIMC_FMT_RAW8,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .flags = FMT_FLAGS_RAW_BAYER,
+ }, {
+ .name = "RAW10 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = { 16 },
+ .color = FIMC_FMT_RAW10,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .flags = FMT_FLAGS_RAW_BAYER,
+ }, {
+ .name = "RAW12 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = { 16 },
+ .color = FIMC_FMT_RAW12,
+ .memplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .flags = FMT_FLAGS_RAW_BAYER,
+ },
+};
+
+/**
+ * fimc_lite_find_format - lookup fimc color format by fourcc or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @mask: the color format flags to match
+ * @index: index to the fimc_lite_formats array, ignored if negative
+ */
+static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, unsigned int mask, int index)
+{
+ const struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_lite_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_lite_formats); ++i) {
+ fmt = &fimc_lite_formats[i];
+ if (mask && !(fmt->flags & mask))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
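+/*
+ * Program the camera bus, source format, window offset, DMA output and
+ * interrupt mask according to the current input/output frame configuration.
+ */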
+static int fimc_lite_hw_init(struct fimc_lite *fimc, bool isp_output)
+{
+ struct fimc_source_info *si;
+ unsigned long flags;
+
+ if (fimc->sensor == NULL)
+ return -ENXIO;
+
+ if (fimc->inp_frame.fmt == NULL || fimc->out_frame.fmt == NULL)
+ return -EINVAL;
+
+ /* Get sensor configuration data from the sensor subdev */
+ si = v4l2_get_subdev_hostdata(fimc->sensor);
+ if (!si)
+ return -EINVAL;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ flite_hw_set_camera_bus(fimc, si);
+ flite_hw_set_source_format(fimc, &fimc->inp_frame);
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_dma_buf_mask(fimc, 0);
+ flite_hw_set_output_dma(fimc, &fimc->out_frame, !isp_output);
+ flite_hw_set_interrupt_mask(fimc);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start the streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
+{
+ struct flite_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ streaming = fimc->state & (1 << ST_SENSOR_STREAM);
+
+ fimc->state &= ~(1 << ST_FLITE_RUN | 1 << ST_FLITE_OFF |
+ 1 << ST_FLITE_STREAM | 1 << ST_SENSOR_STREAM);
+ if (suspend)
+ fimc->state |= (1 << ST_FLITE_SUSPENDED);
+ else
+ fimc->state &= ~(1 << ST_FLITE_PENDING |
+ 1 << ST_FLITE_SUSPENDED);
+
+ /* Release unused buffers */
+ while (!suspend && !list_empty(&fimc->pending_buf_q)) {
+ buf = fimc_lite_pending_queue_pop(fimc);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ /* If suspending put unused buffers onto pending queue */
+ while (!list_empty(&fimc->active_buf_q)) {
+ buf = fimc_lite_active_queue_pop(fimc);
+ if (suspend)
+ fimc_lite_pending_queue_add(fimc, buf);
+ else
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ flite_hw_reset(fimc);
+
+ if (!streaming)
+ return 0;
+
+ return fimc_pipeline_call(&fimc->ve, set_stream, 0);
+}
+
+static int fimc_lite_stop_capture(struct fimc_lite *fimc, bool suspend)
+{
+ unsigned long flags;
+
+ if (!fimc_lite_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_FLITE_OFF, &fimc->state);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ (2*HZ/10)); /* 200 ms */
+
+ return fimc_lite_reinit(fimc, suspend);
+}
+
+/* Must be called with fimc.slock spinlock held. */
+static void fimc_lite_config_update(struct fimc_lite *fimc)
+{
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_dma_window(fimc, &fimc->out_frame);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+ clear_bit(ST_FLITE_CONFIG, &fimc->state);
+}
+
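+/*
+ * FIMC-LITE interrupt handler: processes overflow and last-capture-end
+ * events and, in DMA output mode, rotates the pending/active buffer queues
+ * on frame start/end interrupts.
+ */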
+static irqreturn_t flite_irq_handler(int irq, void *priv)
+{
+ struct fimc_lite *fimc = priv;
+ struct flite_buffer *vbuf;
+ unsigned long flags;
+ u32 intsrc;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ intsrc = flite_hw_get_interrupt_source(fimc);
+ flite_hw_clear_pending_irq(fimc);
+
+ if (test_and_clear_bit(ST_FLITE_OFF, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW) {
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ fimc->events.data_overflow++;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND) {
+ flite_hw_clear_last_capture_end(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ wake_up(&fimc->irq_queue);
+ }
+
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA)
+ goto done;
+
+ if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) &&
+ test_bit(ST_FLITE_RUN, &fimc->state) &&
+ !list_empty(&fimc->pending_buf_q)) {
+ vbuf = fimc_lite_pending_queue_pop(fimc);
+ flite_hw_set_dma_buffer(fimc, vbuf);
+ fimc_lite_active_queue_add(fimc, vbuf);
+ }
+
+ if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMEND) &&
+ test_bit(ST_FLITE_RUN, &fimc->state) &&
+ !list_empty(&fimc->active_buf_q)) {
+ vbuf = fimc_lite_active_queue_pop(fimc);
+ vbuf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vbuf->vb.sequence = fimc->frame_count++;
+ flite_hw_mask_dma_buffer(fimc, vbuf->index);
+ vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ if (test_bit(ST_FLITE_CONFIG, &fimc->state))
+ fimc_lite_config_update(fimc);
+
+ if (list_empty(&fimc->pending_buf_q)) {
+ flite_hw_capture_stop(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ }
+done:
+ set_bit(ST_FLITE_RUN, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ fimc->buf_index = 0;
+ fimc->frame_count = 0;
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ ret = fimc_lite_hw_init(fimc, false);
+ if (ret) {
+ fimc_lite_reinit(fimc, false);
+ return ret;
+ }
+
+ set_bit(ST_FLITE_PENDING, &fimc->state);
+
+ if (!list_empty(&fimc->active_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_call(&fimc->ve, set_stream, 1);
+ }
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ return 0;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+
+ if (!fimc_lite_active(fimc))
+ return;
+
+ fimc_lite_stop_capture(fimc, false);
+}
+
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct fimc_lite *fimc = vq->drv_priv;
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = frame->fmt;
+ unsigned long wh = frame->f_width * frame->f_height;
+ int i;
+
+ if (fmt == NULL)
+ return -EINVAL;
+
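+ /* Non-zero *num_planes means the CREATE_BUFS case: only validate sizes */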
+ if (*num_planes) {
+ if (*num_planes != fmt->memplanes)
+ return -EINVAL;
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < (wh * fmt->depth[i]) / 8)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++)
+ sizes[i] = (wh * fmt->depth[i]) / 8;
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct fimc_lite *fimc = vq->drv_priv;
+ int i;
+
+ if (fimc->out_frame.fmt == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < fimc->out_frame.fmt->memplanes; i++) {
+ unsigned long size = fimc->payload[i];
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(&fimc->ve.vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct flite_buffer *buf
+ = container_of(vbuf, struct flite_buffer, vb);
+ struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ buf->index = fimc->buf_index++;
+ if (fimc->buf_index >= fimc->reqbufs_count)
+ fimc->buf_index = 0;
+
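+ /* Program the buffer into hardware right away if capture is idle */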
+ if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) &&
+ !test_bit(ST_FLITE_STREAM, &fimc->state) &&
+ list_empty(&fimc->active_buf_q)) {
+ flite_hw_set_dma_buffer(fimc, buf);
+ fimc_lite_active_queue_add(fimc, buf);
+ } else {
+ fimc_lite_pending_queue_add(fimc, buf);
+ }
+
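+ /* Kick off capture if streaming is enabled and a buffer is pending */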
+ if (vb2_is_streaming(&fimc->vb_queue) &&
+ !list_empty(&fimc->pending_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_call(&fimc->ve, set_stream, 1);
+ return;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static const struct vb2_ops fimc_lite_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ memset(&fimc->events, 0, sizeof(fimc->events));
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static int fimc_lite_open(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *me = &fimc->ve.vdev.entity;
+ int ret;
+
+ mutex_lock(&fimc->lock);
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ set_bit(ST_FLITE_IN_USE, &fimc->state);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto err_pm;
+
+ if (!v4l2_fh_is_singular_file(file) ||
+ atomic_read(&fimc->out_path) != FIMC_IO_DMA)
+ goto unlock;
+
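+ /* First open of this video node: initialize the capture pipeline */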
+ mutex_lock(&me->graph_obj.mdev->graph_mutex);
+
+ ret = fimc_pipeline_call(&fimc->ve, open, me, true);
+
+ /* Mark video pipeline ending at this video node as in use. */
+ if (ret == 0)
+ me->use_count++;
+
+ mutex_unlock(&me->graph_obj.mdev->graph_mutex);
+
+ if (!ret) {
+ fimc_lite_clear_event_counters(fimc);
+ goto unlock;
+ }
+
+ v4l2_fh_release(file);
+err_pm:
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_lite_release(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *entity = &fimc->ve.vdev.entity;
+
+ mutex_lock(&fimc->lock);
+
+ if (v4l2_fh_is_singular_file(file) &&
+ atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
+ if (fimc->streaming) {
+ media_pipeline_stop(entity);
+ fimc->streaming = false;
+ }
+ fimc_lite_stop_capture(fimc, false);
+ fimc_pipeline_call(&fimc->ve, close);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+
+ mutex_lock(&entity->graph_obj.mdev->graph_mutex);
+ entity->use_count--;
+ mutex_unlock(&entity->graph_obj.mdev->graph_mutex);
+ }
+
+ _vb2_fop_release(file, NULL);
+ pm_runtime_put(&fimc->pdev->dev);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static const struct v4l2_file_operations fimc_lite_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_lite_open,
+ .release = fimc_lite_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct flite_drvdata *dd = fimc->dd;
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ const struct fimc_fmt *fmt = NULL;
+
+ if (format->pad == FLITE_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, 8, dd->max_width,
+ ffs(dd->out_width_align) - 1,
+ &mf->height, 0, dd->max_height, 0, 0);
+
+ fmt = fimc_lite_find_format(NULL, &mf->code, 0, 0);
+ if (WARN_ON(!fmt))
+ return NULL;
+
+ mf->colorspace = fmt->colorspace;
+ mf->code = fmt->mbus_code;
+ } else {
+ struct flite_frame *sink = &fimc->inp_frame;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *rect;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sink_fmt = v4l2_subdev_get_try_format(&fimc->subdev, cfg,
+ FLITE_SD_PAD_SINK);
+
+ mf->code = sink_fmt->code;
+ mf->colorspace = sink_fmt->colorspace;
+
+ rect = v4l2_subdev_get_try_crop(&fimc->subdev, cfg,
+ FLITE_SD_PAD_SINK);
+ } else {
+ mf->code = sink->fmt->mbus_code;
+ mf->colorspace = sink->fmt->colorspace;
+ rect = &sink->rect;
+ }
+
+ /* The source pad size is locked to the sink crop rectangle size */
+ mf->width = rect->width;
+ mf->height = rect->height;
+ }
+
+ mf->field = V4L2_FIELD_NONE;
+
+ v4l2_dbg(1, debug, &fimc->subdev, "code: %#x (%d), %dx%d\n",
+ mf->code, mf->colorspace, mf->width, mf->height);
+
+ return fmt;
+}
+
+static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->inp_frame;
+
+ v4l_bound_align_image(&r->width, 0, frame->f_width, 0,
+ &r->height, 0, frame->f_height, 0, 0);
+
+ /* Adjust left/top if cropping rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->dd->win_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d\n",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->out_frame;
+ struct v4l2_rect *crop_rect = &fimc->inp_frame.rect;
+
+ /*
+ * Scaling is not supported, so the compose rectangle size is forced
+ * to match the size of the sink crop rectangle.
+ */
+ r->width = crop_rect->width;
+ r->height = crop_rect->height;
+
+ /* Adjust left/top if the composing rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->dd->out_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d\n",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+/*
+ * Video node ioctl operations
+ */
+static int fimc_lite_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ strlcpy(cap->driver, FIMC_LITE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, FIMC_LITE_DRV_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&fimc->pdev->dev));
+
+ cap->device_caps = V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int fimc_lite_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct fimc_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(fimc_lite_formats))
+ return -EINVAL;
+
+ fmt = &fimc_lite_formats[f->index];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int fimc_lite_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt = &pixm->plane_fmt[0];
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = frame->fmt;
+
+ plane_fmt->bytesperline = (frame->f_width * fmt->depth[0]) / 8;
+ plane_fmt->sizeimage = plane_fmt->bytesperline * frame->f_height;
+
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->width = frame->f_width;
+ pixm->height = frame->f_height;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->colorspace = fmt->colorspace;
+ return 0;
+}
+
+static int fimc_lite_try_fmt(struct fimc_lite *fimc,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct fimc_fmt **ffmt)
+{
+ u32 bpl = pixm->plane_fmt[0].bytesperline;
+ struct flite_drvdata *dd = fimc->dd;
+ const struct fimc_fmt *inp_fmt = fimc->inp_frame.fmt;
+ const struct fimc_fmt *fmt;
+
+ if (WARN_ON(inp_fmt == NULL))
+ return -EINVAL;
+ /*
+ * We allow some flexibility only for YUV formats. In case of raw
+ * Bayer the FIMC-LITE's output format must match its camera
+ * interface input format.
+ */
+ if (inp_fmt->flags & FMT_FLAGS_YUV)
+ fmt = fimc_lite_find_format(&pixm->pixelformat, NULL,
+ inp_fmt->flags, 0);
+ else
+ fmt = inp_fmt;
+
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+ if (ffmt)
+ *ffmt = fmt;
+ v4l_bound_align_image(&pixm->width, 8, dd->max_width,
+ ffs(dd->out_width_align) - 1,
+ &pixm->height, 0, dd->max_height, 0, 0);
+
+ if ((bpl == 0 || ((bpl * 8) / fmt->depth[0]) < pixm->width))
+ pixm->plane_fmt[0].bytesperline = (pixm->width *
+ fmt->depth[0]) / 8;
+
+ if (pixm->plane_fmt[0].sizeimage == 0)
+ pixm->plane_fmt[0].sizeimage = (pixm->width * pixm->height *
+ fmt->depth[0]) / 8;
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->colorspace = fmt->colorspace;
+ pixm->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static int fimc_lite_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ return fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, NULL);
+}
+
+static int fimc_lite_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = NULL;
+ int ret;
+
+ if (vb2_is_busy(&fimc->vb_queue))
+ return -EBUSY;
+
+ ret = fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, &fmt);
+ if (ret < 0)
+ return ret;
+
+ frame->fmt = fmt;
+ fimc->payload[0] = max((pixm->width * pixm->height * fmt->depth[0]) / 8,
+ pixm->plane_fmt[0].sizeimage);
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+
+ return 0;
+}
+
+static int fimc_pipeline_validate(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct media_pad *pad;
+ int ret;
+
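+ /* Walk the pipeline upstream and verify formats match across each link */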
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ /* Don't call FIMC subdev operation to avoid nested locking */
+ if (sd == &fimc->subdev) {
+ struct flite_frame *ff = &fimc->out_frame;
+ sink_fmt.format.width = ff->f_width;
+ sink_fmt.format.height = ff->f_height;
+ sink_fmt.format.code = fimc->inp_frame.fmt->mbus_code;
+ } else {
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
+ &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+ }
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
+ return 0;
+}
+
+static int fimc_lite_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *entity = &fimc->ve.vdev.entity;
+ int ret;
+
+ if (fimc_lite_active(fimc))
+ return -EBUSY;
+
+ ret = media_pipeline_start(entity, &fimc->ve.pipe->mp);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_pipeline_validate(fimc);
+ if (ret < 0)
+ goto err_p_stop;
+
+ fimc->sensor = fimc_find_remote_sensor(&fimc->subdev.entity);
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ if (!ret) {
+ fimc->streaming = true;
+ return ret;
+ }
+
+err_p_stop:
+ media_pipeline_stop(entity);
+ return ret;
+}
+
+static int fimc_lite_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ ret = vb2_ioctl_streamoff(file, priv, type);
+ if (ret < 0)
+ return ret;
+
+ media_pipeline_stop(&fimc->ve.vdev.entity);
+ fimc->streaming = false;
+ return 0;
+}
+
+static int fimc_lite_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ reqbufs->count = max_t(u32, FLITE_REQ_BUFS_MIN, reqbufs->count);
+ ret = vb2_ioctl_reqbufs(file, priv, reqbufs);
+ if (!ret)
+ fimc->reqbufs_count = reqbufs->count;
+
+ return ret;
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int fimc_lite_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = f->rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int fimc_lite_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ fimc_lite_try_compose(fimc, &rect);
+
+ if ((sel->flags & V4L2_SEL_FLAG_LE) &&
+ !enclosed_rectangle(&rect, &sel->r))
+ return -ERANGE;
+
+ if ((sel->flags & V4L2_SEL_FLAG_GE) &&
+ !enclosed_rectangle(&sel->r, &rect))
+ return -ERANGE;
+
+ sel->r = rect;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = rect;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
+ .vidioc_querycap = fimc_lite_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_lite_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_lite_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_lite_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_lite_g_fmt_mplane,
+ .vidioc_g_selection = fimc_lite_g_selection,
+ .vidioc_s_selection = fimc_lite_s_selection,
+ .vidioc_reqbufs = fimc_lite_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = fimc_lite_streamon,
+ .vidioc_streamoff = fimc_lite_streamoff,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_lite_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (WARN_ON(fimc == NULL))
+ return 0;
+
+ v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x\n",
+ __func__, remote->entity->name, local->entity->name,
+ flags, fimc->source_subdev_grp_id);
+
+ switch (local->index) {
+ case FLITE_SD_PAD_SINK:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (fimc->source_subdev_grp_id == 0)
+ fimc->source_subdev_grp_id = sd->grp_id;
+ else
+ ret = -EBUSY;
+ } else {
+ fimc->source_subdev_grp_id = 0;
+ fimc->sensor = NULL;
+ }
+ break;
+
+ case FLITE_SD_PAD_SOURCE_DMA:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
+ break;
+
+ case FLITE_SD_PAD_SOURCE_ISP:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else
+ atomic_set(&fimc->out_path, FIMC_IO_ISP);
+ break;
+
+ default:
+ v4l2_err(sd, "Invalid pad index\n");
+ ret = -EINVAL;
+ }
+ mb();
+
+ return ret;
+}
+
+static const struct media_entity_operations fimc_lite_subdev_media_ops = {
+ .link_setup = fimc_lite_link_setup,
+};
+
+static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(NULL, NULL, 0, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *__fimc_lite_subdev_get_try_fmt(
+ struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad)
+{
+ if (pad != FLITE_SD_PAD_SINK)
+ pad = FLITE_SD_PAD_SOURCE_DMA;
+
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
+}
+
+static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *f = &fimc->inp_frame;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&fimc->lock);
+ mf->colorspace = f->fmt->colorspace;
+ mf->code = f->fmt->mbus_code;
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ /* full camera input frame size */
+ mf->width = f->f_width;
+ mf->height = f->f_height;
+ } else {
+ /* crop size */
+ mf->width = f->rect.width;
+ mf->height = f->rect.height;
+ }
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *sink = &fimc->inp_frame;
+ struct flite_frame *source = &fimc->out_frame;
+ const struct fimc_fmt *ffmt;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d\n",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mutex_lock(&fimc->lock);
+
+ if ((atomic_read(&fimc->out_path) == FIMC_IO_ISP &&
+ sd->entity.stream_count > 0) ||
+ (atomic_read(&fimc->out_path) == FIMC_IO_DMA &&
+ vb2_is_busy(&fimc->vb_queue))) {
+ mutex_unlock(&fimc->lock);
+ return -EBUSY;
+ }
+
+ ffmt = fimc_lite_subdev_try_fmt(fimc, cfg, fmt);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
+ *mf = fmt->format;
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ unsigned int pad = FLITE_SD_PAD_SOURCE_DMA;
+ src_fmt = __fimc_lite_subdev_get_try_fmt(sd, cfg, pad);
+ *src_fmt = *mf;
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+ }
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ sink->f_width = mf->width;
+ sink->f_height = mf->height;
+ sink->fmt = ffmt;
+ /* Set sink crop rectangle */
+ sink->rect.width = mf->width;
+ sink->rect.height = mf->height;
+ sink->rect.left = 0;
+ sink->rect.top = 0;
+ /* Reset source format and crop rectangle */
+ source->rect = sink->rect;
+ source->f_width = mf->width;
+ source->f_height = mf->height;
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&fimc->lock);
+ if (sel->target == V4L2_SEL_TGT_CROP) {
+ sel->r = f->rect;
+ } else {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+ int ret = 0;
+
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+ fimc_lite_try_crop(fimc, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = sel->r;
+ /* Same crop rectangle on the source pad */
+ fimc->out_frame.rect = sel->r;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return ret;
+}
+
+static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Find sensor subdev linked to FIMC-LITE directly or through
+ * MIPI-CSIS. This is required for configurations where FIMC-LITE
+ * is used as a subdev only and feeds data internally to FIMC-IS.
+ * The pipeline links are protected through entity.stream_count
+ * so there is no need to take the media graph mutex here.
+ */
+ fimc->sensor = fimc_find_remote_sensor(&sd->entity);
+
+ if (atomic_read(&fimc->out_path) != FIMC_IO_ISP)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&fimc->lock);
+ if (on) {
+ flite_hw_reset(fimc);
+ ret = fimc_lite_hw_init(fimc, true);
+ if (!ret) {
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ } else {
+ set_bit(ST_FLITE_OFF, &fimc->state);
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ ret = wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ msecs_to_jiffies(200));
+ if (ret == 0)
+ v4l2_err(sd, "s_stream(0) timeout\n");
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ }
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_lite_log_status(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ flite_hw_dump_regs(fimc, __func__);
+ return 0;
+}
+
+static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct vb2_queue *q = &fimc->vb_queue;
+ struct video_device *vfd = &fimc->ve.vdev;
+ int ret;
+
+ memset(vfd, 0, sizeof(*vfd));
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture",
+ fimc->index);
+
+ vfd->fops = &fimc_lite_fops;
+ vfd->ioctl_ops = &fimc_lite_ioctl_ops;
+ vfd->v4l2_dev = sd->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->queue = q;
+ fimc->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&fimc->pending_buf_q);
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &fimc_lite_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct flite_buffer);
+ q->drv_priv = fimc;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &fimc->lock;
+ q->dev = &fimc->pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ return ret;
+
+ fimc->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &fimc->vd_pad);
+ if (ret < 0)
+ return ret;
+
+ video_set_drvdata(vfd, fimc);
+ fimc->ve.pipe = v4l2_get_subdev_hostdata(sd);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ media_entity_cleanup(&vfd->entity);
+ fimc->ve.pipe = NULL;
+ return ret;
+ }
+
+ v4l2_info(sd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+}
+
+static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc == NULL)
+ return;
+
+ mutex_lock(&fimc->lock);
+
+ if (video_is_registered(&fimc->ve.vdev)) {
+ video_unregister_device(&fimc->ve.vdev);
+ media_entity_cleanup(&fimc->ve.vdev.entity);
+ fimc->ve.pipe = NULL;
+ }
+
+ mutex_unlock(&fimc->lock);
+}
+
+static const struct v4l2_subdev_internal_ops fimc_lite_subdev_internal_ops = {
+ .registered = fimc_lite_subdev_registered,
+ .unregistered = fimc_lite_subdev_unregistered,
+};
+
+static const struct v4l2_subdev_pad_ops fimc_lite_subdev_pad_ops = {
+ .enum_mbus_code = fimc_lite_subdev_enum_mbus_code,
+ .get_selection = fimc_lite_subdev_get_selection,
+ .set_selection = fimc_lite_subdev_set_selection,
+ .get_fmt = fimc_lite_subdev_get_fmt,
+ .set_fmt = fimc_lite_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = {
+ .s_stream = fimc_lite_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
+ .log_status = fimc_lite_log_status,
+};
+
+static const struct v4l2_subdev_ops fimc_lite_subdev_ops = {
+ .core = &fimc_lite_core_ops,
+ .video = &fimc_lite_subdev_video_ops,
+ .pad = &fimc_lite_subdev_pad_ops,
+};
+
+static int fimc_lite_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_lite *fimc = container_of(ctrl->handler, struct fimc_lite,
+ ctrl_handler);
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_lite_ctrl_ops = {
+ .s_ctrl = fimc_lite_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fimc_lite_ctrl = {
+ .ops = &fimc_lite_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER | 0x1001,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Test Pattern 640x480",
+ .step = 1,
+};
+
+static void fimc_lite_set_default_config(struct fimc_lite *fimc)
+{
+ struct flite_frame *sink = &fimc->inp_frame;
+ struct flite_frame *source = &fimc->out_frame;
+
+ sink->fmt = &fimc_lite_formats[0];
+ sink->f_width = FLITE_DEFAULT_WIDTH;
+ sink->f_height = FLITE_DEFAULT_HEIGHT;
+
+ sink->rect.width = FLITE_DEFAULT_WIDTH;
+ sink->rect.height = FLITE_DEFAULT_HEIGHT;
+ sink->rect.left = 0;
+ sink->rect.top = 0;
+
+ *source = *sink;
+}
+
+static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_ctrl_handler *handler = &fimc->ctrl_handler;
+ struct v4l2_subdev *sd = &fimc->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_lite_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index);
+
+ fimc->subdev_pads[FLITE_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_DMA].flags = MEDIA_PAD_FL_SOURCE;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_ISP].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, FLITE_SD_PADS_NUM,
+ fimc->subdev_pads);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 1);
+ fimc->test_pattern = v4l2_ctrl_new_custom(handler, &fimc_lite_ctrl,
+ NULL);
+ if (handler->error) {
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ sd->ctrl_handler = handler;
+ sd->internal_ops = &fimc_lite_subdev_internal_ops;
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ sd->entity.ops = &fimc_lite_subdev_media_ops;
+ sd->owner = THIS_MODULE;
+ v4l2_set_subdevdata(sd, fimc);
+
+ return 0;
+}
+
+static void fimc_lite_unregister_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&fimc->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+static void fimc_lite_clk_put(struct fimc_lite *fimc)
+{
+ if (IS_ERR(fimc->clock))
+ return;
+
+ clk_put(fimc->clock);
+ fimc->clock = ERR_PTR(-EINVAL);
+}
+
+static int fimc_lite_clk_get(struct fimc_lite *fimc)
+{
+ fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME);
+ return PTR_ERR_OR_ZERO(fimc->clock);
+}
+
+static const struct of_device_id flite_of_match[];
+
+static int fimc_lite_probe(struct platform_device *pdev)
+{
+ struct flite_drvdata *drv_data = NULL;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct fimc_lite *fimc;
+ struct resource *res;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL);
+ if (!fimc)
+ return -ENOMEM;
+
+ of_id = of_match_node(flite_of_match, dev->of_node);
+ if (of_id)
+ drv_data = (struct flite_drvdata *)of_id->data;
+ fimc->index = of_alias_get_id(dev->of_node, "fimc-lite");
+
+ if (!drv_data || fimc->index >= drv_data->num_instances ||
+ fimc->index < 0) {
+ dev_err(dev, "Wrong %pOF node alias\n", dev->of_node);
+ return -EINVAL;
+ }
+
+ fimc->dd = drv_data;
+ fimc->pdev = pdev;
+
+ init_waitqueue_head(&fimc->irq_queue);
+ spin_lock_init(&fimc->slock);
+ mutex_init(&fimc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fimc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fimc->regs))
+ return PTR_ERR(fimc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "Failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ ret = fimc_lite_clk_get(fimc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, res->start, flite_irq_handler,
+ 0, dev_name(dev), fimc);
+ if (ret) {
+ dev_err(dev, "Failed to install irq (%d)\n", ret);
+ goto err_clk_put;
+ }
+
+ /* The video node will be created within the subdev's registered() op */
+ ret = fimc_lite_create_capture_subdev(fimc);
+ if (ret)
+ goto err_clk_put;
+
+ platform_set_drvdata(pdev, fimc);
+ pm_runtime_enable(dev);
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_prepare_enable(fimc->clock);
+ if (ret < 0)
+ goto err_sd;
+ }
+
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ fimc_lite_set_default_config(fimc);
+
+ dev_dbg(dev, "FIMC-LITE.%d registered successfully\n",
+ fimc->index);
+ return 0;
+
+err_sd:
+ fimc_lite_unregister_capture_subdev(fimc);
+err_clk_put:
+ fimc_lite_clk_put(fimc);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int fimc_lite_runtime_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_prepare_enable(fimc->clock);
+ return 0;
+}
+
+static int fimc_lite_runtime_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(fimc->clock);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_lite_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ struct flite_buffer *buf;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+ !test_bit(ST_FLITE_IN_USE, &fimc->state)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ flite_hw_reset(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_clear_bit(ST_FLITE_SUSPENDED, &fimc->state))
+ return 0;
+
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+ fimc_pipeline_call(&fimc->ve, open,
+ &fimc->ve.vdev.entity, false);
+ fimc_lite_hw_init(fimc, atomic_read(&fimc->out_path) == FIMC_IO_ISP);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+
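+ /* Re-queue the buffers that were pending before the suspend */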
+ for (i = 0; i < fimc->reqbufs_count; i++) {
+ if (list_empty(&fimc->pending_buf_q))
+ break;
+ buf = fimc_lite_pending_queue_pop(fimc);
+ buffer_queue(&buf->vb.vb2_buf);
+ }
+ return 0;
+}
+
+static int fimc_lite_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ bool suspend = test_bit(ST_FLITE_IN_USE, &fimc->state);
+ int ret;
+
+ if (test_and_set_bit(ST_LPM, &fimc->state))
+ return 0;
+
+ ret = fimc_lite_stop_capture(fimc, suspend);
+ if (ret < 0 || !fimc_lite_active(fimc))
+ return ret;
+
+ return fimc_pipeline_call(&fimc->ve, close);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int fimc_lite_remove(struct platform_device *pdev)
+{
+ struct fimc_lite *fimc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ fimc_lite_unregister_capture_subdev(fimc);
+ vb2_dma_contig_clear_max_seg_size(dev);
+ fimc_lite_clk_put(fimc);
+
+ dev_info(dev, "Driver unloaded\n");
+ return 0;
+}
+
+static const struct dev_pm_ops fimc_lite_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_lite_suspend, fimc_lite_resume)
+ SET_RUNTIME_PM_OPS(fimc_lite_runtime_suspend, fimc_lite_runtime_resume,
+ NULL)
+};
+
+/* EXYNOS4412 */
+static struct flite_drvdata fimc_lite_drvdata_exynos4 = {
+ .max_width = 8192,
+ .max_height = 8192,
+ .out_width_align = 8,
+ .win_hor_offs_align = 2,
+ .out_hor_offs_align = 8,
+ .max_dma_bufs = 1,
+ .num_instances = 2,
+};
+
+/* EXYNOS5250 */
+static struct flite_drvdata fimc_lite_drvdata_exynos5 = {
+ .max_width = 8192,
+ .max_height = 8192,
+ .out_width_align = 8,
+ .win_hor_offs_align = 2,
+ .out_hor_offs_align = 8,
+ .max_dma_bufs = 32,
+ .num_instances = 3,
+};
+
+static const struct of_device_id flite_of_match[] = {
+ {
+ .compatible = "samsung,exynos4212-fimc-lite",
+ .data = &fimc_lite_drvdata_exynos4,
+ },
+ {
+ .compatible = "samsung,exynos5250-fimc-lite",
+ .data = &fimc_lite_drvdata_exynos5,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, flite_of_match);
+
+static struct platform_driver fimc_lite_driver = {
+ .probe = fimc_lite_probe,
+ .remove = fimc_lite_remove,
+ .driver = {
+ .of_match_table = flite_of_match,
+ .name = FIMC_LITE_DRV_NAME,
+ .pm = &fimc_lite_pm_ops,
+ }
+};
+module_platform_driver(fimc_lite_driver);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" FIMC_LITE_DRV_NAME);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/exynos4-is/fimc-lite.h
new file mode 100644
index 000000000..3e238b8c8
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-lite.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_H_
+#define FIMC_LITE_H_
+
+#include <linux/sizes.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+#define FIMC_LITE_DRV_NAME "exynos-fimc-lite"
+#define FLITE_CLK_NAME "flite"
+#define FIMC_LITE_MAX_DEVS 3
+#define FLITE_REQ_BUFS_MIN 2
+#define FLITE_DEFAULT_WIDTH 640
+#define FLITE_DEFAULT_HEIGHT 480
+
+/* Bit index definitions for struct fimc_lite::state */
+enum {
+ ST_FLITE_LPM,
+ ST_FLITE_PENDING,
+ ST_FLITE_RUN,
+ ST_FLITE_STREAM,
+ ST_FLITE_SUSPENDED,
+ ST_FLITE_OFF,
+ ST_FLITE_IN_USE,
+ ST_FLITE_CONFIG,
+ ST_SENSOR_STREAM,
+};
+
+#define FLITE_SD_PAD_SINK 0
+#define FLITE_SD_PAD_SOURCE_DMA 1
+#define FLITE_SD_PAD_SOURCE_ISP 2
+#define FLITE_SD_PADS_NUM 3
+
+/**
+ * struct flite_drvdata - FIMC-LITE IP variant data structure
+ * @max_width: maximum camera interface input width in pixels
+ * @max_height: maximum camera interface input height in pixels
+ * @out_width_align: minimum output width alignment in pixels
+ * @win_hor_offs_align: minimum camera interface crop window horizontal
+ * offset alignment in pixels
+ * @out_hor_offs_align: minimum output DMA compose rectangle horizontal
+ * offset alignment in pixels
+ * @max_dma_bufs: number of output DMA buffer start address registers
+ * @num_instances: total number of FIMC-LITE IP instances available
+ */
+struct flite_drvdata {
+ unsigned short max_width;
+ unsigned short max_height;
+ unsigned short out_width_align;
+ unsigned short win_hor_offs_align;
+ unsigned short out_hor_offs_align;
+ unsigned short max_dma_bufs;
+ unsigned short num_instances;
+};
+
+struct fimc_lite_events {
+ unsigned int data_overflow;
+};
+
+#define FLITE_MAX_PLANES 1
+
+/**
+ * struct flite_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ * @fmt: pointer to pixel format description data structure
+ */
+struct flite_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+ const struct fimc_fmt *fmt;
+};
+
+/**
+ * struct flite_buffer - video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: DMA buffer start address
+ * @index: DMA start address register's index
+ */
+struct flite_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ dma_addr_t paddr;
+ unsigned short index;
+};
+
+/**
+ * struct fimc_lite - fimc lite structure
+ * @pdev: pointer to FIMC-LITE platform device
+ * @dd: SoC specific driver data structure
+ * @ve: exynos video device entity structure
+ * @v4l2_dev: pointer to the top level v4l2_device
+ * @fh: v4l2 file handle
+ * @subdev: FIMC-LITE subdev
+ * @vd_pad: media (sink) pad for the capture video node
+ * @subdev_pads: the subdev media pads
+ * @sensor: sensor subdev attached to FIMC-LITE directly or through MIPI-CSIS
+ * @ctrl_handler: v4l2 control handler
+ * @test_pattern: test pattern controls
+ * @index: FIMC-LITE platform device index
+ * @slock: spinlock protecting this data structure and the hw registers
+ * @lock: mutex serializing video device and the subdev operations
+ * @clock: FIMC-LITE gate clock
+ * @regs: memory mapped io registers
+ * @irq_queue: interrupt handler waitqueue
+ * @payload: image size in bytes (w x h x bpp)
+ * @inp_frame: camera input frame structure
+ * @out_frame: DMA output frame structure
+ * @out_path: output data path (DMA or FIFO)
+ * @source_subdev_grp_id: source subdev group id
+ * @state: driver state flags
+ * @pending_buf_q: pending buffers queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vb_queue: vb2 buffers queue
+ * @buf_index: helps to keep track of the DMA start address register index
+ * @events: event counters (e.g. data overflow)
+ * @streaming: true when the video node is in the streaming state
+ * @frame_count: the captured frames counter
+ * @reqbufs_count: the number of buffers requested with REQBUFS ioctl
+ */
+struct fimc_lite {
+ struct platform_device *pdev;
+ struct flite_drvdata *dd;
+ struct exynos_video_entity ve;
+ struct v4l2_device *v4l2_dev;
+ struct v4l2_fh fh;
+ struct v4l2_subdev subdev;
+ struct media_pad vd_pad;
+ struct media_pad subdev_pads[FLITE_SD_PADS_NUM];
+ struct v4l2_subdev *sensor;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *test_pattern;
+ int index;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ struct clk *clock;
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+
+ unsigned long payload[FLITE_MAX_PLANES];
+ struct flite_frame inp_frame;
+ struct flite_frame out_frame;
+ atomic_t out_path;
+ unsigned int source_subdev_grp_id;
+
+ unsigned long state;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vb_queue;
+ unsigned short buf_index;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+
+ struct fimc_lite_events events;
+ bool streaming;
+};
+
+static inline bool fimc_lite_active(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ ret = fimc->state & (1 << ST_FLITE_RUN) ||
+ fimc->state & (1 << ST_FLITE_PENDING);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+static inline void fimc_lite_active_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->active_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_active_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->active_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+static inline void fimc_lite_pending_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->pending_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_pending_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->pending_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* FIMC_LITE_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
new file mode 100644
index 000000000..a19f8b164
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -0,0 +1,761 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "common.h"
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "media-dev.h"
+
+static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+{
+ if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return FMT_FLAGS_M2M_IN;
+ else
+ return FMT_FLAGS_M2M_OUT;
+}
+
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ if (!ctx || !ctx->fh.m2m_ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (src_vb)
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ if (dst_vb)
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+ if (src_vb && dst_vb)
+ v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
+ ctx->fh.m2m_ctx);
+}
+
+/* Complete the transaction which has been scheduled for execution. */
+static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ if (!fimc_m2m_pending(fimc))
+ return;
+
+ fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
+
+ wait_event_timeout(fimc->irq_queue,
+ !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
+ FIMC_SHUTDOWN_TIMEOUT);
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
+ return ret > 0 ? 0 : ret;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+
+ fimc_m2m_shutdown(ctx);
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ pm_runtime_put(&ctx->fimc_dev->pdev->dev);
+}
+
+static void fimc_device_run(void *priv)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ struct fimc_ctx *ctx = priv;
+ struct fimc_frame *sf, *df;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+ int ret;
+
+ if (WARN(!ctx, "Null context\n"))
+ return;
+
+ fimc = ctx->fimc_dev;
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ set_bit(ST_M2M_PEND, &fimc->state);
+ sf = &ctx->s_frame;
+ df = &ctx->d_frame;
+
+ if (ctx->state & FIMC_PARAMS) {
+ /* Prepare the DMA offsets for scaler */
+ fimc_prepare_dma_offset(ctx, sf);
+ fimc_prepare_dma_offset(ctx, df);
+ }
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ ret = fimc_prepare_addr(ctx, &src_vb->vb2_buf, sf, &sf->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ ret = fimc_prepare_addr(ctx, &dst_vb->vb2_buf, df, &df->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ /* Reconfigure hardware if the context has changed. */
+ if (fimc->m2m.ctx != ctx) {
+ ctx->state |= FIMC_PARAMS;
+ fimc->m2m.ctx = ctx;
+ }
+
+ if (ctx->state & FIMC_PARAMS) {
+ fimc_set_yuv_order(ctx);
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_in_dma(ctx);
+ ret = fimc_set_scaler_info(ctx);
+ if (ret)
+ goto dma_unlock;
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->drv_data->alpha_color)
+ fimc_hw_set_rgb_alpha(ctx);
+ fimc_hw_set_output_path(ctx);
+ }
+ fimc_hw_set_input_addr(fimc, &sf->paddr);
+ fimc_hw_set_output_addr(fimc, &df->paddr, -1);
+
+ fimc_activate_capture(ctx);
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
+ fimc_hw_activate_input_dma(fimc, true);
+
+dma_unlock:
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_job_abort(void *priv)
+{
+ fimc_m2m_shutdown(priv);
+}
+
+static int fimc_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fimc_frame *f;
+ int i;
+
+ f = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ /*
+ * Return the number of non-contiguous planes (plane buffers)
+ * depending on the configured color format.
+ */
+ if (!f->fmt)
+ return -EINVAL;
+
+ *num_planes = f->fmt->memplanes;
+ for (i = 0; i < f->fmt->memplanes; i++)
+ sizes[i] = f->payload[i];
+ return 0;
+}
+
+static int fimc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ for (i = 0; i < frame->fmt->memplanes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+
+ return 0;
+}
+
+static void fimc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static const struct vb2_ops fimc_qops = {
+ .queue_setup = fimc_queue_setup,
+ .buf_prepare = fimc_buf_prepare,
+ .buf_queue = fimc_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = stop_streaming,
+ .start_streaming = start_streaming,
+};
+
+/*
+ * V4L2 ioctl handlers
+ */
+static int fimc_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ unsigned int caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ __fimc_vidioc_querycap(&fimc->pdev->dev, cap, caps);
+ return 0;
+}
+
+static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
+ f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ __fimc_get_format(frame, f);
+ return 0;
+}
+
+static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ const struct fimc_variant *variant = fimc->variant;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_fmt *fmt;
+ u32 max_w, mod_x, mod_y;
+
+ if (!IS_M2M(f->type))
+ return -EINVAL;
+
+ fmt = fimc_find_format(&pix->pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (WARN(fmt == NULL, "Pixel format lookup failed"))
+ return -EINVAL;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+ else if (pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ max_w = variant->pix_limit->scaler_dis_w;
+ mod_x = ffs(variant->min_inp_pixsize) - 1;
+ } else {
+ max_w = variant->pix_limit->out_rot_dis_w;
+ mod_x = ffs(variant->min_out_pixsize) - 1;
+ }
+
+ if (tiled_fmt(fmt)) {
+ mod_x = 6; /* 64 x 32 pixels tile */
+ mod_y = 5;
+ } else {
+ if (variant->min_vsize_align == 1)
+ mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
+ else
+ mod_y = ffs(variant->min_vsize_align) - 1;
+ }
+
+ v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
+ &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
+
+ fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
+ return 0;
+}
+
+static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ return fimc_try_fmt_mplane(ctx, f);
+}
+
+static void __set_frame_format(struct fimc_frame *frame, struct fimc_fmt *fmt,
+ struct v4l2_pix_format_mplane *pixm)
+{
+ int i;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ frame->bytesperline[i] = pixm->plane_fmt[i].bytesperline;
+ frame->payload[i] = pixm->plane_fmt[i].sizeimage;
+ }
+
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+ frame->o_width = pixm->width;
+ frame->o_height = pixm->height;
+ frame->width = pixm->width;
+ frame->height = pixm->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+ frame->fmt = fmt;
+}
+
+static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_fmt *fmt;
+ struct vb2_queue *vq;
+ struct fimc_frame *frame;
+ int ret;
+
+ ret = fimc_try_fmt_mplane(ctx, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&fimc->m2m.vfd, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ fmt = fimc_find_format(&f->fmt.pix_mp.pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (!fmt)
+ return -EINVAL;
+
+ __set_frame_format(frame, fmt, &f->fmt.pix_mp);
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ return 0;
+}
+
+static int fimc_m2m_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->o_width;
+ cr->bounds.height = frame->o_height;
+ cr->defrect = cr->bounds;
+
+ return 0;
+}
+
+static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c.left = frame->offs_h;
+ cr->c.top = frame->offs_v;
+ cr->c.width = frame->width;
+ cr->c.height = frame->height;
+
+ return 0;
+}
+
+static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *f;
+ u32 min_size, halign, depth = 0;
+ int i;
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ v4l2_err(&fimc->m2m.vfd,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ f = &ctx->d_frame;
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ min_size = (f == &ctx->s_frame) ?
+ fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
+
+ /* Get pixel alignment constraints. */
+ if (fimc->variant->min_vsize_align == 1)
+ halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+ else
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
+
+ for (i = 0; i < f->fmt->memplanes; i++)
+ depth += f->fmt->depth[i];
+
+ v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+ ffs(min_size) - 1,
+ &cr->c.height, min_size, f->o_height,
+ halign, 64/(ALIGN(depth, 8)));
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (cr->c.left + cr->c.width > f->o_width)
+ cr->c.left = f->o_width - cr->c.width;
+ if (cr->c.top + cr->c.height > f->o_height)
+ cr->c.top = f->o_height - cr->c.height;
+
+ cr->c.left = round_down(cr->c.left, min_size);
+ cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
+
+ dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+ f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_crop cr = *crop;
+ struct fimc_frame *f;
+ int ret;
+
+ ret = fimc_m2m_try_crop(ctx, &cr);
+ if (ret)
+ return ret;
+
+ f = (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+ &ctx->s_frame : &ctx->d_frame;
+
+ /* Check to see if scaling ratio is within supported range */
+ if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = fimc_check_scaler_ratio(ctx, cr.c.width,
+ cr.c.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctx->rotation);
+ } else {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, cr.c.width,
+ cr.c.height, ctx->rotation);
+ }
+ if (ret) {
+ v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
+ return -EINVAL;
+ }
+
+ f->offs_h = cr.c.left;
+ f->offs_v = cr.c.top;
+ f->width = cr.c.width;
+ f->height = cr.c.height;
+
+ fimc_ctx_state_set(FIMC_PARAMS, ctx);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
+ .vidioc_querycap = fimc_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_crop = fimc_m2m_g_crop,
+ .vidioc_s_crop = fimc_m2m_s_crop,
+	.vidioc_cropcap		= fimc_m2m_cropcap,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret;
+
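+	/*
+	 * The source and destination queues are configured identically apart
+	 * from the buffer type: both use the DMA contiguous memops, copy
+	 * buffer timestamps and are serialized with the same driver mutex.
+	 */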
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &fimc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fimc_dev->lock;
+ src_vq->dev = &ctx->fimc_dev->pdev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &fimc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fimc_dev->lock;
+ dst_vq->dev = &ctx->fimc_dev->pdev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int fimc_m2m_set_default_format(struct fimc_ctx *ctx)
+{
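+	/*
+	 * Default to an 800x600 V4L2_PIX_FMT_RGB32 frame: at 4 bytes per
+	 * pixel this gives bytesperline = 800 * 4 = 3200 and
+	 * sizeimage = 3200 * 600 = 1920000 bytes.
+	 */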
+ struct v4l2_pix_format_mplane pixm = {
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .width = 800,
+ .height = 600,
+ .plane_fmt[0] = {
+ .bytesperline = 800 * 4,
+ .sizeimage = 800 * 4 * 600,
+ },
+ };
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(&pixm.pixelformat, NULL, FMT_FLAGS_M2M, 0);
+ if (!fmt)
+ return -EINVAL;
+
+ __set_frame_format(&ctx->s_frame, fmt, &pixm);
+ __set_frame_format(&ctx->d_frame, fmt, &pixm);
+
+ return 0;
+}
+
+static int fimc_m2m_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx;
+ int ret = -EBUSY;
+
+ pr_debug("pid: %d, state: %#lx\n", task_pid_nr(current), fimc->state);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+ /*
+ * Don't allow simultaneous open() of the mem-to-mem and the
+	 * capture video node that belong to the same FIMC IP instance.
+ */
+ if (test_bit(ST_CAPT_BUSY, &fimc->state))
+ goto unlock;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ v4l2_fh_init(&ctx->fh, &fimc->m2m.vfd);
+ ctx->fimc_dev = fimc;
+
+ /* Default color format */
+ ctx->s_frame.fmt = fimc_get_format(0);
+ ctx->d_frame.fmt = fimc_get_format(0);
+
+ ret = fimc_ctrls_create(ctx);
+ if (ret)
+ goto error_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrls.handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ /* Setup the device context for memory-to-memory mode */
+ ctx->state = FIMC_CTX_M2M;
+ ctx->flags = 0;
+ ctx->in_path = FIMC_IO_DMA;
+ ctx->out_path = FIMC_IO_DMA;
+ ctx->scaler.enabled = 1;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error_c;
+ }
+
+ if (fimc->m2m.refcnt++ == 0)
+ set_bit(ST_M2M_RUN, &fimc->state);
+
+ ret = fimc_m2m_set_default_format(ctx);
+ if (ret < 0)
+ goto error_m2m_ctx;
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+
+error_m2m_ctx:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+error_c:
+ fimc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+error_fh:
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_m2m_release(struct file *file)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ dbg("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+
+ mutex_lock(&fimc->lock);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ fimc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--fimc->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_RUN, &fimc->state);
+ kfree(ctx);
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static const struct v4l2_file_operations fimc_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_m2m_open,
+ .release = fimc_m2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fimc_device_run,
+ .job_abort = fimc_job_abort,
+};
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vfd = &fimc->m2m.vfd;
+ int ret;
+
+ fimc->v4l2_dev = v4l2_dev;
+
+ memset(vfd, 0, sizeof(*vfd));
+ vfd->fops = &fimc_m2m_fops;
+ vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &fimc->lock;
+ vfd->vfl_dir = VFL_DIR_M2M;
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
+ video_set_drvdata(vfd, fimc);
+
+ fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fimc->m2m.m2m_dev)) {
+ v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
+ return PTR_ERR(fimc->m2m.m2m_dev);
+ }
+
+ ret = media_entity_pads_init(&vfd->entity, 0, NULL);
+ if (ret)
+ goto err_me;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vd;
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_vd:
+ media_entity_cleanup(&vfd->entity);
+err_me:
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+ return ret;
+}
+
+void fimc_unregister_m2m_device(struct fimc_dev *fimc)
+{
+ if (!fimc)
+ return;
+
+ if (fimc->m2m.m2m_dev)
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+
+ if (video_is_registered(&fimc->m2m.vfd)) {
+ video_unregister_device(&fimc->m2m.vfd);
+ media_entity_cleanup(&fimc->m2m.vfd.entity);
+ }
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/exynos4-is/fimc-reg.c
new file mode 100644
index 000000000..080672455
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-reg.c
@@ -0,0 +1,842 @@
+/*
+ * Register interface file for Samsung Camera Interface (FIMC) driver
+ *
+ * Copyright (C) 2010 - 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+#include <media/drv-intf/exynos-fimc.h>
+#include "media-dev.h"
+
+#include "fimc-reg.h"
+#include "fimc-core.h"
+
+void fimc_hw_reset(struct fimc_dev *dev)
+{
+ u32 cfg;
+
+ cfg = readl(dev->regs + FIMC_REG_CISRCFMT);
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ writel(cfg, dev->regs + FIMC_REG_CISRCFMT);
+
+ /* Software reset. */
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= (FIMC_REG_CIGCTRL_SWRST | FIMC_REG_CIGCTRL_IRQ_LEVEL);
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+ udelay(10);
+
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg &= ~FIMC_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+
+ if (dev->drv_data->out_buf_count > 4)
+ fimc_hw_set_dma_seq(dev, 0xF);
+}
+
+static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
+{
+ u32 flip = FIMC_REG_MSCTRL_FLIP_NORMAL;
+
+ if (ctx->hflip)
+ flip = FIMC_REG_MSCTRL_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
+
+ if (ctx->rotation <= 90)
+ return flip;
+
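+	/*
+	 * Rotation by more than 90 degrees is realized by composing the
+	 * requested flip with an extra 180 degree rotation (X + Y mirror);
+	 * the remaining 90 degree step, if any, is handled by the rotator
+	 * in fimc_hw_set_rotation().
+	 */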
+ return (flip ^ FIMC_REG_MSCTRL_FLIP_180) & FIMC_REG_MSCTRL_FLIP_180;
+}
+
+static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
+{
+ u32 flip = FIMC_REG_CITRGFMT_FLIP_NORMAL;
+
+ if (ctx->hflip)
+ flip |= FIMC_REG_CITRGFMT_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
+
+ if (ctx->rotation <= 90)
+ return flip;
+
+ return (flip ^ FIMC_REG_CITRGFMT_FLIP_180) & FIMC_REG_CITRGFMT_FLIP_180;
+}
+
+void fimc_hw_set_rotation(struct fimc_ctx *ctx)
+{
+ u32 cfg, flip;
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_INROT90 | FIMC_REG_CITRGFMT_OUTROT90 |
+ FIMC_REG_CITRGFMT_FLIP_180);
+
+ /*
+ * The input and output rotator cannot work simultaneously.
+ * Use the output rotator in output DMA mode or the input rotator
+ * in direct fifo output mode.
+ */
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CITRGFMT_INROT90;
+ else
+ cfg |= FIMC_REG_CITRGFMT_OUTROT90;
+ }
+
+ if (ctx->out_path == FIMC_IO_DMA) {
+ cfg |= fimc_hw_get_target_flip(ctx);
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
+ } else {
+ /* LCD FIFO path */
+ flip = readl(dev->regs + FIMC_REG_MSCTRL);
+ flip &= ~FIMC_REG_MSCTRL_FLIP_MASK;
+ flip |= fimc_hw_get_in_flip(ctx);
+ writel(flip, dev->regs + FIMC_REG_MSCTRL);
+ }
+}
+
+void fimc_hw_set_target_format(struct fimc_ctx *ctx)
+{
+ u32 cfg;
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+
+ dbg("w= %d, h= %d color: %d", frame->width,
+ frame->height, frame->fmt->color);
+
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_FMT_MASK | FIMC_REG_CITRGFMT_HSIZE_MASK |
+ FIMC_REG_CITRGFMT_VSIZE_MASK);
+
+ switch (frame->fmt->color) {
+ case FIMC_FMT_RGB444...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CITRGFMT_RGB;
+ break;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_CITRGFMT_YCBCR420;
+ break;
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
+ if (frame->fmt->colplanes == 1)
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422_1P;
+ else
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422;
+ break;
+ default:
+ break;
+ }
+
+ if (ctx->rotation == 90 || ctx->rotation == 270)
+ cfg |= (frame->height << 16) | frame->width;
+ else
+ cfg |= (frame->width << 16) | frame->height;
+
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
+
+ cfg = readl(dev->regs + FIMC_REG_CITAREA);
+ cfg &= ~FIMC_REG_CITAREA_MASK;
+ cfg |= (frame->width * frame->height);
+ writel(cfg, dev->regs + FIMC_REG_CITAREA);
+}
+
+static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ cfg = (frame->f_height << 16) | frame->f_width;
+ writel(cfg, dev->regs + FIMC_REG_ORGOSIZE);
+
+	/* Select color space conversion equation (HD/SD size). */
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ if (frame->f_width >= 1280) /* HD */
+ cfg |= FIMC_REG_CIGCTRL_CSC_ITU601_709;
+ else /* SD */
+ cfg &= ~FIMC_REG_CIGCTRL_CSC_ITU601_709;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+
+}
+
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ struct fimc_dma_offset *offset = &frame->dma_offset;
+ struct fimc_fmt *fmt = frame->fmt;
+ u32 cfg;
+
+ /* Set the input dma offsets. */
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOYOFF);
+
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCBOFF);
+
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCROFF);
+
+ fimc_hw_set_out_dma_size(ctx);
+
+ /* Configure chroma components order. */
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+
+ cfg &= ~(FIMC_REG_CIOCTRL_ORDER2P_MASK |
+ FIMC_REG_CIOCTRL_ORDER422_MASK |
+ FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK |
+ FIMC_REG_CIOCTRL_RGB16FMT_MASK);
+
+ if (fmt->colplanes == 1)
+ cfg |= ctx->out_order_1p;
+ else if (fmt->colplanes == 2)
+ cfg |= ctx->out_order_2p | FIMC_REG_CIOCTRL_YCBCR_2PLANE;
+ else if (fmt->colplanes == 3)
+ cfg |= FIMC_REG_CIOCTRL_YCBCR_3PLANE;
+
+ if (fmt->color == FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CIOCTRL_RGB565;
+ else if (fmt->color == FIMC_FMT_RGB555)
+ cfg |= FIMC_REG_CIOCTRL_ARGB1555;
+ else if (fmt->color == FIMC_FMT_RGB444)
+ cfg |= FIMC_REG_CIOCTRL_ARGB4444;
+
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_ORGISIZE);
+ if (enable)
+ cfg |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+ else
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+ writel(cfg, dev->regs + FIMC_REG_ORGISIZE);
+}
+
+void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+ if (enable)
+ cfg |= FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+ else
+ cfg &= ~FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ u32 cfg, shfactor;
+
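+	/*
+	 * Program the shift factor (10 minus the sum of the horizontal and
+	 * vertical pre-scale factors) together with the pre-scale ratios and
+	 * the pre-scaler destination size; any remaining scaling is done by
+	 * the main scaler configured in fimc_hw_set_mainscaler().
+	 */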
+ shfactor = 10 - (sc->hfactor + sc->vfactor);
+ cfg = shfactor << 28;
+
+ cfg |= (sc->pre_hratio << 16) | sc->pre_vratio;
+ writel(cfg, dev->regs + FIMC_REG_CISCPRERATIO);
+
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ writel(cfg, dev->regs + FIMC_REG_CISCPREDST);
+}
+
+static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ struct fimc_frame *src_frame = &ctx->s_frame;
+ struct fimc_frame *dst_frame = &ctx->d_frame;
+
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+
+ cfg &= ~(FIMC_REG_CISCCTRL_CSCR2Y_WIDE | FIMC_REG_CISCCTRL_CSCY2R_WIDE |
+ FIMC_REG_CISCCTRL_SCALEUP_H | FIMC_REG_CISCCTRL_SCALEUP_V |
+ FIMC_REG_CISCCTRL_SCALERBYPASS | FIMC_REG_CISCCTRL_ONE2ONE |
+ FIMC_REG_CISCCTRL_INRGB_FMT_MASK | FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK |
+ FIMC_REG_CISCCTRL_INTERLACE | FIMC_REG_CISCCTRL_RGB_EXT);
+
+ if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
+ cfg |= (FIMC_REG_CISCCTRL_CSCR2Y_WIDE |
+ FIMC_REG_CISCCTRL_CSCY2R_WIDE);
+
+ if (!sc->enabled)
+ cfg |= FIMC_REG_CISCCTRL_SCALERBYPASS;
+
+ if (sc->scaleup_h)
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_H;
+
+ if (sc->scaleup_v)
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_V;
+
+ if (sc->copy_mode)
+ cfg |= FIMC_REG_CISCCTRL_ONE2ONE;
+
+ if (ctx->in_path == FIMC_IO_DMA) {
+ switch (src_frame->fmt->color) {
+ case FIMC_FMT_RGB565:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB565;
+ break;
+ case FIMC_FMT_RGB666:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB666;
+ break;
+ case FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB888;
+ break;
+ }
+ }
+
+ if (ctx->out_path == FIMC_IO_DMA) {
+ u32 color = dst_frame->fmt->color;
+
+ if (color >= FIMC_FMT_RGB444 && color <= FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565;
+ else if (color == FIMC_FMT_RGB666)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666;
+ else if (color == FIMC_FMT_RGB888)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
+ } else {
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
+
+ if (ctx->flags & FIMC_SCAN_MODE_INTERLACED)
+ cfg |= FIMC_REG_CISCCTRL_INTERLACE;
+ }
+
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ const struct fimc_variant *variant = dev->variant;
+ struct fimc_scaler *sc = &ctx->scaler;
+ u32 cfg;
+
+ dbg("main_hratio= 0x%X main_vratio= 0x%X",
+ sc->main_hratio, sc->main_vratio);
+
+ fimc_hw_set_scaler(ctx);
+
+ cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~(FIMC_REG_CISCCTRL_MHRATIO_MASK |
+ FIMC_REG_CISCCTRL_MVRATIO_MASK);
+
+ if (variant->has_mainscaler_ext) {
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+
+ cfg = readl(dev->regs + FIMC_REG_CIEXTEN);
+
+ cfg &= ~(FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK |
+ FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK);
+ cfg |= FIMC_REG_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CIEXTEN);
+ } else {
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+ }
+}
+
+void fimc_hw_enable_capture(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ u32 cfg;
+
+ cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE;
+
+ if (ctx->scaler.enabled)
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
+ else
+ cfg &= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
+
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+void fimc_hw_disable_capture(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN |
+ FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+void fimc_hw_set_effect(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_effect *effect = &ctx->effect;
+ u32 cfg = 0;
+
+ if (effect->type != FIMC_REG_CIIMGEFF_FIN_BYPASS) {
+ cfg |= FIMC_REG_CIIMGEFF_IE_SC_AFTER |
+ FIMC_REG_CIIMGEFF_IE_ENABLE;
+ cfg |= effect->type;
+ if (effect->type == FIMC_REG_CIIMGEFF_FIN_ARBITRARY)
+ cfg |= (effect->pat_cb << 13) | effect->pat_cr;
+ }
+
+ writel(cfg, dev->regs + FIMC_REG_CIIMGEFF);
+}
+
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ if (!(frame->fmt->flags & FMT_HAS_ALPHA))
+ return;
+
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+ cfg &= ~FIMC_REG_CIOCTRL_ALPHA_OUT_MASK;
+ cfg |= (frame->alpha << 4);
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->s_frame;
+ u32 cfg_o = 0;
+ u32 cfg_r = 0;
+
+ if (FIMC_IO_LCDFIFO == ctx->out_path)
+ cfg_r |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+
+ cfg_o |= (frame->f_height << 16) | frame->f_width;
+ cfg_r |= (frame->height << 16) | frame->width;
+
+ writel(cfg_o, dev->regs + FIMC_REG_ORGISIZE);
+ writel(cfg_r, dev->regs + FIMC_REG_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->s_frame;
+ struct fimc_dma_offset *offset = &frame->dma_offset;
+ u32 cfg;
+
+ /* Set the pixel offsets. */
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIIYOFF);
+
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICBOFF);
+
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICROFF);
+
+ /* Input original and real size. */
+ fimc_hw_set_in_dma_size(ctx);
+
+ /* Use DMA autoload only in FIFO mode. */
+ fimc_hw_en_autoload(dev, ctx->out_path == FIMC_IO_LCDFIFO);
+
+ /* Set the input DMA to process single frame only. */
+ cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~(FIMC_REG_MSCTRL_INFORMAT_MASK
+ | FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK
+ | FIMC_REG_MSCTRL_INPUT_MASK
+ | FIMC_REG_MSCTRL_C_INT_IN_MASK
+ | FIMC_REG_MSCTRL_2P_IN_ORDER_MASK
+ | FIMC_REG_MSCTRL_ORDER422_MASK);
+
+ cfg |= (FIMC_REG_MSCTRL_IN_BURST_COUNT(4)
+ | FIMC_REG_MSCTRL_INPUT_MEMORY
+ | FIMC_REG_MSCTRL_FIFO_CTRL_FULL);
+
+ switch (frame->fmt->color) {
+ case FIMC_FMT_RGB565...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_RGB;
+ break;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR420;
+
+ if (frame->fmt->colplanes == 2)
+ cfg |= ctx->in_order_2p | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
+ else
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
+
+ break;
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
+ if (frame->fmt->colplanes == 1) {
+ cfg |= ctx->in_order_1p
+ | FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P;
+ } else {
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR422;
+
+ if (frame->fmt->colplanes == 2)
+ cfg |= ctx->in_order_2p
+ | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
+ else
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+
+ /* Input/output DMA linear/tiled mode. */
+ cfg = readl(dev->regs + FIMC_REG_CIDMAPARAM);
+ cfg &= ~FIMC_REG_CIDMAPARAM_TILE_MASK;
+
+ if (tiled_fmt(ctx->s_frame.fmt))
+ cfg |= FIMC_REG_CIDMAPARAM_R_64X32;
+
+ if (tiled_fmt(ctx->d_frame.fmt))
+ cfg |= FIMC_REG_CIDMAPARAM_W_64X32;
+
+ writel(cfg, dev->regs + FIMC_REG_CIDMAPARAM);
+}
+
+void fimc_hw_set_input_path(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~FIMC_REG_MSCTRL_INPUT_MASK;
+
+ if (ctx->in_path == FIMC_IO_DMA)
+ cfg |= FIMC_REG_MSCTRL_INPUT_MEMORY;
+ else
+ cfg |= FIMC_REG_MSCTRL_INPUT_EXTCAM;
+
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+void fimc_hw_set_output_path(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIREAL_ISIZE);
+ cfg |= FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
+
+ writel(paddr->y, dev->regs + FIMC_REG_CIIYSA(0));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIICBSA(0));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIICRSA(0));
+
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_output_addr(struct fimc_dev *dev,
+ struct fimc_addr *paddr, int index)
+{
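+	/*
+	 * index == -1 programs all FIMC_MAX_OUT_BUFS output address slots
+	 * with the same buffer address, otherwise only the selected slot
+	 * is updated.
+	 */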
+ int i = (index == -1) ? 0 : index;
+ do {
+ writel(paddr->y, dev->regs + FIMC_REG_CIOYSA(i));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIOCBSA(i));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIOCRSA(i));
+ dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
+ i, paddr->y, paddr->cb, paddr->cr);
+ } while (index == -1 && ++i < FIMC_MAX_OUT_BUFS);
+}
+
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct fimc_source_info *cam)
+{
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
+
+ cfg &= ~(FIMC_REG_CIGCTRL_INVPOLPCLK | FIMC_REG_CIGCTRL_INVPOLVSYNC |
+ FIMC_REG_CIGCTRL_INVPOLHREF | FIMC_REG_CIGCTRL_INVPOLHSYNC |
+ FIMC_REG_CIGCTRL_INVPOLFIELD);
+
+ if (cam->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLPCLK;
+
+ if (cam->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLVSYNC;
+
+ if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHREF;
+
+ if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHSYNC;
+
+ if (cam->flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= FIMC_REG_CIGCTRL_INVPOLFIELD;
+
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
+
+ return 0;
+}
+
+struct mbus_pixfmt_desc {
+ u32 pixelcode;
+ u32 cisrcfmt;
+ u16 bus_width;
+};
+
+static const struct mbus_pixfmt_desc pix_desc[] = {
+ { MEDIA_BUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
+ { MEDIA_BUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
+};
+
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct fimc_source_info *source)
+{
+ struct fimc_vid_cap *vc = &fimc->vid_cap;
+ struct fimc_frame *f = &vc->ctx->s_frame;
+ u32 bus_width, cfg = 0;
+ int i;
+
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_ITU_601:
+ case FIMC_BUS_TYPE_ITU_656:
+ for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
+ if (vc->ci_fmt.code == pix_desc[i].pixelcode) {
+ cfg = pix_desc[i].cisrcfmt;
+ bus_width = pix_desc[i].bus_width;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(pix_desc)) {
+ v4l2_err(&vc->ve.vdev,
+ "Camera color format not supported: %d\n",
+ vc->ci_fmt.code);
+ return -EINVAL;
+ }
+
+ if (source->fimc_bus_type == FIMC_BUS_TYPE_ITU_601) {
+ if (bus_width == 8)
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ else if (bus_width == 16)
+ cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
+ } /* else defaults to ITU-R BT.656 8-bit */
+ break;
+ case FIMC_BUS_TYPE_MIPI_CSI2:
+ if (fimc_fmt_is_user_defined(f->fmt->color))
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ break;
+ default:
+ case FIMC_BUS_TYPE_ISP_WRITEBACK:
+ /* Anything to do here ? */
+ break;
+ }
+
+ cfg |= (f->o_width << 16) | f->o_height;
+ writel(cfg, fimc->regs + FIMC_REG_CISRCFMT);
+ return 0;
+}
+
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+{
+ u32 hoff2, voff2;
+
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIWDOFST);
+
+ cfg &= ~(FIMC_REG_CIWDOFST_HOROFF_MASK | FIMC_REG_CIWDOFST_VEROFF_MASK);
+ cfg |= FIMC_REG_CIWDOFST_OFF_EN |
+ (f->offs_h << 16) | f->offs_v;
+
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST);
+
+ /* See CIWDOFSTn register description in the datasheet for details. */
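+	/*
+	 * The second offset pair holds the margin remaining on the right and
+	 * bottom side of the crop window: the original size minus the window
+	 * size and the left/top offset programmed above.
+	 */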
+ hoff2 = f->o_width - f->width - f->offs_h;
+ voff2 = f->o_height - f->height - f->offs_v;
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST2);
+}
+
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct fimc_source_info *source)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ u32 csis_data_alignment = 32;
+ u32 cfg, tmp;
+
+ cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
+
+ /* Select ITU B interface, disable Writeback path and test pattern. */
+ cfg &= ~(FIMC_REG_CIGCTRL_TESTPAT_MASK | FIMC_REG_CIGCTRL_SELCAM_ITU_A |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG |
+ FIMC_REG_CIGCTRL_SELWB_A);
+
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
+
+ if (source->mux_id == 0)
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI_A;
+
+ /* TODO: add remaining supported formats. */
+ switch (vid_cap->ci_fmt.code) {
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
+ break;
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8:
+ tmp = FIMC_REG_CSIIMGFMT_USER(1);
+ cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
+ break;
+ default:
+ v4l2_err(&vid_cap->ve.vdev,
+ "Not supported camera pixel format: %#x\n",
+ vid_cap->ci_fmt.code);
+ return -EINVAL;
+ }
+ tmp |= (csis_data_alignment == 32) << 8;
+
+ writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
+ break;
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
+ if (source->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
+ break;
+ case FIMC_BUS_TYPE_LCD_WRITEBACK_A:
+ cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
+ /* fall through */
+ case FIMC_BUS_TYPE_ISP_WRITEBACK:
+ if (fimc->variant->has_isp_wb)
+ cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
+ else
+ WARN_ONCE(1, "ISP Writeback input is not supported\n");
+ break;
+ default:
+ v4l2_err(&vid_cap->ve.vdev,
+ "Invalid FIMC bus type selected: %d\n",
+ source->fimc_bus_type);
+ return -EINVAL;
+ }
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
+
+ return 0;
+}
+
+void fimc_hw_clear_irq(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= FIMC_REG_CIGCTRL_IRQ_CLR;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+}
+
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ if (on)
+ cfg |= FIMC_REG_CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~FIMC_REG_CISCCTRL_SCALERSTART;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ if (on)
+ cfg |= FIMC_REG_MSCTRL_ENVID;
+ else
+ cfg &= ~FIMC_REG_MSCTRL_ENVID;
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+/* Return an index to the buffer actually being written. */
+s32 fimc_hw_get_frame_index(struct fimc_dev *dev)
+{
+ s32 reg;
+
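+	/*
+	 * Variants with the CISTATUS2 register report a 1-based index of the
+	 * currently written buffer there; older variants expose it in the
+	 * FRAMECNT field of CISTATUS.
+	 */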
+ if (dev->drv_data->cistatus2) {
+ reg = readl(dev->regs + FIMC_REG_CISTATUS2) & 0x3f;
+ return reg - 1;
+ }
+
+ reg = readl(dev->regs + FIMC_REG_CISTATUS);
+
+ return (reg & FIMC_REG_CISTATUS_FRAMECNT_MASK) >>
+ FIMC_REG_CISTATUS_FRAMECNT_SHIFT;
+}
+
+/* Return an index to the buffer being written previously. */
+s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev)
+{
+ s32 reg;
+
+ if (!dev->drv_data->cistatus2)
+ return -1;
+
+ reg = readl(dev->regs + FIMC_REG_CISTATUS2);
+ return ((reg >> 7) & 0x3f) - 1;
+}
+
+/* Locking: the caller holds fimc->slock */
+void fimc_activate_capture(struct fimc_ctx *ctx)
+{
+ fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
+ fimc_hw_enable_capture(ctx);
+}
+
+void fimc_deactivate_capture(struct fimc_dev *fimc)
+{
+ fimc_hw_en_lastirq(fimc, true);
+ fimc_hw_disable_capture(fimc);
+ fimc_hw_enable_scaler(fimc, false);
+ fimc_hw_en_lastirq(fimc, false);
+}
+
+int fimc_hw_camblk_cfg_writeback(struct fimc_dev *fimc)
+{
+ struct regmap *map = fimc->sysreg;
+ unsigned int mask, val, camblk_cfg;
+ int ret;
+
+ if (map == NULL)
+ return 0;
+
+ ret = regmap_read(map, SYSREG_CAMBLK, &camblk_cfg);
+ if (ret < 0 || ((camblk_cfg & 0x00700000) >> 20 != 0x3))
+ return ret;
+
+ if (!WARN(fimc->id >= 3, "not supported id: %d\n", fimc->id))
+ val = 0x1 << (fimc->id + 20);
+ else
+ val = 0;
+
+ mask = SYSREG_CAMBLK_FIFORST_ISP | SYSREG_CAMBLK_ISPWB_FULL_EN;
+ ret = regmap_update_bits(map, SYSREG_CAMBLK, mask, val);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ val |= SYSREG_CAMBLK_FIFORST_ISP;
+ ret = regmap_update_bits(map, SYSREG_CAMBLK, mask, val);
+ if (ret < 0)
+ return ret;
+
+ mask = SYSREG_ISPBLK_FIFORST_CAM_BLK;
+ ret = regmap_update_bits(map, SYSREG_ISPBLK, mask, ~mask);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ return regmap_update_bits(map, SYSREG_ISPBLK, mask, mask);
+}
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.h b/drivers/media/platform/exynos4-is/fimc-reg.h
new file mode 100644
index 000000000..6c97798c7
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/fimc-reg.h
@@ -0,0 +1,338 @@
+/*
+ * Samsung camera host interface (FIMC) registers definition
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_REG_H_
+#define FIMC_REG_H_
+
+#include "fimc-core.h"
+
+/* Input source format */
+#define FIMC_REG_CISRCFMT 0x00
+#define FIMC_REG_CISRCFMT_ITU601_8BIT (1 << 31)
+#define FIMC_REG_CISRCFMT_ITU601_16BIT (1 << 29)
+#define FIMC_REG_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+
+/* Window offset */
+#define FIMC_REG_CIWDOFST 0x04
+#define FIMC_REG_CIWDOFST_OFF_EN (1 << 31)
+#define FIMC_REG_CIWDOFST_CLROVFIY (1 << 30)
+#define FIMC_REG_CIWDOFST_CLROVRLB (1 << 29)
+#define FIMC_REG_CIWDOFST_HOROFF_MASK (0x7ff << 16)
+#define FIMC_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FIMC_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FIMC_REG_CIWDOFST_VEROFF_MASK (0xfff << 0)
+
+/* Global control */
+#define FIMC_REG_CIGCTRL 0x08
+#define FIMC_REG_CIGCTRL_SWRST (1 << 31)
+#define FIMC_REG_CIGCTRL_CAMRST_A (1 << 30)
+#define FIMC_REG_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define FIMC_REG_CIGCTRL_TESTPAT_NORMAL (0 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_VER_INC (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_MASK (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_SHIFT 27
+#define FIMC_REG_CIGCTRL_INVPOLPCLK (1 << 26)
+#define FIMC_REG_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define FIMC_REG_CIGCTRL_INVPOLHREF (1 << 24)
+#define FIMC_REG_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define FIMC_REG_CIGCTRL_HREF_MASK (1 << 21)
+#define FIMC_REG_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define FIMC_REG_CIGCTRL_IRQ_CLR (1 << 19)
+#define FIMC_REG_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define FIMC_REG_CIGCTRL_SHDW_DISABLE (1 << 12)
+/* 0 - selects Writeback A (LCD), 1 - selects Writeback B (LCD/ISP) */
+#define FIMC_REG_CIGCTRL_SELWB_A (1 << 10)
+#define FIMC_REG_CIGCTRL_CAM_JPEG (1 << 8)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define FIMC_REG_CIGCTRL_CAMIF_SELWB (1 << 6)
+/* 0 - ITU601; 1 - ITU709 */
+#define FIMC_REG_CIGCTRL_CSC_ITU601_709 (1 << 5)
+#define FIMC_REG_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+#define FIMC_REG_CIGCTRL_INVPOLFIELD (1 << 1)
+#define FIMC_REG_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset 2 */
+#define FIMC_REG_CIWDOFST2 0x14
+#define FIMC_REG_CIWDOFST2_HOROFF_MASK (0xfff << 16)
+#define FIMC_REG_CIWDOFST2_VEROFF_MASK (0xfff << 0)
+
+/* Output DMA Y/Cb/Cr plane start addresses */
+#define FIMC_REG_CIOYSA(n) (0x18 + (n) * 4)
+#define FIMC_REG_CIOCBSA(n) (0x28 + (n) * 4)
+#define FIMC_REG_CIOCRSA(n) (0x38 + (n) * 4)
+
+/* Target image format */
+#define FIMC_REG_CITRGFMT 0x48
+#define FIMC_REG_CITRGFMT_INROT90 (1 << 31)
+#define FIMC_REG_CITRGFMT_YCBCR420 (0 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422 (1 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422_1P (2 << 29)
+#define FIMC_REG_CITRGFMT_RGB (3 << 29)
+#define FIMC_REG_CITRGFMT_FMT_MASK (3 << 29)
+#define FIMC_REG_CITRGFMT_HSIZE_MASK (0xfff << 16)
+#define FIMC_REG_CITRGFMT_FLIP_SHIFT 14
+#define FIMC_REG_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_180 (3 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_MASK (3 << 14)
+#define FIMC_REG_CITRGFMT_OUTROT90 (1 << 13)
+#define FIMC_REG_CITRGFMT_VSIZE_MASK (0xfff << 0)
+
+/* Output DMA control */
+#define FIMC_REG_CIOCTRL 0x4c
+#define FIMC_REG_CIOCTRL_ORDER422_MASK (3 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define FIMC_REG_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define FIMC_REG_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define FIMC_REG_CIOCTRL_ALPHA_OUT_MASK (0xff << 4)
+#define FIMC_REG_CIOCTRL_RGB16FMT_MASK (3 << 16)
+#define FIMC_REG_CIOCTRL_RGB565 (0 << 16)
+#define FIMC_REG_CIOCTRL_ARGB1555 (1 << 16)
+#define FIMC_REG_CIOCTRL_ARGB4444 (2 << 16)
+#define FIMC_REG_CIOCTRL_ORDER2P_SHIFT 24
+#define FIMC_REG_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
+
+/* Pre-scaler control 1 */
+#define FIMC_REG_CISCPRERATIO 0x50
+
+#define FIMC_REG_CISCPREDST 0x54
+
+/* Main scaler control */
+#define FIMC_REG_CISCCTRL 0x58
+#define FIMC_REG_CISCCTRL_SCALERBYPASS (1 << 31)
+#define FIMC_REG_CISCCTRL_SCALEUP_H (1 << 30)
+#define FIMC_REG_CISCCTRL_SCALEUP_V (1 << 29)
+#define FIMC_REG_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define FIMC_REG_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define FIMC_REG_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define FIMC_REG_CISCCTRL_INTERLACE (1 << 25)
+#define FIMC_REG_CISCCTRL_SCALERSTART (1 << 15)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define FIMC_REG_CISCCTRL_RGB_EXT (1 << 10)
+#define FIMC_REG_CISCCTRL_ONE2ONE (1 << 9)
+#define FIMC_REG_CISCCTRL_MHRATIO(x) ((x) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO(x) ((x) << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_MASK (0x1ff << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_EXT(x) (((x) >> 6) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_EXT(x) (((x) >> 6) << 0)
+
+/* Target area */
+#define FIMC_REG_CITAREA 0x5c
+#define FIMC_REG_CITAREA_MASK 0x0fffffff
+
+/* General status */
+#define FIMC_REG_CISTATUS 0x64
+#define FIMC_REG_CISTATUS_OVFIY (1 << 31)
+#define FIMC_REG_CISTATUS_OVFICB (1 << 30)
+#define FIMC_REG_CISTATUS_OVFICR (1 << 29)
+#define FIMC_REG_CISTATUS_VSYNC (1 << 28)
+#define FIMC_REG_CISTATUS_FRAMECNT_MASK (3 << 26)
+#define FIMC_REG_CISTATUS_FRAMECNT_SHIFT 26
+#define FIMC_REG_CISTATUS_WINOFF_EN (1 << 25)
+#define FIMC_REG_CISTATUS_IMGCPT_EN (1 << 22)
+#define FIMC_REG_CISTATUS_IMGCPT_SCEN (1 << 21)
+#define FIMC_REG_CISTATUS_VSYNC_A (1 << 20)
+#define FIMC_REG_CISTATUS_VSYNC_B (1 << 19)
+#define FIMC_REG_CISTATUS_OVRLB (1 << 18)
+#define FIMC_REG_CISTATUS_FRAME_END (1 << 17)
+#define FIMC_REG_CISTATUS_LASTCAPT_END (1 << 16)
+#define FIMC_REG_CISTATUS_VVALID_A (1 << 15)
+#define FIMC_REG_CISTATUS_VVALID_B (1 << 14)
+
+/* Indexes to the last and the currently processed buffer. */
+#define FIMC_REG_CISTATUS2 0x68
+
+/* Image capture control */
+#define FIMC_REG_CIIMGCPT 0xc0
+#define FIMC_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FIMC_REG_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Frame capture sequence */
+#define FIMC_REG_CICPTSEQ 0xc4
+
+/* Image effect */
+#define FIMC_REG_CIIMGEFF 0xd0
+#define FIMC_REG_CIIMGEFF_IE_ENABLE (1 << 30)
+#define FIMC_REG_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define FIMC_REG_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define FIMC_REG_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_MASK (7 << 26)
+#define FIMC_REG_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+
+/* Input DMA Y/Cb/Cr plane start address 0/1 */
+#define FIMC_REG_CIIYSA(n) (0xd4 + (n) * 0x70)
+#define FIMC_REG_CIICBSA(n) (0xd8 + (n) * 0x70)
+#define FIMC_REG_CIICRSA(n) (0xdc + (n) * 0x70)
+
+/* Real input DMA image size */
+#define FIMC_REG_CIREAL_ISIZE 0xf8
+#define FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
+#define FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
+
+/* Input DMA control */
+#define FIMC_REG_MSCTRL 0xfc
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK (0xf << 24)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_SHIFT 16
+#define FIMC_REG_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_MASK (1 << 15)
+#define FIMC_REG_MSCTRL_FLIP_SHIFT 13
+#define FIMC_REG_MSCTRL_FLIP_MASK (3 << 13)
+#define FIMC_REG_MSCTRL_FLIP_NORMAL (0 << 13)
+#define FIMC_REG_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define FIMC_REG_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define FIMC_REG_MSCTRL_FLIP_180 (3 << 13)
+#define FIMC_REG_MSCTRL_FIFO_CTRL_FULL (1 << 12)
+#define FIMC_REG_MSCTRL_ORDER422_SHIFT 4
+#define FIMC_REG_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_MASK (3 << 4)
+#define FIMC_REG_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MEMORY (1 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MASK (1 << 3)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_RGB (3 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_MASK (3 << 1)
+#define FIMC_REG_MSCTRL_ENVID (1 << 0)
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
+
+/* Output DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIOYOFF 0x168
+#define FIMC_REG_CIOCBOFF 0x16c
+#define FIMC_REG_CIOCROFF 0x170
+
+/* Input DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIIYOFF 0x174
+#define FIMC_REG_CIICBOFF 0x178
+#define FIMC_REG_CIICROFF 0x17c
+
+/* Input DMA original image size */
+#define FIMC_REG_ORGISIZE 0x180
+
+/* Output DMA original image size */
+#define FIMC_REG_ORGOSIZE 0x184
+
+/* Real output DMA image size (extension register) */
+#define FIMC_REG_CIEXTEN 0x188
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT(x) (((x) & 0x3f) << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT(x) ((x) & 0x3f)
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK (0x3f << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK 0x3f
+
+#define FIMC_REG_CIDMAPARAM 0x18c
+#define FIMC_REG_CIDMAPARAM_R_LINEAR (0 << 29)
+#define FIMC_REG_CIDMAPARAM_R_64X32 (3 << 29)
+#define FIMC_REG_CIDMAPARAM_W_LINEAR (0 << 13)
+#define FIMC_REG_CIDMAPARAM_W_64X32 (3 << 13)
+#define FIMC_REG_CIDMAPARAM_TILE_MASK ((3 << 29) | (3 << 13))
+
+/* MIPI CSI image format */
+#define FIMC_REG_CSIIMGFMT 0x194
+#define FIMC_REG_CSIIMGFMT_YCBCR422_8BIT 0x1e
+#define FIMC_REG_CSIIMGFMT_RAW8 0x2a
+#define FIMC_REG_CSIIMGFMT_RAW10 0x2b
+#define FIMC_REG_CSIIMGFMT_RAW12 0x2c
+/* User defined formats. x = 0...16. */
+#define FIMC_REG_CSIIMGFMT_USER(x) (0x30 + x - 1)
+
+/* Output frame buffer sequence mask */
+#define FIMC_REG_CIFCNTSEQ 0x1fc
+
+/* SYSREG ISP Writeback register address offsets */
+#define SYSREG_ISPBLK 0x020c
+#define SYSREG_ISPBLK_FIFORST_CAM_BLK (1 << 7)
+
+#define SYSREG_CAMBLK 0x0218
+#define SYSREG_CAMBLK_FIFORST_ISP (1 << 15)
+#define SYSREG_CAMBLK_ISPWB_FULL_EN (7 << 20)
+
+/*
+ * Function declarations
+ */
+void fimc_hw_reset(struct fimc_dev *fimc);
+void fimc_hw_set_rotation(struct fimc_ctx *ctx);
+void fimc_hw_set_target_format(struct fimc_ctx *ctx);
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
+void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
+void fimc_hw_enable_capture(struct fimc_ctx *ctx);
+void fimc_hw_set_effect(struct fimc_ctx *ctx);
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
+void fimc_hw_set_input_path(struct fimc_ctx *ctx);
+void fimc_hw_set_output_path(struct fimc_ctx *ctx);
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
+ int index);
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct fimc_source_info *cam);
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct fimc_source_info *cam);
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct fimc_source_info *cam);
+void fimc_hw_clear_irq(struct fimc_dev *dev);
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
+void fimc_hw_disable_capture(struct fimc_dev *dev);
+s32 fimc_hw_get_frame_index(struct fimc_dev *dev);
+s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev);
+int fimc_hw_camblk_cfg_writeback(struct fimc_dev *fimc);
+void fimc_activate_capture(struct fimc_ctx *ctx);
+void fimc_deactivate_capture(struct fimc_dev *fimc);
+
+/**
+ * fimc_hw_set_dma_seq - configure output DMA buffer sequence
+ * @dev: FIMC device to configure
+ * @mask: bitmask for the DMA output buffer registers; a bit set to 0 skips
+ *        the corresponding buffer
+ *
+ * This function masks the output DMA ring buffers: it selects which of
+ * the 32 available output buffer address registers will be used by the DMA
+ * engine.
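+ *
+ * For example, fimc_hw_reset() enables only the first four ring buffers
+ * by calling fimc_hw_set_dma_seq(dev, 0xF) on variants with more than
+ * four output DMA buffers.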
+ */
+static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
+{
+ writel(mask, dev->regs + FIMC_REG_CIFCNTSEQ);
+}
+
+#endif /* FIMC_REG_H_ */
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
new file mode 100644
index 000000000..3261dc72c
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -0,0 +1,1584 @@
+/*
+ * S5P/EXYNOS4 SoC series camera host interface media device driver
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/media-device.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+#include "media-dev.h"
+#include "fimc-core.h"
+#include "fimc-is.h"
+#include "fimc-lite.h"
+#include "mipi-csis.h"
+
+/* Set up image sensor subdev -> FIMC capture node notifications. */
+static void __setup_sensor_notification(struct fimc_md *fmd,
+ struct v4l2_subdev *sensor,
+ struct v4l2_subdev *fimc_sd)
+{
+ struct fimc_source_info *src_inf;
+ struct fimc_sensor_info *md_si;
+ unsigned long flags;
+
+ src_inf = v4l2_get_subdev_hostdata(sensor);
+ if (!src_inf || WARN_ON(fmd == NULL))
+ return;
+
+ md_si = source_to_sensor_info(src_inf);
+ spin_lock_irqsave(&fmd->slock, flags);
+ md_si->host = v4l2_get_subdevdata(fimc_sd);
+ spin_unlock_irqrestore(&fmd->slock, flags);
+}
+
+/**
+ * fimc_pipeline_prepare - update pipeline information with subdevice pointers
+ * @p: fimc pipeline
+ * @me: media entity terminating the pipeline
+ *
+ * Caller holds the graph mutex.
+ */
+static void fimc_pipeline_prepare(struct fimc_pipeline *p,
+ struct media_entity *me)
+{
+ struct fimc_md *fmd = entity_to_fimc_mdev(me);
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev *sensor = NULL;
+ int i;
+
+ for (i = 0; i < IDX_MAX; i++)
+ p->subdevs[i] = NULL;
+
+ while (1) {
+ struct media_pad *pad = NULL;
+
+ /* Find remote source pad */
+ for (i = 0; i < me->num_pads; i++) {
+ struct media_pad *spad = &me->pads[i];
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+ pad = media_entity_remote_pad(spad);
+ if (pad)
+ break;
+ }
+
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ switch (sd->grp_id) {
+ case GRP_ID_SENSOR:
+ sensor = sd;
+ /* fall through */
+ case GRP_ID_FIMC_IS_SENSOR:
+ p->subdevs[IDX_SENSOR] = sd;
+ break;
+ case GRP_ID_CSIS:
+ p->subdevs[IDX_CSIS] = sd;
+ break;
+ case GRP_ID_FLITE:
+ p->subdevs[IDX_FLITE] = sd;
+ break;
+ case GRP_ID_FIMC:
+ p->subdevs[IDX_FIMC] = sd;
+ break;
+ case GRP_ID_FIMC_IS:
+ p->subdevs[IDX_IS_ISP] = sd;
+ break;
+ default:
+ break;
+ }
+ me = &sd->entity;
+ if (me->num_pads == 1)
+ break;
+ }
+
+ if (sensor && p->subdevs[IDX_FIMC])
+ __setup_sensor_notification(fmd, sensor, p->subdevs[IDX_FIMC]);
+}
+
+/**
+ * __subdev_set_power - change power state of a single subdev
+ * @sd: subdevice to change power state for
+ * @on: 1 to enable power or 0 to disable
+ *
+ * Return result of s_power subdev operation or -ENXIO if sd argument
+ * is NULL. Return 0 if the subdevice does not implement s_power.
+ */
+static int __subdev_set_power(struct v4l2_subdev *sd, int on)
+{
+ int *use_count;
+ int ret;
+
+ if (sd == NULL)
+ return -ENXIO;
+
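+	/*
+	 * Only the first power-on and the last power-off actually reach the
+	 * subdev; all other calls merely update the entity use count.
+	 */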
+ use_count = &sd->entity.use_count;
+ if (on && (*use_count)++ > 0)
+ return 0;
+ else if (!on && (*use_count == 0 || --(*use_count) > 0))
+ return 0;
+ ret = v4l2_subdev_call(sd, core, s_power, on);
+
+ return ret != -ENOIOCTLCMD ? ret : 0;
+}
+
+/**
+ * fimc_pipeline_s_power - change power state of all pipeline subdevs
+ * @p: fimc device terminating the pipeline
+ * @on: true to power on, false to power off
+ *
+ * Needs to be called with the graph mutex held.
+ */
+static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool on)
+{
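+	/*
+	 * seq[0] holds the power-off and seq[1] the power-on ordering of
+	 * the pipeline subdevs; the row is selected by the 'on' argument.
+	 */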
+ static const u8 seq[2][IDX_MAX - 1] = {
+ { IDX_IS_ISP, IDX_SENSOR, IDX_CSIS, IDX_FLITE },
+ { IDX_CSIS, IDX_FLITE, IDX_SENSOR, IDX_IS_ISP },
+ };
+ int i, ret = 0;
+
+ if (p->subdevs[IDX_SENSOR] == NULL)
+ return -ENXIO;
+
+ for (i = 0; i < IDX_MAX - 1; i++) {
+ unsigned int idx = seq[on][i];
+
+ ret = __subdev_set_power(p->subdevs[idx], on);
+
+ if (ret < 0 && ret != -ENXIO)
+ goto error;
+ }
+ return 0;
+error:
+ for (; i >= 0; i--) {
+ unsigned int idx = seq[on][i];
+ __subdev_set_power(p->subdevs[idx], !on);
+ }
+ return ret;
+}
+
+/**
+ * __fimc_pipeline_enable - enable power of all pipeline subdevs
+ * and the sensor clock
+ * @ep: video pipeline structure
+ * @fmd: fimc media device
+ *
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
+ struct fimc_md *fmd)
+{
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ int ret;
+
+ /* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
+ ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = fimc_pipeline_s_power(p, 1);
+ if (!ret)
+ return 0;
+
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
+ clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
+
+ return ret;
+}
+
+/**
+ * __fimc_pipeline_open - update the pipeline information, enable power
+ * of all pipeline subdevs and the sensor clock
+ * @ep: fimc device terminating the pipeline
+ * @me: media entity to start graph walk with
+ * @prepare: true to walk the current pipeline and acquire all subdevs
+ *
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
+ struct media_entity *me, bool prepare)
+{
+ struct fimc_md *fmd = entity_to_fimc_mdev(me);
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ struct v4l2_subdev *sd;
+
+ if (WARN_ON(p == NULL || me == NULL))
+ return -EINVAL;
+
+ if (prepare)
+ fimc_pipeline_prepare(p, me);
+
+ sd = p->subdevs[IDX_SENSOR];
+ if (sd == NULL) {
+ pr_warn("%s(): No sensor subdev\n", __func__);
+ /*
+		 * Pipeline open cannot fail here, so that user space
+		 * is still able to configure the pipeline.
+ */
+ return 0;
+ }
+
+ return __fimc_pipeline_enable(ep, fmd);
+}
+
+/**
+ * __fimc_pipeline_close - disable the sensor clock and pipeline power
+ * @ep: fimc device terminating the pipeline
+ *
+ * Disable power of all subdevs and turn the external sensor clock off.
+ */
+static int __fimc_pipeline_close(struct exynos_media_pipeline *ep)
+{
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ struct v4l2_subdev *sd = p ? p->subdevs[IDX_SENSOR] : NULL;
+ struct fimc_md *fmd;
+ int ret;
+
+ if (sd == NULL) {
+ pr_warn("%s(): No sensor subdev\n", __func__);
+ return 0;
+ }
+
+ ret = fimc_pipeline_s_power(p, 0);
+
+ fmd = entity_to_fimc_mdev(&sd->entity);
+
+ /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
+ clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
+
+ return ret == -ENXIO ? 0 : ret;
+}
+
+/**
+ * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
+ * @ep: video pipeline structure
+ * @on: passed as the s_stream() callback argument
+ */
+static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
+{
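+	/*
+	 * seq[0] holds the stream-off and seq[1] the stream-on ordering of
+	 * the pipeline subdevs; the row is selected by the 'on' argument.
+	 */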
+ static const u8 seq[2][IDX_MAX] = {
+ { IDX_FIMC, IDX_SENSOR, IDX_IS_ISP, IDX_CSIS, IDX_FLITE },
+ { IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
+ };
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity);
+ enum fimc_subdev_index sd_id;
+ int i, ret = 0;
+
+ if (p->subdevs[IDX_SENSOR] == NULL) {
+ if (!fmd->user_subdev_api) {
+ /*
+ * Sensor must be already discovered if we
+ * aren't in the user_subdev_api mode
+ */
+ return -ENODEV;
+ }
+
+ /* Get pipeline sink entity */
+ if (p->subdevs[IDX_FIMC])
+ sd_id = IDX_FIMC;
+ else if (p->subdevs[IDX_IS_ISP])
+ sd_id = IDX_IS_ISP;
+ else if (p->subdevs[IDX_FLITE])
+ sd_id = IDX_FLITE;
+ else
+ return -ENODEV;
+
+ /*
+ * Sensor could have been linked between open and STREAMON -
+ * check if this is the case.
+ */
+ fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity);
+
+ if (p->subdevs[IDX_SENSOR] == NULL)
+ return -ENODEV;
+
+ ret = __fimc_pipeline_enable(ep, fmd);
+ if (ret < 0)
+ return ret;
+
+ }
+
+ for (i = 0; i < IDX_MAX; i++) {
+ unsigned int idx = seq[on][i];
+
+ ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ goto error;
+ }
+
+ return 0;
+error:
+ fimc_pipeline_s_power(p, !on);
+ for (; i >= 0; i--) {
+ unsigned int idx = seq[on][i];
+ v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
+ }
+ return ret;
+}
+
+/* Media pipeline operations for the FIMC/FIMC-LITE video device driver */
+static const struct exynos_media_pipeline_ops fimc_pipeline_ops = {
+ .open = __fimc_pipeline_open,
+ .close = __fimc_pipeline_close,
+ .set_stream = __fimc_pipeline_s_stream,
+};
+
+static struct exynos_media_pipeline *fimc_md_pipeline_create(
+ struct fimc_md *fmd)
+{
+ struct fimc_pipeline *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ list_add_tail(&p->list, &fmd->pipelines);
+
+ p->ep.ops = &fimc_pipeline_ops;
+ return &p->ep;
+}
+
+static void fimc_md_pipelines_free(struct fimc_md *fmd)
+{
+ while (!list_empty(&fmd->pipelines)) {
+ struct fimc_pipeline *p;
+
+ p = list_entry(fmd->pipelines.next, typeof(*p), list);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+/* Parse port node and register as a sub-device any sensor specified there. */
+static int fimc_md_parse_port_node(struct fimc_md *fmd,
+ struct device_node *port,
+ unsigned int index)
+{
+ struct fimc_source_info *pd = &fmd->sensor[index].pdata;
+ struct device_node *rem, *ep, *np;
+ struct v4l2_fwnode_endpoint endpoint;
+ int ret;
+
+ /* Assume here a port node can have only one endpoint node. */
+ ep = of_get_next_child(port, NULL);
+ if (!ep)
+ return 0;
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &endpoint);
+ if (ret) {
+ of_node_put(ep);
+ return ret;
+ }
+
+ if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS) {
+ of_node_put(ep);
+ return -EINVAL;
+ }
+
+ pd->mux_id = (endpoint.base.port - 1) & 0x1;
+
+ rem = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (rem == NULL) {
+ v4l2_info(&fmd->v4l2_dev, "Remote device at %pOF not found\n",
+ ep);
+ return 0;
+ }
+
+ if (fimc_input_is_parallel(endpoint.base.port)) {
+ if (endpoint.bus_type == V4L2_MBUS_PARALLEL)
+ pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_601;
+ else
+ pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_656;
+ pd->flags = endpoint.bus.parallel.flags;
+ } else if (fimc_input_is_mipi_csi(endpoint.base.port)) {
+ /*
+		 * MIPI CSI-2: only the input mux selection and
+		 * the sensor's clock frequency are needed.
+ */
+ pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2;
+ } else {
+ v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %pOF\n",
+ endpoint.base.port, rem);
+ }
+ /*
+	 * For FIMC-IS handled sensors, which are placed under the i2c-isp device
+ * node, FIMC is connected to the FIMC-IS through its ISP Writeback
+ * input. Sensors are attached to the FIMC-LITE hostdata interface
+ * directly or through MIPI-CSIS, depending on the external media bus
+ * used. This needs to be handled in a more reliable way, not by just
+ * checking parent's node name.
+ */
+ np = of_get_parent(rem);
+
+ if (np && !of_node_cmp(np->name, "i2c-isp"))
+ pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
+ else
+ pd->fimc_bus_type = pd->sensor_bus_type;
+
+ if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) {
+ of_node_put(rem);
+ return -EINVAL;
+ }
+
+ fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ fmd->sensor[index].asd.match.fwnode = of_fwnode_handle(rem);
+ fmd->async_subdevs[index] = &fmd->sensor[index].asd;
+
+ fmd->num_sensors++;
+
+ of_node_put(rem);
+ return 0;
+}
+
+/* Register all SoC external sub-devices */
+static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+{
+ struct device_node *parent = fmd->pdev->dev.of_node;
+ struct device_node *node, *ports;
+ int index = 0;
+ int ret;
+
+ /*
+ * Runtime resume one of the FIMC entities to make sure
+ * the sclk_cam clocks are not globally disabled.
+ */
+ if (!fmd->pmf)
+ return -ENXIO;
+
+ ret = pm_runtime_get_sync(fmd->pmf);
+ if (ret < 0) {
+ pm_runtime_put(fmd->pmf);
+ return ret;
+ }
+
+ fmd->num_sensors = 0;
+
+ /* Attach sensors linked to MIPI CSI-2 receivers */
+ for_each_available_child_of_node(parent, node) {
+ struct device_node *port;
+
+ if (of_node_cmp(node->name, "csis"))
+ continue;
+		/* The csis node can have only one port subnode. */
+ port = of_get_next_child(node, NULL);
+ if (!port)
+ continue;
+
+ ret = fimc_md_parse_port_node(fmd, port, index);
+ of_node_put(port);
+ if (ret < 0) {
+ of_node_put(node);
+ goto rpm_put;
+ }
+ index++;
+ }
+
+ /* Attach sensors listed in the parallel-ports node */
+ ports = of_get_child_by_name(parent, "parallel-ports");
+ if (!ports)
+ goto rpm_put;
+
+ for_each_child_of_node(ports, node) {
+ ret = fimc_md_parse_port_node(fmd, node, index);
+ if (ret < 0) {
+ of_node_put(node);
+ break;
+ }
+ index++;
+ }
+rpm_put:
+ pm_runtime_put(fmd->pmf);
+ return ret;
+}
+
+static int __of_get_csis_id(struct device_node *np)
+{
+ u32 reg = 0;
+
+ np = of_get_child_by_name(np, "port");
+ if (!np)
+ return -EINVAL;
+ of_property_read_u32(np, "reg", &reg);
+ of_node_put(np);
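+	/*
+	 * The port node's "reg" property holds the FIMC camera input id;
+	 * the CSIS hardware instance index is its offset from the first
+	 * MIPI CSI-2 input.
+	 */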
+ return reg - FIMC_INPUT_MIPI_CSI2_0;
+}
+
+/*
+ * Registration of the MIPI-CSIS, FIMC and FIMC-LITE platform devices.
+ */
+static int register_fimc_lite_entity(struct fimc_md *fmd,
+ struct fimc_lite *fimc_lite)
+{
+ struct v4l2_subdev *sd;
+ struct exynos_media_pipeline *ep;
+ int ret;
+
+ if (WARN_ON(fimc_lite->index >= FIMC_LITE_MAX_DEVS ||
+ fmd->fimc_lite[fimc_lite->index]))
+ return -EBUSY;
+
+ sd = &fimc_lite->subdev;
+ sd->grp_id = GRP_ID_FLITE;
+
+ ep = fimc_md_pipeline_create(fmd);
+ if (!ep)
+ return -ENOMEM;
+
+ v4l2_set_subdev_hostdata(sd, ep);
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret)
+ fmd->fimc_lite[fimc_lite->index] = fimc_lite;
+ else
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.LITE%d\n",
+ fimc_lite->index);
+ return ret;
+}
+
+static int register_fimc_entity(struct fimc_md *fmd, struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd;
+ struct exynos_media_pipeline *ep;
+ int ret;
+
+ if (WARN_ON(fimc->id >= FIMC_MAX_DEVS || fmd->fimc[fimc->id]))
+ return -EBUSY;
+
+ sd = &fimc->vid_cap.subdev;
+ sd->grp_id = GRP_ID_FIMC;
+
+ ep = fimc_md_pipeline_create(fmd);
+ if (!ep)
+ return -ENOMEM;
+
+ v4l2_set_subdev_hostdata(sd, ep);
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret) {
+ if (!fmd->pmf && fimc->pdev)
+ fmd->pmf = &fimc->pdev->dev;
+ fmd->fimc[fimc->id] = fimc;
+ fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
+ } else {
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
+ fimc->id, ret);
+ }
+ return ret;
+}
+
+static int register_csis_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ struct v4l2_subdev *sd)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int id, ret;
+
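+	/*
+	 * With DT the CSIS index comes from the port node, otherwise fall
+	 * back to the platform device id.
+	 */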
+ id = node ? __of_get_csis_id(node) : max(0, pdev->id);
+
+ if (WARN_ON(id < 0 || id >= CSIS_MAX_ENTITIES))
+ return -ENOENT;
+
+ if (WARN_ON(fmd->csis[id].sd))
+ return -EBUSY;
+
+ sd->grp_id = GRP_ID_CSIS;
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret)
+ fmd->csis[id].sd = sd;
+ else
+ v4l2_err(&fmd->v4l2_dev,
+ "Failed to register MIPI-CSIS.%d (%d)\n", id, ret);
+ return ret;
+}
+
+static int register_fimc_is_entity(struct fimc_md *fmd, struct fimc_is *is)
+{
+ struct v4l2_subdev *sd = &is->isp.subdev;
+ struct exynos_media_pipeline *ep;
+ int ret;
+
+ /* Allocate pipeline object for the ISP capture video node. */
+ ep = fimc_md_pipeline_create(fmd);
+ if (!ep)
+ return -ENOMEM;
+
+ v4l2_set_subdev_hostdata(sd, ep);
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (ret) {
+ v4l2_err(&fmd->v4l2_dev,
+ "Failed to register FIMC-ISP (%d)\n", ret);
+ return ret;
+ }
+
+ fmd->fimc_is = is;
+ return 0;
+}
+
+static int fimc_md_register_platform_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ int plat_entity)
+{
+ struct device *dev = &pdev->dev;
+ int ret = -EPROBE_DEFER;
+ void *drvdata;
+
+ /* Lock to ensure dev->driver won't change. */
+ device_lock(dev);
+
+ if (!dev->driver || !try_module_get(dev->driver->owner))
+ goto dev_unlock;
+
+ drvdata = dev_get_drvdata(dev);
+	/* If drvdata is NULL, the subdev did not probe successfully. */
+ if (drvdata) {
+ switch (plat_entity) {
+ case IDX_FIMC:
+ ret = register_fimc_entity(fmd, drvdata);
+ break;
+ case IDX_FLITE:
+ ret = register_fimc_lite_entity(fmd, drvdata);
+ break;
+ case IDX_CSIS:
+ ret = register_csis_entity(fmd, pdev, drvdata);
+ break;
+ case IDX_IS_ISP:
+ ret = register_fimc_is_entity(fmd, drvdata);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ }
+
+ module_put(dev->driver->owner);
+dev_unlock:
+ device_unlock(dev);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&fmd->pdev->dev, "deferring %s device registration\n",
+ dev_name(dev));
+ else if (ret < 0)
+ dev_err(&fmd->pdev->dev, "%s device registration failed (%d)\n",
+ dev_name(dev), ret);
+ return ret;
+}
+
+/* Register FIMC, FIMC-LITE and CSIS media entities */
+static int fimc_md_register_platform_entities(struct fimc_md *fmd,
+ struct device_node *parent)
+{
+ struct device_node *node;
+ int ret = 0;
+
+ for_each_available_child_of_node(parent, node) {
+ struct platform_device *pdev;
+ int plat_entity = -1;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ continue;
+
+		/* If the driver of any entity isn't ready, try all again later. */
+ if (!strcmp(node->name, CSIS_OF_NODE_NAME))
+ plat_entity = IDX_CSIS;
+ else if (!strcmp(node->name, FIMC_IS_OF_NODE_NAME))
+ plat_entity = IDX_IS_ISP;
+ else if (!strcmp(node->name, FIMC_LITE_OF_NODE_NAME))
+ plat_entity = IDX_FLITE;
+ else if (!strcmp(node->name, FIMC_OF_NODE_NAME) &&
+ !of_property_read_bool(node, "samsung,lcd-wb"))
+ plat_entity = IDX_FIMC;
+
+ if (plat_entity >= 0)
+ ret = fimc_md_register_platform_entity(fmd, pdev,
+ plat_entity);
+ put_device(&pdev->dev);
+ if (ret < 0) {
+ of_node_put(node);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void fimc_md_unregister_entities(struct fimc_md *fmd)
+{
+ int i;
+
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ struct fimc_dev *dev = fmd->fimc[i];
+ if (dev == NULL)
+ continue;
+ v4l2_device_unregister_subdev(&dev->vid_cap.subdev);
+ dev->vid_cap.ve.pipe = NULL;
+ fmd->fimc[i] = NULL;
+ }
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ struct fimc_lite *dev = fmd->fimc_lite[i];
+ if (dev == NULL)
+ continue;
+ v4l2_device_unregister_subdev(&dev->subdev);
+ dev->ve.pipe = NULL;
+ fmd->fimc_lite[i] = NULL;
+ }
+ for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
+ if (fmd->csis[i].sd == NULL)
+ continue;
+ v4l2_device_unregister_subdev(fmd->csis[i].sd);
+ fmd->csis[i].sd = NULL;
+ }
+
+ if (fmd->fimc_is)
+ v4l2_device_unregister_subdev(&fmd->fimc_is->isp.subdev);
+
+ v4l2_info(&fmd->v4l2_dev, "Unregistered all entities\n");
+}
+
+/**
+ * __fimc_md_create_fimc_sink_links - create links to all FIMC entities
+ * @fmd: fimc media device
+ * @source: the source entity to create links to all fimc entities from
+ * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
+ * @pad: the source entity pad index
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
+ */
+static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
+ struct media_entity *source,
+ struct v4l2_subdev *sensor,
+ int pad, int link_mask)
+{
+ struct fimc_source_info *si = NULL;
+ struct media_entity *sink;
+ unsigned int flags = 0;
+ int i, ret = 0;
+
+ if (sensor) {
+ si = v4l2_get_subdev_hostdata(sensor);
+ /* Skip direct FIMC links in the logical FIMC-IS sensor path */
+ if (si && si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
+ ret = 1;
+ }
+
+ for (i = 0; !ret && i < FIMC_MAX_DEVS; i++) {
+ if (!fmd->fimc[i])
+ continue;
+ /*
+		 * Some FIMC variants are not fitted with a camera capture
+		 * interface. Skip creating a link from the sensor for those.
+ */
+ if (!fmd->fimc[i]->variant->has_cam_if)
+ continue;
+
+ flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
+
+ sink = &fmd->fimc[i]->vid_cap.subdev.entity;
+ ret = media_create_pad_link(source, pad, sink,
+ FIMC_SD_PAD_SINK_CAM, flags);
+ if (ret)
+ return ret;
+
+ /* Notify FIMC capture subdev entity */
+ ret = media_entity_call(sink, link_setup, &sink->pads[0],
+ &source->pads[pad], flags);
+ if (ret)
+ break;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
+ source->name, flags ? '=' : '-', sink->name);
+ }
+
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ if (!fmd->fimc_lite[i])
+ continue;
+
+ sink = &fmd->fimc_lite[i]->subdev.entity;
+ ret = media_create_pad_link(source, pad, sink,
+ FLITE_SD_PAD_SINK, 0);
+ if (ret)
+ return ret;
+
+ /* Notify FIMC-LITE subdev entity */
+ ret = media_entity_call(sink, link_setup, &sink->pads[0],
+ &source->pads[pad], 0);
+ if (ret)
+ break;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] -> [%s]\n",
+ source->name, sink->name);
+ }
+ return 0;
+}
+
+/* Create links from FIMC-LITE source pads to other entities */
+static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
+{
+ struct media_entity *source, *sink;
+ int i, ret = 0;
+
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ struct fimc_lite *fimc = fmd->fimc_lite[i];
+
+ if (fimc == NULL)
+ continue;
+
+ source = &fimc->subdev.entity;
+ sink = &fimc->ve.vdev.entity;
+ /* FIMC-LITE's subdev and video node */
+ ret = media_create_pad_link(source, FLITE_SD_PAD_SOURCE_DMA,
+ sink, 0, 0);
+ if (ret)
+ break;
+ /* Link from FIMC-LITE to IS-ISP subdev */
+ sink = &fmd->fimc_is->isp.subdev.entity;
+ ret = media_create_pad_link(source, FLITE_SD_PAD_SOURCE_ISP,
+ sink, 0, 0);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Create FIMC-IS links */
+static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
+{
+ struct fimc_isp *isp = &fmd->fimc_is->isp;
+ struct media_entity *source, *sink;
+ int i, ret;
+
+ source = &isp->subdev.entity;
+
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ if (fmd->fimc[i] == NULL)
+ continue;
+
+ /* Link from FIMC-IS-ISP subdev to FIMC */
+ sink = &fmd->fimc[i]->vid_cap.subdev.entity;
+ ret = media_create_pad_link(source, FIMC_ISP_SD_PAD_SRC_FIFO,
+ sink, FIMC_SD_PAD_SINK_FIFO, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Link from FIMC-IS-ISP subdev to fimc-is-isp.capture video node */
+ sink = &isp->video_capture.ve.vdev.entity;
+
+ /* Skip this link if the fimc-is-isp video node driver isn't built-in */
+ if (sink->num_pads == 0)
+ return 0;
+
+ return media_create_pad_link(source, FIMC_ISP_SD_PAD_SRC_DMA,
+ sink, 0, 0);
+}
+
+/**
+ * fimc_md_create_links - create default links between registered entities
+ * @fmd: fimc media device
+ *
+ * Parallel interface sensor entities are connected directly to FIMC capture
+ * entities. Sensors using the MIPI CSIS bus are connected through an
+ * immutable link to the CSI receiver entity specified by mux_id. Any registered CSIS
+ * entity has a link to each registered FIMC capture entity. Enabled links
+ * are created by default between each subsequent registered sensor and
+ * subsequent FIMC capture entity. The number of default active links is
+ * determined by the number of available sensors or FIMC entities,
+ * whichever is less.
+ */
+static int fimc_md_create_links(struct fimc_md *fmd)
+{
+ struct v4l2_subdev *csi_sensors[CSIS_MAX_ENTITIES] = { NULL };
+ struct v4l2_subdev *sensor, *csis;
+ struct fimc_source_info *pdata;
+ struct media_entity *source, *sink;
+ int i, pad, fimc_id = 0, ret = 0;
+ u32 flags, link_mask = 0;
+
+ for (i = 0; i < fmd->num_sensors; i++) {
+ if (fmd->sensor[i].subdev == NULL)
+ continue;
+
+ sensor = fmd->sensor[i].subdev;
+ pdata = v4l2_get_subdev_hostdata(sensor);
+ if (!pdata)
+ continue;
+
+ source = NULL;
+
+ switch (pdata->sensor_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
+ if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES,
+ "Wrong CSI channel id: %d\n", pdata->mux_id))
+ return -EINVAL;
+
+ csis = fmd->csis[pdata->mux_id].sd;
+ if (WARN(csis == NULL,
+ "MIPI-CSI interface specified but s5p-csis module is not loaded!\n"))
+ return -EINVAL;
+
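+			/* The sensor subdev's last pad is assumed to be its source pad. */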
+ pad = sensor->entity.num_pads - 1;
+ ret = media_create_pad_link(&sensor->entity, pad,
+ &csis->entity, CSIS_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]\n",
+ sensor->entity.name, csis->entity.name);
+
+ source = NULL;
+ csi_sensors[pdata->mux_id] = sensor;
+ break;
+
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
+ source = &sensor->entity;
+ pad = 0;
+ break;
+
+ default:
+ v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n",
+ pdata->sensor_bus_type);
+ return -EINVAL;
+ }
+ if (source == NULL)
+ continue;
+
+ link_mask = 1 << fimc_id++;
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
+ pad, link_mask);
+ }
+
+ for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
+ if (fmd->csis[i].sd == NULL)
+ continue;
+
+ source = &fmd->csis[i].sd->entity;
+ pad = CSIS_PAD_SOURCE;
+ sensor = csi_sensors[i];
+
+ link_mask = 1 << fimc_id++;
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
+ pad, link_mask);
+ }
+
+ /* Create immutable links between each FIMC's subdev and video node */
+ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ for (i = 0; i < FIMC_MAX_DEVS; i++) {
+ if (!fmd->fimc[i])
+ continue;
+
+ source = &fmd->fimc[i]->vid_cap.subdev.entity;
+ sink = &fmd->fimc[i]->vid_cap.ve.vdev.entity;
+
+ ret = media_create_pad_link(source, FIMC_SD_PAD_SOURCE,
+ sink, 0, flags);
+ if (ret)
+ break;
+ }
+
+ ret = __fimc_md_create_flite_source_links(fmd);
+ if (ret < 0)
+ return ret;
+
+ if (fmd->use_isp)
+ ret = __fimc_md_create_fimc_is_links(fmd);
+
+ return ret;
+}
+
+/*
+ * Management of the peripheral sensor and CAM_BLK (PIXELASYNCMx) clocks.
+ */
+static void fimc_md_put_clocks(struct fimc_md *fmd)
+{
+ int i = FIMC_MAX_CAMCLKS;
+
+ while (--i >= 0) {
+ if (IS_ERR(fmd->camclk[i].clock))
+ continue;
+ clk_put(fmd->camclk[i].clock);
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+ }
+
+ /* Writeback (PIXELASYNCMx) clocks */
+ for (i = 0; i < FIMC_MAX_WBCLKS; i++) {
+ if (IS_ERR(fmd->wbclk[i]))
+ continue;
+ clk_put(fmd->wbclk[i]);
+ fmd->wbclk[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_md_get_clocks(struct fimc_md *fmd)
+{
+ struct device *dev = &fmd->pdev->dev;
+ char clk_name[32];
+ struct clk *clock;
+ int i, ret = 0;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++)
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
+ snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i);
+ clock = clk_get(dev, clk_name);
+
+ if (IS_ERR(clock)) {
+ dev_err(dev, "Failed to get clock: %s\n", clk_name);
+ ret = PTR_ERR(clock);
+ break;
+ }
+ fmd->camclk[i].clock = clock;
+ }
+ if (ret)
+ fimc_md_put_clocks(fmd);
+
+ if (!fmd->use_isp)
+ return 0;
+ /*
+ * For now get only PIXELASYNCM1 clock (Writeback B/ISP),
+ * leave PIXELASYNCM0 out for the LCD Writeback driver.
+ */
+ fmd->wbclk[CLK_IDX_WB_A] = ERR_PTR(-EINVAL);
+
+ for (i = CLK_IDX_WB_B; i < FIMC_MAX_WBCLKS; i++) {
+ snprintf(clk_name, sizeof(clk_name), "pxl_async%u", i);
+ clock = clk_get(dev, clk_name);
+ if (IS_ERR(clock)) {
+ v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s\n",
+ clk_name);
+ ret = PTR_ERR(clock);
+ break;
+ }
+ fmd->wbclk[i] = clock;
+ }
+ if (ret)
+ fimc_md_put_clocks(fmd);
+
+ return ret;
+}
+
+static int __fimc_md_modify_pipeline(struct media_entity *entity, bool enable)
+{
+ struct exynos_video_entity *ve;
+ struct fimc_pipeline *p;
+ struct video_device *vdev;
+ int ret;
+
+ vdev = media_entity_to_video_device(entity);
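+	/* Only video nodes that are actually in use need their pipeline modified. */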
+ if (vdev->entity.use_count == 0)
+ return 0;
+
+ ve = vdev_to_exynos_video_entity(vdev);
+ p = to_fimc_pipeline(ve->pipe);
+ /*
+ * Nothing to do if we are disabling the pipeline, some link
+ * has been disconnected and p->subdevs array is cleared now.
+ */
+ if (!enable && p->subdevs[IDX_SENSOR] == NULL)
+ return 0;
+
+ if (enable)
+ ret = __fimc_pipeline_open(ve->pipe, entity, true);
+ else
+ ret = __fimc_pipeline_close(ve->pipe);
+
+ if (ret == 0 && !enable)
+ memset(p->subdevs, 0, sizeof(p->subdevs));
+
+ return ret;
+}
+
+/* Locking: called with the entity->graph_obj.mdev->graph_mutex held. */
+static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
+ struct media_graph *graph)
+{
+ struct media_entity *entity_err = entity;
+ int ret;
+
+ /*
+ * Walk current graph and call the pipeline open/close routine for each
+ * opened video node that belongs to the graph of entities connected
+ * through active links. This is needed as we cannot power on/off the
+ * subdevs in random order.
+ */
+ media_graph_walk_start(graph, entity);
+
+ while ((entity = media_graph_walk_next(graph))) {
+ if (!is_media_entity_v4l2_video_device(entity))
+ continue;
+
+ ret = __fimc_md_modify_pipeline(entity, enable);
+
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ media_graph_walk_start(graph, entity_err);
+
+ while ((entity_err = media_graph_walk_next(graph))) {
+ if (!is_media_entity_v4l2_video_device(entity_err))
+ continue;
+
+ __fimc_md_modify_pipeline(entity_err, !enable);
+
+ if (entity_err == entity)
+ break;
+ }
+
+ return ret;
+}
+
+static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
+ unsigned int notification)
+{
+ struct media_graph *graph =
+ &container_of(link->graph_obj.mdev, struct fimc_md,
+ media_dev)->link_setup_graph;
+ struct media_entity *sink = link->sink->entity;
+ int ret = 0;
+
+ /* Before link disconnection */
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
+ ret = media_graph_walk_init(graph,
+ link->graph_obj.mdev);
+ if (ret)
+ return ret;
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ ret = __fimc_md_modify_pipelines(sink, false, graph);
+#if 0
+ else
+ /* TODO: Link state change validation */
+#endif
+ /* After link activation */
+ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH) {
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ ret = __fimc_md_modify_pipelines(sink, true, graph);
+ media_graph_walk_cleanup(graph);
+ }
+
+ return ret ? -EPIPE : 0;
+}
+
+static const struct media_device_ops fimc_md_ops = {
+ .link_notify = fimc_md_link_notify,
+};
+
+static ssize_t fimc_md_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fimc_md *fmd = dev_get_drvdata(dev);
+
+ if (fmd->user_subdev_api)
+ return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
+
+ return strlcpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE);
+}
+
+static ssize_t fimc_md_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fimc_md *fmd = dev_get_drvdata(dev);
+ bool subdev_api;
+ int i;
+
+ if (!strcmp(buf, "vid-dev\n"))
+ subdev_api = false;
+ else if (!strcmp(buf, "sub-dev\n"))
+ subdev_api = true;
+ else
+ return count;
+
+ fmd->user_subdev_api = subdev_api;
+ for (i = 0; i < FIMC_MAX_DEVS; i++)
+ if (fmd->fimc[i])
+ fmd->fimc[i]->vid_cap.user_subdev_api = subdev_api;
+ return count;
+}
+/*
+ * This device attribute selects the video pipeline configuration method.
+ * The following values are valid:
+ * vid-dev - for V4L2 video node API only, subdevice will be configured
+ * by the host driver.
+ * sub-dev - for media controller API, subdevs must be configured in user
+ * space before starting streaming.
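+ *
+ * A hypothetical usage example (the exact sysfs path is platform dependent):
+ *   echo sub-dev > /sys/devices/platform/<fimc-md>/subdev_conf_mode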
+ */
+static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
+ fimc_md_sysfs_show, fimc_md_sysfs_store);
+
+static int fimc_md_get_pinctrl(struct fimc_md *fmd)
+{
+ struct device *dev = &fmd->pdev->dev;
+ struct fimc_pinctrl *pctl = &fmd->pinctl;
+
+ pctl->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(pctl->pinctrl))
+ return PTR_ERR(pctl->pinctrl);
+
+ pctl->state_default = pinctrl_lookup_state(pctl->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(pctl->state_default))
+ return PTR_ERR(pctl->state_default);
+
+ /* PINCTRL_STATE_IDLE is optional */
+ pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
+ PINCTRL_STATE_IDLE);
+ return 0;
+}
+
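+/*
+ * The exported sclk_cam clocks are backed by runtime PM of the FIMC device
+ * pointed to by fmd->pmf: preparing a clock resumes that device and
+ * unpreparing it allows the device to suspend again.
+ */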
+static int cam_clk_prepare(struct clk_hw *hw)
+{
+ struct cam_clk *camclk = to_cam_clk(hw);
+ int ret;
+
+ if (camclk->fmd->pmf == NULL)
+ return -ENODEV;
+
+ ret = pm_runtime_get_sync(camclk->fmd->pmf);
+ return ret < 0 ? ret : 0;
+}
+
+static void cam_clk_unprepare(struct clk_hw *hw)
+{
+ struct cam_clk *camclk = to_cam_clk(hw);
+
+ if (camclk->fmd->pmf == NULL)
+ return;
+
+ pm_runtime_put_sync(camclk->fmd->pmf);
+}
+
+static const struct clk_ops cam_clk_ops = {
+ .prepare = cam_clk_prepare,
+ .unprepare = cam_clk_unprepare,
+};
+
+static void fimc_md_unregister_clk_provider(struct fimc_md *fmd)
+{
+ struct cam_clk_provider *cp = &fmd->clk_provider;
+ unsigned int i;
+
+ if (cp->of_node)
+ of_clk_del_provider(cp->of_node);
+
+ for (i = 0; i < cp->num_clocks; i++)
+ clk_unregister(cp->clks[i]);
+}
+
+static int fimc_md_register_clk_provider(struct fimc_md *fmd)
+{
+ struct cam_clk_provider *cp = &fmd->clk_provider;
+ struct device *dev = &fmd->pdev->dev;
+ int i, ret;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
+ struct cam_clk *camclk = &cp->camclk[i];
+ struct clk_init_data init;
+ const char *p_name;
+
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", i, &init.name);
+ if (ret < 0)
+ break;
+
+ p_name = __clk_get_name(fmd->camclk[i].clock);
+
+ /* It's safe since clk_register() will duplicate the string. */
+ init.parent_names = &p_name;
+ init.num_parents = 1;
+ init.ops = &cam_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ camclk->hw.init = &init;
+ camclk->fmd = fmd;
+
+ cp->clks[i] = clk_register(NULL, &camclk->hw);
+ if (IS_ERR(cp->clks[i])) {
+ dev_err(dev, "failed to register clock: %s (%ld)\n",
+ init.name, PTR_ERR(cp->clks[i]));
+ ret = PTR_ERR(cp->clks[i]);
+ goto err;
+ }
+ cp->num_clocks++;
+ }
+
+ if (cp->num_clocks == 0) {
+ dev_warn(dev, "clk provider not registered\n");
+ return 0;
+ }
+
+ cp->clk_data.clks = cp->clks;
+ cp->clk_data.clk_num = cp->num_clocks;
+ cp->of_node = dev->of_node;
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
+ &cp->clk_data);
+ if (ret == 0)
+ return 0;
+err:
+ fimc_md_unregister_clk_provider(fmd);
+ return ret;
+}
+
+static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct fimc_md *fmd = notifier_to_fimc_md(notifier);
+ struct fimc_sensor_info *si = NULL;
+ int i;
+
+ /* Find platform data for this sensor subdev */
+ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
+ if (fmd->sensor[i].asd.match.fwnode ==
+ of_fwnode_handle(subdev->dev->of_node))
+ si = &fmd->sensor[i];
+
+ if (si == NULL)
+ return -EINVAL;
+
+ v4l2_set_subdev_hostdata(subdev, &si->pdata);
+
+ if (si->pdata.fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
+ subdev->grp_id = GRP_ID_FIMC_IS_SENSOR;
+ else
+ subdev->grp_id = GRP_ID_SENSOR;
+
+ si->subdev = subdev;
+
+ v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice: %s (%d)\n",
+ subdev->name, fmd->num_sensors);
+
+ fmd->num_sensors++;
+
+ return 0;
+}
+
+static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct fimc_md *fmd = notifier_to_fimc_md(notifier);
+ int ret;
+
+ mutex_lock(&fmd->media_dev.graph_mutex);
+
+ ret = fimc_md_create_links(fmd);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
+unlock:
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ if (ret < 0)
+ return ret;
+
+ return media_device_register(&fmd->media_dev);
+}
+
+static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
+ .bound = subdev_notifier_bound,
+ .complete = subdev_notifier_complete,
+};
+
+static int fimc_md_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct v4l2_device *v4l2_dev;
+ struct fimc_md *fmd;
+ int ret;
+
+ fmd = devm_kzalloc(dev, sizeof(*fmd), GFP_KERNEL);
+ if (!fmd)
+ return -ENOMEM;
+
+ spin_lock_init(&fmd->slock);
+ INIT_LIST_HEAD(&fmd->pipelines);
+ fmd->pdev = pdev;
+
+ strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC",
+ sizeof(fmd->media_dev.model));
+ fmd->media_dev.ops = &fimc_md_ops;
+ fmd->media_dev.dev = dev;
+
+ v4l2_dev = &fmd->v4l2_dev;
+ v4l2_dev->mdev = &fmd->media_dev;
+ v4l2_dev->notify = fimc_sensor_notify;
+ strlcpy(v4l2_dev->name, "s5p-fimc-md", sizeof(v4l2_dev->name));
+
+ fmd->use_isp = fimc_md_is_isp_available(dev->of_node);
+ fmd->user_subdev_api = true;
+
+ media_device_init(&fmd->media_dev);
+
+ ret = v4l2_device_register(dev, &fmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
+ return ret;
+ }
+
+ ret = fimc_md_get_clocks(fmd);
+ if (ret)
+ goto err_md;
+
+ ret = fimc_md_get_pinctrl(fmd);
+ if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pinctrl: %d\n", ret);
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, fmd);
+
+ ret = fimc_md_register_platform_entities(fmd, dev->of_node);
+ if (ret)
+ goto err_clk;
+
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret)
+ goto err_m_ent;
+
+ ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+ if (ret)
+ goto err_m_ent;
+ /*
+ * FIMC platform devices need to be registered before the sclk_cam
+ * clocks provider, as one of these devices needs to be activated
+ * to enable the clock.
+ */
+ ret = fimc_md_register_clk_provider(fmd);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "clock provider registration failed\n");
+ goto err_attr;
+ }
+
+ if (fmd->num_sensors > 0) {
+ fmd->subdev_notifier.subdevs = fmd->async_subdevs;
+ fmd->subdev_notifier.num_subdevs = fmd->num_sensors;
+ fmd->subdev_notifier.ops = &subdev_notifier_ops;
+ fmd->num_sensors = 0;
+
+ ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
+ &fmd->subdev_notifier);
+ if (ret)
+ goto err_clk_p;
+ }
+
+ return 0;
+
+err_clk_p:
+ fimc_md_unregister_clk_provider(fmd);
+err_attr:
+ device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+err_clk:
+ fimc_md_put_clocks(fmd);
+err_m_ent:
+ fimc_md_unregister_entities(fmd);
+err_md:
+ media_device_cleanup(&fmd->media_dev);
+ v4l2_device_unregister(&fmd->v4l2_dev);
+ return ret;
+}
+
+static int fimc_md_remove(struct platform_device *pdev)
+{
+ struct fimc_md *fmd = platform_get_drvdata(pdev);
+
+ if (!fmd)
+ return 0;
+
+ fimc_md_unregister_clk_provider(fmd);
+ v4l2_async_notifier_unregister(&fmd->subdev_notifier);
+
+ v4l2_device_unregister(&fmd->v4l2_dev);
+ device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+ fimc_md_unregister_entities(fmd);
+ fimc_md_pipelines_free(fmd);
+ media_device_unregister(&fmd->media_dev);
+ media_device_cleanup(&fmd->media_dev);
+ fimc_md_put_clocks(fmd);
+
+ return 0;
+}
+
+static const struct platform_device_id fimc_driver_ids[] __always_unused = {
+ { .name = "s5p-fimc-md" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct of_device_id fimc_md_of_match[] = {
+ { .compatible = "samsung,fimc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fimc_md_of_match);
+
+static struct platform_driver fimc_md_driver = {
+ .probe = fimc_md_probe,
+ .remove = fimc_md_remove,
+ .driver = {
+ .of_match_table = of_match_ptr(fimc_md_of_match),
+ .name = "s5p-fimc-md",
+ }
+};
+
+static int __init fimc_md_init(void)
+{
+ int ret;
+
+ request_module("s5p-csis");
+ ret = fimc_register_driver();
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&fimc_md_driver);
+}
+
+static void __exit fimc_md_exit(void)
+{
+ platform_driver_unregister(&fimc_md_driver);
+ fimc_unregister_driver();
+}
+
+module_init(fimc_md_init);
+module_exit(fimc_md_exit);
+
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0.1");
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
new file mode 100644
index 000000000..957787a2f
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_MDEVICE_H_
+#define FIMC_MDEVICE_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/drv-intf/exynos-fimc.h>
+
+#include "fimc-core.h"
+#include "fimc-lite.h"
+#include "mipi-csis.h"
+
+#define FIMC_OF_NODE_NAME "fimc"
+#define FIMC_LITE_OF_NODE_NAME "fimc-lite"
+#define FIMC_IS_OF_NODE_NAME "fimc-is"
+#define CSIS_OF_NODE_NAME "csis"
+
+#define PINCTRL_STATE_IDLE "idle"
+
+#define FIMC_MAX_SENSORS 4
+#define FIMC_MAX_CAMCLKS 2
+#define DEFAULT_SENSOR_CLK_FREQ 24000000U
+
+/* LCD/ISP Writeback clocks (PIXELASYNCMx) */
+enum {
+ CLK_IDX_WB_A,
+ CLK_IDX_WB_B,
+ FIMC_MAX_WBCLKS
+};
+
+enum fimc_subdev_index {
+ IDX_SENSOR,
+ IDX_CSIS,
+ IDX_FLITE,
+ IDX_IS_ISP,
+ IDX_FIMC,
+ IDX_MAX,
+};
+
+/*
+ * This structure represents a chain of media entities, including a data
+ * source entity (e.g. an image sensor subdevice), a data capture entity
+ * - a video capture device node and any remaining entities.
+ */
+struct fimc_pipeline {
+ struct exynos_media_pipeline ep;
+ struct list_head list;
+ struct media_entity *vdev_entity;
+ struct v4l2_subdev *subdevs[IDX_MAX];
+};
+
+#define to_fimc_pipeline(_ep) container_of(_ep, struct fimc_pipeline, ep)
+
+struct fimc_csis_info {
+ struct v4l2_subdev *sd;
+ int id;
+};
+
+struct fimc_camclk_info {
+ struct clk *clock;
+ int use_count;
+ unsigned long frequency;
+};
+
+/**
+ * struct fimc_sensor_info - image data source subdev information
+ * @pdata: sensor's attributes passed as the media device's platform data
+ * @asd: asynchronous subdev registration data structure
+ * @subdev: image sensor v4l2 subdev
+ * @host: fimc device the sensor is currently linked to
+ *
+ * This data structure applies to the image sensor and the writeback subdevs.
+ */
+struct fimc_sensor_info {
+ struct fimc_source_info pdata;
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+ struct fimc_dev *host;
+};
+
+struct cam_clk {
+ struct clk_hw hw;
+ struct fimc_md *fmd;
+};
+#define to_cam_clk(_hw) container_of(_hw, struct cam_clk, hw)
+
+/**
+ * struct fimc_md - fimc media device information
+ * @csis: MIPI CSIS subdevs data
+ * @sensor: array of registered sensor subdevs
+ * @num_sensors: actual number of registered sensors
+ * @camclk: external sensor clock information
+ * @fimc: array of registered fimc devices
+ * @fimc_is: fimc-is data structure
+ * @use_isp: set to true when FIMC-IS subsystem is used
+ * @pmf: handle to the CAMCLK clock control FIMC helper device
+ * @media_dev: top level media device
+ * @v4l2_dev: top level v4l2_device holding up the subdevs
+ * @pdev: platform device this media device is hooked up into
+ * @pinctrl: camera port pinctrl handle
+ * @state_default: pinctrl default state handle
+ * @state_idle: pinctrl idle state handle
+ * @clk_provider: CAMCLK clock provider structure
+ * @user_subdev_api: true if subdevs are not configured by the host driver
+ * @slock: spinlock protecting @sensor array
+ */
+struct fimc_md {
+ struct fimc_csis_info csis[CSIS_MAX_ENTITIES];
+ struct fimc_sensor_info sensor[FIMC_MAX_SENSORS];
+ int num_sensors;
+ struct fimc_camclk_info camclk[FIMC_MAX_CAMCLKS];
+ struct clk *wbclk[FIMC_MAX_WBCLKS];
+ struct fimc_lite *fimc_lite[FIMC_LITE_MAX_DEVS];
+ struct fimc_dev *fimc[FIMC_MAX_DEVS];
+ struct fimc_is *fimc_is;
+ bool use_isp;
+ struct device *pmf;
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct platform_device *pdev;
+
+ struct fimc_pinctrl {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *state_default;
+ struct pinctrl_state *state_idle;
+ } pinctl;
+
+ struct cam_clk_provider {
+ struct clk *clks[FIMC_MAX_CAMCLKS];
+ struct clk_onecell_data clk_data;
+ struct device_node *of_node;
+ struct cam_clk camclk[FIMC_MAX_CAMCLKS];
+ int num_clocks;
+ } clk_provider;
+
+ struct v4l2_async_notifier subdev_notifier;
+ struct v4l2_async_subdev *async_subdevs[FIMC_MAX_SENSORS];
+
+ bool user_subdev_api;
+ spinlock_t slock;
+ struct list_head pipelines;
+ struct media_graph link_setup_graph;
+};
+
+static inline
+struct fimc_sensor_info *source_to_sensor_info(struct fimc_source_info *si)
+{
+ return container_of(si, struct fimc_sensor_info, pdata);
+}
+
+static inline struct fimc_md *entity_to_fimc_mdev(struct media_entity *me)
+{
+ return me->graph_obj.mdev == NULL ? NULL :
+ container_of(me->graph_obj.mdev, struct fimc_md, media_dev);
+}
+
+static inline struct fimc_md *notifier_to_fimc_md(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct fimc_md, subdev_notifier);
+}
+
+static inline void fimc_md_graph_lock(struct exynos_video_entity *ve)
+{
+ mutex_lock(&ve->vdev.entity.graph_obj.mdev->graph_mutex);
+}
+
+static inline void fimc_md_graph_unlock(struct exynos_video_entity *ve)
+{
+ mutex_unlock(&ve->vdev.entity.graph_obj.mdev->graph_mutex);
+}
+
+int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
+
+#ifdef CONFIG_OF
+static inline bool fimc_md_is_isp_available(struct device_node *node)
+{
+ node = of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME);
+ return node ? of_device_is_available(node) : false;
+}
+#else
+#define fimc_md_is_isp_available(node) (false)
+#endif /* CONFIG_OF */
+
+static inline struct v4l2_subdev *__fimc_md_get_subdev(
+ struct exynos_media_pipeline *ep,
+ unsigned int index)
+{
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+
+ if (!p || index >= IDX_MAX)
+ return NULL;
+ else
+ return p->subdevs[index];
+}
+
+#endif
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
new file mode 100644
index 000000000..efab3ebc6
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -0,0 +1,1043 @@
+/*
+ * Samsung S5P/EXYNOS SoC series MIPI-CSI receiver driver
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <media/drv-intf/exynos-fimc.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include "mipi-csis.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* Register map definition */
+
+/* CSIS global control */
+#define S5PCSIS_CTRL 0x00
+#define S5PCSIS_CTRL_DPDN_DEFAULT (0 << 31)
+#define S5PCSIS_CTRL_DPDN_SWAP (1 << 31)
+#define S5PCSIS_CTRL_ALIGN_32BIT (1 << 20)
+#define S5PCSIS_CTRL_UPDATE_SHADOW (1 << 16)
+#define S5PCSIS_CTRL_WCLK_EXTCLK (1 << 8)
+#define S5PCSIS_CTRL_RESET (1 << 4)
+#define S5PCSIS_CTRL_ENABLE (1 << 0)
+
+/* D-PHY control */
+#define S5PCSIS_DPHYCTRL 0x04
+#define S5PCSIS_DPHYCTRL_HSS_MASK (0x1f << 27)
+#define S5PCSIS_DPHYCTRL_ENABLE (0x1f << 0)
+
+#define S5PCSIS_CONFIG 0x08
+#define S5PCSIS_CFG_FMT_YCBCR422_8BIT (0x1e << 2)
+#define S5PCSIS_CFG_FMT_RAW8 (0x2a << 2)
+#define S5PCSIS_CFG_FMT_RAW10 (0x2b << 2)
+#define S5PCSIS_CFG_FMT_RAW12 (0x2c << 2)
+/* User defined formats, x = 1...4 */
+#define S5PCSIS_CFG_FMT_USER(x) ((0x30 + x - 1) << 2)
+#define S5PCSIS_CFG_FMT_MASK (0x3f << 2)
+#define S5PCSIS_CFG_NR_LANE_MASK 3
+
+/* Interrupt mask */
+#define S5PCSIS_INTMSK 0x10
+#define S5PCSIS_INTMSK_EVEN_BEFORE (1 << 31)
+#define S5PCSIS_INTMSK_EVEN_AFTER (1 << 30)
+#define S5PCSIS_INTMSK_ODD_BEFORE (1 << 29)
+#define S5PCSIS_INTMSK_ODD_AFTER (1 << 28)
+#define S5PCSIS_INTMSK_FRAME_START (1 << 27)
+#define S5PCSIS_INTMSK_FRAME_END (1 << 26)
+#define S5PCSIS_INTMSK_ERR_SOT_HS (1 << 12)
+#define S5PCSIS_INTMSK_ERR_LOST_FS (1 << 5)
+#define S5PCSIS_INTMSK_ERR_LOST_FE (1 << 4)
+#define S5PCSIS_INTMSK_ERR_OVER (1 << 3)
+#define S5PCSIS_INTMSK_ERR_ECC (1 << 2)
+#define S5PCSIS_INTMSK_ERR_CRC (1 << 1)
+#define S5PCSIS_INTMSK_ERR_UNKNOWN (1 << 0)
+#define S5PCSIS_INTMSK_EXYNOS4_EN_ALL 0xf000103f
+#define S5PCSIS_INTMSK_EXYNOS5_EN_ALL 0xfc00103f
+
+/* Interrupt source */
+#define S5PCSIS_INTSRC 0x14
+#define S5PCSIS_INTSRC_EVEN_BEFORE (1 << 31)
+#define S5PCSIS_INTSRC_EVEN_AFTER (1 << 30)
+#define S5PCSIS_INTSRC_EVEN (0x3 << 30)
+#define S5PCSIS_INTSRC_ODD_BEFORE (1 << 29)
+#define S5PCSIS_INTSRC_ODD_AFTER (1 << 28)
+#define S5PCSIS_INTSRC_ODD (0x3 << 28)
+#define S5PCSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
+#define S5PCSIS_INTSRC_FRAME_START (1 << 27)
+#define S5PCSIS_INTSRC_FRAME_END (1 << 26)
+#define S5PCSIS_INTSRC_ERR_SOT_HS (0xf << 12)
+#define S5PCSIS_INTSRC_ERR_LOST_FS (1 << 5)
+#define S5PCSIS_INTSRC_ERR_LOST_FE (1 << 4)
+#define S5PCSIS_INTSRC_ERR_OVER (1 << 3)
+#define S5PCSIS_INTSRC_ERR_ECC (1 << 2)
+#define S5PCSIS_INTSRC_ERR_CRC (1 << 1)
+#define S5PCSIS_INTSRC_ERR_UNKNOWN (1 << 0)
+#define S5PCSIS_INTSRC_ERRORS 0xf03f
+
+/* Pixel resolution */
+#define S5PCSIS_RESOL 0x2c
+#define CSIS_MAX_PIX_WIDTH 0xffff
+#define CSIS_MAX_PIX_HEIGHT 0xffff
+
+/* Non-image packet data buffers */
+#define S5PCSIS_PKTDATA_ODD 0x2000
+#define S5PCSIS_PKTDATA_EVEN 0x3000
+#define S5PCSIS_PKTDATA_SIZE SZ_4K
+
+enum {
+ CSIS_CLK_MUX,
+ CSIS_CLK_GATE,
+};
+
+static char *csi_clock_name[] = {
+ [CSIS_CLK_MUX] = "sclk_csis",
+ [CSIS_CLK_GATE] = "csis",
+};
+#define NUM_CSIS_CLOCKS ARRAY_SIZE(csi_clock_name)
+#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+
+static const char * const csis_supply_name[] = {
+	"vddcore",      /* CSIS Core (1.0V, 1.1V or 1.2V) supply */
+ "vddio", /* CSIS I/O and PLL (1.8V) supply */
+};
+#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name)
+
+enum {
+ ST_POWERED = 1,
+ ST_STREAMING = 2,
+ ST_SUSPENDED = 4,
+};
+
+struct s5pcsis_event {
+ u32 mask;
+ const char * const name;
+ unsigned int counter;
+};
+
+static const struct s5pcsis_event s5pcsis_events[] = {
+ /* Errors */
+ { S5PCSIS_INTSRC_ERR_SOT_HS, "SOT Error" },
+ { S5PCSIS_INTSRC_ERR_LOST_FS, "Lost Frame Start Error" },
+ { S5PCSIS_INTSRC_ERR_LOST_FE, "Lost Frame End Error" },
+ { S5PCSIS_INTSRC_ERR_OVER, "FIFO Overflow Error" },
+ { S5PCSIS_INTSRC_ERR_ECC, "ECC Error" },
+ { S5PCSIS_INTSRC_ERR_CRC, "CRC Error" },
+ { S5PCSIS_INTSRC_ERR_UNKNOWN, "Unknown Error" },
+ /* Non-image data receive events */
+ { S5PCSIS_INTSRC_EVEN_BEFORE, "Non-image data before even frame" },
+ { S5PCSIS_INTSRC_EVEN_AFTER, "Non-image data after even frame" },
+ { S5PCSIS_INTSRC_ODD_BEFORE, "Non-image data before odd frame" },
+ { S5PCSIS_INTSRC_ODD_AFTER, "Non-image data after odd frame" },
+ /* Frame start/end */
+ { S5PCSIS_INTSRC_FRAME_START, "Frame Start" },
+ { S5PCSIS_INTSRC_FRAME_END, "Frame End" },
+};
+#define S5PCSIS_NUM_EVENTS ARRAY_SIZE(s5pcsis_events)
+
+struct csis_pktbuf {
+ u32 *data;
+ unsigned int len;
+};
+
+struct csis_drvdata {
+ /* Mask of all used interrupts in S5PCSIS_INTMSK register */
+ u32 interrupt_mask;
+};
+
+/**
+ * struct csis_state - the driver's internal state data structure
+ * @lock: mutex serializing the subdev and power management operations,
+ * protecting @format and @flags members
+ * @pads: CSIS pads array
+ * @sd: v4l2_subdev associated with CSIS device instance
+ * @index: the hardware instance index
+ * @pdev: CSIS platform device
+ * @phy: pointer to the CSIS generic PHY
+ * @regs: mmaped I/O registers memory
+ * @supplies: CSIS regulator supplies
+ * @clock: CSIS clocks
+ * @irq: requested s5p-mipi-csis irq number
+ * @interrupt_mask: mask of all the used interrupts
+ * @flags: the state variable for power and streaming control
+ * @clk_frequency: device bus clock frequency
+ * @hs_settle: HS-RX settle time
+ * @num_lanes: number of MIPI-CSI data lanes used
+ * @max_num_lanes: maximum number of MIPI-CSI data lanes supported
+ * @wclk_ext: CSI wrapper clock: 0 - bus clock, 1 - external SCLK_CAM
+ * @csis_fmt: current CSIS pixel format
+ * @format: common media bus format for the source and sink pad
+ * @slock: spinlock protecting structure members below
+ * @pkt_buf: the frame embedded (non-image) data buffer
+ * @events: MIPI-CSIS event (error) counters
+ */
+struct csis_state {
+ struct mutex lock;
+ struct media_pad pads[CSIS_PADS_NUM];
+ struct v4l2_subdev sd;
+ u8 index;
+ struct platform_device *pdev;
+ struct phy *phy;
+ void __iomem *regs;
+ struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES];
+ struct clk *clock[NUM_CSIS_CLOCKS];
+ int irq;
+ u32 interrupt_mask;
+ u32 flags;
+
+ u32 clk_frequency;
+ u32 hs_settle;
+ u32 num_lanes;
+ u32 max_num_lanes;
+ u8 wclk_ext;
+
+ const struct csis_pix_format *csis_fmt;
+ struct v4l2_mbus_framefmt format;
+
+ spinlock_t slock;
+ struct csis_pktbuf pkt_buf;
+ struct s5pcsis_event events[S5PCSIS_NUM_EVENTS];
+};
+
+/**
+ * struct csis_pix_format - CSIS pixel format description
+ * @pix_width_alignment: horizontal pixel alignment, width will be
+ * multiple of 2^pix_width_alignment
+ * @code: corresponding media bus code
+ * @fmt_reg: S5PCSIS_CONFIG register value
+ * @data_alignment: MIPI-CSI data alignment in bits
+ */
+struct csis_pix_format {
+ unsigned int pix_width_alignment;
+ u32 code;
+ u32 fmt_reg;
+ u8 data_alignment;
+};
+
+static const struct csis_pix_format s5pcsis_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 32,
+ }, {
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
+ .data_alignment = 32,
+ }, {
+ .code = MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
+ .data_alignment = 32,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW8,
+ .data_alignment = 24,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW10,
+ .data_alignment = 24,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW12,
+ .data_alignment = 24,
+ }
+};
+
+#define s5pcsis_write(__csis, __r, __v) writel(__v, __csis->regs + __r)
+#define s5pcsis_read(__csis, __r) readl(__csis->regs + __r)
+
+static struct csis_state *sd_to_csis_state(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csis_state, sd);
+}
+
+static const struct csis_pix_format *find_csis_format(
+ struct v4l2_mbus_framefmt *mf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5pcsis_formats); i++)
+ if (mf->code == s5pcsis_formats[i].code)
+ return &s5pcsis_formats[i];
+ return NULL;
+}
+
+static void s5pcsis_enable_interrupts(struct csis_state *state, bool on)
+{
+ u32 val = s5pcsis_read(state, S5PCSIS_INTMSK);
+ if (on)
+ val |= state->interrupt_mask;
+ else
+ val &= ~state->interrupt_mask;
+ s5pcsis_write(state, S5PCSIS_INTMSK, val);
+}
+
+static void s5pcsis_reset(struct csis_state *state)
+{
+ u32 val = s5pcsis_read(state, S5PCSIS_CTRL);
+
+ s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_RESET);
+ udelay(10);
+}
+
+static void s5pcsis_system_enable(struct csis_state *state, int on)
+{
+ u32 val, mask;
+
+ val = s5pcsis_read(state, S5PCSIS_CTRL);
+ if (on)
+ val |= S5PCSIS_CTRL_ENABLE;
+ else
+ val &= ~S5PCSIS_CTRL_ENABLE;
+ s5pcsis_write(state, S5PCSIS_CTRL, val);
+
+ val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
+ val &= ~S5PCSIS_DPHYCTRL_ENABLE;
+ if (on) {
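+		/* Enable the clock lane and num_lanes data lanes of the D-PHY */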
+ mask = (1 << (state->num_lanes + 1)) - 1;
+ val |= (mask & S5PCSIS_DPHYCTRL_ENABLE);
+ }
+ s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
+}
+
+/* Called with the state.lock mutex held */
+static void __s5pcsis_set_format(struct csis_state *state)
+{
+ struct v4l2_mbus_framefmt *mf = &state->format;
+ u32 val;
+
+ v4l2_dbg(1, debug, &state->sd, "fmt: %#x, %d x %d\n",
+ mf->code, mf->width, mf->height);
+
+ /* Color format */
+ val = s5pcsis_read(state, S5PCSIS_CONFIG);
+ val = (val & ~S5PCSIS_CFG_FMT_MASK) | state->csis_fmt->fmt_reg;
+ s5pcsis_write(state, S5PCSIS_CONFIG, val);
+
+ /* Pixel resolution */
+ val = (mf->width << 16) | mf->height;
+ s5pcsis_write(state, S5PCSIS_RESOL, val);
+}
+
+static void s5pcsis_set_hsync_settle(struct csis_state *state, int settle)
+{
+ u32 val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
+
+ val = (val & ~S5PCSIS_DPHYCTRL_HSS_MASK) | (settle << 27);
+ s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
+}
+
+static void s5pcsis_set_params(struct csis_state *state)
+{
+ u32 val;
+
+ val = s5pcsis_read(state, S5PCSIS_CONFIG);
+ val = (val & ~S5PCSIS_CFG_NR_LANE_MASK) | (state->num_lanes - 1);
+ s5pcsis_write(state, S5PCSIS_CONFIG, val);
+
+ __s5pcsis_set_format(state);
+ s5pcsis_set_hsync_settle(state, state->hs_settle);
+
+ val = s5pcsis_read(state, S5PCSIS_CTRL);
+ if (state->csis_fmt->data_alignment == 32)
+ val |= S5PCSIS_CTRL_ALIGN_32BIT;
+ else /* 24-bits */
+ val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
+
+ val &= ~S5PCSIS_CTRL_WCLK_EXTCLK;
+ if (state->wclk_ext)
+ val |= S5PCSIS_CTRL_WCLK_EXTCLK;
+ s5pcsis_write(state, S5PCSIS_CTRL, val);
+
+ /* Update the shadow register. */
+ val = s5pcsis_read(state, S5PCSIS_CTRL);
+ s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_UPDATE_SHADOW);
+}
+
+static void s5pcsis_clk_put(struct csis_state *state)
+{
+ int i;
+
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
+ if (IS_ERR(state->clock[i]))
+ continue;
+ clk_unprepare(state->clock[i]);
+ clk_put(state->clock[i]);
+ state->clock[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int s5pcsis_clk_get(struct csis_state *state)
+{
+ struct device *dev = &state->pdev->dev;
+ int i, ret;
+
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++)
+ state->clock[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
+ state->clock[i] = clk_get(dev, csi_clock_name[i]);
+ if (IS_ERR(state->clock[i])) {
+ ret = PTR_ERR(state->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(state->clock[i]);
+ if (ret < 0) {
+ clk_put(state->clock[i]);
+ state->clock[i] = ERR_PTR(-EINVAL);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ s5pcsis_clk_put(state);
+ dev_err(dev, "failed to get clock: %s\n", csi_clock_name[i]);
+ return ret;
+}
+
+static void dump_regs(struct csis_state *state, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CTRL" },
+ { 0x04, "DPHYCTRL" },
+ { 0x08, "CONFIG" },
+ { 0x0c, "DPHYSTS" },
+ { 0x10, "INTMSK" },
+ { 0x2c, "RESOL" },
+ { 0x38, "SDW_CONFIG" },
+ };
+ u32 i;
+
+ v4l2_info(&state->sd, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = s5pcsis_read(state, registers[i].offset);
+ v4l2_info(&state->sd, "%10s: 0x%08x\n", registers[i].name, cfg);
+ }
+}
+
+static void s5pcsis_start_stream(struct csis_state *state)
+{
+ s5pcsis_reset(state);
+ s5pcsis_set_params(state);
+ s5pcsis_system_enable(state, true);
+ s5pcsis_enable_interrupts(state, true);
+}
+
+static void s5pcsis_stop_stream(struct csis_state *state)
+{
+ s5pcsis_enable_interrupts(state, false);
+ s5pcsis_system_enable(state, false);
+}
+
+static void s5pcsis_clear_counters(struct csis_state *state)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&state->slock, flags);
+ for (i = 0; i < S5PCSIS_NUM_EVENTS; i++)
+ state->events[i].counter = 0;
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+static void s5pcsis_log_counters(struct csis_state *state, bool non_errors)
+{
+ int i = non_errors ? S5PCSIS_NUM_EVENTS : S5PCSIS_NUM_EVENTS - 4;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->slock, flags);
+
+ for (i--; i >= 0; i--) {
+ if (state->events[i].counter > 0 || debug)
+ v4l2_info(&state->sd, "%s events: %d\n",
+ state->events[i].name,
+ state->events[i].counter);
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+/*
+ * V4L2 subdev operations
+ */
+static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ struct device *dev = &state->pdev->dev;
+
+ if (on)
+ return pm_runtime_get_sync(dev);
+
+ return pm_runtime_put_sync(dev);
+}
+
+static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, sd, "%s: %d, state: 0x%x\n",
+ __func__, enable, state->flags);
+
+ if (enable) {
+ s5pcsis_clear_counters(state);
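+		/* pm_runtime_get_sync() returns 1 when the device was already active */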
+ ret = pm_runtime_get_sync(&state->pdev->dev);
+ if (ret && ret != 1) {
+ pm_runtime_put_noidle(&state->pdev->dev);
+ return ret;
+ }
+ }
+
+ mutex_lock(&state->lock);
+ if (enable) {
+ if (state->flags & ST_SUSPENDED) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ s5pcsis_start_stream(state);
+ state->flags |= ST_STREAMING;
+ } else {
+ s5pcsis_stop_stream(state);
+ state->flags &= ~ST_STREAMING;
+ if (debug > 0)
+ s5pcsis_log_counters(state, true);
+ }
+unlock:
+ mutex_unlock(&state->lock);
+ if (!enable)
+ pm_runtime_put(&state->pdev->dev);
+
+ return ret == 1 ? 0 : ret;
+}
+
+static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(s5pcsis_formats))
+ return -EINVAL;
+
+ code->code = s5pcsis_formats[code->index].code;
+ return 0;
+}
+
+static struct csis_pix_format const *s5pcsis_try_format(
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct csis_pix_format const *csis_fmt;
+
+ csis_fmt = find_csis_format(mf);
+ if (csis_fmt == NULL)
+ csis_fmt = &s5pcsis_formats[0];
+
+ mf->code = csis_fmt->code;
+ v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
+ csis_fmt->pix_width_alignment,
+ &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
+ 0);
+ return csis_fmt;
+}
+
+static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
+ struct csis_state *state, struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return cfg ? v4l2_subdev_get_try_format(&state->sd, cfg, 0) : NULL;
+
+ return &state->format;
+}
+
+static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ struct csis_pix_format const *csis_fmt;
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = __s5pcsis_get_format(state, cfg, fmt->which);
+
+ if (fmt->pad == CSIS_PAD_SOURCE) {
+ if (mf) {
+ mutex_lock(&state->lock);
+ fmt->format = *mf;
+ mutex_unlock(&state->lock);
+ }
+ return 0;
+ }
+ csis_fmt = s5pcsis_try_format(&fmt->format);
+ if (mf) {
+ mutex_lock(&state->lock);
+ *mf = fmt->format;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ state->csis_fmt = csis_fmt;
+ mutex_unlock(&state->lock);
+ }
+ return 0;
+}
+
+static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = __s5pcsis_get_format(state, cfg, fmt->which);
+ if (!mf)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fmt->format = *mf;
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int s5pcsis_s_rx_buffer(struct v4l2_subdev *sd, void *buf,
+ unsigned int *size)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+ unsigned long flags;
+
+ *size = min_t(unsigned int, *size, S5PCSIS_PKTDATA_SIZE);
+
+ spin_lock_irqsave(&state->slock, flags);
+ state->pkt_buf.data = buf;
+ state->pkt_buf.len = *size;
+ spin_unlock_irqrestore(&state->slock, flags);
+
+ return 0;
+}
+
+static int s5pcsis_log_status(struct v4l2_subdev *sd)
+{
+ struct csis_state *state = sd_to_csis_state(sd);
+
+ mutex_lock(&state->lock);
+ s5pcsis_log_counters(state, true);
+ if (debug && (state->flags & ST_POWERED))
+ dump_regs(state, __func__);
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops s5pcsis_core_ops = {
+ .s_power = s5pcsis_s_power,
+ .log_status = s5pcsis_log_status,
+};
+
+static const struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
+ .enum_mbus_code = s5pcsis_enum_mbus_code,
+ .get_fmt = s5pcsis_get_fmt,
+ .set_fmt = s5pcsis_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops s5pcsis_video_ops = {
+ .s_rx_buffer = s5pcsis_s_rx_buffer,
+ .s_stream = s5pcsis_s_stream,
+};
+
+static const struct v4l2_subdev_ops s5pcsis_subdev_ops = {
+ .core = &s5pcsis_core_ops,
+ .pad = &s5pcsis_pad_ops,
+ .video = &s5pcsis_video_ops,
+};
+
+static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
+{
+ struct csis_state *state = dev_id;
+ struct csis_pktbuf *pktbuf = &state->pkt_buf;
+ unsigned long flags;
+ u32 status;
+
+ status = s5pcsis_read(state, S5PCSIS_INTSRC);
+ spin_lock_irqsave(&state->slock, flags);
+
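+	/* Copy any embedded non-image packet data to the buffer set with s_rx_buffer */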
+ if ((status & S5PCSIS_INTSRC_NON_IMAGE_DATA) && pktbuf->data) {
+ u32 offset;
+
+ if (status & S5PCSIS_INTSRC_EVEN)
+ offset = S5PCSIS_PKTDATA_EVEN;
+ else
+ offset = S5PCSIS_PKTDATA_ODD;
+
+ memcpy(pktbuf->data, (u8 __force *)state->regs + offset,
+ pktbuf->len);
+ pktbuf->data = NULL;
+ rmb();
+ }
+
+ /* Update the event/error counters */
+ if ((status & S5PCSIS_INTSRC_ERRORS) || debug) {
+ int i;
+ for (i = 0; i < S5PCSIS_NUM_EVENTS; i++) {
+ if (!(status & state->events[i].mask))
+ continue;
+ state->events[i].counter++;
+ v4l2_dbg(2, debug, &state->sd, "%s: %d\n",
+ state->events[i].name,
+ state->events[i].counter);
+ }
+ v4l2_dbg(2, debug, &state->sd, "status: %08x\n", status);
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+
+ s5pcsis_write(state, S5PCSIS_INTSRC, status);
+ return IRQ_HANDLED;
+}
+
+static int s5pcsis_parse_dt(struct platform_device *pdev,
+ struct csis_state *state)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct v4l2_fwnode_endpoint endpoint;
+ int ret;
+
+ if (of_property_read_u32(node, "clock-frequency",
+ &state->clk_frequency))
+ state->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+ if (of_property_read_u32(node, "bus-width",
+ &state->max_num_lanes))
+ return -EINVAL;
+
+ node = of_graph_get_next_endpoint(node, NULL);
+ if (!node) {
+ dev_err(&pdev->dev, "No port node at %pOF\n",
+ pdev->dev.of_node);
+ return -EINVAL;
+ }
+ /* Get port node and validate MIPI-CSI channel id. */
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &endpoint);
+ if (ret)
+ goto err;
+
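+	/* The port id encodes the FIMC input; MIPI CSI-2 inputs start at FIMC_INPUT_MIPI_CSI2_0 */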
+ state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
+ if (state->index >= CSIS_MAX_ENTITIES) {
+ ret = -ENXIO;
+ goto err;
+ }
+
+	/* Get MIPI CSI-2 bus configuration from the endpoint node. */
+ of_property_read_u32(node, "samsung,csis-hs-settle",
+ &state->hs_settle);
+ state->wclk_ext = of_property_read_bool(node,
+ "samsung,csis-wclk");
+
+ state->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
+
+err:
+ of_node_put(node);
+ return ret;
+}
+
+static int s5pcsis_pm_resume(struct device *dev, bool runtime);
+static const struct of_device_id s5pcsis_of_match[];
+
+static int s5pcsis_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ const struct csis_drvdata *drv_data;
+ struct device *dev = &pdev->dev;
+ struct resource *mem_res;
+ struct csis_state *state;
+ int ret = -ENOMEM;
+ int i;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ spin_lock_init(&state->slock);
+ state->pdev = pdev;
+
+ of_id = of_match_node(s5pcsis_of_match, dev->of_node);
+ if (WARN_ON(of_id == NULL))
+ return -EINVAL;
+
+ drv_data = of_id->data;
+ state->interrupt_mask = drv_data->interrupt_mask;
+
+ ret = s5pcsis_parse_dt(pdev, state);
+ if (ret < 0)
+ return ret;
+
+ if (state->num_lanes == 0 || state->num_lanes > state->max_num_lanes) {
+ dev_err(dev, "Unsupported number of data lanes: %d (max. %d)\n",
+ state->num_lanes, state->max_num_lanes);
+ return -EINVAL;
+ }
+
+ state->phy = devm_phy_get(dev, "csis");
+ if (IS_ERR(state->phy))
+ return PTR_ERR(state->phy);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ state->regs = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+
+ state->irq = platform_get_irq(pdev, 0);
+ if (state->irq < 0) {
+ dev_err(dev, "Failed to get irq\n");
+ return state->irq;
+ }
+
+ for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
+ state->supplies[i].supply = csis_supply_name[i];
+
+ ret = devm_regulator_bulk_get(dev, CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ return ret;
+
+ ret = s5pcsis_clk_get(state);
+ if (ret < 0)
+ return ret;
+
+ if (state->clk_frequency)
+ ret = clk_set_rate(state->clock[CSIS_CLK_MUX],
+ state->clk_frequency);
+ else
+ dev_WARN(dev, "No clock frequency specified!\n");
+ if (ret < 0)
+ goto e_clkput;
+
+ ret = clk_enable(state->clock[CSIS_CLK_MUX]);
+ if (ret < 0)
+ goto e_clkput;
+
+ ret = devm_request_irq(dev, state->irq, s5pcsis_irq_handler,
+ 0, dev_name(dev), state);
+ if (ret) {
+ dev_err(dev, "Interrupt request failed\n");
+ goto e_clkdis;
+ }
+
+ v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
+ state->sd.owner = THIS_MODULE;
+ snprintf(state->sd.name, sizeof(state->sd.name), "%s.%d",
+ CSIS_SUBDEV_NAME, state->index);
+ state->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ state->csis_fmt = &s5pcsis_formats[0];
+
+ state->format.code = s5pcsis_formats[0].code;
+ state->format.width = S5PCSIS_DEF_PIX_WIDTH;
+ state->format.height = S5PCSIS_DEF_PIX_HEIGHT;
+
+ state->sd.entity.function = MEDIA_ENT_F_IO_V4L;
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&state->sd.entity,
+ CSIS_PADS_NUM, state->pads);
+ if (ret < 0)
+ goto e_clkdis;
+
+	/* This allows the host driver to retrieve the platform device id */
+ v4l2_set_subdevdata(&state->sd, pdev);
+
+ /* .. and a pointer to the subdev. */
+ platform_set_drvdata(pdev, &state->sd);
+ memcpy(state->events, s5pcsis_events, sizeof(state->events));
+
+ pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = s5pcsis_pm_resume(dev, true);
+ if (ret < 0)
+ goto e_m_ent;
+ }
+
+ dev_info(&pdev->dev, "lanes: %d, hs_settle: %d, wclk: %d, freq: %u\n",
+ state->num_lanes, state->hs_settle, state->wclk_ext,
+ state->clk_frequency);
+ return 0;
+
+e_m_ent:
+ media_entity_cleanup(&state->sd.entity);
+e_clkdis:
+ clk_disable(state->clock[CSIS_CLK_MUX]);
+e_clkput:
+ s5pcsis_clk_put(state);
+ return ret;
+}
+
+static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csis_state *state = sd_to_csis_state(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
+ __func__, state->flags);
+
+ mutex_lock(&state->lock);
+ if (state->flags & ST_POWERED) {
+ s5pcsis_stop_stream(state);
+ ret = phy_power_off(state->phy);
+ if (ret)
+ goto unlock;
+ ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ goto unlock;
+ clk_disable(state->clock[CSIS_CLK_GATE]);
+ state->flags &= ~ST_POWERED;
+ if (!runtime)
+ state->flags |= ST_SUSPENDED;
+ }
+ unlock:
+ mutex_unlock(&state->lock);
+ return ret ? -EAGAIN : 0;
+}
+
+static int s5pcsis_pm_resume(struct device *dev, bool runtime)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csis_state *state = sd_to_csis_state(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
+ __func__, state->flags);
+
+ mutex_lock(&state->lock);
+ if (!runtime && !(state->flags & ST_SUSPENDED))
+ goto unlock;
+
+ if (!(state->flags & ST_POWERED)) {
+ ret = regulator_bulk_enable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ if (ret)
+ goto unlock;
+ ret = phy_power_on(state->phy);
+ if (!ret) {
+ state->flags |= ST_POWERED;
+ } else {
+ regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ goto unlock;
+ }
+ clk_enable(state->clock[CSIS_CLK_GATE]);
+ }
+ if (state->flags & ST_STREAMING)
+ s5pcsis_start_stream(state);
+
+ state->flags &= ~ST_SUSPENDED;
+ unlock:
+ mutex_unlock(&state->lock);
+ return ret ? -EAGAIN : 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int s5pcsis_suspend(struct device *dev)
+{
+ return s5pcsis_pm_suspend(dev, false);
+}
+
+static int s5pcsis_resume(struct device *dev)
+{
+ return s5pcsis_pm_resume(dev, false);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int s5pcsis_runtime_suspend(struct device *dev)
+{
+ return s5pcsis_pm_suspend(dev, true);
+}
+
+static int s5pcsis_runtime_resume(struct device *dev)
+{
+ return s5pcsis_pm_resume(dev, true);
+}
+#endif
+
+static int s5pcsis_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csis_state *state = sd_to_csis_state(sd);
+
+ pm_runtime_disable(&pdev->dev);
+ s5pcsis_pm_suspend(&pdev->dev, true);
+ clk_disable(state->clock[CSIS_CLK_MUX]);
+ pm_runtime_set_suspended(&pdev->dev);
+ s5pcsis_clk_put(state);
+
+ media_entity_cleanup(&state->sd.entity);
+
+ return 0;
+}
+
+static const struct dev_pm_ops s5pcsis_pm_ops = {
+ SET_RUNTIME_PM_OPS(s5pcsis_runtime_suspend, s5pcsis_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(s5pcsis_suspend, s5pcsis_resume)
+};
+
+static const struct csis_drvdata exynos4_csis_drvdata = {
+ .interrupt_mask = S5PCSIS_INTMSK_EXYNOS4_EN_ALL,
+};
+
+static const struct csis_drvdata exynos5_csis_drvdata = {
+ .interrupt_mask = S5PCSIS_INTMSK_EXYNOS5_EN_ALL,
+};
+
+static const struct of_device_id s5pcsis_of_match[] = {
+ {
+ .compatible = "samsung,s5pv210-csis",
+ .data = &exynos4_csis_drvdata,
+ }, {
+ .compatible = "samsung,exynos4210-csis",
+ .data = &exynos4_csis_drvdata,
+ }, {
+ .compatible = "samsung,exynos5250-csis",
+ .data = &exynos5_csis_drvdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s5pcsis_of_match);
+
+static struct platform_driver s5pcsis_driver = {
+ .probe = s5pcsis_probe,
+ .remove = s5pcsis_remove,
+ .driver = {
+ .of_match_table = s5pcsis_of_match,
+ .name = CSIS_DRIVER_NAME,
+ .pm = &s5pcsis_pm_ops,
+ },
+};
+
+module_platform_driver(s5pcsis_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI-CSI2 receiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.h b/drivers/media/platform/exynos4-is/mipi-csis.h
new file mode 100644
index 000000000..28c11c408
--- /dev/null
+++ b/drivers/media/platform/exynos4-is/mipi-csis.h
@@ -0,0 +1,26 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef S5P_MIPI_CSIS_H_
+#define S5P_MIPI_CSIS_H_
+
+#define CSIS_DRIVER_NAME "s5p-mipi-csis"
+#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME
+#define CSIS_MAX_ENTITIES 2
+#define CSIS0_MAX_LANES 4
+#define CSIS1_MAX_LANES 2
+
+#define CSIS_PAD_SINK 0
+#define CSIS_PAD_SOURCE 1
+#define CSIS_PADS_NUM 2
+
+#define S5PCSIS_DEF_PIX_WIDTH 640
+#define S5PCSIS_DEF_PIX_HEIGHT 480
+
+#endif
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
new file mode 100644
index 000000000..83086eea1
--- /dev/null
+++ b/drivers/media/platform/fsl-viu.c
@@ -0,0 +1,1618 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Freescale VIU video driver
+ *
+ * Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ * Porting to 2.6.35 by DENX Software Engineering,
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf-dma-contig.h>
+
+#define DRV_NAME "fsl_viu"
+#define VIU_VERSION "0.5.1"
+
+/* Allow building this driver with COMPILE_TEST */
+#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
+#define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
+#define in_be32(a) ioread32be((void __iomem *)a)
+#endif
+
+#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+
+#define VIU_VID_MEM_LIMIT	4	/* Video memory limit, in MB */
+
+/* I2C address of the video decoder chip is 0x4A (8-bit), i.e. 0x25 in 7-bit notation */
+#define VIU_VIDEO_DECODER_ADDR 0x25
+
+static int info_level;
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (level <= info_level) \
+ printk(KERN_DEBUG "viu: " fmt , ## arg); \
+ } while (0)
+
+/*
+ * Basic structures
+ */
+struct viu_fmt {
+ u32 fourcc; /* v4l2 format id */
+ u32 pixelformat;
+ int depth;
+};
+
+static struct viu_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ }
+};
+
+struct viu_dev;
+struct viu_buf;
+
+/* buffer for one video frame */
+struct viu_buf {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+ struct viu_fmt *fmt;
+};
+
+struct viu_dmaqueue {
+ struct viu_dev *dev;
+ struct list_head active;
+ struct list_head queued;
+ struct timer_list timeout;
+};
+
+struct viu_status {
+ u32 field_irq;
+ u32 vsync_irq;
+ u32 hsync_irq;
+ u32 vstart_irq;
+ u32 dma_end_irq;
+ u32 error_irq;
+};
+
+struct viu_reg {
+ u32 status_cfg;
+ u32 luminance;
+ u32 chroma_r;
+ u32 chroma_g;
+ u32 chroma_b;
+ u32 field_base_addr;
+ u32 dma_inc;
+ u32 picture_count;
+ u32 req_alarm;
+ u32 alpha;
+} __attribute__ ((packed));
+
+struct viu_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
+ struct mutex lock;
+ spinlock_t slock;
+ int users;
+
+ struct device *dev;
+ /* various device info */
+ struct video_device *vdev;
+ struct viu_dmaqueue vidq;
+ enum v4l2_field capfield;
+ int field;
+ int first;
+ int dma_done;
+
+ /* Hardware register area */
+ struct viu_reg __iomem *vr;
+
+ /* Interrupt vector */
+ int irq;
+ struct viu_status irqs;
+
+ /* video overlay */
+ struct v4l2_framebuffer ovbuf;
+ struct viu_fmt *ovfmt;
+ unsigned int ovenable;
+ enum v4l2_field ovfield;
+
+ /* crop */
+ struct v4l2_rect crop_current;
+
+ /* clock pointer */
+ struct clk *clk;
+
+ /* decoder */
+ struct v4l2_subdev *decoder;
+
+ v4l2_std_id std;
+};
+
+struct viu_fh {
+ /* must remain the first field of this struct */
+ struct v4l2_fh fh;
+ struct viu_dev *dev;
+
+ /* video capture */
+ struct videobuf_queue vb_vidq;
+ spinlock_t vbq_lock; /* spinlock for the videobuf queue */
+
+ /* video overlay */
+ struct v4l2_window win;
+ struct v4l2_clip clips[1];
+
+ /* video capture */
+ struct viu_fmt *fmt;
+ int width, height, sizeimage;
+ enum v4l2_buf_type type;
+};
+
+static struct viu_reg reg_val;
+
+/*
+ * Macro definitions of VIU registers
+ */
+
+/* STATUS_CONFIG register */
+enum status_config {
+ SOFT_RST = 1 << 0,
+
+ ERR_MASK = 0x0f << 4, /* Error code mask */
+ ERR_NO = 0x00, /* No error */
+ ERR_DMA_V = 0x01 << 4, /* DMA in vertical active */
+ ERR_DMA_VB = 0x02 << 4, /* DMA in vertical blanking */
+ ERR_LINE_TOO_LONG = 0x04 << 4, /* Line too long */
+ ERR_TOO_MANG_LINES = 0x05 << 4, /* Too many lines in field */
+ ERR_LINE_TOO_SHORT = 0x06 << 4, /* Line too short */
+ ERR_NOT_ENOUGH_LINE = 0x07 << 4, /* Not enough lines in field */
+ ERR_FIFO_OVERFLOW = 0x08 << 4, /* FIFO overflow */
+ ERR_FIFO_UNDERFLOW = 0x09 << 4, /* FIFO underflow */
+ ERR_1bit_ECC = 0x0a << 4, /* One bit ECC error */
+ ERR_MORE_ECC = 0x0b << 4, /* Two/more bits ECC error */
+
+ INT_FIELD_EN = 0x01 << 8, /* Enable field interrupt */
+ INT_VSYNC_EN = 0x01 << 9, /* Enable vsync interrupt */
+ INT_HSYNC_EN = 0x01 << 10, /* Enable hsync interrupt */
+ INT_VSTART_EN = 0x01 << 11, /* Enable vstart interrupt */
+ INT_DMA_END_EN = 0x01 << 12, /* Enable DMA end interrupt */
+ INT_ERROR_EN = 0x01 << 13, /* Enable error interrupt */
+ INT_ECC_EN = 0x01 << 14, /* Enable ECC interrupt */
+
+ INT_FIELD_STATUS = 0x01 << 16, /* field interrupt status */
+ INT_VSYNC_STATUS = 0x01 << 17, /* vsync interrupt status */
+ INT_HSYNC_STATUS = 0x01 << 18, /* hsync interrupt status */
+ INT_VSTART_STATUS = 0x01 << 19, /* vstart interrupt status */
+ INT_DMA_END_STATUS = 0x01 << 20, /* DMA end interrupt status */
+ INT_ERROR_STATUS = 0x01 << 21, /* error interrupt status */
+
+ DMA_ACT = 0x01 << 27, /* Enable DMA transfer */
+ FIELD_NO = 0x01 << 28, /* Field number */
+ DITHER_ON = 0x01 << 29, /* Dithering is on */
+ ROUND_ON = 0x01 << 30, /* Round is on */
+	MODE_32BIT	= 0x01 << 31,	/* 1: data in 32-bit RGBa888,
+					 * 0: data in 16-bit RGB565
+					 */
+};
+
+#define norm_maxw() 720
+#define norm_maxh() 576
+
+#define INT_ALL_STATUS (INT_FIELD_STATUS | INT_VSYNC_STATUS | \
+ INT_HSYNC_STATUS | INT_VSTART_STATUS | \
+ INT_DMA_END_STATUS | INT_ERROR_STATUS)
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static irqreturn_t viu_intr(int irq, void *dev_id);
+
+static struct viu_fmt *format_by_fourcc(int fourcc)
+{
+ int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].pixelformat == fourcc)
+ return formats + i;
+ }
+
+ dprintk(0, "unknown pixelformat:'%4.4s'\n", (char *)&fourcc);
+ return NULL;
+}
+
+static void viu_start_dma(struct viu_dev *dev)
+{
+ struct viu_reg __iomem *vr = dev->vr;
+
+ dev->field = 0;
+
+ /* Enable DMA operation */
+ out_be32(&vr->status_cfg, SOFT_RST);
+ out_be32(&vr->status_cfg, INT_FIELD_EN);
+}
+
+static void viu_stop_dma(struct viu_dev *dev)
+{
+ struct viu_reg __iomem *vr = dev->vr;
+ int cnt = 100;
+ u32 status_cfg;
+
+ out_be32(&vr->status_cfg, 0);
+
+ /* Clear pending interrupts */
+ status_cfg = in_be32(&vr->status_cfg);
+ if (status_cfg & 0x3f0000)
+ out_be32(&vr->status_cfg, status_cfg & 0x3f0000);
+
+ if (status_cfg & DMA_ACT) {
+ do {
+ status_cfg = in_be32(&vr->status_cfg);
+ if (status_cfg & INT_DMA_END_STATUS)
+ break;
+ } while (cnt--);
+
+ if (cnt < 0) {
+ /* timed out, issue soft reset */
+ out_be32(&vr->status_cfg, SOFT_RST);
+ out_be32(&vr->status_cfg, 0);
+ } else {
+ /* clear DMA_END and other pending irqs */
+ out_be32(&vr->status_cfg, status_cfg & 0x3f0000);
+ }
+ }
+
+ dev->field = 0;
+}
+
+static int restart_video_queue(struct viu_dmaqueue *vidq)
+{
+ struct viu_buf *buf, *prev;
+
+ dprintk(1, "%s vidq=%p\n", __func__, vidq);
+ if (!list_empty(&vidq->active)) {
+ buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
+ buf, buf->vb.i);
+
+ viu_stop_dma(vidq->dev);
+
+ /* cancel all outstanding capture requests */
+ list_for_each_entry_safe(buf, prev, &vidq->active, vb.queue) {
+ list_del(&buf->vb.queue);
+ buf->vb.state = VIDEOBUF_ERROR;
+ wake_up(&buf->vb.done);
+ }
+ mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+ return 0;
+ }
+
+ prev = NULL;
+ for (;;) {
+ if (list_empty(&vidq->queued))
+ return 0;
+ buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
+ if (prev == NULL) {
+ list_move_tail(&buf->vb.queue, &vidq->active);
+
+ dprintk(1, "Restarting video dma\n");
+ viu_stop_dma(vidq->dev);
+ viu_start_dma(vidq->dev);
+
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+ dprintk(2, "[%p/%d] restart_queue - first active\n",
+ buf, buf->vb.i);
+
+ } else if (prev->vb.width == buf->vb.width &&
+ prev->vb.height == buf->vb.height &&
+ prev->fmt == buf->fmt) {
+ list_move_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ dprintk(2, "[%p/%d] restart_queue - move to active\n",
+ buf, buf->vb.i);
+ } else {
+ return 0;
+ }
+ prev = buf;
+ }
+}
+
+static void viu_vid_timeout(struct timer_list *t)
+{
+ struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
+ struct viu_buf *buf;
+ struct viu_dmaqueue *vidq = &dev->vidq;
+
+ while (!list_empty(&vidq->active)) {
+ buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
+ list_del(&buf->vb.queue);
+ buf->vb.state = VIDEOBUF_ERROR;
+ wake_up(&buf->vb.done);
+ dprintk(1, "viu/0: [%p/%d] timeout\n", buf, buf->vb.i);
+ }
+
+ restart_video_queue(vidq);
+}
+
+/*
+ * Videobuf operations
+ */
+static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct viu_fh *fh = vq->priv_data;
+
+ *size = fh->width * fh->height * fh->fmt->depth >> 3;
+ if (*count == 0)
+ *count = 32;
+
+ while (*size * *count > VIU_VID_MEM_LIMIT * 1024 * 1024)
+ (*count)--;
+
+ dprintk(1, "%s, count=%d, size=%d\n", __func__, *count, *size);
+ return 0;
+}
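+
+/*
+ * Worked example for the limit above: at the driver maximum of 720x576 and
+ * 32 bpp one buffer takes 720 * 576 * 32 / 8 = 1658880 bytes, so the 4 MB
+ * VIU_VID_MEM_LIMIT caps the count at two buffers; with 16 bpp RGB565 a
+ * buffer takes 829440 bytes and five buffers fit.
+ */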
+
+static void free_buffer(struct videobuf_queue *vq, struct viu_buf *buf)
+{
+ struct videobuf_buffer *vb = &buf->vb;
+ void *vaddr = NULL;
+
+ BUG_ON(in_interrupt());
+
+ videobuf_waiton(vq, &buf->vb, 0, 0);
+
+ if (vq->int_ops && vq->int_ops->vaddr)
+ vaddr = vq->int_ops->vaddr(vb);
+
+ if (vaddr)
+ videobuf_dma_contig_free(vq, &buf->vb);
+
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
+inline int buffer_activate(struct viu_dev *dev, struct viu_buf *buf)
+{
+ struct viu_reg __iomem *vr = dev->vr;
+ int bpp;
+
+ /* setup the DMA base address */
+ reg_val.field_base_addr = videobuf_to_dma_contig(&buf->vb);
+
+ dprintk(1, "buffer_activate [%p/%d]: dma addr 0x%lx\n",
+ buf, buf->vb.i, (unsigned long)reg_val.field_base_addr);
+
+ /* interlace is on by default, set horizontal DMA increment */
+ reg_val.status_cfg = 0;
+ bpp = buf->fmt->depth >> 3;
+ switch (bpp) {
+ case 2:
+ reg_val.status_cfg &= ~MODE_32BIT;
+ reg_val.dma_inc = buf->vb.width * 2;
+ break;
+ case 4:
+ reg_val.status_cfg |= MODE_32BIT;
+ reg_val.dma_inc = buf->vb.width * 4;
+ break;
+ default:
+ dprintk(0, "doesn't support color depth(%d)\n",
+ bpp * 8);
+ return -EINVAL;
+ }
+
+ /* setup picture_count register */
+ reg_val.picture_count = (buf->vb.height / 2) << 16 |
+ buf->vb.width;
+
+ reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
+
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ dev->capfield = buf->vb.field;
+
+ /* reset dma increment if needed */
+ if (!V4L2_FIELD_HAS_BOTH(buf->vb.field))
+ reg_val.dma_inc = 0;
+
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->picture_count, reg_val.picture_count);
+ out_be32(&vr->field_base_addr, reg_val.field_base_addr);
+ mod_timer(&dev->vidq.timeout, jiffies + BUFFER_TIMEOUT);
+ return 0;
+}
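+
+/*
+ * Illustrative register values for the code above: a 720x576 interlaced
+ * RGB565 buffer gives dma_inc = 720 * 2 = 1440 bytes (one line, which is
+ * also the offset added to the field base address for the second field in
+ * viu_capture_intr()) and picture_count = (576 / 2) << 16 | 720 =
+ * 0x012002d0, i.e. 288 lines of 720 pixels per field.
+ */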
+
+static int buffer_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct viu_fh *fh = vq->priv_data;
+ struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
+ int rc;
+
+ BUG_ON(fh->fmt == NULL);
+
+ if (fh->width < 48 || fh->width > norm_maxw() ||
+ fh->height < 32 || fh->height > norm_maxh())
+ return -EINVAL;
+ buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
+ if (buf->vb.baddr != 0 && buf->vb.bsize < buf->vb.size)
+ return -EINVAL;
+
+ if (buf->fmt != fh->fmt ||
+ buf->vb.width != fh->width ||
+ buf->vb.height != fh->height ||
+ buf->vb.field != field) {
+ buf->fmt = fh->fmt;
+ buf->vb.width = fh->width;
+ buf->vb.height = fh->height;
+ buf->vb.field = field;
+ }
+
+ if (buf->vb.state == VIDEOBUF_NEEDS_INIT) {
+ rc = videobuf_iolock(vq, &buf->vb, NULL);
+ if (rc != 0)
+ goto fail;
+
+ buf->vb.width = fh->width;
+ buf->vb.height = fh->height;
+ buf->vb.field = field;
+ buf->fmt = fh->fmt;
+ }
+
+ buf->vb.state = VIDEOBUF_PREPARED;
+ return 0;
+
+fail:
+ free_buffer(vq, buf);
+ return rc;
+}
+
+static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
+ struct viu_fh *fh = vq->priv_data;
+ struct viu_dev *dev = fh->dev;
+ struct viu_dmaqueue *vidq = &dev->vidq;
+ struct viu_buf *prev;
+
+ if (!list_empty(&vidq->queued)) {
+ dprintk(1, "adding vb queue=%p\n", &buf->vb.queue);
+ dprintk(1, "vidq pointer 0x%p, queued 0x%p\n",
+ vidq, &vidq->queued);
+ dprintk(1, "dev %p, queued: self %p, next %p, head %p\n",
+ dev, &vidq->queued, vidq->queued.next,
+ vidq->queued.prev);
+ list_add_tail(&buf->vb.queue, &vidq->queued);
+ buf->vb.state = VIDEOBUF_QUEUED;
+ dprintk(2, "[%p/%d] buffer_queue - append to queued\n",
+ buf, buf->vb.i);
+ } else if (list_empty(&vidq->active)) {
+ dprintk(1, "adding vb active=%p\n", &buf->vb.queue);
+ list_add_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+ dprintk(2, "[%p/%d] buffer_queue - first active\n",
+ buf, buf->vb.i);
+
+ buffer_activate(dev, buf);
+ } else {
+ dprintk(1, "adding vb queue2=%p\n", &buf->vb.queue);
+ prev = list_entry(vidq->active.prev, struct viu_buf, vb.queue);
+ if (prev->vb.width == buf->vb.width &&
+ prev->vb.height == buf->vb.height &&
+ prev->fmt == buf->fmt) {
+ list_add_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ dprintk(2, "[%p/%d] buffer_queue - append to active\n",
+ buf, buf->vb.i);
+ } else {
+ list_add_tail(&buf->vb.queue, &vidq->queued);
+ buf->vb.state = VIDEOBUF_QUEUED;
+ dprintk(2, "[%p/%d] buffer_queue - first queued\n",
+ buf, buf->vb.i);
+ }
+ }
+}
+
+static void buffer_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
+ struct viu_fh *fh = vq->priv_data;
+ struct viu_dev *dev = (struct viu_dev *)fh->dev;
+
+ viu_stop_dma(dev);
+ free_buffer(vq, buf);
+}
+
+static const struct videobuf_queue_ops viu_video_qops = {
+ .buf_setup = buffer_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .buf_release = buffer_release,
+};
+
+/*
+ * IOCTL vidioc handling
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "viu");
+ strcpy(cap->card, "viu");
+ strcpy(cap->bus_info, "platform:viu");
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_OVERLAY |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ int index = f->index;
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ f->pixelformat = formats[index].fourcc;
+ return 0;
+}
+
+static int vidioc_g_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+
+ f->fmt.pix.width = fh->width;
+ f->fmt.pix.height = fh->height;
+ f->fmt.pix.field = fh->vb_vidq.field;
+ f->fmt.pix.pixelformat = fh->fmt->pixelformat;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = fh->sizeimage;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int vidioc_try_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fmt *fmt;
+ unsigned int maxw, maxh;
+
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (!fmt) {
+		dprintk(1, "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ maxw = norm_maxw();
+ maxh = norm_maxh();
+
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ if (f->fmt.pix.height < 32)
+ f->fmt.pix.height = 32;
+ if (f->fmt.pix.height > maxh)
+ f->fmt.pix.height = maxh;
+ if (f->fmt.pix.width < 48)
+ f->fmt.pix.width = 48;
+ if (f->fmt.pix.width > maxw)
+ f->fmt.pix.width = maxw;
+ f->fmt.pix.width &= ~0x03;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ return 0;
+}
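+
+/*
+ * Worked example: a request for 723x570 in RGB565 is clamped to the
+ * 48..720 x 32..576 range and the width rounded down to a multiple of
+ * four, giving 720x570 with bytesperline = 720 * 16 / 8 = 1440 and
+ * sizeimage = 570 * 1440 = 820800 bytes.
+ */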
+
+static int vidioc_s_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+ int ret;
+
+ ret = vidioc_try_fmt_cap(file, fh, f);
+ if (ret < 0)
+ return ret;
+
+ fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ fh->width = f->fmt.pix.width;
+ fh->height = f->fmt.pix.height;
+ fh->sizeimage = f->fmt.pix.sizeimage;
+ fh->vb_vidq.field = f->fmt.pix.field;
+ fh->type = f->type;
+ return 0;
+}
+
+static int vidioc_g_fmt_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+
+ f->fmt.win = fh->win;
+ return 0;
+}
+
+static int verify_preview(struct viu_dev *dev, struct v4l2_window *win)
+{
+ enum v4l2_field field;
+ int maxw, maxh;
+
+ if (dev->ovbuf.base == NULL)
+ return -EINVAL;
+ if (dev->ovfmt == NULL)
+ return -EINVAL;
+ if (win->w.width < 48 || win->w.height < 32)
+ return -EINVAL;
+
+ field = win->field;
+ maxw = dev->crop_current.width;
+ maxh = dev->crop_current.height;
+
+ if (field == V4L2_FIELD_ANY) {
+ field = (win->w.height > maxh/2)
+ ? V4L2_FIELD_INTERLACED
+ : V4L2_FIELD_TOP;
+ }
+ switch (field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ maxh = maxh / 2;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ win->field = field;
+ if (win->w.width > maxw)
+ win->w.width = maxw;
+ if (win->w.height > maxh)
+ win->w.height = maxh;
+ return 0;
+}
+
+inline void viu_activate_overlay(struct viu_reg __iomem *vr)
+{
+ out_be32(&vr->field_base_addr, reg_val.field_base_addr);
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->picture_count, reg_val.picture_count);
+}
+
+static int viu_setup_preview(struct viu_dev *dev, struct viu_fh *fh)
+{
+ int bpp;
+
+ dprintk(1, "%s %dx%d\n", __func__,
+ fh->win.w.width, fh->win.w.height);
+
+ reg_val.status_cfg = 0;
+
+ /* setup window */
+ reg_val.picture_count = (fh->win.w.height / 2) << 16 |
+ fh->win.w.width;
+
+ /* setup color depth and dma increment */
+ bpp = dev->ovfmt->depth / 8;
+ switch (bpp) {
+ case 2:
+ reg_val.status_cfg &= ~MODE_32BIT;
+ reg_val.dma_inc = fh->win.w.width * 2;
+ break;
+ case 4:
+ reg_val.status_cfg |= MODE_32BIT;
+ reg_val.dma_inc = fh->win.w.width * 4;
+ break;
+ default:
+ dprintk(0, "device doesn't support color depth(%d)\n",
+ bpp * 8);
+ return -EINVAL;
+ }
+
+ dev->ovfield = fh->win.field;
+ if (!V4L2_FIELD_HAS_BOTH(dev->ovfield))
+ reg_val.dma_inc = 0;
+
+ reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
+
+ /* setup the base address of the overlay buffer */
+ reg_val.field_base_addr = (u32)(long)dev->ovbuf.base;
+
+ return 0;
+}
+
+static int vidioc_s_fmt_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = (struct viu_dev *)fh->dev;
+ unsigned long flags;
+ int err;
+
+ err = verify_preview(dev, &f->fmt.win);
+ if (err)
+ return err;
+
+ fh->win = f->fmt.win;
+
+ spin_lock_irqsave(&dev->slock, flags);
+ viu_setup_preview(dev, fh);
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return 0;
+}
+
+static int vidioc_try_fmt_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return 0;
+}
+
+static int vidioc_overlay(struct file *file, void *priv, unsigned int on)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = (struct viu_dev *)fh->dev;
+ unsigned long flags;
+
+ if (on) {
+ spin_lock_irqsave(&dev->slock, flags);
+ viu_activate_overlay(dev->vr);
+ dev->ovenable = 1;
+
+ /* start dma */
+ viu_start_dma(dev);
+ spin_unlock_irqrestore(&dev->slock, flags);
+ } else {
+ viu_stop_dma(dev);
+ dev->ovenable = 0;
+ }
+
+ return 0;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *arg)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = fh->dev;
+ struct v4l2_framebuffer *fb = arg;
+
+ *fb = dev->ovbuf;
+ fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
+ return 0;
+}
+
+static int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *arg)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = fh->dev;
+ const struct v4l2_framebuffer *fb = arg;
+ struct viu_fmt *fmt;
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /* check args */
+ fmt = format_by_fourcc(fb->fmt.pixelformat);
+ if (fmt == NULL)
+ return -EINVAL;
+
+ /* ok, accept it */
+ dev->ovbuf = *fb;
+ dev->ovfmt = fmt;
+ if (dev->ovbuf.fmt.bytesperline == 0) {
+ dev->ovbuf.fmt.bytesperline =
+ dev->ovbuf.fmt.width * fmt->depth / 8;
+ }
+ return 0;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_reqbufs(&fh->vb_vidq, p);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_querybuf(&fh->vb_vidq, p);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_qbuf(&fh->vb_vidq, p);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct viu_fh *fh = priv;
+
+ return videobuf_dqbuf(&fh->vb_vidq, p,
+ file->f_flags & O_NONBLOCK);
+}
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct viu_fh *fh = priv;
+ struct viu_dev *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (fh->type != i)
+ return -EINVAL;
+
+ if (dev->ovenable)
+ dev->ovenable = 0;
+
+ viu_start_dma(fh->dev);
+
+ return videobuf_streamon(&fh->vb_vidq);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct viu_fh *fh = priv;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (fh->type != i)
+ return -EINVAL;
+
+ viu_stop_dma(fh->dev);
+
+ return videobuf_streamoff(&fh->vb_vidq);
+}
+
+#define decoder_call(viu, o, f, args...) \
+ v4l2_subdev_call(viu->decoder, o, f, ##args)
+
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct viu_fh *fh = priv;
+
+ decoder_call(fh->dev, video, querystd, std_id);
+ return 0;
+}
+
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct viu_fh *fh = priv;
+
+ fh->dev->std = id;
+ decoder_call(fh->dev, video, s_std, id);
+ return 0;
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct viu_fh *fh = priv;
+
+ *std_id = fh->dev->std;
+ return 0;
+}
+
+/* only one input in this driver */
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct viu_fh *fh = priv;
+
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = fh->dev->vdev->tvnorms;
+ strcpy(inp->name, "Camera");
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct viu_fh *fh = priv;
+
+ if (i)
+ return -EINVAL;
+
+ decoder_call(fh->dev, video, s_routing, i, 0, 0);
+ return 0;
+}
+
+inline void viu_activate_next_buf(struct viu_dev *dev,
+ struct viu_dmaqueue *viuq)
+{
+ struct viu_dmaqueue *vidq = viuq;
+ struct viu_buf *buf;
+
+ /* launch another DMA operation for an active/queued buffer */
+ if (!list_empty(&vidq->active)) {
+ buf = list_entry(vidq->active.next, struct viu_buf,
+ vb.queue);
+ dprintk(1, "start another queued buffer: 0x%p\n", buf);
+ buffer_activate(dev, buf);
+ } else if (!list_empty(&vidq->queued)) {
+ buf = list_entry(vidq->queued.next, struct viu_buf,
+ vb.queue);
+ list_del(&buf->vb.queue);
+
+ dprintk(1, "start another queued buffer: 0x%p\n", buf);
+ list_add_tail(&buf->vb.queue, &vidq->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ buffer_activate(dev, buf);
+ }
+}
+
+inline void viu_default_settings(struct viu_reg __iomem *vr)
+{
+ out_be32(&vr->luminance, 0x9512A254);
+ out_be32(&vr->chroma_r, 0x03310000);
+ out_be32(&vr->chroma_g, 0x06600F38);
+ out_be32(&vr->chroma_b, 0x00000409);
+ out_be32(&vr->alpha, 0x000000ff);
+ out_be32(&vr->req_alarm, 0x00000090);
+ dprintk(1, "status reg: 0x%08x, field base: 0x%08x\n",
+ in_be32(&vr->status_cfg), in_be32(&vr->field_base_addr));
+}
+
+static void viu_overlay_intr(struct viu_dev *dev, u32 status)
+{
+ struct viu_reg __iomem *vr = dev->vr;
+
+ if (status & INT_DMA_END_STATUS)
+ dev->dma_done = 1;
+
+ if (status & INT_FIELD_STATUS) {
+ if (dev->dma_done) {
+ u32 addr = reg_val.field_base_addr;
+
+ dev->dma_done = 0;
+ if (status & FIELD_NO)
+ addr += reg_val.dma_inc;
+
+ out_be32(&vr->field_base_addr, addr);
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) |
+ (status & INT_ALL_STATUS) |
+ reg_val.status_cfg);
+ } else if (status & INT_VSYNC_STATUS) {
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) |
+ (status & INT_ALL_STATUS) |
+ reg_val.status_cfg);
+ }
+ }
+}
+
+static void viu_capture_intr(struct viu_dev *dev, u32 status)
+{
+ struct viu_dmaqueue *vidq = &dev->vidq;
+ struct viu_reg __iomem *vr = dev->vr;
+ struct viu_buf *buf;
+ int field_num;
+ int need_two;
+ int dma_done = 0;
+
+ field_num = status & FIELD_NO;
+ need_two = V4L2_FIELD_HAS_BOTH(dev->capfield);
+
+ if (status & INT_DMA_END_STATUS) {
+ dma_done = 1;
+ if (((field_num == 0) && (dev->field == 0)) ||
+ (field_num && (dev->field == 1)))
+ dev->field++;
+ }
+
+ if (status & INT_FIELD_STATUS) {
+ dprintk(1, "irq: field %d, done %d\n",
+ !!field_num, dma_done);
+ if (unlikely(dev->first)) {
+ if (field_num == 0) {
+ dev->first = 0;
+ dprintk(1, "activate first buf\n");
+ viu_activate_next_buf(dev, vidq);
+ } else
+ dprintk(1, "wait field 0\n");
+ return;
+ }
+
+ /* setup buffer address for next dma operation */
+ if (!list_empty(&vidq->active)) {
+ u32 addr = reg_val.field_base_addr;
+
+ if (field_num && need_two) {
+ addr += reg_val.dma_inc;
+ dprintk(1, "field 1, 0x%lx, dev field %d\n",
+ (unsigned long)addr, dev->field);
+ }
+ out_be32(&vr->field_base_addr, addr);
+ out_be32(&vr->dma_inc, reg_val.dma_inc);
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) |
+ (status & INT_ALL_STATUS) |
+ reg_val.status_cfg);
+ return;
+ }
+ }
+
+ if (dma_done && field_num && (dev->field == 2)) {
+ dev->field = 0;
+ buf = list_entry(vidq->active.next,
+ struct viu_buf, vb.queue);
+ dprintk(1, "viu/0: [%p/%d] 0x%lx/0x%lx: dma complete\n",
+ buf, buf->vb.i,
+ (unsigned long)videobuf_to_dma_contig(&buf->vb),
+ (unsigned long)in_be32(&vr->field_base_addr));
+
+ if (waitqueue_active(&buf->vb.done)) {
+ list_del(&buf->vb.queue);
+ v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ wake_up(&buf->vb.done);
+ }
+ /* activate next dma buffer */
+ viu_activate_next_buf(dev, vidq);
+ }
+}
+
+static irqreturn_t viu_intr(int irq, void *dev_id)
+{
+ struct viu_dev *dev = (struct viu_dev *)dev_id;
+ struct viu_reg __iomem *vr = dev->vr;
+ u32 status;
+ u32 error;
+
+ status = in_be32(&vr->status_cfg);
+
+ if (status & INT_ERROR_STATUS) {
+ dev->irqs.error_irq++;
+ error = status & ERR_MASK;
+ if (error)
+ dprintk(1, "Err: error(%d), times:%d!\n",
+ error >> 4, dev->irqs.error_irq);
+ /* Clear interrupt error bit and error flags */
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) | INT_ERROR_STATUS);
+ }
+
+ if (status & INT_DMA_END_STATUS) {
+ dev->irqs.dma_end_irq++;
+ dev->dma_done = 1;
+ dprintk(2, "VIU DMA end interrupt times: %d\n",
+ dev->irqs.dma_end_irq);
+ }
+
+ if (status & INT_HSYNC_STATUS)
+ dev->irqs.hsync_irq++;
+
+ if (status & INT_FIELD_STATUS) {
+ dev->irqs.field_irq++;
+ dprintk(2, "VIU field interrupt times: %d\n",
+ dev->irqs.field_irq);
+ }
+
+ if (status & INT_VSTART_STATUS)
+ dev->irqs.vstart_irq++;
+
+ if (status & INT_VSYNC_STATUS) {
+ dev->irqs.vsync_irq++;
+ dprintk(2, "VIU vsync interrupt times: %d\n",
+ dev->irqs.vsync_irq);
+ }
+
+ /* clear all pending irqs */
+ status = in_be32(&vr->status_cfg);
+ out_be32(&vr->status_cfg,
+ (status & 0xffc0ffff) | (status & INT_ALL_STATUS));
+
+ if (dev->ovenable) {
+ viu_overlay_intr(dev, status);
+ return IRQ_HANDLED;
+ }
+
+ /* Capture mode */
+ viu_capture_intr(dev, status);
+ return IRQ_HANDLED;
+}
+
+/*
+ * File operations for the device
+ */
+static int viu_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct viu_dev *dev = video_get_drvdata(vdev);
+ struct viu_fh *fh;
+ struct viu_reg __iomem *vr;
+ int minor = vdev->minor;
+ u32 status_cfg;
+
+ dprintk(1, "viu: open (minor=%d)\n", minor);
+
+ dev->users++;
+ if (dev->users > 1) {
+ dev->users--;
+ return -EBUSY;
+ }
+
+ vr = dev->vr;
+
+ dprintk(1, "open minor=%d type=%s users=%d\n", minor,
+ v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
+
+ if (mutex_lock_interruptible(&dev->lock)) {
+ dev->users--;
+ return -ERESTARTSYS;
+ }
+
+ /* allocate and initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh) {
+ dev->users--;
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ v4l2_fh_init(&fh->fh, vdev);
+ file->private_data = fh;
+ fh->dev = dev;
+
+ fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fh->fmt = format_by_fourcc(V4L2_PIX_FMT_RGB32);
+ fh->width = norm_maxw();
+ fh->height = norm_maxh();
+ dev->crop_current.width = fh->width;
+ dev->crop_current.height = fh->height;
+
+ dprintk(1, "Open: fh=%p, dev=%p, dev->vidq=%p\n", fh, dev, &dev->vidq);
+ dprintk(1, "Open: list_empty queued=%d\n",
+ list_empty(&dev->vidq.queued));
+ dprintk(1, "Open: list_empty active=%d\n",
+ list_empty(&dev->vidq.active));
+
+ viu_default_settings(vr);
+
+ status_cfg = in_be32(&vr->status_cfg);
+ out_be32(&vr->status_cfg,
+ status_cfg & ~(INT_VSYNC_EN | INT_HSYNC_EN |
+ INT_FIELD_EN | INT_VSTART_EN |
+ INT_DMA_END_EN | INT_ERROR_EN | INT_ECC_EN));
+
+ status_cfg = in_be32(&vr->status_cfg);
+ out_be32(&vr->status_cfg, status_cfg | INT_ALL_STATUS);
+
+ spin_lock_init(&fh->vbq_lock);
+ videobuf_queue_dma_contig_init(&fh->vb_vidq, &viu_video_qops,
+ dev->dev, &fh->vbq_lock,
+ fh->type, V4L2_FIELD_INTERLACED,
+ sizeof(struct viu_buf), fh,
+ &fh->dev->lock);
+ v4l2_fh_add(&fh->fh);
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+static ssize_t viu_read(struct file *file, char __user *data, size_t count,
+ loff_t *ppos)
+{
+ struct viu_fh *fh = file->private_data;
+ struct viu_dev *dev = fh->dev;
+ int ret = 0;
+
+ dprintk(2, "%s\n", __func__);
+ if (dev->ovenable)
+ dev->ovenable = 0;
+
+ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+ viu_start_dma(dev);
+ ret = videobuf_read_stream(&fh->vb_vidq, data, count,
+ ppos, 0, file->f_flags & O_NONBLOCK);
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+ return 0;
+}
+
+static __poll_t viu_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct viu_fh *fh = file->private_data;
+ struct videobuf_queue *q = &fh->vb_vidq;
+ struct viu_dev *dev = fh->dev;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
+ return EPOLLERR;
+
+ if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
+ return res;
+
+ mutex_lock(&dev->lock);
+ res |= videobuf_poll_stream(file, q, wait);
+ mutex_unlock(&dev->lock);
+ return res;
+}
+
+static int viu_release(struct file *file)
+{
+ struct viu_fh *fh = file->private_data;
+ struct viu_dev *dev = fh->dev;
+ int minor = video_devdata(file)->minor;
+
+ mutex_lock(&dev->lock);
+ viu_stop_dma(dev);
+ videobuf_stop(&fh->vb_vidq);
+ videobuf_mmap_free(&fh->vb_vidq);
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+ mutex_unlock(&dev->lock);
+
+ kfree(fh);
+
+ dev->users--;
+ dprintk(1, "close (minor=%d, users=%d)\n",
+ minor, dev->users);
+ return 0;
+}
+
+static void viu_reset(struct viu_reg __iomem *reg)
+{
+ out_be32(&reg->status_cfg, 0);
+ out_be32(&reg->luminance, 0x9512a254);
+ out_be32(&reg->chroma_r, 0x03310000);
+ out_be32(&reg->chroma_g, 0x06600f38);
+ out_be32(&reg->chroma_b, 0x00000409);
+ out_be32(&reg->field_base_addr, 0);
+ out_be32(&reg->dma_inc, 0);
+ out_be32(&reg->picture_count, 0x01e002d0);
+ out_be32(&reg->req_alarm, 0x00000090);
+ out_be32(&reg->alpha, 0x000000ff);
+}
+
+static int viu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct viu_fh *fh = file->private_data;
+ struct viu_dev *dev = fh->dev;
+ int ret;
+
+ dprintk(1, "mmap called, vma=%p\n", vma);
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+ ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
+ mutex_unlock(&dev->lock);
+
+ dprintk(1, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ (unsigned long)vma->vm_start,
+ (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
+ ret);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations viu_fops = {
+ .owner = THIS_MODULE,
+ .open = viu_open,
+ .release = viu_release,
+ .read = viu_read,
+ .poll = viu_poll,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .mmap = viu_mmap,
+};
+
+static const struct v4l2_ioctl_ops viu_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_cap,
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_overlay,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_overlay,
+ .vidioc_overlay = vidioc_overlay,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_querystd = vidioc_querystd,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct video_device viu_template = {
+ .name = "FSL viu",
+ .fops = &viu_fops,
+ .minor = -1,
+ .ioctl_ops = &viu_ioctl_ops,
+ .release = video_device_release,
+
+ .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL,
+};
+
+static int viu_of_probe(struct platform_device *op)
+{
+ struct viu_dev *viu_dev;
+ struct video_device *vdev;
+ struct resource r;
+ struct viu_reg __iomem *viu_regs;
+ struct i2c_adapter *ad;
+ int ret, viu_irq;
+ struct clk *clk;
+
+ ret = of_address_to_resource(op->dev.of_node, 0, &r);
+ if (ret) {
+ dev_err(&op->dev, "Can't parse device node resource\n");
+ return -ENODEV;
+ }
+
+ viu_irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ if (!viu_irq) {
+ dev_err(&op->dev, "Error while mapping the irq\n");
+ return -EINVAL;
+ }
+
+ /* request mem region */
+ if (!devm_request_mem_region(&op->dev, r.start,
+ sizeof(struct viu_reg), DRV_NAME)) {
+ dev_err(&op->dev, "Error while requesting mem region\n");
+ ret = -EBUSY;
+ goto err_irq;
+ }
+
+ /* remap registers */
+ viu_regs = devm_ioremap(&op->dev, r.start, sizeof(struct viu_reg));
+ if (!viu_regs) {
+ dev_err(&op->dev, "Can't map register set\n");
+ ret = -ENOMEM;
+ goto err_irq;
+ }
+
+ /* Prepare our private structure */
+ viu_dev = devm_kzalloc(&op->dev, sizeof(struct viu_dev), GFP_ATOMIC);
+ if (!viu_dev) {
+ dev_err(&op->dev, "Can't allocate private structure\n");
+ ret = -ENOMEM;
+ goto err_irq;
+ }
+
+ viu_dev->vr = viu_regs;
+ viu_dev->irq = viu_irq;
+ viu_dev->dev = &op->dev;
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&viu_dev->vidq.active);
+ INIT_LIST_HEAD(&viu_dev->vidq.queued);
+
+ snprintf(viu_dev->v4l2_dev.name,
+ sizeof(viu_dev->v4l2_dev.name), "%s", "VIU");
+ ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
+ goto err_irq;
+ }
+
+ ad = i2c_get_adapter(0);
+ if (!ad) {
+ ret = -EFAULT;
+ dev_err(&op->dev, "couldn't get i2c adapter\n");
+ goto err_v4l2;
+ }
+
+ v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
+ if (viu_dev->hdl.error) {
+ ret = viu_dev->hdl.error;
+ dev_err(&op->dev, "couldn't register control\n");
+ goto err_i2c;
+ }
+	/*
+	 * This control handler will inherit the control(s) from the
+	 * sub-device(s).
+	 */
+ viu_dev->v4l2_dev.ctrl_handler = &viu_dev->hdl;
+ viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
+ "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
+
+ timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
+ viu_dev->std = V4L2_STD_NTSC_M;
+ viu_dev->first = 1;
+
+ /* Allocate memory for video device */
+ vdev = video_device_alloc();
+ if (vdev == NULL) {
+ ret = -ENOMEM;
+ goto err_hdl;
+ }
+
+ *vdev = viu_template;
+
+ vdev->v4l2_dev = &viu_dev->v4l2_dev;
+
+ viu_dev->vdev = vdev;
+
+ /* initialize locks */
+ mutex_init(&viu_dev->lock);
+ viu_dev->vdev->lock = &viu_dev->lock;
+ spin_lock_init(&viu_dev->slock);
+
+ video_set_drvdata(viu_dev->vdev, viu_dev);
+
+ mutex_lock(&viu_dev->lock);
+
+ ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ video_device_release(viu_dev->vdev);
+ goto err_unlock;
+ }
+
+ /* enable VIU clock */
+ clk = devm_clk_get(&op->dev, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(&op->dev, "failed to lookup the clock!\n");
+ ret = PTR_ERR(clk);
+ goto err_vdev;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&op->dev, "failed to enable the clock!\n");
+ goto err_vdev;
+ }
+ viu_dev->clk = clk;
+
+ /* reset VIU module */
+ viu_reset(viu_dev->vr);
+
+ /* install interrupt handler */
+ if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
+ dev_err(&op->dev, "Request VIU IRQ failed.\n");
+ ret = -ENODEV;
+ goto err_clk;
+ }
+
+ mutex_unlock(&viu_dev->lock);
+
+ dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
+ return ret;
+
+err_clk:
+ clk_disable_unprepare(viu_dev->clk);
+err_vdev:
+ video_unregister_device(viu_dev->vdev);
+err_unlock:
+ mutex_unlock(&viu_dev->lock);
+err_hdl:
+ v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
+ i2c_put_adapter(ad);
+err_v4l2:
+ v4l2_device_unregister(&viu_dev->v4l2_dev);
+err_irq:
+ irq_dispose_mapping(viu_irq);
+ return ret;
+}
+
+static int viu_of_remove(struct platform_device *op)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+ struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+ struct v4l2_subdev *sdev = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sdev);
+
+ free_irq(dev->irq, (void *)dev);
+ irq_dispose_mapping(dev->irq);
+
+ clk_disable_unprepare(dev->clk);
+
+ v4l2_ctrl_handler_free(&dev->hdl);
+ video_unregister_device(dev->vdev);
+ i2c_put_adapter(client->adapter);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int viu_suspend(struct platform_device *op, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+ struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+
+ clk_disable(dev->clk);
+ return 0;
+}
+
+static int viu_resume(struct platform_device *op)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+ struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+
+ clk_enable(dev->clk);
+ return 0;
+}
+#endif
+
+/*
+ * Initialization and module stuff
+ */
+static const struct of_device_id mpc512x_viu_of_match[] = {
+ {
+ .compatible = "fsl,mpc5121-viu",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match);
+
+static struct platform_driver viu_of_platform_driver = {
+ .probe = viu_of_probe,
+ .remove = viu_of_remove,
+#ifdef CONFIG_PM
+ .suspend = viu_suspend,
+ .resume = viu_resume,
+#endif
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mpc512x_viu_of_match,
+ },
+};
+
+module_platform_driver(viu_of_platform_driver);
+
+MODULE_DESCRIPTION("Freescale Video-In(VIU)");
+MODULE_AUTHOR("Hongjun Chen");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VIU_VERSION);
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
new file mode 100644
index 000000000..5f84d2aa4
--- /dev/null
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -0,0 +1,1076 @@
+/*
+ * V4L2 deinterlacing support.
+ *
+ * Copyright (c) 2012 Vista Silicon S.L.
+ * Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-deinterlace"
+
+MODULE_DESCRIPTION("mem2mem device which supports deinterlacing using dmaengine");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.1");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-deinterlace"
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+struct deinterlace_fmt {
+ char *name;
+ u32 fourcc;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct deinterlace_fmt formats[] = {
+ {
+ .name = "YUV 4:2:0 Planar",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .name = "YUYV 4:2:2",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Per-queue, driver-specific private data */
+struct deinterlace_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ struct deinterlace_fmt *fmt;
+ enum v4l2_field field;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+enum {
+ YUV420_DMA_Y_ODD,
+ YUV420_DMA_Y_EVEN,
+ YUV420_DMA_U_ODD,
+ YUV420_DMA_U_EVEN,
+ YUV420_DMA_V_ODD,
+ YUV420_DMA_V_EVEN,
+ YUV420_DMA_Y_ODD_DOUBLING,
+ YUV420_DMA_U_ODD_DOUBLING,
+ YUV420_DMA_V_ODD_DOUBLING,
+ YUYV_DMA_ODD,
+ YUYV_DMA_EVEN,
+ YUYV_DMA_EVEN_DOUBLING,
+};
+
+/* Source and destination queue data */
+static struct deinterlace_q_data q_data[2];
+
+static struct deinterlace_q_data *get_q_data(enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+static struct deinterlace_fmt *find_format(struct v4l2_format *f)
+{
+ struct deinterlace_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if ((fmt->types & f->type) &&
+ (fmt->fourcc == f->fmt.pix.pixelformat))
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct deinterlace_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+
+ atomic_t busy;
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ struct dma_chan *dma_chan;
+
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct deinterlace_ctx {
+ struct deinterlace_dev *dev;
+
+ /* Abort requested by m2m */
+ int aborting;
+ enum v4l2_colorspace colorspace;
+ dma_cookie_t cookie;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct dma_interleaved_template *xt;
+};
+
+/*
+ * mem2mem callbacks
+ */
+static int deinterlace_job_ready(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *pcdev = ctx->dev;
+
+ if ((v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0)
+ && (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0)
+ && (atomic_read(&ctx->dev->busy) == 0)) {
+ dprintk(pcdev, "Task ready\n");
+ return 1;
+ }
+
+ dprintk(pcdev, "Task not ready to run\n");
+
+ return 0;
+}
+
+static void deinterlace_job_abort(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *pcdev = ctx->dev;
+
+ ctx->aborting = 1;
+
+ dprintk(pcdev, "Aborting task\n");
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void dma_callback(void *data)
+{
+ struct deinterlace_ctx *curr_ctx = data;
+ struct deinterlace_dev *pcdev = curr_ctx->dev;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ atomic_set(&pcdev->busy, 0);
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->timecode = src_vb->timecode;
+
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+
+ dprintk(pcdev, "dma transfers completed.\n");
+}
+
+static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
+ int do_callback)
+{
+ struct deinterlace_q_data *s_q_data;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct deinterlace_dev *pcdev = ctx->dev;
+ struct dma_chan *chan = pcdev->dma_chan;
+ struct dma_device *dmadev = chan->device;
+ struct dma_async_tx_descriptor *tx;
+ unsigned int s_width, s_height;
+ unsigned int s_size;
+ dma_addr_t p_in, p_out;
+ enum dma_ctrl_flags flags;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ s_width = s_q_data->width;
+ s_height = s_q_data->height;
+ s_size = s_width * s_height;
+
+ p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf,
+ 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&pcdev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return;
+ }
+
+ switch (op) {
+ case YUV420_DMA_Y_ODD:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width;
+ ctx->xt->sgl[0].icg = s_width;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out;
+ break;
+ case YUV420_DMA_Y_EVEN:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width;
+ ctx->xt->sgl[0].icg = s_width;
+ ctx->xt->src_start = p_in + s_size / 2;
+ ctx->xt->dst_start = p_out + s_width;
+ break;
+ case YUV420_DMA_U_ODD:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + s_size;
+ ctx->xt->dst_start = p_out + s_size;
+ break;
+ case YUV420_DMA_U_EVEN:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (9 * s_size) / 8;
+ ctx->xt->dst_start = p_out + s_size + s_width / 2;
+ break;
+ case YUV420_DMA_V_ODD:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (5 * s_size) / 4;
+ ctx->xt->dst_start = p_out + (5 * s_size) / 4;
+ break;
+ case YUV420_DMA_V_EVEN:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (11 * s_size) / 8;
+ ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2;
+ break;
+ case YUV420_DMA_Y_ODD_DOUBLING:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width;
+ ctx->xt->sgl[0].icg = s_width;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out + s_width;
+ break;
+ case YUV420_DMA_U_ODD_DOUBLING:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + s_size;
+ ctx->xt->dst_start = p_out + s_size + s_width / 2;
+ break;
+ case YUV420_DMA_V_ODD_DOUBLING:
+ ctx->xt->numf = s_height / 4;
+ ctx->xt->sgl[0].size = s_width / 2;
+ ctx->xt->sgl[0].icg = s_width / 2;
+ ctx->xt->src_start = p_in + (5 * s_size) / 4;
+ ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2;
+ break;
+ case YUYV_DMA_ODD:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width * 2;
+ ctx->xt->sgl[0].icg = s_width * 2;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out;
+ break;
+ case YUYV_DMA_EVEN:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width * 2;
+ ctx->xt->sgl[0].icg = s_width * 2;
+ ctx->xt->src_start = p_in + s_size;
+ ctx->xt->dst_start = p_out + s_width * 2;
+ break;
+ case YUYV_DMA_EVEN_DOUBLING:
+ default:
+ ctx->xt->numf = s_height / 2;
+ ctx->xt->sgl[0].size = s_width * 2;
+ ctx->xt->sgl[0].icg = s_width * 2;
+ ctx->xt->src_start = p_in;
+ ctx->xt->dst_start = p_out + s_width * 2;
+ break;
+ }
+
+	/* Common parameters for all transfers */
+ ctx->xt->frame_size = 1;
+ ctx->xt->dir = DMA_MEM_TO_MEM;
+ ctx->xt->src_sgl = false;
+ ctx->xt->dst_sgl = true;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
+ if (tx == NULL) {
+ v4l2_warn(&pcdev->v4l2_dev, "DMA interleaved prep error\n");
+ return;
+ }
+
+ if (do_callback) {
+ tx->callback = dma_callback;
+ tx->callback_param = ctx;
+ }
+
+ ctx->cookie = dmaengine_submit(tx);
+ if (dma_submit_error(ctx->cookie)) {
+ v4l2_warn(&pcdev->v4l2_dev,
+ "DMA submit error %d with src=0x%x dst=0x%x len=0x%x\n",
+ ctx->cookie, (unsigned)p_in, (unsigned)p_out,
+ s_size * 3/2);
+ return;
+ }
+
+ dma_async_issue_pending(chan);
+}
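+
+/*
+ * A sketch of how the interleaved template above is meant to behave,
+ * assuming the dmaengine driver implements frame_size = 1, src_sgl = false
+ * and dst_sgl = true as documented in dmaengine.h: numf chunks of
+ * sgl[0].size bytes are copied, the source is read contiguously and the
+ * destination skips sgl[0].icg bytes after each chunk. For YUYV_DMA_ODD on
+ * a 720-pixel-wide frame this copies s_height / 2 lines of 1440 bytes from
+ * the packed top field and lands them on every other output line, which is
+ * the weave step used below.
+ */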
+
+static void deinterlace_device_run(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_q_data *dst_q_data;
+
+ atomic_set(&ctx->dev->busy, 1);
+
+ dprintk(ctx->dev, "%s: DMA try issue.\n", __func__);
+
+ dst_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /*
+ * Four field conversions are possible at the moment:
+ * V4L2_FIELD_SEQ_TB --> V4L2_FIELD_INTERLACED_TB:
+ * two separate fields in the same input buffer are interlaced
+ * in the output buffer using weaving. Top field comes first.
+ * V4L2_FIELD_SEQ_TB --> V4L2_FIELD_NONE:
+ * top field from the input buffer is copied to the output buffer
+ * using line doubling. Bottom field from the input buffer is discarded.
+ * V4L2_FIELD_SEQ_BT --> V4L2_FIELD_INTERLACED_BT:
+ * two separate fields in the same input buffer are interlaced
+ * in the output buffer using weaving. Bottom field comes first.
+ * V4L2_FIELD_SEQ_BT --> V4L2_FIELD_NONE:
+ * bottom field from the input buffer is copied to the output buffer
+ * using line doubling. Top field from the input buffer is discarded.
+ */
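+ /*
+ * Each conversion is decomposed into one interleaved DMA transfer per
+ * plane and field; only the last transfer of a set installs the
+ * completion callback (third argument set to 1).
+ */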
+ switch (dst_q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ switch (dst_q_data->field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ dprintk(ctx->dev, "%s: yuv420 interlaced tb.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_EVEN, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_EVEN, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_EVEN, 1);
+ break;
+ case V4L2_FIELD_NONE:
+ default:
+ dprintk(ctx->dev, "%s: yuv420 interlaced line doubling.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD_DOUBLING, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD_DOUBLING, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0);
+ deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD_DOUBLING, 1);
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ switch (dst_q_data->field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ dprintk(ctx->dev, "%s: yuyv interlaced_tb.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0);
+ deinterlace_issue_dma(ctx, YUYV_DMA_EVEN, 1);
+ break;
+ case V4L2_FIELD_NONE:
+ default:
+ dprintk(ctx->dev, "%s: yuyv interlaced line doubling.\n",
+ __func__);
+ deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0);
+ deinterlace_issue_dma(ctx, YUYV_DMA_EVEN_DOUBLING, 1);
+ break;
+ }
+ break;
+ }
+
+ dprintk(ctx->dev, "%s: DMA issue done.\n", __func__);
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
+ strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct deinterlace_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+ /* Is this the index-th format of the requested type? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct deinterlace_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = q_data->field;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+
+ switch (q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ f->fmt.pix.bytesperline = q_data->width * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ f->fmt.pix.bytesperline = q_data->width * 2;
+ }
+
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct deinterlace_fmt *fmt)
+{
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ }
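+ /*
+ * bytesperline above already accounts for the chroma data, so
+ * height * bytesperline gives the full frame size for both formats.
+ */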
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_fmt *fmt;
+ struct deinterlace_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE))
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT &&
+ f->fmt.pix.field != V4L2_FIELD_NONE)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_fmt *fmt;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT))
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+ if (!f->fmt.pix.colorspace)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ if (f->fmt.pix.field != V4L2_FIELD_SEQ_TB &&
+ f->fmt.pix.field != V4L2_FIELD_SEQ_BT)
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
+{
+ struct deinterlace_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = find_format(f);
+ if (!q_data->fmt) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Couldn't set format type %d, wxh: %dx%d. fmt: %d, field: %d\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.pixelformat, f->fmt.pix.field);
+ return -EINVAL;
+ }
+
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->field = f->fmt.pix.field;
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+ q_data->sizeimage = (q_data->width * q_data->height * 3) / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ q_data->sizeimage = q_data->width * q_data->height * 2;
+ }
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d, field: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
+ q_data->field);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(priv, f);
+ if (!ret)
+ ctx->colorspace = f->fmt.pix.colorspace;
+
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct deinterlace_q_data *s_q_data, *d_q_data;
+ struct deinterlace_ctx *ctx = priv;
+
+ s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /* Check that src and dst queues have the same pix format */
+ if (s_q_data->fmt->fourcc != d_q_data->fmt->fourcc) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "src and dst formats don't match.\n");
+ return -EINVAL;
+ }
+
+ /* Check that input and output deinterlacing types are compatible */
+ switch (s_q_data->field) {
+ case V4L2_FIELD_SEQ_BT:
+ if (d_q_data->field != V4L2_FIELD_NONE &&
+ d_q_data->field != V4L2_FIELD_INTERLACED_BT) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "src and dst field conversion [(%d)->(%d)] not supported.\n",
+ s_q_data->field, d_q_data->field);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ if (d_q_data->field != V4L2_FIELD_NONE &&
+ d_q_data->field != V4L2_FIELD_INTERLACED_TB) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "src and dst field conversion [(%d)->(%d)] not supported.\n",
+ s_q_data->field, d_q_data->field);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+
+/*
+ * Queue operations
+ */
+struct vb2_dc_conf {
+ struct device *dev;
+};
+
+static int deinterlace_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct deinterlace_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(vq->type);
+
+ switch (q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ size = q_data->width * q_data->height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ size = q_data->width * q_data->height * 2;
+ }
+
+ *nplanes = 1;
+ *nbuffers = count;
+ sizes[0] = size;
+
+ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
+
+static int deinterlace_buf_prepare(struct vb2_buffer *vb)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct deinterlace_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+ return 0;
+}
+
+static void deinterlace_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
+}
+
+static const struct vb2_ops deinterlace_qops = {
+ .queue_setup = deinterlace_queue_setup,
+ .buf_prepare = deinterlace_buf_prepare,
+ .buf_queue = deinterlace_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &deinterlace_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->lock = &ctx->dev->dev_mutex;
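+ /* Default source format: 640x480 YUV420, sequential top-bottom fields */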
+ q_data[V4L2_M2M_SRC].fmt = &formats[0];
+ q_data[V4L2_M2M_SRC].width = 640;
+ q_data[V4L2_M2M_SRC].height = 480;
+ q_data[V4L2_M2M_SRC].sizeimage = (640 * 480 * 3) / 2;
+ q_data[V4L2_M2M_SRC].field = V4L2_FIELD_SEQ_TB;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &deinterlace_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ q_data[V4L2_M2M_DST].fmt = &formats[0];
+ q_data[V4L2_M2M_DST].width = 640;
+ q_data[V4L2_M2M_DST].height = 480;
+ q_data[V4L2_M2M_DST].sizeimage = (640 * 480 * 3) / 2;
+ q_data[V4L2_M2M_DST].field = V4L2_FIELD_INTERLACED_TB;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int deinterlace_open(struct file *file)
+{
+ struct deinterlace_dev *pcdev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = NULL;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->dev = pcdev;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ int ret = PTR_ERR(ctx->m2m_ctx);
+
+ kfree(ctx);
+ return ret;
+ }
+
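+ /*
+ * Allocate the interleaved template together with a single embedded
+ * data_chunk, i.e. room for the one sgl[0] entry that
+ * deinterlace_issue_dma() fills in.
+ */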
+ ctx->xt = kzalloc(sizeof(struct dma_interleaved_template) +
+ sizeof(struct data_chunk), GFP_KERNEL);
+ if (!ctx->xt) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+ dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+ return 0;
+}
+
+static int deinterlace_release(struct file *file)
+{
+ struct deinterlace_dev *pcdev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = file->private_data;
+
+ dprintk(pcdev, "Releasing instance %p\n", ctx);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ kfree(ctx->xt);
+ kfree(ctx);
+
+ return 0;
+}
+
+static __poll_t deinterlace_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct deinterlace_ctx *ctx = file->private_data;
+ __poll_t ret;
+
+ mutex_lock(&ctx->dev->dev_mutex);
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&ctx->dev->dev_mutex);
+
+ return ret;
+}
+
+static int deinterlace_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct deinterlace_ctx *ctx = file->private_data;
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations deinterlace_fops = {
+ .owner = THIS_MODULE,
+ .open = deinterlace_open,
+ .release = deinterlace_release,
+ .poll = deinterlace_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = deinterlace_mmap,
+};
+
+static const struct video_device deinterlace_videodev = {
+ .name = MEM2MEM_NAME,
+ .fops = &deinterlace_fops,
+ .ioctl_ops = &deinterlace_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = deinterlace_device_run,
+ .job_ready = deinterlace_job_ready,
+ .job_abort = deinterlace_job_abort,
+};
+
+static int deinterlace_probe(struct platform_device *pdev)
+{
+ struct deinterlace_dev *pcdev;
+ struct video_device *vfd;
+ dma_cap_mask_t mask;
+ int ret = 0;
+
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev)
+ return -ENOMEM;
+
+ spin_lock_init(&pcdev->irqlock);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_INTERLEAVE, mask);
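+ /* Take any channel that advertises interleaved transfers (no filter) */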
+ pcdev->dma_chan = dma_request_channel(mask, NULL, pcdev);
+ if (!pcdev->dma_chan)
+ return -ENODEV;
+
+ if (!dma_has_cap(DMA_INTERLEAVE, pcdev->dma_chan->device->cap_mask)) {
+ dev_err(&pdev->dev, "DMA does not support INTERLEAVE\n");
+ ret = -ENODEV;
+ goto rel_dma;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+ if (ret)
+ goto rel_dma;
+
+ atomic_set(&pcdev->busy, 0);
+ mutex_init(&pcdev->dev_mutex);
+
+ vfd = &pcdev->vfd;
+ *vfd = deinterlace_videodev;
+ vfd->lock = &pcdev->dev_mutex;
+ vfd->v4l2_dev = &pcdev->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
+ goto unreg_dev;
+ }
+
+ video_set_drvdata(vfd, pcdev);
+ v4l2_info(&pcdev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
+ " Device registered as /dev/video%d\n", vfd->num);
+
+ platform_set_drvdata(pdev, pcdev);
+
+ pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(pcdev->m2m_dev)) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(pcdev->m2m_dev);
+ goto err_m2m;
+ }
+
+ return 0;
+
+err_m2m:
+ video_unregister_device(&pcdev->vfd);
+unreg_dev:
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+rel_dma:
+ dma_release_channel(pcdev->dma_chan);
+
+ return ret;
+}
+
+static int deinterlace_remove(struct platform_device *pdev)
+{
+ struct deinterlace_dev *pcdev = platform_get_drvdata(pdev);
+
+ v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+ v4l2_m2m_release(pcdev->m2m_dev);
+ video_unregister_device(&pcdev->vfd);
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+ dma_release_channel(pcdev->dma_chan);
+
+ return 0;
+}
+
+static struct platform_driver deinterlace_pdrv = {
+ .probe = deinterlace_probe,
+ .remove = deinterlace_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ },
+};
+module_platform_driver(deinterlace_pdrv);
+
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
new file mode 100644
index 000000000..cf12e0772
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -0,0 +1,27 @@
+config VIDEO_CAFE_CCIC
+ tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
+ depends on PCI && I2C && VIDEO_V4L2
+ select VIDEO_OV7670
+ select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
+ ---help---
+ This is a video4linux2 driver for the Marvell 88ALP01 integrated
+ CMOS camera controller. This is the controller found on first-
+ generation OLPC systems.
+
+config VIDEO_MMP_CAMERA
+ tristate "Marvell Armada 610 integrated camera controller support"
+ depends on I2C && VIDEO_V4L2
+ depends on ARCH_MMP || COMPILE_TEST
+ select VIDEO_OV7670
+ select I2C_GPIO
+ select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
+ ---help---
+ This is a Video4Linux2 driver for the integrated camera
+ controller found on Marvell Armada 610 application
+ processors (and likely beyond). This is the controller found
+ in OLPC XO 1.75 systems.
+
diff --git a/drivers/media/platform/marvell-ccic/Makefile b/drivers/media/platform/marvell-ccic/Makefile
new file mode 100644
index 000000000..b3a4d0cdc
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o mcam-core.o
+cafe_ccic-y := cafe-driver.o
+
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += mmp_camera.o mcam-core.o
+mmp_camera-y := mmp-driver.o
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
new file mode 100644
index 000000000..57d2c483a
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -0,0 +1,661 @@
+/*
+ * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
+ * multifunction chip. Currently works with the Omnivision OV7670
+ * sensor.
+ *
+ * The data sheet for this device can be found at:
+ * http://www.marvell.com/products/pc_connectivity/88alp01/
+ *
+ * Copyright 2006-11 One Laptop Per Child Association, Inc.
+ * Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
+ *
+ * Written by Jonathan Corbet, corbet@lwn.net.
+ *
+ * v4l2_device/v4l2_subdev conversion by:
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "mcam-core.h"
+
+#define CAFE_VERSION 0x000002
+
+
+/*
+ * Parameters.
+ */
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Video");
+
+
+
+
+struct cafe_camera {
+ int registered; /* Fully initialized? */
+ struct mcam_camera mcam;
+ struct pci_dev *pdev;
+ wait_queue_head_t smbus_wait; /* Waiting on i2c events */
+};
+
+/*
+ * Most of the camera controller registers are defined in mcam-core.h,
+ * but the Cafe platform has some additional registers of its own;
+ * they are described here.
+ */
+
+/*
+ * "General purpose register" has a couple of GPIOs used for sensor
+ * power and reset on OLPC XO 1.0 systems.
+ */
+#define REG_GPR 0xb4
+#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
+#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
+#define GPR_C1 0x00000002 /* Control 1 value */
+/*
+ * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
+ * it is active low.
+ */
+#define GPR_C0 0x00000001 /* Control 0 value */
+
+/*
+ * These registers control the SMBUS module for communicating
+ * with the sensor.
+ */
+#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
+#define TWSIC0_EN 0x00000001 /* TWSI enable */
+#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
+#define TWSIC0_SID 0x000003fc /* Slave ID */
+/*
+ * Subtle trickery: the slave ID field starts with bit 2. But the
+ * Linux i2c stack wants to treat the bottommost bit as a separate
+ * read/write bit, which is why slave ID's are usually presented
+ * >>1. For consistency with that behavior, we shift over three
+ * bits instead of two.
+ */
+#define TWSIC0_SID_SHIFT 3
+#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
+#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
+#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
+
+#define REG_TWSIC1 0xbc /* TWSI control 1 */
+#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
+#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
+#define TWSIC1_ADDR_SHIFT 16
+#define TWSIC1_READ 0x01000000 /* Set for read op */
+#define TWSIC1_WSTAT 0x02000000 /* Write status */
+#define TWSIC1_RVALID 0x04000000 /* Read data valid */
+#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
+
+/*
+ * Here are the weird global control registers
+ */
+#define REG_GL_CSR 0x3004 /* Control/status register */
+#define GCSR_SRS 0x00000001 /* SW Reset set */
+#define GCSR_SRC 0x00000002 /* SW Reset clear */
+#define GCSR_MRS 0x00000004 /* Master reset set */
+#define GCSR_MRC 0x00000008 /* Master reset clear */
+#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
+#define REG_GL_IMASK 0x300c /* Interrupt mask register */
+#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
+
+#define REG_GL_FCR 0x3038 /* GPIO functional control register */
+#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
+#define REG_GL_GPIOR 0x315c /* GPIO register */
+#define GGPIO_OUT 0x80000 /* GPIO output */
+#define GGPIO_VAL 0x00008 /* Output pin value */
+
+#define REG_LEN (REG_GL_IMASK + 4)
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err(&(cam)->pdev->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn(&(cam)->pdev->dev, fmt, ##arg);
+
+/* -------------------------------------------------------------------- */
+/*
+ * The I2C/SMBUS interface to the camera itself starts here. The
+ * controller handles SMBUS itself, presenting a relatively simple register
+ * interface; all we have to do is to tell it where to route the data.
+ */
+#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
+
+static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
+{
+ struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
+ return container_of(m, struct cafe_camera, mcam);
+}
+
+
+static int cafe_smbus_write_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
+}
+
+static int cafe_smbus_write_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+ * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ /* Unfortunately, reading TWSIC1 too soon after sending a command
+ * causes the device to die.
+ * Use a busy-wait because we often send a large quantity of small
+ * commands at once; using msleep() would cause a lot of context
+ * switches which take longer than 2ms, resulting in noticeable
+ * boot-time and capture-start delays.
+ */
+ mdelay(2);
+
+ /*
+ * Another sad fact is that sometimes, commands silently complete but
+ * cafe_smbus_write_done() never becomes aware of this.
+ * This happens at random and appears to possibly occur with any
+ * command.
+ * We don't understand why this is. We work around this issue
+ * with the timeout in the wait below, assuming that all commands
+ * complete within the timeout.
+ */
+ wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
+ CAFE_SMBUS_TIMEOUT);
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_WSTAT) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
+ command, value);
+ return -EIO;
+ }
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
+ command, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+
+static int cafe_smbus_read_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
+}
+
+
+
+static int cafe_smbus_read_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 *value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+ * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ wait_event_timeout(cam->smbus_wait,
+ cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
+ return -EIO;
+ }
+ if (!(rval & TWSIC1_RVALID)) {
+ cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
+ command);
+ return -EIO;
+ }
+ *value = rval & 0xff;
+ return 0;
+}
+
+/*
+ * Perform a transfer over SMBUS. This thing is called under
+ * the i2c bus lock, so we shouldn't race with ourselves...
+ */
+static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char rw, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct cafe_camera *cam = i2c_get_adapdata(adapter);
+ int ret = -EINVAL;
+
+ /*
+ * This interface would appear to only do byte data ops. OK
+ * it can do word too, but the cam chip has no use for that.
+ */
+ if (size != I2C_SMBUS_BYTE_DATA) {
+ cam_err(cam, "funky xfer size %d\n", size);
+ return -EINVAL;
+ }
+
+ if (rw == I2C_SMBUS_WRITE)
+ ret = cafe_smbus_write_data(cam, addr, command, data->byte);
+ else if (rw == I2C_SMBUS_READ)
+ ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
+ return ret;
+}
+
+
+static void cafe_smbus_enable_irq(struct cafe_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->mcam.dev_lock, flags);
+ mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
+ spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
+}
+
+static u32 cafe_smbus_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+}
+
+static const struct i2c_algorithm cafe_smbus_algo = {
+ .smbus_xfer = cafe_smbus_xfer,
+ .functionality = cafe_smbus_func
+};
+
+static int cafe_smbus_setup(struct cafe_camera *cam)
+{
+ struct i2c_adapter *adap;
+ int ret;
+
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (adap == NULL)
+ return -ENOMEM;
+ adap->owner = THIS_MODULE;
+ adap->algo = &cafe_smbus_algo;
+ strcpy(adap->name, "cafe_ccic");
+ adap->dev.parent = &cam->pdev->dev;
+ i2c_set_adapdata(adap, cam);
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ printk(KERN_ERR "Unable to register cafe i2c adapter\n");
+ kfree(adap);
+ return ret;
+ }
+
+ cam->mcam.i2c_adapter = adap;
+ cafe_smbus_enable_irq(cam);
+ return 0;
+}
+
+static void cafe_smbus_shutdown(struct cafe_camera *cam)
+{
+ i2c_del_adapter(cam->mcam.i2c_adapter);
+ kfree(cam->mcam.i2c_adapter);
+}
+
+
+/*
+ * Controller-level stuff
+ */
+
+static void cafe_ctlr_init(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ /*
+ * Added magic to bring up the hardware on the B-Test board
+ */
+ mcam_reg_write(mcam, 0x3038, 0x8);
+ mcam_reg_write(mcam, 0x315c, 0x80008);
+ /*
+ * Go through the dance needed to wake the device up.
+ * Note that these registers are global and shared
+ * with the NAND and SD devices. Interaction between the
+ * three still needs to be examined.
+ */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
+ /*
+ * Here we must wait a bit for the controller to come around.
+ */
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ msleep(5);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
+ mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
+ /*
+ * Mask all interrupts.
+ */
+ mcam_reg_write(mcam, REG_IRQMASK, 0);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+}
+
+
+static int cafe_ctlr_power_up(struct mcam_camera *mcam)
+{
+ /*
+ * Part one of the sensor dance: turn the global
+ * GPIO signal on.
+ */
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
+ /*
+ * Put the sensor into operational mode (assumes OLPC-style
+ * wiring). Control 0 is reset - set to 1 to operate.
+ * Control 1 is power down, set to 0 to operate.
+ */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
+
+ return 0;
+}
+
+static void cafe_ctlr_power_down(struct mcam_camera *mcam)
+{
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
+}
+
+
+
+/*
+ * The platform interrupt handler.
+ */
+static irqreturn_t cafe_irq(int irq, void *data)
+{
+ struct cafe_camera *cam = data;
+ struct mcam_camera *mcam = &cam->mcam;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = cam->registered && mccic_irq(mcam, irqs);
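+ /*
+ * A TWSI (SMBUS) interrupt means a transfer finished: ack it and wake
+ * whoever is waiting in the SMBUS read/write paths.
+ */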
+ if (irqs & TWSIIRQS) {
+ mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
+ wake_up(&cam->smbus_wait);
+ handled = 1;
+ }
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+/* -------------------------------------------------------------------------- */
+/*
+ * PCI interface stuff.
+ */
+
+static int cafe_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ struct cafe_camera *cam;
+ struct mcam_camera *mcam;
+
+ /*
+ * Start putting together one of our big camera structures.
+ */
+ ret = -ENOMEM;
+ cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
+ if (cam == NULL)
+ goto out;
+ cam->pdev = pdev;
+ mcam = &cam->mcam;
+ mcam->chip_id = MCAM_CAFE;
+ spin_lock_init(&mcam->dev_lock);
+ init_waitqueue_head(&cam->smbus_wait);
+ mcam->plat_power_up = cafe_ctlr_power_up;
+ mcam->plat_power_down = cafe_ctlr_power_down;
+ mcam->dev = &pdev->dev;
+ snprintf(mcam->bus_info, sizeof(mcam->bus_info), "PCI:%s", pci_name(pdev));
+ /*
+ * Set the clock speed for the XO 1; I don't believe this
+ * driver has ever run anywhere else.
+ */
+ mcam->clock_speed = 45;
+ mcam->use_smbus = 1;
+ /*
+ * Vmalloc mode for buffers is traditional with this driver.
+ * We *might* be able to run DMA_contig, especially on a system
+ * with CMA in it.
+ */
+ mcam->buffer_mode = B_vmalloc;
+ /*
+ * Get set up on the PCI bus.
+ */
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_free;
+ pci_set_master(pdev);
+
+ ret = -EIO;
+ mcam->regs = pci_iomap(pdev, 0, 0);
+ if (!mcam->regs) {
+ printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
+ goto out_disable;
+ }
+ mcam->regs_size = pci_resource_len(pdev, 0);
+ ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
+ if (ret)
+ goto out_iounmap;
+
+ /*
+ * Initialize the controller and leave it powered up. It will
+ * stay that way until the sensor driver shows up.
+ */
+ cafe_ctlr_init(mcam);
+ cafe_ctlr_power_up(mcam);
+ /*
+ * Set up I2C/SMBUS communications. We have to drop the mutex here
+ * because the sensor could attach in this call chain, leading to
+ * unsightly deadlocks.
+ */
+ ret = cafe_smbus_setup(cam);
+ if (ret)
+ goto out_pdown;
+
+ ret = mccic_register(mcam);
+ if (ret == 0) {
+ cam->registered = 1;
+ return 0;
+ }
+
+ cafe_smbus_shutdown(cam);
+out_pdown:
+ cafe_ctlr_power_down(mcam);
+ free_irq(pdev->irq, cam);
+out_iounmap:
+ pci_iounmap(pdev, mcam->regs);
+out_disable:
+ pci_disable_device(pdev);
+out_free:
+ kfree(cam);
+out:
+ return ret;
+}
+
+
+/*
+ * Shut down an initialized device
+ */
+static void cafe_shutdown(struct cafe_camera *cam)
+{
+ mccic_shutdown(&cam->mcam);
+ cafe_smbus_shutdown(cam);
+ free_irq(cam->pdev->irq, cam);
+ pci_iounmap(cam->pdev, cam->mcam.regs);
+}
+
+
+static void cafe_pci_remove(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+
+ if (cam == NULL) {
+ printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
+ return;
+ }
+ cafe_shutdown(cam);
+ kfree(cam);
+}
+
+
+#ifdef CONFIG_PM
+/*
+ * Basic power management.
+ */
+static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+ mccic_suspend(&cam->mcam);
+ pci_disable_device(pdev);
+ return 0;
+}
+
+
+static int cafe_pci_resume(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret = 0;
+
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+
+ if (ret) {
+ cam_warn(cam, "Unable to re-enable device on resume!\n");
+ return ret;
+ }
+ cafe_ctlr_init(&cam->mcam);
+ return mccic_resume(&cam->mcam);
+}
+
+#endif /* CONFIG_PM */
+
+static const struct pci_device_id cafe_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_ids);
+
+static struct pci_driver cafe_pci_driver = {
+ .name = "cafe1000-ccic",
+ .id_table = cafe_ids,
+ .probe = cafe_pci_probe,
+ .remove = cafe_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = cafe_pci_suspend,
+ .resume = cafe_pci_resume,
+#endif
+};
+
+
+
+
+static int __init cafe_init(void)
+{
+ int ret;
+
+ printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
+ CAFE_VERSION);
+ ret = pci_register_driver(&cafe_pci_driver);
+ if (ret) {
+ printk(KERN_ERR "Unable to register cafe_ccic driver\n");
+ goto out;
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+static void __exit cafe_exit(void)
+{
+ pci_unregister_driver(&cafe_pci_driver);
+}
+
+module_init(cafe_init);
+module_exit(cafe_exit);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
new file mode 100644
index 000000000..eeee15ff0
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -0,0 +1,1909 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The Marvell camera core. This device appears in a number of settings,
+ * so it needs platform-specific support outside of the core.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/i2c/ov7670.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "mcam-core.h"
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Internal DMA buffer management. Since the controller cannot do S/G I/O,
+ * we must have physically contiguous buffers to bring frames into.
+ * These parameters control how many buffers we use, whether we
+ * allocate them at load time (better chance of success, but nails down
+ * memory) or when somebody tries to use the camera (riskier), and,
+ * for load-time allocation, how big they should be.
+ *
+ * The controller can cycle through three buffers. We could use
+ * more by flipping pointers around, but it probably makes little
+ * sense.
+ */
+
+static bool alloc_bufs_at_read;
+module_param(alloc_bufs_at_read, bool, 0444);
+MODULE_PARM_DESC(alloc_bufs_at_read,
+ "Non-zero value causes DMA buffers to be allocated when the video capture device is read, rather than at module load time. This saves memory, but decreases the chances of successfully getting those buffers. This parameter is only used in the vmalloc buffer mode");
+
+static int n_dma_bufs = 3;
+module_param(n_dma_bufs, uint, 0644);
+MODULE_PARM_DESC(n_dma_bufs,
+ "The number of DMA buffers to allocate. Can be either two (saves memory, makes timing tighter) or three.");
+
+static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
+module_param(dma_buf_size, uint, 0444);
+MODULE_PARM_DESC(dma_buf_size,
+ "The size of the allocated DMA buffers. If actual operating parameters require larger buffers, an attempt to reallocate will be made.");
+#else /* MCAM_MODE_VMALLOC */
+static const bool alloc_bufs_at_read;
+static const int n_dma_bufs = 3; /* Used by S/G_PARM */
+#endif /* MCAM_MODE_VMALLOC */
+
+static bool flip;
+module_param(flip, bool, 0444);
+MODULE_PARM_DESC(flip,
+ "If set, the sensor will be instructed to flip the image vertically.");
+
+static int buffer_mode = -1;
+module_param(buffer_mode, int, 0444);
+MODULE_PARM_DESC(buffer_mode,
+ "Set the buffer mode to be used; default is to go with what the platform driver asks for. Set to 0 for vmalloc, 1 for DMA contiguous.");
+
+/*
+ * Status flags. Always manipulated with bit operations.
+ */
+#define CF_BUF0_VALID 0 /* Buffers valid - first three */
+#define CF_BUF1_VALID 1
+#define CF_BUF2_VALID 2
+#define CF_DMA_ACTIVE 3 /* A frame is incoming */
+#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
+#define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
+#define CF_SG_RESTART 6 /* SG restart needed */
+#define CF_FRAME_SOF0 7 /* Frame 0 started */
+#define CF_FRAME_SOF1 8
+#define CF_FRAME_SOF2 9
+
+#define sensor_call(cam, o, f, args...) \
+ v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+static struct mcam_format_struct {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ bool planar;
+ u32 mbus_code;
+} mcam_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "YVYU 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "YUV 4:2:0 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 1,
+ .planar = true,
+ },
+ {
+ .desc = "YVU 4:2:0 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 1,
+ .planar = true,
+ },
+ {
+ .desc = "XRGB 444",
+ .pixelformat = V4L2_PIX_FMT_XRGB444,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "Raw RGB Bayer",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .bpp = 1,
+ .planar = false,
+ },
+};
+#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
+
+static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_MCAM_FMTS; i++)
+ if (mcam_formats[i].pixelformat == pixelformat)
+ return mcam_formats + i;
+ /* Not found? Then return the first format. */
+ return mcam_formats;
+}
+
+/*
+ * The default format we use until somebody says otherwise.
+ */
+static const struct v4l2_pix_format mcam_def_pix_format = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = VGA_WIDTH*2,
+ .sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+};
+
+static const u32 mcam_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+
+
+/*
+ * The two-word DMA descriptor format used by the Armada 610 and the like. There
+ * is a three-word format as well (set C1_DESC_3WORD) where the third
+ * word is a pointer to the next descriptor, but we don't use it. Two-word
+ * descriptors have to be contiguous in memory.
+ */
+struct mcam_dma_desc {
+ u32 dma_addr;
+ u32 segment_len;
+};
+
+/*
+ * Our buffer type for working with videobuf2. Note that the vb2
+ * developers have decreed that struct vb2_v4l2_buffer must be at the
+ * beginning of this structure.
+ */
+struct mcam_vb_buffer {
+ struct vb2_v4l2_buffer vb_buf;
+ struct list_head queue;
+ struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
+ dma_addr_t dma_desc_pa; /* Descriptor physical address */
+};
+
+static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct mcam_vb_buffer, vb_buf);
+}
+
+/*
+ * Hand a completed buffer back to user space.
+ */
+static void mcam_buffer_done(struct mcam_camera *cam, int frame,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ vbuf->vb2_buf.planes[0].bytesused = cam->pix_format.sizeimage;
+ vbuf->sequence = cam->buf_seq[frame];
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, cam->pix_format.sizeimage);
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err((cam)->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn((cam)->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+ dev_dbg((cam)->dev, fmt, ##arg);
+
+
+/*
+ * Flag manipulation helpers
+ */
+static void mcam_reset_buffers(struct mcam_camera *cam)
+{
+ int i;
+
+ cam->next_buf = -1;
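+ /* Drop the buffer-valid and start-of-frame flags for every buffer */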
+ for (i = 0; i < cam->nbufs; i++) {
+ clear_bit(i, &cam->flags);
+ clear_bit(CF_FRAME_SOF0 + i, &cam->flags);
+ }
+}
+
+static inline int mcam_needs_config(struct mcam_camera *cam)
+{
+ return test_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
+{
+ if (needed)
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ else
+ clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+/* ------------------------------------------------------------------- */
+/*
+ * Make the controller start grabbing images. Everything must
+ * be set up before doing this.
+ */
+static void mcam_ctlr_start(struct mcam_camera *cam)
+{
+ /* set_bit performs a read, so no other barrier should be
+ needed here */
+ mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+static void mcam_ctlr_stop(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+static void mcam_enable_mipi(struct mcam_camera *mcam)
+{
+ /* Using MIPI mode and enable MIPI */
+ cam_dbg(mcam, "camera: DPHY3=0x%x, DPHY5=0x%x, DPHY6=0x%x\n",
+ mcam->dphy[0], mcam->dphy[1], mcam->dphy[2]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY3, mcam->dphy[0]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY5, mcam->dphy[1]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY6, mcam->dphy[2]);
+
+ if (!mcam->mipi_enabled) {
+ if (mcam->lane > 4 || mcam->lane <= 0) {
+ cam_warn(mcam, "lane number error\n");
+ mcam->lane = 1; /* set the default value */
+ }
+ /*
+ * 0x41 activates 1 lane
+ * 0x43 activates 2 lanes
+ * 0x45 activates 3 lanes (never happens)
+ * 0x47 activates 4 lanes
+ */
+ mcam_reg_write(mcam, REG_CSI2_CTRL0,
+ CSI2_C0_MIPI_EN | CSI2_C0_ACT_LANE(mcam->lane));
+ mcam_reg_write(mcam, REG_CLKCTRL,
+ (mcam->mclk_src << 29) | mcam->mclk_div);
+
+ mcam->mipi_enabled = true;
+ }
+}
+
+static void mcam_disable_mipi(struct mcam_camera *mcam)
+{
+ /* Using Parallel mode or disable MIPI */
+ mcam_reg_write(mcam, REG_CSI2_CTRL0, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY3, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY5, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY6, 0x0);
+ mcam->mipi_enabled = false;
+}
+
+static bool mcam_fmt_is_planar(__u32 pfmt)
+{
+ struct mcam_format_struct *f;
+
+ f = mcam_find_format(pfmt);
+ return f->planar;
+}
+
+static void mcam_write_yuv_bases(struct mcam_camera *cam,
+ unsigned frame, dma_addr_t base)
+{
+ struct v4l2_pix_format *fmt = &cam->pix_format;
+ u32 pixel_count = fmt->width * fmt->height;
+ dma_addr_t y, u = 0, v = 0;
+
+ y = base;
+
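+ /*
+ * For the planar formats the chroma planes follow the luma plane in
+ * the same buffer: the first chroma plane starts width * height bytes
+ * in, the second a quarter of that further on (U/V order depends on
+ * the pixel format).
+ */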
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ u = y + pixel_count;
+ v = u + pixel_count / 4;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ v = y + pixel_count;
+ u = v + pixel_count / 4;
+ break;
+ default:
+ break;
+ }
+
+ mcam_reg_write(cam, REG_Y0BAR + frame * 4, y);
+ if (mcam_fmt_is_planar(fmt->pixelformat)) {
+ mcam_reg_write(cam, REG_U0BAR + frame * 4, u);
+ mcam_reg_write(cam, REG_V0BAR + frame * 4, v);
+ }
+}
+
+/* ------------------------------------------------------------------- */
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Code specific to the vmalloc buffer mode.
+ */
+
+/*
+ * Allocate in-kernel DMA buffers for vmalloc mode.
+ */
+static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ int i;
+
+ mcam_set_config_needed(cam, 1);
+ if (loadtime)
+ cam->dma_buf_size = dma_buf_size;
+ else
+ cam->dma_buf_size = cam->pix_format.sizeimage;
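+ /* The controller can only cycle through at most three buffers */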
+ if (n_dma_bufs > 3)
+ n_dma_bufs = 3;
+
+ cam->nbufs = 0;
+ for (i = 0; i < n_dma_bufs; i++) {
+ cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
+ cam->dma_buf_size, cam->dma_handles + i,
+ GFP_KERNEL);
+ if (cam->dma_bufs[i] == NULL) {
+ cam_warn(cam, "Failed to allocate DMA buffer\n");
+ break;
+ }
+ (cam->nbufs)++;
+ }
+
+ switch (cam->nbufs) {
+ case 1:
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[0], cam->dma_handles[0]);
+ cam->nbufs = 0;
+ /* fall-through */
+ case 0:
+ cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
+ return -ENOMEM;
+
+ case 2:
+ if (n_dma_bufs > 2)
+ cam_warn(cam, "Will limp along with only 2 buffers\n");
+ break;
+ }
+ return 0;
+}
+
+static void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ int i;
+
+ for (i = 0; i < cam->nbufs; i++) {
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[i], cam->dma_handles[i]);
+ cam->dma_bufs[i] = NULL;
+ }
+ cam->nbufs = 0;
+}
+
+
+/*
+ * Set up DMA buffers when operating in vmalloc mode
+ */
+static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
+{
+ /*
+ * Store the first two YUV buffers. Then either
+ * set the third if it exists, or tell the controller
+ * to just use two.
+ */
+ mcam_write_yuv_bases(cam, 0, cam->dma_handles[0]);
+ mcam_write_yuv_bases(cam, 1, cam->dma_handles[1]);
+ if (cam->nbufs > 2) {
+ mcam_write_yuv_bases(cam, 2, cam->dma_handles[2]);
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ } else
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ if (cam->chip_id == MCAM_CAFE)
+ mcam_reg_write(cam, REG_UBAR, 0); /* 32 bits only */
+}
+
+/*
+ * Copy data out to user space in the vmalloc case
+ */
+static void mcam_frame_tasklet(unsigned long data)
+{
+ struct mcam_camera *cam = (struct mcam_camera *) data;
+ int i;
+ unsigned long flags;
+ struct mcam_vb_buffer *buf;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ for (i = 0; i < cam->nbufs; i++) {
+ int bufno = cam->next_buf;
+
+ if (cam->state != S_STREAMING || bufno < 0)
+ break; /* I/O got stopped */
+ if (++(cam->next_buf) >= cam->nbufs)
+ cam->next_buf = 0;
+ if (!test_bit(bufno, &cam->flags))
+ continue;
+ if (list_empty(&cam->buffers)) {
+ cam->frame_state.singles++;
+ break; /* Leave it valid, hope for better later */
+ }
+ cam->frame_state.delivered++;
+ clear_bit(bufno, &cam->flags);
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+ queue);
+ list_del_init(&buf->queue);
+ /*
+ * Drop the lock during the big copy. This *should* be safe...
+ */
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ memcpy(vb2_plane_vaddr(&buf->vb_buf.vb2_buf, 0),
+ cam->dma_bufs[bufno],
+ cam->pix_format.sizeimage);
+ mcam_buffer_done(cam, bufno, &buf->vb_buf);
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ }
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Make sure our allocated buffers are up to the task.
+ */
+static int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
+ mcam_free_dma_bufs(cam);
+ if (cam->nbufs == 0)
+ return mcam_alloc_dma_bufs(cam, 0);
+ return 0;
+}
+
+static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
+{
+ tasklet_schedule(&cam->s_tasklet);
+}
+
+#else /* MCAM_MODE_VMALLOC */
+
+static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ return 0;
+}
+
+static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ return;
+}
+
+static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ return 0;
+}
+
+
+
+#endif /* MCAM_MODE_VMALLOC */
+
+
+#ifdef MCAM_MODE_DMA_CONTIG
+/* ---------------------------------------------------------------------- */
+/*
+ * DMA-contiguous code.
+ */
+
+/*
+ * Set up a contiguous buffer for the given frame. Here also is where
+ * the underrun strategy is set: if there is no buffer available, reuse
+ * the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
+ * keep the interrupt handler from giving that buffer back to user
+ * space. In this way, we always have a buffer to DMA to and don't
+ * have to try to play games stopping and restarting the controller.
+ */
+static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf;
+ dma_addr_t dma_handle;
+ struct vb2_v4l2_buffer *vb;
+
+ /*
+ * If there are no available buffers, go into single mode
+ */
+ if (list_empty(&cam->buffers)) {
+ buf = cam->vb_bufs[frame ^ 0x1];
+ set_bit(CF_SINGLE_BUFFER, &cam->flags);
+ cam->frame_state.singles++;
+ } else {
+ /*
+ * OK, we have a buffer we can use.
+ */
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+ queue);
+ list_del_init(&buf->queue);
+ clear_bit(CF_SINGLE_BUFFER, &cam->flags);
+ }
+
+ cam->vb_bufs[frame] = buf;
+ vb = &buf->vb_buf;
+
+ dma_handle = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
+ mcam_write_yuv_bases(cam, frame, dma_handle);
+}
+
+/*
+ * Initial B_DMA_contig setup.
+ */
+static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
+{
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ cam->nbufs = 2;
+ mcam_set_contig_buffer(cam, 0);
+ mcam_set_contig_buffer(cam, 1);
+}
+
+/*
+ * Frame completion handling.
+ */
+static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
+
+ if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
+ cam->frame_state.delivered++;
+ cam->vb_bufs[frame] = NULL;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+ }
+ mcam_set_contig_buffer(cam, frame);
+}
+
+#endif /* MCAM_MODE_DMA_CONTIG */
+
+#ifdef MCAM_MODE_DMA_SG
+/* ---------------------------------------------------------------------- */
+/*
+ * Scatter/gather-specific code.
+ */
+
+/*
+ * Set up the next buffer for S/G I/O; caller should be sure that
+ * the controller is stopped and a buffer is available.
+ */
+static void mcam_sg_next_buffer(struct mcam_camera *cam)
+{
+ struct mcam_vb_buffer *buf;
+ struct sg_table *sg_table;
+
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+ list_del_init(&buf->queue);
+ sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
+ /*
+ * Very Bad Not Good Things happen if you don't clear
+ * C1_DESC_ENA before making any descriptor changes.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
+ mcam_reg_write(cam, REG_DESC_LEN_Y,
+ sg_table->nents * sizeof(struct mcam_dma_desc));
+ mcam_reg_write(cam, REG_DESC_LEN_U, 0);
+ mcam_reg_write(cam, REG_DESC_LEN_V, 0);
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ cam->vb_bufs[0] = buf;
+}
+
+/*
+ * Initial B_DMA_sg setup
+ */
+static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
+{
+ /*
+ * The list-empty condition can hit us at resume time
+ * if the buffer list was empty when the system was suspended.
+ */
+ if (list_empty(&cam->buffers)) {
+ set_bit(CF_SG_RESTART, &cam->flags);
+ return;
+ }
+
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
+ mcam_sg_next_buffer(cam);
+ cam->nbufs = 3;
+}
+
+
+/*
+ * Frame completion with S/G is trickier. We can't muck with
+ * a descriptor chain on the fly, since the controller buffers it
+ * internally. So we have to actually stop and restart; Marvell
+ * says this is the way to do it.
+ *
+ * Of course, stopping is easier said than done; experience shows
+ * that the controller can start a frame *after* C0_ENABLE has been
+ * cleared. So when running in S/G mode, the controller is "stopped"
+ * on receipt of the start-of-frame interrupt. That means we can
+ * safely change the DMA descriptor array here and restart things
+ * (assuming there's another buffer waiting to go).
+ */
+static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[0];
+
+ /*
+ * If we're no longer supposed to be streaming, don't do anything.
+ */
+ if (cam->state != S_STREAMING)
+ return;
+ /*
+ * If we have another buffer available, put it in and
+ * restart the engine.
+ */
+ if (!list_empty(&cam->buffers)) {
+ mcam_sg_next_buffer(cam);
+ mcam_ctlr_start(cam);
+ /*
+ * Otherwise set CF_SG_RESTART and the controller will
+ * be restarted once another buffer shows up.
+ */
+ } else {
+ set_bit(CF_SG_RESTART, &cam->flags);
+ cam->frame_state.singles++;
+ cam->vb_bufs[0] = NULL;
+ }
+ /*
+ * Now we can give the completed frame back to user space.
+ */
+ cam->frame_state.delivered++;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+}
+
+
+/*
+ * Scatter/gather mode requires stopping the controller between
+ * frames so we can put in a new DMA descriptor array. If no new
+ * buffer exists at frame completion, the controller is left stopped;
+ * this function is charged with getting things going again.
+ */
+static void mcam_sg_restart(struct mcam_camera *cam)
+{
+ mcam_ctlr_dma_sg(cam);
+ mcam_ctlr_start(cam);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+}
+
+#else /* MCAM_MODE_DMA_SG */
+
+static inline void mcam_sg_restart(struct mcam_camera *cam)
+{
+ return;
+}
+
+#endif /* MCAM_MODE_DMA_SG */
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Buffer-mode-independent controller code.
+ */
+
+/*
+ * Image format setup
+ */
+static void mcam_ctlr_image(struct mcam_camera *cam)
+{
+ struct v4l2_pix_format *fmt = &cam->pix_format;
+ u32 widthy = 0, widthuv = 0, imgsz_h, imgsz_w;
+
+ cam_dbg(cam, "camera: bytesperline = %d; height = %d\n",
+ fmt->bytesperline, fmt->sizeimage / fmt->bytesperline);
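+	/*
+	 * The horizontal image size appears to be expressed in bytes, so
+	 * width * 2 matches the two-byte-per-pixel packed formats below.
+	 */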
+ imgsz_h = (fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK;
+ imgsz_w = (fmt->width * 2) & IMGSZ_H_MASK;
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ widthy = fmt->width * 2;
+ widthuv = 0;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ widthy = fmt->width;
+ widthuv = fmt->width / 2;
+ break;
+ default:
+ widthy = fmt->bytesperline;
+ widthuv = 0;
+ break;
+ }
+
+ mcam_reg_write_mask(cam, REG_IMGPITCH, widthuv << 16 | widthy,
+ IMGP_YP_MASK | IMGP_UVP_MASK);
+ mcam_reg_write(cam, REG_IMGSIZE, imgsz_h | imgsz_w);
+ mcam_reg_write(cam, REG_IMGOFFSET, 0x0);
+
+ /*
+ * Tell the controller about the image format we are using.
+ */
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_XRGB444:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB | C0_RGBF_444 | C0_RGB4_XBGR, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB | C0_RGBF_565 | C0_RGB5_BGGR, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_SBGGR8:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB | C0_RGB5_GRBG, C0_DF_MASK);
+ break;
+ default:
+ cam_err(cam, "camera: unknown format: %#x\n", fmt->pixelformat);
+ break;
+ }
+
+ /*
+ * Make sure it knows we want to use hsync/vsync.
+ */
+ mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
+ /*
+	 * This field controls the generation of EOF (DVP only).
+ */
+ if (cam->bus_type != V4L2_MBUS_CSI2)
+ mcam_reg_set_bit(cam, REG_CTRL0,
+ C0_EOF_VSYNC | C0_VEDGE_CTRL);
+}
+
+
+/*
+ * Configure the controller for operation; caller holds the
+ * device mutex.
+ */
+static int mcam_ctlr_configure(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ cam->dma_setup(cam);
+ mcam_ctlr_image(cam);
+ mcam_set_config_needed(cam, 0);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
+{
+ /*
+ * Clear any pending interrupts, since we do not
+ * expect to have I/O active prior to enabling.
+ */
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
+ mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+
+
+static void mcam_ctlr_init(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * Make sure it's not powered down.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ /*
+ * Turn off the enable bit. It sure should be off anyway,
+ * but it's good to be sure.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+ /*
+	 * Clock the sensor appropriately. The controller clock should
+	 * be 48MHz; the sensor's "typical" value is half that.
+ */
+ mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Stop the controller, and don't return until we're really sure that no
+ * further DMA is going on.
+ */
+static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ /*
+ * Theory: stop the camera controller (whether it is operating
+ * or not). Delay briefly just in case we race with the SOF
+ * interrupt, then wait until no DMA is active.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ mcam_ctlr_stop(cam);
+ cam->state = S_IDLE;
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ /*
+ * This is a brutally long sleep, but experience shows that
+ * it can take the controller a while to get the message that
+ * it needs to stop grabbing frames. In particular, we can
+ * sometimes (on mmp) get a frame at the end WITHOUT the
+ * start-of-frame indication.
+ */
+ msleep(150);
+ if (test_bit(CF_DMA_ACTIVE, &cam->flags))
+ cam_err(cam, "Timeout waiting for DMA to end\n");
+ /* This would be bad news - what now? */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ mcam_ctlr_irq_disable(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/*
+ * Power up and down.
+ */
+static int mcam_ctlr_power_up(struct mcam_camera *cam)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ ret = cam->plat_power_up(cam);
+ if (ret) {
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return ret;
+ }
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ msleep(5); /* Just to be sure */
+ return 0;
+}
+
+static void mcam_ctlr_power_down(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * School of hard knocks department: be sure we do any register
+ * twiddling on the controller *before* calling the platform
+ * power down routine.
+ */
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
+ cam->plat_power_down(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/* -------------------------------------------------------------------- */
+/*
+ * Communications with the sensor.
+ */
+
+static int __mcam_cam_reset(struct mcam_camera *cam)
+{
+ return sensor_call(cam, core, reset, 0);
+}
+
+/*
+ * We have found the sensor on the i2c. Let's try to have a
+ * conversation.
+ */
+static int mcam_cam_init(struct mcam_camera *cam)
+{
+ int ret;
+
+ if (cam->state != S_NOTREADY)
+ cam_warn(cam, "Cam init with device in funky state %d",
+ cam->state);
+ ret = __mcam_cam_reset(cam);
+ /* Get/set parameters? */
+ cam->state = S_IDLE;
+ mcam_ctlr_power_down(cam);
+ return ret;
+}
+
+/*
+ * Configure the sensor to match the parameters we have. Caller should
+ * hold s_mutex
+ */
+static int mcam_cam_set_flip(struct mcam_camera *cam)
+{
+ struct v4l2_control ctrl;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_VFLIP;
+ ctrl.value = flip;
+ return v4l2_s_ctrl(NULL, cam->sensor->ctrl_handler, &ctrl);
+}
+
+
+static int mcam_cam_configure(struct mcam_camera *cam)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ v4l2_fill_mbus_format(&format.format, &cam->pix_format, cam->mbus_code);
+ ret = sensor_call(cam, core, init, 0);
+ if (ret == 0)
+ ret = sensor_call(cam, pad, set_fmt, NULL, &format);
+ /*
+ * OV7670 does weird things if flip is set *before* format...
+ */
+ ret += mcam_cam_set_flip(cam);
+ return ret;
+}
+
+/*
+ * Get everything ready, and start grabbing frames.
+ */
+static int mcam_read_setup(struct mcam_camera *cam)
+{
+ int ret;
+ unsigned long flags;
+
+ /*
+ * Configuration. If we still don't have DMA buffers,
+ * make one last, desperate attempt.
+ */
+ if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
+ mcam_alloc_dma_bufs(cam, 0))
+ return -ENOMEM;
+
+ if (mcam_needs_config(cam)) {
+ mcam_cam_configure(cam);
+ ret = mcam_ctlr_configure(cam);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Turn it loose.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_DMA_ACTIVE, &cam->flags);
+ mcam_reset_buffers(cam);
+ /*
+ * Update CSI2_DPHY value
+ */
+ if (cam->calc_dphy)
+ cam->calc_dphy(cam);
+ cam_dbg(cam, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+ cam->dphy[0], cam->dphy[1], cam->dphy[2]);
+ if (cam->bus_type == V4L2_MBUS_CSI2)
+ mcam_enable_mipi(cam);
+ else
+ mcam_disable_mipi(cam);
+ mcam_ctlr_irq_enable(cam);
+ cam->state = S_STREAMING;
+ if (!test_bit(CF_SG_RESTART, &cam->flags))
+ mcam_ctlr_start(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/*
+ * Videobuf2 interface code.
+ */
+
+static int mcam_vb_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbufs,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
+ unsigned size = cam->pix_format.sizeimage;
+
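+	/*
+	 * DMA-contig mode keeps a buffer loaded in each of the controller's
+	 * two BARs, so ask for at least three buffers there so that user
+	 * space can always have one; the other modes get by with two.
+	 */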
+ if (*nbufs < minbufs)
+ *nbufs = minbufs;
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+ sizes[0] = size;
+	*num_planes = 1; /* Someday we'll have to support planar formats... */
+ return 0;
+}
+
+
+static void mcam_vb_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+ int start;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
+ list_add(&mvb->queue, &cam->buffers);
+ if (cam->state == S_STREAMING && test_bit(CF_SG_RESTART, &cam->flags))
+ mcam_sg_restart(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ if (start)
+ mcam_read_setup(cam);
+}
+
+static void mcam_vb_requeue_bufs(struct vb2_queue *vq,
+ enum vb2_buffer_state state)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ struct mcam_vb_buffer *buf, *node;
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ list_for_each_entry_safe(buf, node, &cam->buffers, queue) {
+ vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ for (i = 0; i < MAX_DMA_BUFS; i++) {
+ buf = cam->vb_bufs[i];
+
+ if (buf) {
+ vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
+ cam->vb_bufs[i] = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/*
+ * These need to be called with the mutex held from vb2
+ */
+static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ unsigned int frame;
+ int ret;
+
+ if (cam->state != S_IDLE) {
+ mcam_vb_requeue_bufs(vq, VB2_BUF_STATE_QUEUED);
+ return -EINVAL;
+ }
+ cam->frame_state.frames = 0;
+ cam->frame_state.singles = 0;
+ cam->frame_state.delivered = 0;
+ cam->sequence = 0;
+ /*
+ * Videobuf2 sneakily hoards all the buffers and won't
+ * give them to us until *after* streaming starts. But
+ * we can't actually start streaming until we have a
+ * destination. So go into a wait state and hope they
+ * give us buffers soon.
+ */
+ if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
+ cam->state = S_BUFWAIT;
+ return 0;
+ }
+
+	/*
+	 * Make sure any leftover frame flags are cleared
+	 * before we really start streaming.
+	 */
+ for (frame = 0; frame < cam->nbufs; frame++)
+ clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+
+ ret = mcam_read_setup(cam);
+ if (ret)
+ mcam_vb_requeue_bufs(vq, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void mcam_vb_stop_streaming(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ cam_dbg(cam, "stop_streaming: %d frames, %d singles, %d delivered\n",
+ cam->frame_state.frames, cam->frame_state.singles,
+ cam->frame_state.delivered);
+ if (cam->state == S_BUFWAIT) {
+ /* They never gave us buffers */
+ cam->state = S_IDLE;
+ return;
+ }
+ if (cam->state != S_STREAMING)
+ return;
+ mcam_ctlr_stop_dma(cam);
+ /*
+ * Reset the CCIC PHY after stopping streaming,
+ * otherwise, the CCIC may be unstable.
+ */
+ if (cam->ctlr_reset)
+ cam->ctlr_reset(cam);
+ /*
+ * VB2 reclaims the buffers, so we need to forget
+ * about them.
+ */
+ mcam_vb_requeue_bufs(vq, VB2_BUF_STATE_ERROR);
+}
+
+
+static const struct vb2_ops mcam_vb2_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_queue = mcam_vb_buf_queue,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+
+#ifdef MCAM_MODE_DMA_SG
+/*
+ * Scatter/gather mode uses all of the above functions plus a
+ * few extras to deal with DMA mapping.
+ */
+static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
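+	/*
+	 * Allocate the worst-case number of descriptors: one per page of
+	 * image data, plus one to cover a final partial page.
+	 */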
+ mvb->dma_desc = dma_alloc_coherent(cam->dev,
+ ndesc * sizeof(struct mcam_dma_desc),
+ &mvb->dma_desc_pa, GFP_KERNEL);
+ if (mvb->dma_desc == NULL) {
+ cam_err(cam, "Unable to get DMA descriptor array\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
+ struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
+ struct mcam_dma_desc *desc = mvb->dma_desc;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
+ desc->dma_addr = sg_dma_address(sg);
+ desc->segment_len = sg_dma_len(sg);
+ desc++;
+ }
+ return 0;
+}
+
+static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+ dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
+ mvb->dma_desc, mvb->dma_desc_pa);
+}
+
+
+static const struct vb2_ops mcam_vb2_sg_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_init = mcam_vb_sg_buf_init,
+ .buf_prepare = mcam_vb_sg_buf_prepare,
+ .buf_queue = mcam_vb_buf_queue,
+ .buf_cleanup = mcam_vb_sg_buf_cleanup,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+#endif /* MCAM_MODE_DMA_SG */
+
+static int mcam_setup_vb2(struct mcam_camera *cam)
+{
+ struct vb2_queue *vq = &cam->vb_queue;
+
+ memset(vq, 0, sizeof(*vq));
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->drv_priv = cam;
+ vq->lock = &cam->s_mutex;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+ vq->dev = cam->dev;
+ INIT_LIST_HEAD(&cam->buffers);
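+	/*
+	 * Unsupported modes were already rejected in mccic_register(), so
+	 * one of the cases below is guaranteed to configure the queue.
+	 */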
+ switch (cam->buffer_mode) {
+ case B_DMA_contig:
+#ifdef MCAM_MODE_DMA_CONTIG
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ cam->dma_setup = mcam_ctlr_dma_contig;
+ cam->frame_complete = mcam_dma_contig_done;
+#endif
+ break;
+ case B_DMA_sg:
+#ifdef MCAM_MODE_DMA_SG
+ vq->ops = &mcam_vb2_sg_ops;
+ vq->mem_ops = &vb2_dma_sg_memops;
+ cam->dma_setup = mcam_ctlr_dma_sg;
+ cam->frame_complete = mcam_dma_sg_done;
+#endif
+ break;
+ case B_vmalloc:
+#ifdef MCAM_MODE_VMALLOC
+ tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
+ (unsigned long) cam);
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_vmalloc_memops;
+ cam->dma_setup = mcam_ctlr_dma_vmalloc;
+ cam->frame_complete = mcam_vmalloc_done;
+#endif
+ break;
+ }
+ return vb2_queue_init(vq);
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * The long list of V4L2 ioctl() operations.
+ */
+
+static int mcam_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mcam_camera *cam = video_drvdata(file);
+
+ strcpy(cap->driver, "marvell_ccic");
+ strcpy(cap->card, "marvell_ccic");
+ strlcpy(cap->bus_info, cam->bus_info, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+
+static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
+ void *priv, struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index >= N_MCAM_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, mcam_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
+ return 0;
+}
+
+static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ struct mcam_format_struct *f;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ int ret;
+
+ f = mcam_find_format(pix->pixelformat);
+ pix->pixelformat = f->pixelformat;
+ v4l2_fill_mbus_format(&format.format, pix, f->mbus_code);
+ ret = sensor_call(cam, pad, set_fmt, &pad_cfg, &format);
+ v4l2_fill_pix_format(pix, &format.format);
+ pix->bytesperline = pix->width * f->bpp;
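+	/*
+	 * Planar 4:2:0 formats carry half a byte of chroma per pixel on
+	 * average, hence the 3/2 factor below.
+	 */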
+ switch (f->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ pix->sizeimage = pix->height * pix->bytesperline * 3 / 2;
+ break;
+ default:
+ pix->sizeimage = pix->height * pix->bytesperline;
+ break;
+ }
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ return ret;
+}
+
+static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ struct mcam_format_struct *f;
+ int ret;
+
+ /*
+ * Can't do anything if the device is not idle
+ * Also can't if there are streaming buffers in place.
+ */
+ if (cam->state != S_IDLE || vb2_is_busy(&cam->vb_queue))
+ return -EBUSY;
+
+ f = mcam_find_format(fmt->fmt.pix.pixelformat);
+
+ /*
+ * See if the formatting works in principle.
+ */
+ ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
+ if (ret)
+ return ret;
+ /*
+ * Now we start to change things for real, so let's do it
+ * under lock.
+ */
+ cam->pix_format = fmt->fmt.pix;
+ cam->mbus_code = f->mbus_code;
+
+ /*
+ * Make sure we have appropriate DMA buffers.
+ */
+ if (cam->buffer_mode == B_vmalloc) {
+ ret = mcam_check_dma_buffers(cam);
+ if (ret)
+ goto out;
+ }
+ mcam_set_config_needed(cam, 1);
+out:
+ return ret;
+}
+
+/*
+ * Return our stored notion of how the camera is/should be configured.
+ * The V4L2 spec wants us to be smarter, and actually get this from
+ * the camera (and not mess with it at open time). Someday.
+ */
+static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+
+ f->fmt.pix = cam->pix_format;
+ return 0;
+}
+
+/*
+ * We only have one input - the sensor - so minimize the nonsense here.
+ */
+static int mcam_vidioc_enum_input(struct file *filp, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(input->name, "Camera");
+ return 0;
+}
+
+static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * G/S_PARM. Most of this is done by the sensor, but we are
+ * the level which controls the number of read buffers.
+ */
+static int mcam_vidioc_g_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ int ret;
+
+ ret = v4l2_g_parm_cap(video_devdata(filp), cam->sensor, a);
+ a->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ int ret;
+
+ ret = v4l2_s_parm_cap(video_devdata(filp), cam->sensor, a);
+ a->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ struct mcam_format_struct *f;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = sizes->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ f = mcam_find_format(sizes->pixel_format);
+ if (f->pixelformat != sizes->pixel_format)
+ return -EINVAL;
+ fse.code = f->mbus_code;
+ ret = sensor_call(cam, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ sizes->discrete.width = fse.min_width;
+ sizes->discrete.height = fse.min_height;
+ return 0;
+ }
+ sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ sizes->stepwise.min_width = fse.min_width;
+ sizes->stepwise.max_width = fse.max_width;
+ sizes->stepwise.min_height = fse.min_height;
+ sizes->stepwise.max_height = fse.max_height;
+ sizes->stepwise.step_width = 1;
+ sizes->stepwise.step_height = 1;
+ return 0;
+}
+
+static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ struct mcam_format_struct *f;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = interval->index,
+ .width = interval->width,
+ .height = interval->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ f = mcam_find_format(interval->pixel_format);
+ if (f->pixelformat != interval->pixel_format)
+ return -EINVAL;
+ fie.code = f->mbus_code;
+ ret = sensor_call(cam, pad, enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+ interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ interval->discrete = fie.interval;
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mcam_vidioc_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = video_drvdata(file);
+
+ if (reg->reg > cam->regs_size - 4)
+ return -EINVAL;
+ reg->val = mcam_reg_read(cam, reg->reg);
+ reg->size = 4;
+ return 0;
+}
+
+static int mcam_vidioc_s_register(struct file *file, void *priv,
+ const struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = video_drvdata(file);
+
+ if (reg->reg > cam->regs_size - 4)
+ return -EINVAL;
+ mcam_reg_write(cam, reg->reg, reg->val);
+ return 0;
+}
+#endif
+
+static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
+ .vidioc_querycap = mcam_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = mcam_vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = mcam_vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = mcam_vidioc_g_fmt_vid_cap,
+ .vidioc_enum_input = mcam_vidioc_enum_input,
+ .vidioc_g_input = mcam_vidioc_g_input,
+ .vidioc_s_input = mcam_vidioc_s_input,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_g_parm = mcam_vidioc_g_parm,
+ .vidioc_s_parm = mcam_vidioc_s_parm,
+ .vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = mcam_vidioc_g_register,
+ .vidioc_s_register = mcam_vidioc_s_register,
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Our various file operations.
+ */
+static int mcam_v4l_open(struct file *filp)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = v4l2_fh_open(filp);
+ if (ret)
+ goto out;
+ if (v4l2_fh_is_singular_file(filp)) {
+ ret = mcam_ctlr_power_up(cam);
+ if (ret)
+ goto out;
+ __mcam_cam_reset(cam);
+ mcam_set_config_needed(cam, 1);
+ }
+out:
+ mutex_unlock(&cam->s_mutex);
+ if (ret)
+ v4l2_fh_release(filp);
+ return ret;
+}
+
+
+static int mcam_v4l_release(struct file *filp)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ bool last_open;
+
+ mutex_lock(&cam->s_mutex);
+ last_open = v4l2_fh_is_singular_file(filp);
+ _vb2_fop_release(filp, NULL);
+ if (last_open) {
+ mcam_disable_mipi(cam);
+ mcam_ctlr_power_down(cam);
+ if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
+ mcam_free_dma_bufs(cam);
+ }
+
+ mutex_unlock(&cam->s_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations mcam_v4l_fops = {
+ .owner = THIS_MODULE,
+ .open = mcam_v4l_open,
+ .release = mcam_v4l_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+
+/*
+ * This template device holds all of those v4l2 methods; we
+ * clone it for specific real devices.
+ */
+static const struct video_device mcam_v4l_template = {
+ .name = "mcam",
+ .fops = &mcam_v4l_fops,
+ .ioctl_ops = &mcam_v4l_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt handler stuff
+ */
+static void mcam_frame_complete(struct mcam_camera *cam, int frame)
+{
+ /*
+ * Basic frame housekeeping.
+ */
+ set_bit(frame, &cam->flags);
+ clear_bit(CF_DMA_ACTIVE, &cam->flags);
+ cam->next_buf = frame;
+ cam->buf_seq[frame] = cam->sequence++;
+ cam->frame_state.frames++;
+ /*
+ * "This should never happen"
+ */
+ if (cam->state != S_STREAMING)
+ return;
+ /*
+ * Process the frame and set up the next one.
+ */
+ cam->frame_complete(cam, frame);
+}
+
+
+/*
+ * The interrupt handler; this needs to be called from the
+ * platform irq handler with the lock held.
+ */
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
+{
+ unsigned int frame, handled = 0;
+
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
+ /*
+ * Handle any frame completions. There really should
+ * not be more than one of these, or we have fallen
+ * far behind.
+ *
+ * When running in S/G mode, the frame number lacks any
+ * real meaning - there's only one descriptor array - but
+ * the controller still picks a different one to signal
+ * each time.
+ */
+ for (frame = 0; frame < cam->nbufs; frame++)
+ if (irqs & (IRQ_EOF0 << frame) &&
+ test_bit(CF_FRAME_SOF0 + frame, &cam->flags)) {
+ mcam_frame_complete(cam, frame);
+ handled = 1;
+ clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+ if (cam->buffer_mode == B_DMA_sg)
+ break;
+ }
+ /*
+ * If a frame starts, note that we have DMA active. This
+ * code assumes that we won't get multiple frame interrupts
+ * at once; may want to rethink that.
+ */
+ for (frame = 0; frame < cam->nbufs; frame++) {
+ if (irqs & (IRQ_SOF0 << frame)) {
+ set_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+ handled = IRQ_HANDLED;
+ }
+ }
+
+ if (handled == IRQ_HANDLED) {
+ set_bit(CF_DMA_ACTIVE, &cam->flags);
+ if (cam->buffer_mode == B_DMA_sg)
+ mcam_ctlr_stop(cam);
+ }
+ return handled;
+}
+EXPORT_SYMBOL_GPL(mccic_irq);
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Registration and such.
+ */
+static struct ov7670_config sensor_cfg = {
+ /*
+ * Exclude QCIF mode, because it only captures a tiny portion
+ * of the sensor FOV
+ */
+ .min_width = 320,
+ .min_height = 240,
+};
+
+
+int mccic_register(struct mcam_camera *cam)
+{
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+ };
+ int ret;
+
+ /*
+ * Validate the requested buffer mode.
+ */
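+	/*
+	 * A non-negative buffer_mode (presumably the module parameter set
+	 * earlier in this file) overrides the platform's choice.
+	 */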
+ if (buffer_mode >= 0)
+ cam->buffer_mode = buffer_mode;
+ if (cam->buffer_mode == B_DMA_sg &&
+ cam->chip_id == MCAM_CAFE) {
+ printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, attempting vmalloc mode instead\n");
+ cam->buffer_mode = B_vmalloc;
+ }
+ if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
+ printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
+ cam->buffer_mode);
+ return -EINVAL;
+ }
+ /*
+ * Register with V4L
+ */
+ ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&cam->s_mutex);
+ cam->state = S_NOTREADY;
+ mcam_set_config_needed(cam, 1);
+ cam->pix_format = mcam_def_pix_format;
+ cam->mbus_code = mcam_def_mbus_code;
+ mcam_ctlr_init(cam);
+
+ /*
+ * Get the v4l2 setup done.
+ */
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (ret)
+ goto out_unregister;
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
+
+ /*
+ * Try to find the sensor.
+ */
+ sensor_cfg.clock_speed = cam->clock_speed;
+ sensor_cfg.use_smbus = cam->use_smbus;
+ cam->sensor_addr = ov7670_info.addr;
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
+ cam->i2c_adapter, &ov7670_info, NULL);
+ if (cam->sensor == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+
+ ret = mcam_cam_init(cam);
+ if (ret)
+ goto out_unregister;
+
+ ret = mcam_setup_vb2(cam);
+ if (ret)
+ goto out_unregister;
+
+ mutex_lock(&cam->s_mutex);
+ cam->vdev = mcam_v4l_template;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ cam->vdev.lock = &cam->s_mutex;
+ cam->vdev.queue = &cam->vb_queue;
+ video_set_drvdata(&cam->vdev, cam);
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ mutex_unlock(&cam->s_mutex);
+ goto out_unregister;
+ }
+
+ /*
+ * If so requested, try to get our DMA buffers now.
+ */
+ if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
+ if (mcam_alloc_dma_bufs(cam, 1))
+ cam_warn(cam, "Unable to alloc DMA buffers at load will try again later.");
+ }
+
+ mutex_unlock(&cam->s_mutex);
+ return 0;
+
+out_unregister:
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ v4l2_device_unregister(&cam->v4l2_dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mccic_register);
+
+void mccic_shutdown(struct mcam_camera *cam)
+{
+ /*
+ * If we have no users (and we really, really should have no
+ * users) the device will already be powered down. Trying to
+ * take it down again will wedge the machine, which is frowned
+ * upon.
+ */
+ if (!list_empty(&cam->vdev.fh_list)) {
+ cam_warn(cam, "Removing a device with users!\n");
+ mcam_ctlr_power_down(cam);
+ }
+ if (cam->buffer_mode == B_vmalloc)
+ mcam_free_dma_bufs(cam);
+ video_unregister_device(&cam->vdev);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ v4l2_device_unregister(&cam->v4l2_dev);
+}
+EXPORT_SYMBOL_GPL(mccic_shutdown);
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+void mccic_suspend(struct mcam_camera *cam)
+{
+ mutex_lock(&cam->s_mutex);
+ if (!list_empty(&cam->vdev.fh_list)) {
+ enum mcam_state cstate = cam->state;
+
+ mcam_ctlr_stop_dma(cam);
+ mcam_ctlr_power_down(cam);
+ cam->state = cstate;
+ }
+ mutex_unlock(&cam->s_mutex);
+}
+EXPORT_SYMBOL_GPL(mccic_suspend);
+
+int mccic_resume(struct mcam_camera *cam)
+{
+ int ret = 0;
+
+ mutex_lock(&cam->s_mutex);
+ if (!list_empty(&cam->vdev.fh_list)) {
+ ret = mcam_ctlr_power_up(cam);
+ if (ret) {
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+ }
+ __mcam_cam_reset(cam);
+ } else {
+ mcam_ctlr_power_down(cam);
+ }
+ mutex_unlock(&cam->s_mutex);
+
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ if (cam->state == S_STREAMING) {
+ /*
+ * If there was a buffer in the DMA engine at suspend
+ * time, put it back on the queue or we'll forget about it.
+ */
+ if (cam->buffer_mode == B_DMA_sg && cam->vb_bufs[0])
+ list_add(&cam->vb_bufs[0]->queue, &cam->buffers);
+ ret = mcam_read_setup(cam);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mccic_resume);
+#endif /* CONFIG_PM */
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
new file mode 100644
index 000000000..ad8955f9f
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Marvell camera core structures.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#ifndef _MCAM_CORE_H
+#define _MCAM_CORE_H
+
+#include <linux/list.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
+
+/*
+ * Create our own symbols for the supported buffer modes, but, for now,
+ * base them entirely on which videobuf2 options have been selected.
+ */
+#if IS_ENABLED(CONFIG_VIDEOBUF2_VMALLOC)
+#define MCAM_MODE_VMALLOC 1
+#endif
+
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_CONTIG)
+#define MCAM_MODE_DMA_CONTIG 1
+#endif
+
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_SG)
+#define MCAM_MODE_DMA_SG 1
+#endif
+
+#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
+ !defined(MCAM_MODE_DMA_SG)
+#error One of the videobuf buffer modes must be selected in the config
+#endif
+
+
+enum mcam_state {
+ S_NOTREADY, /* Not yet initialized */
+ S_IDLE, /* Just hanging around */
+ S_FLAKED, /* Some sort of problem */
+ S_STREAMING, /* Streaming data */
+ S_BUFWAIT /* streaming requested but no buffers yet */
+};
+#define MAX_DMA_BUFS 3
+
+/*
+ * Different platforms work best with different buffer modes, so we
+ * let the platform pick.
+ */
+enum mcam_buffer_mode {
+ B_vmalloc = 0,
+ B_DMA_contig = 1,
+ B_DMA_sg = 2
+};
+
+enum mcam_chip_id {
+ MCAM_CAFE,
+ MCAM_ARMADA610,
+};
+
+/*
+ * Is a given buffer mode supported by the current kernel configuration?
+ */
+static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
+{
+ switch (mode) {
+#ifdef MCAM_MODE_VMALLOC
+ case B_vmalloc:
+#endif
+#ifdef MCAM_MODE_DMA_CONTIG
+ case B_DMA_contig:
+#endif
+#ifdef MCAM_MODE_DMA_SG
+ case B_DMA_sg:
+#endif
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Basic frame states
+ */
+struct mcam_frame_state {
+ unsigned int frames;
+ unsigned int singles;
+ unsigned int delivered;
+};
+
+#define NR_MCAM_CLK 3
+
+/*
+ * A description of one of our devices.
+ * Locking: controlled by s_mutex. Certain fields, however, require
+ * the dev_lock spinlock; they are marked as such by comments.
+ * dev_lock is also required for access to device registers.
+ */
+struct mcam_camera {
+ /*
+ * These fields should be set by the platform code prior to
+ * calling mcam_register().
+ */
+ struct i2c_adapter *i2c_adapter;
+ unsigned char __iomem *regs;
+ unsigned regs_size; /* size in bytes of the register space */
+ spinlock_t dev_lock;
+ struct device *dev; /* For messages, dma alloc */
+ enum mcam_chip_id chip_id;
+ short int clock_speed; /* Sensor clock speed, default 30 */
+	short int use_smbus;	/* SMBUS or straight I2C? */
+ enum mcam_buffer_mode buffer_mode;
+
+ int mclk_min; /* The minimal value of mclk */
+ int mclk_src; /* which clock source the mclk derives from */
+ int mclk_div; /* Clock Divider Value for MCLK */
+
+ int ccic_id;
+ enum v4l2_mbus_type bus_type;
+ /* MIPI support */
+ /* The dphy config value, allocated in board file
+ * dphy[0]: DPHY3
+ * dphy[1]: DPHY5
+ * dphy[2]: DPHY6
+ */
+ int *dphy;
+ bool mipi_enabled; /* flag whether mipi is enabled already */
+ int lane; /* lane number */
+
+ /* clock tree support */
+ struct clk *clk[NR_MCAM_CLK];
+
+ /*
+ * Callbacks from the core to the platform code.
+ */
+ int (*plat_power_up) (struct mcam_camera *cam);
+ void (*plat_power_down) (struct mcam_camera *cam);
+ void (*calc_dphy) (struct mcam_camera *cam);
+ void (*ctlr_reset) (struct mcam_camera *cam);
+
+ /*
+ * Everything below here is private to the mcam core and
+ * should not be touched by the platform code.
+ */
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ enum mcam_state state;
+ unsigned long flags; /* Buffer status, mainly (dev_lock) */
+
+ struct mcam_frame_state frame_state; /* Frame state counter */
+ /*
+ * Subsystem structures.
+ */
+ struct video_device vdev;
+ struct v4l2_subdev *sensor;
+ unsigned short sensor_addr;
+
+ /* Videobuf2 stuff */
+ struct vb2_queue vb_queue;
+ struct list_head buffers; /* Available frames */
+
+ unsigned int nbufs; /* How many are alloc'd */
+ int next_buf; /* Next to consume (dev_lock) */
+
+ char bus_info[32]; /* querycap bus_info */
+
+ /* DMA buffers - vmalloc mode */
+#ifdef MCAM_MODE_VMALLOC
+ unsigned int dma_buf_size; /* allocated size */
+ void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
+ dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
+ struct tasklet_struct s_tasklet;
+#endif
+ unsigned int sequence; /* Frame sequence number */
+ unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual bufs */
+
+ /* DMA buffers - DMA modes */
+ struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
+
+ /* Mode-specific ops, set at open time */
+ void (*dma_setup)(struct mcam_camera *cam);
+ void (*frame_complete)(struct mcam_camera *cam, int frame);
+
+ /* Current operating parameters */
+ struct v4l2_pix_format pix_format;
+ u32 mbus_code;
+
+ /* Locks */
+ struct mutex s_mutex; /* Access to this structure */
+};
+
+
+/*
+ * Register I/O functions. These are here because the platform code
+ * may legitimately need to mess with the register space.
+ */
+/*
+ * Device register I/O
+ */
+static inline void mcam_reg_write(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val)
+{
+ iowrite32(val, cam->regs + reg);
+}
+
+static inline unsigned int mcam_reg_read(struct mcam_camera *cam,
+ unsigned int reg)
+{
+ return ioread32(cam->regs + reg);
+}
+
+
+static inline void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val, unsigned int mask)
+{
+ unsigned int v = mcam_reg_read(cam, reg);
+
+ v = (v & ~mask) | (val & mask);
+ mcam_reg_write(cam, reg, v);
+}
+
+static inline void mcam_reg_clear_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, 0, val);
+}
+
+static inline void mcam_reg_set_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, val, val);
+}
+
+/*
+ * Functions for use by platform code.
+ */
+int mccic_register(struct mcam_camera *cam);
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs);
+void mccic_shutdown(struct mcam_camera *cam);
+#ifdef CONFIG_PM
+void mccic_suspend(struct mcam_camera *cam);
+int mccic_resume(struct mcam_camera *cam);
+#endif
+
+/*
+ * Register definitions for the m88alp01 camera interface. Offsets in bytes
+ * as given in the spec.
+ */
+#define REG_Y0BAR 0x00
+#define REG_Y1BAR 0x04
+#define REG_Y2BAR 0x08
+#define REG_U0BAR 0x0c
+#define REG_U1BAR 0x10
+#define REG_U2BAR 0x14
+#define REG_V0BAR 0x18
+#define REG_V1BAR 0x1C
+#define REG_V2BAR 0x20
+
+/*
+ * register definitions for MIPI support
+ */
+#define REG_CSI2_CTRL0 0x100
+#define CSI2_C0_MIPI_EN (0x1 << 0)
+#define CSI2_C0_ACT_LANE(n) ((n-1) << 1)
+#define REG_CSI2_DPHY3 0x12c
+#define REG_CSI2_DPHY5 0x134
+#define REG_CSI2_DPHY6 0x138
+
+/* ... */
+
+#define REG_IMGPITCH 0x24 /* Image pitch register */
+#define IMGP_YP_SHFT 2 /* Y pitch params */
+#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
+#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
+#define IMGP_UVP_MASK 0x3ffc0000
+#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
+#define IRQ_EOF0 0x00000001 /* End of frame 0 */
+#define IRQ_EOF1 0x00000002 /* End of frame 1 */
+#define IRQ_EOF2 0x00000004 /* End of frame 2 */
+#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
+#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
+#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
+#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
+#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
+#define IRQ_TWSIR 0x00020000 /* TWSI read */
+#define IRQ_TWSIE 0x00040000 /* TWSI error */
+#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
+#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
+#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
+#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
+#define REG_IRQSTAT 0x30 /* IRQ status / clear */
+
+#define REG_IMGSIZE 0x34 /* Image size */
+#define IMGSZ_V_MASK 0x1fff0000
+#define IMGSZ_V_SHIFT 16
+#define IMGSZ_H_MASK 0x00003fff
+#define REG_IMGOFFSET	0x38	/* Image offset */
+
+#define REG_CTRL0 0x3c /* Control 0 */
+#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
+
+/* Mask for all the format bits */
+#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
+
+/* RGB ordering */
+#define C0_RGB4_RGBX 0x00000000
+#define C0_RGB4_XRGB 0x00000004
+#define C0_RGB4_BGRX 0x00000008
+#define C0_RGB4_XBGR 0x0000000c
+#define C0_RGB5_RGGB 0x00000000
+#define C0_RGB5_GRBG 0x00000004
+#define C0_RGB5_GBRG 0x00000008
+#define C0_RGB5_BGGR 0x0000000c
+
+/* Spec has two fields for DIN and DOUT, but they must match, so
+ combine them here. */
+#define C0_DF_YUV 0x00000000 /* Data is YUV */
+#define C0_DF_RGB 0x000000a0 /* ... RGB */
+#define C0_DF_BAYER 0x00000140 /* ... Bayer */
+/* 8-8-8 must be missing from the below - ask */
+#define C0_RGBF_565 0x00000000
+#define C0_RGBF_444 0x00000800
+#define C0_RGB_BGR 0x00001000 /* Blue comes first */
+#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
+#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
+#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
+/* Think that 420 packed must be 111 - ask */
+#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
+#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
+#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
+#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
+#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */
+#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
+#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
+#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
+/* Bayer bits 18,19 if needed */
+#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
+#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
+#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
+#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
+#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
+#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
+/* SIFMODE */
+#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
+#define C0_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
+#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
+
+/* Bits below C1_444ALPHA are not present in Cafe */
+#define REG_CTRL1 0x40 /* Control 1 */
+#define C1_CLKGATE 0x00000001 /* Sensor clock gate */
+#define C1_DESC_ENA 0x00000100 /* DMA descriptor enable */
+#define C1_DESC_3WORD 0x00000200 /* Three-word descriptors used */
+#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
+#define C1_ALPHA_SHFT 20
+#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
+#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
+#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
+#define C1_DMAB_MASK 0x06000000
+#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
+#define C1_PWRDWN 0x10000000 /* Power down */
+
+#define REG_CLKCTRL 0x88 /* Clock control */
+#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
+
+/* This appears to be a Cafe-only register */
+#define REG_UBAR 0xc4 /* Upper base address register */
+
+/* Armada 610 DMA descriptor registers */
+#define REG_DMA_DESC_Y 0x200
+#define REG_DMA_DESC_U 0x204
+#define REG_DMA_DESC_V 0x208
+#define REG_DESC_LEN_Y 0x20c /* Lengths are in bytes */
+#define REG_DESC_LEN_U 0x210
+#define REG_DESC_LEN_V 0x214
+
+/*
+ * Useful stuff that probably belongs somewhere global.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#endif /* _MCAM_CORE_H */
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
new file mode 100644
index 000000000..6d9f0abb2
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -0,0 +1,537 @@
+/*
+ * Support for the camera device found on Marvell MMP processors; known
+ * to work with the Armada 610 as used in the OLPC 1.75 system.
+ *
+ * Copyright 2011 Jonathan Corbet <corbet@lwn.net>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/i2c-gpio.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <linux/platform_data/media/mmp-camera.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+
+#include "mcam-core.h"
+
+MODULE_ALIAS("platform:mmp-camera");
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_LICENSE("GPL");
+
+static char *mcam_clks[] = {"CCICAXICLK", "CCICFUNCLK", "CCICPHYCLK"};
+
+struct mmp_camera {
+ void __iomem *power_regs;
+ struct platform_device *pdev;
+ struct mcam_camera mcam;
+ struct list_head devlist;
+ struct clk *mipi_clk;
+ int irq;
+};
+
+static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
+{
+ return container_of(mcam, struct mmp_camera, mcam);
+}
+
+/*
+ * A silly little infrastructure so we can keep track of our devices.
+ * Chances are that we will never have more than one of them, but
+ * the Armada 610 *does* have two controllers...
+ */
+
+static LIST_HEAD(mmpcam_devices);
+static struct mutex mmpcam_devices_lock;
+
+static void mmpcam_add_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_add(&cam->devlist, &mmpcam_devices);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+static void mmpcam_remove_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_del(&cam->devlist);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+/*
+ * Platform dev remove passes us a platform_device, and there's
+ * no handy unused drvdata to stash a backpointer in. So just
+ * dig it out of our list.
+ */
+static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+
+ mutex_lock(&mmpcam_devices_lock);
+ list_for_each_entry(cam, &mmpcam_devices, devlist) {
+ if (cam->pdev == pdev) {
+ mutex_unlock(&mmpcam_devices_lock);
+ return cam;
+ }
+ }
+ mutex_unlock(&mmpcam_devices_lock);
+ return NULL;
+}
+
+
+
+
+/*
+ * Power-related registers; this almost certainly belongs
+ * somewhere else.
+ *
+ * ARMADA 610 register manual, sec 7.2.1, p1842.
+ */
+#define CPU_SUBSYS_PMU_BASE 0xd4282800
+#define REG_CCIC_DCGCR 0x28 /* CCIC dyn clock gate ctrl reg */
+#define REG_CCIC_CRCR 0x50 /* CCIC clk reset ctrl reg */
+#define REG_CCIC2_CRCR 0xf4 /* CCIC2 clk reset ctrl reg */
+
+static void mcam_clk_enable(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_prepare_enable(mcam->clk[i]);
+ }
+}
+
+static void mcam_clk_disable(struct mcam_camera *mcam)
+{
+ int i;
+
+ for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_disable_unprepare(mcam->clk[i]);
+ }
+}
+
+/*
+ * Power control.
+ */
+static void mmpcam_power_up_ctlr(struct mmp_camera *cam)
+{
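+	/*
+	 * Ungate the CCIC clocks and bring the controller out of reset; the
+	 * particular values appear to come from the Armada 610 manual cited
+	 * above.
+	 */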
+ iowrite32(0x3f, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0x3805b, cam->power_regs + REG_CCIC_CRCR);
+ mdelay(1);
+}
+
+static int mmpcam_power_up(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+
+/*
+ * Turn on power and clocks to the controller.
+ */
+ mmpcam_power_up_ctlr(cam);
+/*
+ * Provide power to the sensor.
+ */
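+	/*
+	 * 0x60000002: the low bits set a clock divider of 2 (CLK_DIV_MASK);
+	 * what the upper bits select is not documented in this driver.
+	 */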
+ mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 1);
+ mdelay(5);
+ mcam_reg_clear_bit(mcam, REG_CTRL1, 0x10000000);
+ gpio_set_value(pdata->sensor_reset_gpio, 0); /* reset is active low */
+ mdelay(5);
+ gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
+ mdelay(5);
+
+ mcam_clk_enable(mcam);
+
+ return 0;
+}
+
+static void mmpcam_power_down(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+/*
+ * Turn off clocks and set reset lines
+ */
+ iowrite32(0, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0, cam->power_regs + REG_CCIC_CRCR);
+/*
+ * Shut down the sensor.
+ */
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 0);
+ gpio_set_value(pdata->sensor_reset_gpio, 0);
+
+ mcam_clk_disable(mcam);
+}
+
+static void mcam_ctlr_reset(struct mcam_camera *mcam)
+{
+ unsigned long val;
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+
+ if (mcam->ccic_id) {
+ /*
+ * Using CCIC2
+ */
+ val = ioread32(cam->power_regs + REG_CCIC2_CRCR);
+ iowrite32(val & ~0x2, cam->power_regs + REG_CCIC2_CRCR);
+ iowrite32(val | 0x2, cam->power_regs + REG_CCIC2_CRCR);
+ } else {
+ /*
+ * Using CCIC1
+ */
+ val = ioread32(cam->power_regs + REG_CCIC_CRCR);
+ iowrite32(val & ~0x2, cam->power_regs + REG_CCIC_CRCR);
+ iowrite32(val | 0x2, cam->power_regs + REG_CCIC_CRCR);
+ }
+}
+
+/*
+ * Calculate the dphy register values.
+ * There are three dphy registers being used.
+ * dphy[0] - CSI2_DPHY3
+ * dphy[1] - CSI2_DPHY5
+ * dphy[2] - CSI2_DPHY6
+ * CSI2_DPHY3 and CSI2_DPHY6 can be set with a default value
+ * or be calculated dynamically
+ */
+static void mmpcam_calc_dphy(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata = cam->pdev->dev.platform_data;
+ struct device *dev = &cam->pdev->dev;
+ unsigned long tx_clk_esc;
+
+ /*
+ * If CSI2_DPHY3 is calculated dynamically,
+ * pdata->lane_clk should be already set
+ * either in the board driver statically
+ * or in the sensor driver dynamically.
+ */
+ /*
+ * dphy[0] - CSI2_DPHY3:
+ * bit 0 ~ bit 7: HS Term Enable.
+	 *                    defines the time that the DPHY
+	 *                    waits before enabling the data
+	 *                    lane termination after detecting
+	 *                    that the sensor has driven the data
+	 *                    lanes to the LP00 bridge state.
+	 *                    The value is calculated by:
+	 *                    (Max T(D_TERM_EN)/Period(DDR)) - 1
+	 *       bit 8 ~ bit 15: HS_SETTLE
+	 *                    Time interval during which the HS
+	 *                    receiver shall ignore any Data Lane
+	 *                    HS transitions.
+	 *                    The value has been calibrated on
+	 *                    different boards. It seems to work well.
+	 *
+	 * For more detail, please refer to the
+	 * MIPI Alliance Specification for D-PHY
+	 * for an explanation of HS-SETTLE
+	 * and D-TERM-EN.
+ */
+ switch (pdata->dphy3_algo) {
+ case DPHY3_ALGO_PXA910:
+ /*
+ * Calculate CSI2_DPHY3 algo for PXA910
+ */
+ pdata->dphy[0] =
+ (((1 + (pdata->lane_clk * 80) / 1000) & 0xff) << 8)
+ | (1 + pdata->lane_clk * 35 / 1000);
+ break;
+ case DPHY3_ALGO_PXA2128:
+ /*
+ * Calculate CSI2_DPHY3 algo for PXA2128
+ */
+ pdata->dphy[0] =
+ (((2 + (pdata->lane_clk * 110) / 1000) & 0xff) << 8)
+ | (1 + pdata->lane_clk * 35 / 1000);
+ break;
+ default:
+ /*
+ * Use default CSI2_DPHY3 value for PXA688/PXA988
+ */
+ dev_dbg(dev, "camera: use the default CSI2_DPHY3 value\n");
+ }
+
+ /*
+	 * mipi_clk will never be changed; it is a fixed value on MMP.
+ */
+ if (IS_ERR(cam->mipi_clk))
+ return;
+
+ /* get the escape clk, this is hard coded */
+ clk_prepare_enable(cam->mipi_clk);
+ tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
+ clk_disable_unprepare(cam->mipi_clk);
+ /*
+ * dphy[2] - CSI2_DPHY6:
+ * bit 0 ~ bit 7: CK Term Enable
+ * Time for the Clock Lane receiver to enable the HS line
+ * termination. The value is calculated similarly with
+ * HS Term Enable
+ * bit 8 ~ bit 15: CK Settle
+ * Time interval during which the HS receiver shall ignore
+ * any Clock Lane HS transitions.
+ * The value is calibrated on the boards.
+ */
+ pdata->dphy[2] =
+ ((((534 * tx_clk_esc) / 2000 - 1) & 0xff) << 8)
+ | (((38 * tx_clk_esc) / 1000 - 1) & 0xff);
+
+ dev_dbg(dev, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+ pdata->dphy[0], pdata->dphy[1], pdata->dphy[2]);
+}
+
+static irqreturn_t mmpcam_irq(int irq, void *data)
+{
+ struct mcam_camera *mcam = data;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = mccic_irq(mcam, irqs);
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+static void mcam_init_clk(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (mcam_clks[i] != NULL) {
+			/*
+			 * Some clocks are not necessary on some boards;
+			 * we still try to run even if getting a clock fails.
+			 */
+ mcam->clk[i] = devm_clk_get(mcam->dev, mcam_clks[i]);
+ if (IS_ERR(mcam->clk[i]))
+ dev_warn(mcam->dev, "Could not get clk: %s\n",
+ mcam_clks[i]);
+ }
+ }
+}
+
+static int mmpcam_probe(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+ struct mcam_camera *mcam;
+ struct resource *res;
+ struct mmp_camera_platform_data *pdata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
+ if (cam == NULL)
+ return -ENOMEM;
+ cam->pdev = pdev;
+ INIT_LIST_HEAD(&cam->devlist);
+
+ mcam = &cam->mcam;
+ mcam->plat_power_up = mmpcam_power_up;
+ mcam->plat_power_down = mmpcam_power_down;
+ mcam->ctlr_reset = mcam_ctlr_reset;
+ mcam->calc_dphy = mmpcam_calc_dphy;
+ mcam->dev = &pdev->dev;
+ mcam->use_smbus = 0;
+ mcam->ccic_id = pdev->id;
+ mcam->mclk_min = pdata->mclk_min;
+ mcam->mclk_src = pdata->mclk_src;
+ mcam->mclk_div = pdata->mclk_div;
+ mcam->bus_type = pdata->bus_type;
+ mcam->dphy = pdata->dphy;
+ if (mcam->bus_type == V4L2_MBUS_CSI2) {
+ cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
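+		/*
+		 * The MIPI clock only seems to be needed when CSI2_DPHY6 must
+		 * be calculated (see mmpcam_calc_dphy()); a preset dphy[2]
+		 * lets us live without it.
+		 */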
+ if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
+ return PTR_ERR(cam->mipi_clk);
+ }
+ mcam->mipi_enabled = false;
+ mcam->lane = pdata->lane;
+ mcam->chip_id = MCAM_ARMADA610;
+ mcam->buffer_mode = B_DMA_sg;
+ strlcpy(mcam->bus_info, "platform:mmp-camera", sizeof(mcam->bus_info));
+ spin_lock_init(&mcam->dev_lock);
+ /*
+ * Get our I/O memory.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mcam->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcam->regs))
+ return PTR_ERR(mcam->regs);
+ mcam->regs_size = resource_size(res);
+ /*
+ * Power/clock memory is elsewhere; get it too. Perhaps this
+ * should really be managed outside of this driver?
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cam->power_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(cam->power_regs))
+ return PTR_ERR(cam->power_regs);
+ /*
+ * Find the i2c adapter. This assumes, of course, that the
+ * i2c bus is already up and functioning.
+ */
+ mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
+ if (mcam->i2c_adapter == NULL) {
+ dev_err(&pdev->dev, "No i2c adapter\n");
+ return -ENODEV;
+ }
+ /*
+ * Sensor GPIO pins.
+ */
+ ret = devm_gpio_request(&pdev->dev, pdata->sensor_power_gpio,
+ "cam-power");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor power gpio %d",
+ pdata->sensor_power_gpio);
+ return ret;
+ }
+ gpio_direction_output(pdata->sensor_power_gpio, 0);
+ ret = devm_gpio_request(&pdev->dev, pdata->sensor_reset_gpio,
+ "cam-reset");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
+ pdata->sensor_reset_gpio);
+ return ret;
+ }
+ gpio_direction_output(pdata->sensor_reset_gpio, 0);
+
+ mcam_init_clk(mcam);
+
+ /*
+ * Power the device up and hand it off to the core.
+ */
+ ret = mmpcam_power_up(mcam);
+ if (ret)
+ return ret;
+ ret = mccic_register(mcam);
+ if (ret)
+ goto out_power_down;
+ /*
+ * Finally, set up our IRQ now that the core is ready to
+ * deal with it.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+ cam->irq = res->start;
+ ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
+ "mmp-camera", mcam);
+ if (ret == 0) {
+ mmpcam_add_device(cam);
+ return 0;
+ }
+
+out_unregister:
+ mccic_shutdown(mcam);
+out_power_down:
+ mmpcam_power_down(mcam);
+ return ret;
+}
+
+
+static int mmpcam_remove(struct mmp_camera *cam)
+{
+ struct mcam_camera *mcam = &cam->mcam;
+
+ mmpcam_remove_device(cam);
+ mccic_shutdown(mcam);
+ mmpcam_power_down(mcam);
+ return 0;
+}
+
+static int mmpcam_platform_remove(struct platform_device *pdev)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ if (cam == NULL)
+ return -ENODEV;
+ return mmpcam_remove(cam);
+}
+
+/*
+ * Suspend/resume support.
+ */
+#ifdef CONFIG_PM
+
+static int mmpcam_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ if (state.event != PM_EVENT_SUSPEND)
+ return 0;
+ mccic_suspend(&cam->mcam);
+ return 0;
+}
+
+static int mmpcam_resume(struct platform_device *pdev)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ /*
+ * Power up unconditionally just in case the core tries to
+ * touch a register even if nothing was active before; trust
+ * me, it's better this way.
+ */
+ mmpcam_power_up_ctlr(cam);
+ return mccic_resume(&cam->mcam);
+}
+
+#endif
+
+
+static struct platform_driver mmpcam_driver = {
+ .probe = mmpcam_probe,
+ .remove = mmpcam_platform_remove,
+#ifdef CONFIG_PM
+ .suspend = mmpcam_suspend,
+ .resume = mmpcam_resume,
+#endif
+ .driver = {
+ .name = "mmp-camera",
+ }
+};
+
+
+static int __init mmpcam_init_module(void)
+{
+ mutex_init(&mmpcam_devices_lock);
+ return platform_driver_register(&mmpcam_driver);
+}
+
+static void __exit mmpcam_exit_module(void)
+{
+ platform_driver_unregister(&mmpcam_driver);
+ /*
+ * platform_driver_unregister() should have emptied the list
+ */
+ if (!list_empty(&mmpcam_devices))
+ pr_err("mmp_camera leaving devices behind\n");
+}
+
+module_init(mmpcam_init_module);
+module_exit(mmpcam_exit_module);
diff --git a/drivers/media/platform/meson/Makefile b/drivers/media/platform/meson/Makefile
new file mode 100644
index 000000000..597beb8f3
--- /dev/null
+++ b/drivers/media/platform/meson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_MESON_AO_CEC) += ao-cec.o
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
new file mode 100644
index 000000000..cd4be38ab
--- /dev/null
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -0,0 +1,744 @@
+/*
+ * Driver for Amlogic Meson AO CEC Controller
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved
+ * Copyright (C) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+/* CEC Registers */
+
+/*
+ * [2:1] cntl_clk
+ * - 0 = Disable clk (Power-off mode)
+ * - 1 = Enable gated clock (Normal mode)
+ * - 2 = Enable free-run clk (Debug mode)
+ */
+#define CEC_GEN_CNTL_REG 0x00
+
+#define CEC_GEN_CNTL_RESET BIT(0)
+#define CEC_GEN_CNTL_CLK_DISABLE 0
+#define CEC_GEN_CNTL_CLK_ENABLE 1
+#define CEC_GEN_CNTL_CLK_ENABLE_DBG 2
+#define CEC_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1)
+
+/*
+ * [7:0] cec_reg_addr
+ * [15:8] cec_reg_wrdata
+ * [16] cec_reg_wr
+ * - 0 = Read
+ * - 1 = Write
+ * [23] bus free
+ * [31:24] cec_reg_rddata
+ */
+#define CEC_RW_REG 0x04
+
+#define CEC_RW_ADDR GENMASK(7, 0)
+#define CEC_RW_WR_DATA GENMASK(15, 8)
+#define CEC_RW_WRITE_EN BIT(16)
+#define CEC_RW_BUS_BUSY BIT(23)
+#define CEC_RW_RD_DATA GENMASK(31, 24)
+
+/*
+ * [1] tx intr
+ * [2] rx intr
+ */
+#define CEC_INTR_MASKN_REG 0x08
+#define CEC_INTR_CLR_REG 0x0c
+#define CEC_INTR_STAT_REG 0x10
+
+#define CEC_INTR_TX BIT(1)
+#define CEC_INTR_RX BIT(2)
+
+/* CEC Commands */
+
+#define CEC_TX_MSG_0_HEADER 0x00
+#define CEC_TX_MSG_1_OPCODE 0x01
+#define CEC_TX_MSG_2_OP1 0x02
+#define CEC_TX_MSG_3_OP2 0x03
+#define CEC_TX_MSG_4_OP3 0x04
+#define CEC_TX_MSG_5_OP4 0x05
+#define CEC_TX_MSG_6_OP5 0x06
+#define CEC_TX_MSG_7_OP6 0x07
+#define CEC_TX_MSG_8_OP7 0x08
+#define CEC_TX_MSG_9_OP8 0x09
+#define CEC_TX_MSG_A_OP9 0x0A
+#define CEC_TX_MSG_B_OP10 0x0B
+#define CEC_TX_MSG_C_OP11 0x0C
+#define CEC_TX_MSG_D_OP12 0x0D
+#define CEC_TX_MSG_E_OP13 0x0E
+#define CEC_TX_MSG_F_OP14 0x0F
+#define CEC_TX_MSG_LENGTH 0x10
+#define CEC_TX_MSG_CMD 0x11
+#define CEC_TX_WRITE_BUF 0x12
+#define CEC_TX_CLEAR_BUF 0x13
+#define CEC_RX_MSG_CMD 0x14
+#define CEC_RX_CLEAR_BUF 0x15
+#define CEC_LOGICAL_ADDR0 0x16
+#define CEC_LOGICAL_ADDR1 0x17
+#define CEC_LOGICAL_ADDR2 0x18
+#define CEC_LOGICAL_ADDR3 0x19
+#define CEC_LOGICAL_ADDR4 0x1A
+#define CEC_CLOCK_DIV_H 0x1B
+#define CEC_CLOCK_DIV_L 0x1C
+#define CEC_QUIESCENT_25MS_BIT7_0 0x20
+#define CEC_QUIESCENT_25MS_BIT11_8 0x21
+#define CEC_STARTBITMINL2H_3MS5_BIT7_0 0x22
+#define CEC_STARTBITMINL2H_3MS5_BIT8 0x23
+#define CEC_STARTBITMAXL2H_3MS9_BIT7_0 0x24
+#define CEC_STARTBITMAXL2H_3MS9_BIT8 0x25
+#define CEC_STARTBITMINH_0MS6_BIT7_0 0x26
+#define CEC_STARTBITMINH_0MS6_BIT8 0x27
+#define CEC_STARTBITMAXH_1MS0_BIT7_0 0x28
+#define CEC_STARTBITMAXH_1MS0_BIT8 0x29
+#define CEC_STARTBITMINTOT_4MS3_BIT7_0 0x2A
+#define CEC_STARTBITMINTOT_4MS3_BIT9_8 0x2B
+#define CEC_STARTBITMAXTOT_4MS7_BIT7_0 0x2C
+#define CEC_STARTBITMAXTOT_4MS7_BIT9_8 0x2D
+#define CEC_LOGIC1MINL2H_0MS4_BIT7_0 0x2E
+#define CEC_LOGIC1MINL2H_0MS4_BIT8 0x2F
+#define CEC_LOGIC1MAXL2H_0MS8_BIT7_0 0x30
+#define CEC_LOGIC1MAXL2H_0MS8_BIT8 0x31
+#define CEC_LOGIC0MINL2H_1MS3_BIT7_0 0x32
+#define CEC_LOGIC0MINL2H_1MS3_BIT8 0x33
+#define CEC_LOGIC0MAXL2H_1MS7_BIT7_0 0x34
+#define CEC_LOGIC0MAXL2H_1MS7_BIT8 0x35
+#define CEC_LOGICMINTOTAL_2MS05_BIT7_0 0x36
+#define CEC_LOGICMINTOTAL_2MS05_BIT9_8 0x37
+#define CEC_LOGICMAXHIGH_2MS8_BIT7_0 0x38
+#define CEC_LOGICMAXHIGH_2MS8_BIT8 0x39
+#define CEC_LOGICERRLOW_3MS4_BIT7_0 0x3A
+#define CEC_LOGICERRLOW_3MS4_BIT8 0x3B
+#define CEC_NOMSMPPOINT_1MS05 0x3C
+#define CEC_DELCNTR_LOGICERR 0x3E
+#define CEC_TXTIME_17MS_BIT7_0 0x40
+#define CEC_TXTIME_17MS_BIT10_8 0x41
+#define CEC_TXTIME_2BIT_BIT7_0 0x42
+#define CEC_TXTIME_2BIT_BIT10_8 0x43
+#define CEC_TXTIME_4BIT_BIT7_0 0x44
+#define CEC_TXTIME_4BIT_BIT10_8 0x45
+#define CEC_STARTBITNOML2H_3MS7_BIT7_0 0x46
+#define CEC_STARTBITNOML2H_3MS7_BIT8 0x47
+#define CEC_STARTBITNOMH_0MS8_BIT7_0 0x48
+#define CEC_STARTBITNOMH_0MS8_BIT8 0x49
+#define CEC_LOGIC1NOML2H_0MS6_BIT7_0 0x4A
+#define CEC_LOGIC1NOML2H_0MS6_BIT8 0x4B
+#define CEC_LOGIC0NOML2H_1MS5_BIT7_0 0x4C
+#define CEC_LOGIC0NOML2H_1MS5_BIT8 0x4D
+#define CEC_LOGIC1NOMH_1MS8_BIT7_0 0x4E
+#define CEC_LOGIC1NOMH_1MS8_BIT8 0x4F
+#define CEC_LOGIC0NOMH_0MS9_BIT7_0 0x50
+#define CEC_LOGIC0NOMH_0MS9_BIT8 0x51
+#define CEC_LOGICERRLOW_3MS6_BIT7_0 0x52
+#define CEC_LOGICERRLOW_3MS6_BIT8 0x53
+#define CEC_CHKCONTENTION_0MS1 0x54
+#define CEC_PREPARENXTBIT_0MS05_BIT7_0 0x56
+#define CEC_PREPARENXTBIT_0MS05_BIT8 0x57
+#define CEC_NOMSMPACKPOINT_0MS45 0x58
+#define CEC_ACK0NOML2H_1MS5_BIT7_0 0x5A
+#define CEC_ACK0NOML2H_1MS5_BIT8 0x5B
+#define CEC_BUGFIX_DISABLE_0 0x60
+#define CEC_BUGFIX_DISABLE_1 0x61
+#define CEC_RX_MSG_0_HEADER 0x80
+#define CEC_RX_MSG_1_OPCODE 0x81
+#define CEC_RX_MSG_2_OP1 0x82
+#define CEC_RX_MSG_3_OP2 0x83
+#define CEC_RX_MSG_4_OP3 0x84
+#define CEC_RX_MSG_5_OP4 0x85
+#define CEC_RX_MSG_6_OP5 0x86
+#define CEC_RX_MSG_7_OP6 0x87
+#define CEC_RX_MSG_8_OP7 0x88
+#define CEC_RX_MSG_9_OP8 0x89
+#define CEC_RX_MSG_A_OP9 0x8A
+#define CEC_RX_MSG_B_OP10 0x8B
+#define CEC_RX_MSG_C_OP11 0x8C
+#define CEC_RX_MSG_D_OP12 0x8D
+#define CEC_RX_MSG_E_OP13 0x8E
+#define CEC_RX_MSG_F_OP14 0x8F
+#define CEC_RX_MSG_LENGTH 0x90
+#define CEC_RX_MSG_STATUS 0x91
+#define CEC_RX_NUM_MSG 0x92
+#define CEC_TX_MSG_STATUS 0x93
+#define CEC_TX_NUM_MSG 0x94
+
+
+/* CEC_TX_MSG_CMD definition */
+#define TX_NO_OP 0 /* No transaction */
+#define TX_REQ_CURRENT 1 /* Transmit earliest message in buffer */
+#define TX_ABORT 2 /* Abort transmitting earliest message */
+#define TX_REQ_NEXT 3 /* Overwrite earliest msg, transmit next */
+
+/* tx_msg_status definition */
+#define TX_IDLE 0 /* No transaction */
+#define TX_BUSY 1 /* Transmitter is busy */
+#define TX_DONE 2 /* Message successfully transmitted */
+#define TX_ERROR 3 /* Message transmitted with error */
+
+/* rx_msg_cmd */
+#define RX_NO_OP 0 /* No transaction */
+#define RX_ACK_CURRENT 1 /* Read earliest message in buffer */
+#define RX_DISABLE 2 /* Disable receiving latest message */
+#define RX_ACK_NEXT 3 /* Clear earliest msg, read next */
+
+/* rx_msg_status */
+#define RX_IDLE 0 /* No transaction */
+#define RX_BUSY 1 /* Receiver is busy */
+#define RX_DONE 2 /* Message has been received successfully */
+#define RX_ERROR 3 /* Message has been received with error */
+
+/* RX_CLEAR_BUF options */
+#define CLEAR_START 1
+#define CLEAR_STOP 0
+
+/* CEC_LOGICAL_ADDRx options */
+#define LOGICAL_ADDR_MASK 0xf
+#define LOGICAL_ADDR_VALID BIT(4)
+#define LOGICAL_ADDR_DISABLE 0
+
+#define CEC_CLK_RATE 32768
+
+struct meson_ao_cec_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *core;
+ spinlock_t cec_reg_lock;
+ struct cec_notifier *notify;
+ struct cec_adapter *adap;
+ struct cec_msg rx_msg;
+};
+
+#define writel_bits_relaxed(mask, val, addr) \
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
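+/*
+ * Example usage (as in meson_ao_cec_adap_enable below):
+ * writel_bits_relaxed(CEC_GEN_CNTL_RESET, 0, base + CEC_GEN_CNTL_REG)
+ * performs a read-modify-write that clears only the reset bit and leaves
+ * the clock-control field untouched.
+ */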
+
+static inline int meson_ao_cec_wait_busy(struct meson_ao_cec_device *ao_cec)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 5000);
+
+ while (readl_relaxed(ao_cec->base + CEC_RW_REG) & CEC_RW_BUS_BUSY) {
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void meson_ao_cec_read(struct meson_ao_cec_device *ao_cec,
+ unsigned long address, u8 *data,
+ int *res)
+{
+ unsigned long flags;
+ u32 reg = FIELD_PREP(CEC_RW_ADDR, address);
+ int ret = 0;
+
+ if (res && *res)
+ return;
+
+ spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
+
+ ret = meson_ao_cec_wait_busy(ao_cec);
+ if (ret)
+ goto read_out;
+
+ writel_relaxed(reg, ao_cec->base + CEC_RW_REG);
+
+ ret = meson_ao_cec_wait_busy(ao_cec);
+ if (ret)
+ goto read_out;
+
+ *data = FIELD_GET(CEC_RW_RD_DATA,
+ readl_relaxed(ao_cec->base + CEC_RW_REG));
+
+read_out:
+ spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
+
+ if (res)
+ *res = ret;
+}
+
+static void meson_ao_cec_write(struct meson_ao_cec_device *ao_cec,
+ unsigned long address, u8 data,
+ int *res)
+{
+ unsigned long flags;
+ u32 reg = FIELD_PREP(CEC_RW_ADDR, address) |
+ FIELD_PREP(CEC_RW_WR_DATA, data) |
+ CEC_RW_WRITE_EN;
+ int ret = 0;
+
+ if (res && *res)
+ return;
+
+ spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
+
+ ret = meson_ao_cec_wait_busy(ao_cec);
+ if (ret)
+ goto write_out;
+
+ writel_relaxed(reg, ao_cec->base + CEC_RW_REG);
+
+write_out:
+ spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
+
+ if (res)
+ *res = ret;
+}
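+
+/*
+ * Illustrative example (arbitrary values): writing 0x44 to
+ * CEC_LOGICAL_ADDR0 (0x16) composes reg = FIELD_PREP(CEC_RW_ADDR, 0x16) |
+ * FIELD_PREP(CEC_RW_WR_DATA, 0x44) | CEC_RW_WRITE_EN = 0x00014416, which
+ * is posted to CEC_RW_REG once the bus-busy bit has cleared.
+ */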
+
+static inline void meson_ao_cec_irq_setup(struct meson_ao_cec_device *ao_cec,
+ bool enable)
+{
+ u32 cfg = CEC_INTR_TX | CEC_INTR_RX;
+
+ writel_bits_relaxed(cfg, enable ? cfg : 0,
+ ao_cec->base + CEC_INTR_MASKN_REG);
+}
+
+static inline int meson_ao_cec_clear(struct meson_ao_cec_device *ao_cec)
+{
+ int ret = 0;
+
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_DISABLE, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 1, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 1, &ret);
+ if (ret)
+ return ret;
+
+ udelay(100);
+
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 0, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 0, &ret);
+ if (ret)
+ return ret;
+
+ udelay(100);
+
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret);
+
+ return ret;
+}
+
+static int meson_ao_cec_arbit_bit_time_set(struct meson_ao_cec_device *ao_cec,
+ unsigned int bit_set,
+ unsigned int time_set)
+{
+ int ret = 0;
+
+ switch (bit_set) {
+ case CEC_SIGNAL_FREE_TIME_RETRY:
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT7_0,
+ time_set & 0xff, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT10_8,
+ (time_set >> 8) & 0x7, &ret);
+ break;
+
+ case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT7_0,
+ time_set & 0xff, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT10_8,
+ (time_set >> 8) & 0x7, &ret);
+ break;
+
+ case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT7_0,
+ time_set & 0xff, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT10_8,
+ (time_set >> 8) & 0x7, &ret);
+ break;
+ }
+
+ return ret;
+}
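+
+/*
+ * The 11-bit time values are split across a register pair; e.g. the 0x2aa
+ * programmed for CEC_SIGNAL_FREE_TIME_NEXT_XFER in adap_enable ends up as
+ * 0xaa in CEC_TXTIME_17MS_BIT7_0 and 0x2 in CEC_TXTIME_17MS_BIT10_8.
+ */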
+
+static irqreturn_t meson_ao_cec_irq(int irq, void *data)
+{
+ struct meson_ao_cec_device *ao_cec = data;
+ u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG);
+
+ if (stat)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+static void meson_ao_cec_irq_tx(struct meson_ao_cec_device *ao_cec)
+{
+ unsigned long tx_status = 0;
+ u8 stat;
+ int ret = 0;
+
+ meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &stat, &ret);
+ if (ret)
+ goto tx_reg_err;
+
+ switch (stat) {
+ case TX_DONE:
+ tx_status = CEC_TX_STATUS_OK;
+ break;
+
+ case TX_BUSY:
+ tx_status = CEC_TX_STATUS_ARB_LOST;
+ break;
+
+ case TX_IDLE:
+ tx_status = CEC_TX_STATUS_LOW_DRIVE;
+ break;
+
+ case TX_ERROR:
+ default:
+ tx_status = CEC_TX_STATUS_NACK;
+ break;
+ }
+
+ /* Clear the TX interrupt */
+ writel_relaxed(CEC_INTR_TX, ao_cec->base + CEC_INTR_CLR_REG);
+
+ /* Stop TX */
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret);
+ if (ret)
+ goto tx_reg_err;
+
+ cec_transmit_attempt_done(ao_cec->adap, tx_status);
+ return;
+
+tx_reg_err:
+ cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR);
+}
+
+static void meson_ao_cec_irq_rx(struct meson_ao_cec_device *ao_cec)
+{
+ int i, ret = 0;
+ u8 reg;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_MSG_STATUS, &reg, &ret);
+ if (reg != RX_DONE)
+ goto rx_out;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_NUM_MSG, &reg, &ret);
+ if (reg != 1)
+ goto rx_out;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_MSG_LENGTH, &reg, &ret);
+
+ ao_cec->rx_msg.len = reg + 1;
+ if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE)
+ ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE;
+
+ for (i = 0; i < ao_cec->rx_msg.len; i++) {
+ u8 byte;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_MSG_0_HEADER + i, &byte, &ret);
+
+ ao_cec->rx_msg.msg[i] = byte;
+ }
+
+ if (ret)
+ goto rx_out;
+
+ cec_received_msg(ao_cec->adap, &ao_cec->rx_msg);
+
+rx_out:
+ /* Clear the RX interrupt */
+ writel_relaxed(CEC_INTR_RX, ao_cec->base + CEC_INTR_CLR_REG);
+
+ /* Ack RX message */
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_ACK_CURRENT, &ret);
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret);
+
+ /* Clear RX buffer */
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_START, &ret);
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_STOP, &ret);
+}
+
+static irqreturn_t meson_ao_cec_irq_thread(int irq, void *data)
+{
+ struct meson_ao_cec_device *ao_cec = data;
+ u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG);
+
+ if (stat & CEC_INTR_TX)
+ meson_ao_cec_irq_tx(ao_cec);
+
+ meson_ao_cec_irq_rx(ao_cec);
+
+ return IRQ_HANDLED;
+}
+
+static int meson_ao_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct meson_ao_cec_device *ao_cec = adap->priv;
+ int ret = 0;
+
+ meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
+ LOGICAL_ADDR_DISABLE, &ret);
+ if (ret)
+ return ret;
+
+ ret = meson_ao_cec_clear(ao_cec);
+ if (ret)
+ return ret;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ return 0;
+
+ meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
+ logical_addr & LOGICAL_ADDR_MASK, &ret);
+ if (ret)
+ return ret;
+
+ udelay(100);
+
+ meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
+ (logical_addr & LOGICAL_ADDR_MASK) |
+ LOGICAL_ADDR_VALID, &ret);
+
+ return ret;
+}
+
+static int meson_ao_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct meson_ao_cec_device *ao_cec = adap->priv;
+ int i, ret = 0;
+ u8 reg;
+
+ meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &reg, &ret);
+ if (ret)
+ return ret;
+
+ if (reg == TX_BUSY) {
+ dev_dbg(&ao_cec->pdev->dev, "%s: busy TX: aborting\n",
+ __func__);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
+ }
+
+ for (i = 0; i < msg->len; i++) {
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_0_HEADER + i,
+ msg->msg[i], &ret);
+ }
+
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_LENGTH, msg->len - 1, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_REQ_CURRENT, &ret);
+
+ return ret;
+}
+
+static int meson_ao_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct meson_ao_cec_device *ao_cec = adap->priv;
+ int ret;
+
+ meson_ao_cec_irq_setup(ao_cec, false);
+
+ writel_bits_relaxed(CEC_GEN_CNTL_RESET, CEC_GEN_CNTL_RESET,
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ if (!enable)
+ return 0;
+
+ /* Enable gated clock (Normal mode). */
+ writel_bits_relaxed(CEC_GEN_CNTL_CLK_CTRL_MASK,
+ FIELD_PREP(CEC_GEN_CNTL_CLK_CTRL_MASK,
+ CEC_GEN_CNTL_CLK_ENABLE),
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ udelay(100);
+
+ /* Release Reset */
+ writel_bits_relaxed(CEC_GEN_CNTL_RESET, 0,
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ /* Clear buffers */
+ ret = meson_ao_cec_clear(ao_cec);
+ if (ret)
+ return ret;
+
+ /* CEC arbitration 3/5/7 bit time set. */
+ ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
+ CEC_SIGNAL_FREE_TIME_RETRY,
+ 0x118);
+ if (ret)
+ return ret;
+ ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
+ CEC_SIGNAL_FREE_TIME_NEW_INITIATOR,
+ 0x000);
+ if (ret)
+ return ret;
+ ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
+ CEC_SIGNAL_FREE_TIME_NEXT_XFER,
+ 0x2aa);
+ if (ret)
+ return ret;
+
+ meson_ao_cec_irq_setup(ao_cec, true);
+
+ return 0;
+}
+
+static const struct cec_adap_ops meson_ao_cec_ops = {
+ .adap_enable = meson_ao_cec_adap_enable,
+ .adap_log_addr = meson_ao_cec_set_log_addr,
+ .adap_transmit = meson_ao_cec_transmit,
+};
+
+static int meson_ao_cec_probe(struct platform_device *pdev)
+{
+ struct meson_ao_cec_device *ao_cec;
+ struct platform_device *hdmi_dev;
+ struct device_node *np;
+ struct resource *res;
+ int ret, irq;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node\n");
+ return -ENODEV;
+ }
+
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL);
+ if (!ao_cec)
+ return -ENOMEM;
+
+ spin_lock_init(&ao_cec->cec_reg_lock);
+
+ ao_cec->notify = cec_notifier_get(&hdmi_dev->dev);
+ if (!ao_cec->notify)
+ return -ENOMEM;
+
+ ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_ops, ao_cec,
+ "meson_ao_cec",
+ CEC_CAP_LOG_ADDRS |
+ CEC_CAP_TRANSMIT |
+ CEC_CAP_RC |
+ CEC_CAP_PASSTHROUGH,
+ 1); /* Use 1 for now */
+ if (IS_ERR(ao_cec->adap)) {
+ ret = PTR_ERR(ao_cec->adap);
+ goto out_probe_notify;
+ }
+
+ ao_cec->adap->owner = THIS_MODULE;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ao_cec->base)) {
+ ret = PTR_ERR(ao_cec->base);
+ goto out_probe_adapter;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ meson_ao_cec_irq,
+ meson_ao_cec_irq_thread,
+ 0, NULL, ao_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request failed\n");
+ goto out_probe_adapter;
+ }
+
+ ao_cec->core = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(ao_cec->core)) {
+ dev_err(&pdev->dev, "core clock request failed\n");
+ ret = PTR_ERR(ao_cec->core);
+ goto out_probe_adapter;
+ }
+
+ ret = clk_prepare_enable(ao_cec->core);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock enable failed\n");
+ goto out_probe_adapter;
+ }
+
+ ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock set rate failed\n");
+ goto out_probe_clk;
+ }
+
+ device_reset_optional(&pdev->dev);
+
+ ao_cec->pdev = pdev;
+ platform_set_drvdata(pdev, ao_cec);
+
+ ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_notifier_put(ao_cec->notify);
+ goto out_probe_clk;
+ }
+
+ /* Setup Hardware */
+ writel_relaxed(CEC_GEN_CNTL_RESET,
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ cec_register_cec_notifier(ao_cec->adap, ao_cec->notify);
+
+ return 0;
+
+out_probe_clk:
+ clk_disable_unprepare(ao_cec->core);
+
+out_probe_adapter:
+ cec_delete_adapter(ao_cec->adap);
+
+out_probe_notify:
+ cec_notifier_put(ao_cec->notify);
+
+ dev_err(&pdev->dev, "CEC controller registration failed\n");
+
+ return ret;
+}
+
+static int meson_ao_cec_remove(struct platform_device *pdev)
+{
+ struct meson_ao_cec_device *ao_cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ao_cec->core);
+
+ cec_unregister_adapter(ao_cec->adap);
+
+ cec_notifier_put(ao_cec->notify);
+
+ return 0;
+}
+
+static const struct of_device_id meson_ao_cec_of_match[] = {
+ { .compatible = "amlogic,meson-gx-ao-cec", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_ao_cec_of_match);
+
+static struct platform_driver meson_ao_cec_driver = {
+ .probe = meson_ao_cec_probe,
+ .remove = meson_ao_cec_remove,
+ .driver = {
+ .name = "meson-ao-cec",
+ .of_match_table = of_match_ptr(meson_ao_cec_of_match),
+ },
+};
+
+module_platform_driver(meson_ao_cec_driver);
+
+MODULE_DESCRIPTION("Meson AO CEC Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/mtk-jpeg/Makefile b/drivers/media/platform/mtk-jpeg/Makefile
new file mode 100644
index 000000000..b2e6069f3
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/Makefile
@@ -0,0 +1,2 @@
+mtk_jpeg-objs := mtk_jpeg_core.o mtk_jpeg_hw.o mtk_jpeg_parse.o
+obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk_jpeg.o
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
new file mode 100644
index 000000000..f0bca30a0
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -0,0 +1,1291 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_jpeg_hw.h"
+#include "mtk_jpeg_core.h"
+#include "mtk_jpeg_parse.h"
+
+static struct mtk_jpeg_fmt mtk_jpeg_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .colplanes = 1,
+ .flags = MTK_JPEG_FMT_FLAG_DEC_OUTPUT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .h_sample = {4, 2, 2},
+ .v_sample = {4, 2, 2},
+ .colplanes = 3,
+ .h_align = 5,
+ .v_align = 4,
+ .flags = MTK_JPEG_FMT_FLAG_DEC_CAPTURE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422M,
+ .h_sample = {4, 2, 2},
+ .v_sample = {4, 4, 4},
+ .colplanes = 3,
+ .h_align = 5,
+ .v_align = 3,
+ .flags = MTK_JPEG_FMT_FLAG_DEC_CAPTURE,
+ },
+};
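+
+/*
+ * h_sample/v_sample give the per-plane sample count within a 4x4 pixel
+ * block. Illustrative example: a 64x48 V4L2_PIX_FMT_YUV420M frame has a
+ * 64x48 luma plane and two 32x24 chroma planes (stride = width *
+ * h_sample / 4, lines = height * v_sample / 4).
+ */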
+
+#define MTK_JPEG_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_formats)
+
+enum {
+ MTK_JPEG_BUF_FLAGS_INIT = 0,
+ MTK_JPEG_BUF_FLAGS_LAST_FRAME = 1,
+};
+
+struct mtk_jpeg_src_buf {
+ struct vb2_v4l2_buffer b;
+ struct list_head list;
+ int flags;
+ struct mtk_jpeg_dec_param dec_param;
+};
+
+static int debug;
+module_param(debug, int, 0644);
+
+static inline struct mtk_jpeg_ctx *mtk_jpeg_fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_jpeg_ctx, fh);
+}
+
+static inline struct mtk_jpeg_src_buf *mtk_jpeg_vb2_to_srcbuf(
+ struct vb2_buffer *vb)
+{
+ return container_of(to_vb2_v4l2_buffer(vb), struct mtk_jpeg_src_buf, b);
+}
+
+static int mtk_jpeg_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mtk_jpeg_dev *jpeg = video_drvdata(file);
+
+ strlcpy(cap->driver, MTK_JPEG_NAME " decoder", sizeof(cap->driver));
+ strlcpy(cap->card, MTK_JPEG_NAME " decoder", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(jpeg->dev));
+
+ return 0;
+}
+
+static int mtk_jpeg_enum_fmt(struct mtk_jpeg_fmt *mtk_jpeg_formats, int n,
+ struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num = 0;
+
+ for (i = 0; i < n; ++i) {
+ if (mtk_jpeg_formats[i].flags & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ if (i >= n)
+ return -EINVAL;
+
+ f->pixelformat = mtk_jpeg_formats[i].fourcc;
+
+ return 0;
+}
+
+static int mtk_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_jpeg_enum_fmt(mtk_jpeg_formats, MTK_JPEG_NUM_FORMATS, f,
+ MTK_JPEG_FMT_FLAG_DEC_CAPTURE);
+}
+
+static int mtk_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_jpeg_enum_fmt(mtk_jpeg_formats, MTK_JPEG_NUM_FORMATS, f,
+ MTK_JPEG_FMT_FLAG_DEC_OUTPUT);
+}
+
+static struct mtk_jpeg_q_data *mtk_jpeg_get_q_data(struct mtk_jpeg_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ return &ctx->cap_q;
+}
+
+static struct mtk_jpeg_fmt *mtk_jpeg_find_format(struct mtk_jpeg_ctx *ctx,
+ u32 pixelformat,
+ unsigned int fmt_type)
+{
+ unsigned int k, fmt_flag;
+
+ fmt_flag = (fmt_type == MTK_JPEG_FMT_TYPE_OUTPUT) ?
+ MTK_JPEG_FMT_FLAG_DEC_OUTPUT :
+ MTK_JPEG_FMT_FLAG_DEC_CAPTURE;
+
+ for (k = 0; k < MTK_JPEG_NUM_FORMATS; k++) {
+ struct mtk_jpeg_fmt *fmt = &mtk_jpeg_formats[k];
+
+ if (fmt->fourcc == pixelformat && fmt->flags & fmt_flag)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static void mtk_jpeg_bound_align_image(u32 *w, unsigned int wmin,
+ unsigned int wmax, unsigned int walign,
+ u32 *h, unsigned int hmin,
+ unsigned int hmax, unsigned int halign)
+{
+ int width, height, w_step, h_step;
+
+ width = *w;
+ height = *h;
+ w_step = 1 << walign;
+ h_step = 1 << halign;
+
+ v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+ if (*w < width && (*w + w_step) <= wmax)
+ *w += w_step;
+ if (*h < height && (*h + h_step) <= hmax)
+ *h += h_step;
+}
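+
+/*
+ * Worked example (illustrative, assuming v4l_bound_align_image rounds to
+ * the nearest aligned value): with walign = 4 a requested width of 100 is
+ * first rounded to 96 and then nudged back up to 112, so the aligned size
+ * never falls below what the caller asked for.
+ */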
+
+static void mtk_jpeg_adjust_fmt_mplane(struct mtk_jpeg_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_q_data *q_data;
+ int i;
+
+ q_data = mtk_jpeg_get_q_data(ctx, f->type);
+
+ pix_mp->width = q_data->w;
+ pix_mp->height = q_data->h;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->colplanes;
+
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ pix_mp->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix_mp->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+}
+
+static int mtk_jpeg_try_fmt_mplane(struct v4l2_format *f,
+ struct mtk_jpeg_fmt *fmt,
+ struct mtk_jpeg_ctx *ctx, int q_type)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ int i;
+
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ if (ctx->state != MTK_JPEG_INIT) {
+ mtk_jpeg_adjust_fmt_mplane(ctx, f);
+ goto end;
+ }
+
+ pix_mp->num_planes = fmt->colplanes;
+ pix_mp->pixelformat = fmt->fourcc;
+
+ if (q_type == MTK_JPEG_FMT_TYPE_OUTPUT) {
+ struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[0];
+
+ mtk_jpeg_bound_align_image(&pix_mp->width, MTK_JPEG_MIN_WIDTH,
+ MTK_JPEG_MAX_WIDTH, 0,
+ &pix_mp->height, MTK_JPEG_MIN_HEIGHT,
+ MTK_JPEG_MAX_HEIGHT, 0);
+
+ memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
+ pfmt->bytesperline = 0;
+ /* Source size must be aligned to 128 */
+ pfmt->sizeimage = mtk_jpeg_align(pfmt->sizeimage, 128);
+ if (pfmt->sizeimage == 0)
+ pfmt->sizeimage = MTK_JPEG_DEFAULT_SIZEIMAGE;
+ goto end;
+ }
+
+ /* type is MTK_JPEG_FMT_TYPE_CAPTURE */
+ mtk_jpeg_bound_align_image(&pix_mp->width, MTK_JPEG_MIN_WIDTH,
+ MTK_JPEG_MAX_WIDTH, fmt->h_align,
+ &pix_mp->height, MTK_JPEG_MIN_HEIGHT,
+ MTK_JPEG_MAX_HEIGHT, fmt->v_align);
+
+ for (i = 0; i < fmt->colplanes; i++) {
+ struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[i];
+ u32 stride = pix_mp->width * fmt->h_sample[i] / 4;
+ u32 h = pix_mp->height * fmt->v_sample[i] / 4;
+
+ memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
+ pfmt->bytesperline = stride;
+ pfmt->sizeimage = stride * h;
+ }
+end:
+ v4l2_dbg(2, debug, &jpeg->v4l2_dev, "wxh:%ux%u\n",
+ pix_mp->width, pix_mp->height);
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ v4l2_dbg(2, debug, &jpeg->v4l2_dev,
+ "plane[%d] bpl=%u, size=%u\n",
+ i,
+ pix_mp->plane_fmt[i].bytesperline,
+ pix_mp->plane_fmt[i].sizeimage);
+ }
+ return 0;
+}
+
+static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct mtk_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = mtk_jpeg_get_q_data(ctx, f->type);
+
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ pix_mp->width = q_data->w;
+ pix_mp->height = q_data->h;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->colplanes;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->quantization = ctx->quantization;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) g_fmt:%c%c%c%c wxh:%ux%u\n",
+ f->type,
+ (pix_mp->pixelformat & 0xff),
+ (pix_mp->pixelformat >> 8 & 0xff),
+ (pix_mp->pixelformat >> 16 & 0xff),
+ (pix_mp->pixelformat >> 24 & 0xff),
+ pix_mp->width, pix_mp->height);
+
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[i];
+
+ pfmt->bytesperline = q_data->bytesperline[i];
+ pfmt->sizeimage = q_data->sizeimage[i];
+ memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev,
+ "plane[%d] bpl=%u, size=%u\n",
+ i,
+ pfmt->bytesperline,
+ pfmt->sizeimage);
+ }
+ return 0;
+}
+
+static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_fmt *fmt;
+
+ fmt = mtk_jpeg_find_format(ctx, f->fmt.pix_mp.pixelformat,
+ MTK_JPEG_FMT_TYPE_CAPTURE);
+ if (!fmt)
+ fmt = ctx->cap_q.fmt;
+
+ v4l2_dbg(2, debug, &ctx->jpeg->v4l2_dev, "(%d) try_fmt:%c%c%c%c\n",
+ f->type,
+ (fmt->fourcc & 0xff),
+ (fmt->fourcc >> 8 & 0xff),
+ (fmt->fourcc >> 16 & 0xff),
+ (fmt->fourcc >> 24 & 0xff));
+
+ return mtk_jpeg_try_fmt_mplane(f, fmt, ctx, MTK_JPEG_FMT_TYPE_CAPTURE);
+}
+
+static int mtk_jpeg_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_fmt *fmt;
+
+ fmt = mtk_jpeg_find_format(ctx, f->fmt.pix_mp.pixelformat,
+ MTK_JPEG_FMT_TYPE_OUTPUT);
+ if (!fmt)
+ fmt = ctx->out_q.fmt;
+
+ v4l2_dbg(2, debug, &ctx->jpeg->v4l2_dev, "(%d) try_fmt:%c%c%c%c\n",
+ f->type,
+ (fmt->fourcc & 0xff),
+ (fmt->fourcc >> 8 & 0xff),
+ (fmt->fourcc >> 16 & 0xff),
+ (fmt->fourcc >> 24 & 0xff));
+
+ return mtk_jpeg_try_fmt_mplane(f, fmt, ctx, MTK_JPEG_FMT_TYPE_OUTPUT);
+}
+
+static int mtk_jpeg_s_fmt_mplane(struct mtk_jpeg_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct mtk_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ unsigned int f_type;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = mtk_jpeg_get_q_data(ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&jpeg->v4l2_dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
+ MTK_JPEG_FMT_TYPE_OUTPUT : MTK_JPEG_FMT_TYPE_CAPTURE;
+
+ q_data->fmt = mtk_jpeg_find_format(ctx, pix_mp->pixelformat, f_type);
+ q_data->w = pix_mp->width;
+ q_data->h = pix_mp->height;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->quantization = pix_mp->quantization;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) s_fmt:%c%c%c%c wxh:%ux%u\n",
+ f->type,
+ (q_data->fmt->fourcc & 0xff),
+ (q_data->fmt->fourcc >> 8 & 0xff),
+ (q_data->fmt->fourcc >> 16 & 0xff),
+ (q_data->fmt->fourcc >> 24 & 0xff),
+ q_data->w, q_data->h);
+
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ q_data->bytesperline[i] = pix_mp->plane_fmt[i].bytesperline;
+ q_data->sizeimage[i] = pix_mp->plane_fmt[i].sizeimage;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev,
+ "plane[%d] bpl=%u, size=%u\n",
+ i, q_data->bytesperline[i], q_data->sizeimage[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_jpeg_s_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = mtk_jpeg_try_fmt_vid_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f);
+}
+
+static int mtk_jpeg_s_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = mtk_jpeg_try_fmt_vid_cap_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f);
+}
+
+static void mtk_jpeg_queue_src_chg_event(struct mtk_jpeg_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static int mtk_jpeg_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mtk_jpeg_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.width = ctx->out_q.w;
+ s->r.height = ctx->out_q.h;
+ s->r.left = 0;
+ s->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r.width = ctx->cap_q.w;
+ s->r.height = ctx->cap_q.h;
+ s->r.left = 0;
+ s->r.top = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mtk_jpeg_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->out_q.w;
+ s->r.height = ctx->out_q.h;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ goto end;
+
+ vq = v4l2_m2m_get_vq(fh->m2m_ctx, buf->type);
+ if (buf->index >= vq->num_buffers) {
+ dev_err(ctx->jpeg->dev, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = vq->bufs[buf->index];
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
+ jpeg_src_buf->flags = (buf->m.planes[0].bytesused == 0) ?
+ MTK_JPEG_BUF_FLAGS_LAST_FRAME : MTK_JPEG_BUF_FLAGS_INIT;
+end:
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
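+
+/*
+ * An OUTPUT buffer queued with bytesused == 0 is tagged
+ * MTK_JPEG_BUF_FLAGS_LAST_FRAME and acts as an end-of-stream marker;
+ * device_run later completes it with an empty capture buffer instead of
+ * starting a decode.
+ */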
+
+static const struct v4l2_ioctl_ops mtk_jpeg_ioctl_ops = {
+ .vidioc_querycap = mtk_jpeg_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = mtk_jpeg_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = mtk_jpeg_enum_fmt_vid_out,
+ .vidioc_try_fmt_vid_cap_mplane = mtk_jpeg_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mtk_jpeg_try_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = mtk_jpeg_g_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mtk_jpeg_g_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mtk_jpeg_s_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mtk_jpeg_s_fmt_vid_out_mplane,
+ .vidioc_qbuf = mtk_jpeg_qbuf,
+ .vidioc_subscribe_event = mtk_jpeg_subscribe_event,
+ .vidioc_g_selection = mtk_jpeg_g_selection,
+ .vidioc_s_selection = mtk_jpeg_s_selection,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int mtk_jpeg_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ struct mtk_jpeg_q_data *q_data = NULL;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ int i;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) buf_req count=%u\n",
+ q->type, *num_buffers);
+
+ q_data = mtk_jpeg_get_q_data(ctx, q->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (*num_planes) {
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = q_data->fmt->colplanes;
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ sizes[i] = q_data->sizeimage[i];
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "sizeimage[%d]=%u\n",
+ i, sizes[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_jpeg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_jpeg_q_data *q_data = NULL;
+ int i;
+
+ q_data = mtk_jpeg_get_q_data(ctx, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+
+ for (i = 0; i < q_data->fmt->colplanes; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+static bool mtk_jpeg_check_resolution_change(struct mtk_jpeg_ctx *ctx,
+ struct mtk_jpeg_dec_param *param)
+{
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct mtk_jpeg_q_data *q_data;
+
+ q_data = &ctx->out_q;
+ if (q_data->w != param->pic_w || q_data->h != param->pic_h) {
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "Picture size change\n");
+ return true;
+ }
+
+ q_data = &ctx->cap_q;
+ if (q_data->fmt != mtk_jpeg_find_format(ctx, param->dst_fourcc,
+ MTK_JPEG_FMT_TYPE_CAPTURE)) {
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "format change\n");
+ return true;
+ }
+ return false;
+}
+
+static void mtk_jpeg_set_queue_data(struct mtk_jpeg_ctx *ctx,
+ struct mtk_jpeg_dec_param *param)
+{
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct mtk_jpeg_q_data *q_data;
+ int i;
+
+ q_data = &ctx->out_q;
+ q_data->w = param->pic_w;
+ q_data->h = param->pic_h;
+
+ q_data = &ctx->cap_q;
+ q_data->w = param->dec_w;
+ q_data->h = param->dec_h;
+ q_data->fmt = mtk_jpeg_find_format(ctx,
+ param->dst_fourcc,
+ MTK_JPEG_FMT_TYPE_CAPTURE);
+
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ q_data->bytesperline[i] = param->mem_stride[i];
+ q_data->sizeimage[i] = param->comp_size[i];
+ }
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev,
+ "set_parse cap:%c%c%c%c pic(%u, %u), buf(%u, %u)\n",
+ (param->dst_fourcc & 0xff),
+ (param->dst_fourcc >> 8 & 0xff),
+ (param->dst_fourcc >> 16 & 0xff),
+ (param->dst_fourcc >> 24 & 0xff),
+ param->pic_w, param->pic_h,
+ param->dec_w, param->dec_h);
+}
+
+static void mtk_jpeg_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_jpeg_dec_param *param;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+ bool header_valid;
+
+ v4l2_dbg(2, debug, &jpeg->v4l2_dev, "(%d) buf_q id=%d, vb=%p\n",
+ vb->vb2_queue->type, vb->index, vb);
+
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ goto end;
+
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
+ param = &jpeg_src_buf->dec_param;
+ memset(param, 0, sizeof(*param));
+
+ if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "Got eos\n");
+ goto end;
+ }
+ header_valid = mtk_jpeg_parse(param, (u8 *)vb2_plane_vaddr(vb, 0),
+ vb2_get_plane_payload(vb, 0));
+ if (!header_valid) {
+ v4l2_err(&jpeg->v4l2_dev, "Header invalid.\n");
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ if (ctx->state == MTK_JPEG_INIT) {
+ struct vb2_queue *dst_vq = v4l2_m2m_get_vq(
+ ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ mtk_jpeg_queue_src_chg_event(ctx);
+ mtk_jpeg_set_queue_data(ctx, param);
+ ctx->state = vb2_is_streaming(dst_vq) ?
+ MTK_JPEG_SOURCE_CHANGE : MTK_JPEG_RUNNING;
+ }
+end:
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
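+
+/*
+ * On the first parsed header (or on a mid-stream resolution change in
+ * device_run) a V4L2_EVENT_SOURCE_CHANGE is queued; if the capture queue
+ * is already streaming, the context waits in MTK_JPEG_SOURCE_CHANGE until
+ * mtk_jpeg_stop_streaming latches the new format.
+ */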
+
+static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ return v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+}
+
+static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vb;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(ctx->jpeg->dev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vb;
+
+ /*
+ * STREAMOFF is an acknowledgment for source change event.
+ * Before STREAMOFF, we still have to return the old resolution and
+ * subsampling. Update capture queue when the stream is off.
+ */
+ if (ctx->state == MTK_JPEG_SOURCE_CHANGE &&
+ !V4L2_TYPE_IS_OUTPUT(q->type)) {
+ struct mtk_jpeg_src_buf *src_buf;
+
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf);
+ mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param);
+ ctx->state = MTK_JPEG_RUNNING;
+ } else if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ ctx->state = MTK_JPEG_INIT;
+ }
+
+ while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+
+ pm_runtime_put_sync(ctx->jpeg->dev);
+}
+
+static const struct vb2_ops mtk_jpeg_qops = {
+ .queue_setup = mtk_jpeg_queue_setup,
+ .buf_prepare = mtk_jpeg_buf_prepare,
+ .buf_queue = mtk_jpeg_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = mtk_jpeg_start_streaming,
+ .stop_streaming = mtk_jpeg_stop_streaming,
+};
+
+static void mtk_jpeg_set_dec_src(struct mtk_jpeg_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct mtk_jpeg_bs *bs)
+{
+ bs->str_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ bs->end_addr = bs->str_addr +
+ mtk_jpeg_align(vb2_get_plane_payload(src_buf, 0), 16);
+ bs->size = mtk_jpeg_align(vb2_plane_size(src_buf, 0), 128);
+}
+
+static int mtk_jpeg_set_dec_dst(struct mtk_jpeg_ctx *ctx,
+ struct mtk_jpeg_dec_param *param,
+ struct vb2_buffer *dst_buf,
+ struct mtk_jpeg_fb *fb)
+{
+ int i;
+
+ if (param->comp_num != dst_buf->num_planes) {
+ dev_err(ctx->jpeg->dev, "plane number mismatch (%u != %u)\n",
+ param->comp_num, dst_buf->num_planes);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dst_buf->num_planes; i++) {
+ if (vb2_plane_size(dst_buf, i) < param->comp_size[i]) {
+ dev_err(ctx->jpeg->dev,
+ "buffer size is underflow (%lu < %u)\n",
+ vb2_plane_size(dst_buf, 0),
+ param->comp_size[i]);
+ return -EINVAL;
+ }
+ fb->plane_addr[i] = vb2_dma_contig_plane_dma_addr(dst_buf, i);
+ }
+
+ return 0;
+}
+
+static void mtk_jpeg_device_run(void *priv)
+{
+ struct mtk_jpeg_ctx *ctx = priv;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ unsigned long flags;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+ struct mtk_jpeg_bs bs;
+ struct mtk_jpeg_fb fb;
+ int i;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
+
+ if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
+ for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0);
+ buf_state = VB2_BUF_STATE_DONE;
+ goto dec_end;
+ }
+
+ if (mtk_jpeg_check_resolution_change(ctx, &jpeg_src_buf->dec_param)) {
+ mtk_jpeg_queue_src_chg_event(ctx);
+ ctx->state = MTK_JPEG_SOURCE_CHANGE;
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
+ goto dec_end;
+
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+ mtk_jpeg_dec_set_config(jpeg->dec_reg_base,
+ &jpeg_src_buf->dec_param, &bs, &fb);
+
+ mtk_jpeg_dec_start(jpeg->dec_reg_base);
+ spin_unlock_irqrestore(&jpeg->hw_lock, flags);
+ return;
+
+dec_end:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static int mtk_jpeg_job_ready(void *priv)
+{
+ struct mtk_jpeg_ctx *ctx = priv;
+
+ return (ctx->state == MTK_JPEG_RUNNING) ? 1 : 0;
+}
+
+static const struct v4l2_m2m_ops mtk_jpeg_m2m_ops = {
+ .device_run = mtk_jpeg_device_run,
+ .job_ready = mtk_jpeg_job_ready,
+};
+
+static int mtk_jpeg_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_jpeg_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct mtk_jpeg_src_buf);
+ src_vq->ops = &mtk_jpeg_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpeg->lock;
+ src_vq->dev = ctx->jpeg->dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &mtk_jpeg_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpeg->lock;
+ dst_vq->dev = ctx->jpeg->dev;
+ ret = vb2_queue_init(dst_vq);
+
+ return ret;
+}
+
+static void mtk_jpeg_clk_on(struct mtk_jpeg_dev *jpeg)
+{
+ int ret;
+
+ ret = mtk_smi_larb_get(jpeg->larb);
+ if (ret)
+ dev_err(jpeg->dev, "mtk_smi_larb_get larbvdec fail %d\n", ret);
+ clk_prepare_enable(jpeg->clk_jdec_smi);
+ clk_prepare_enable(jpeg->clk_jdec);
+}
+
+static void mtk_jpeg_clk_off(struct mtk_jpeg_dev *jpeg)
+{
+ clk_disable_unprepare(jpeg->clk_jdec);
+ clk_disable_unprepare(jpeg->clk_jdec_smi);
+ mtk_smi_larb_put(jpeg->larb);
+}
+
+static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
+{
+ struct mtk_jpeg_dev *jpeg = priv;
+ struct mtk_jpeg_ctx *ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ u32 dec_irq_ret;
+ u32 dec_ret;
+ int i;
+
+ dec_ret = mtk_jpeg_dec_get_int_status(jpeg->dec_reg_base);
+ dec_irq_ret = mtk_jpeg_dec_enum_result(dec_ret);
+ ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&jpeg->v4l2_dev, "Context is NULL\n");
+ return IRQ_HANDLED;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
+
+ if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+
+ if (dec_irq_ret != MTK_JPEG_DEC_RESULT_EOF_DONE) {
+ dev_err(jpeg->dev, "decode failed\n");
+ goto dec_end;
+ }
+
+ for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, i,
+ jpeg_src_buf->dec_param.comp_size[i]);
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+dec_end:
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ return IRQ_HANDLED;
+}
+
+static void mtk_jpeg_set_default_params(struct mtk_jpeg_ctx *ctx)
+{
+ struct mtk_jpeg_q_data *q = &ctx->out_q;
+ int i;
+
+ ctx->colorspace = V4L2_COLORSPACE_JPEG;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ MTK_JPEG_FMT_TYPE_OUTPUT);
+ q->w = MTK_JPEG_MIN_WIDTH;
+ q->h = MTK_JPEG_MIN_HEIGHT;
+ q->bytesperline[0] = 0;
+ q->sizeimage[0] = MTK_JPEG_DEFAULT_SIZEIMAGE;
+
+ q = &ctx->cap_q;
+ q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_YUV420M,
+ MTK_JPEG_FMT_TYPE_CAPTURE);
+ q->w = MTK_JPEG_MIN_WIDTH;
+ q->h = MTK_JPEG_MIN_HEIGHT;
+
+ for (i = 0; i < q->fmt->colplanes; i++) {
+ u32 stride = q->w * q->fmt->h_sample[i] / 4;
+ u32 h = q->h * q->fmt->v_sample[i] / 4;
+
+ q->bytesperline[i] = stride;
+ q->sizeimage[i] = stride * h;
+ }
+}
+
+static int mtk_jpeg_open(struct file *file)
+{
+ struct mtk_jpeg_dev *jpeg = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct mtk_jpeg_ctx *ctx;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&jpeg->lock)) {
+ ret = -ERESTARTSYS;
+ goto free;
+ }
+
+ v4l2_fh_init(&ctx->fh, vfd);
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->jpeg = jpeg;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx,
+ mtk_jpeg_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error;
+ }
+
+ mtk_jpeg_set_default_params(ctx);
+ mutex_unlock(&jpeg->lock);
+ return 0;
+
+error:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&jpeg->lock);
+free:
+ kfree(ctx);
+ return ret;
+}
+
+static int mtk_jpeg_release(struct file *file)
+{
+ struct mtk_jpeg_dev *jpeg = video_drvdata(file);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(file->private_data);
+
+ mutex_lock(&jpeg->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&jpeg->lock);
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_jpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_jpeg_open,
+ .release = mtk_jpeg_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mtk_jpeg_clk_init(struct mtk_jpeg_dev *jpeg)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+
+ node = of_parse_phandle(jpeg->dev->of_node, "mediatek,larb", 0);
+ if (!node)
+ return -EINVAL;
+ pdev = of_find_device_by_node(node);
+ if (WARN_ON(!pdev)) {
+ of_node_put(node);
+ return -EINVAL;
+ }
+ of_node_put(node);
+
+ jpeg->larb = &pdev->dev;
+
+ jpeg->clk_jdec = devm_clk_get(jpeg->dev, "jpgdec");
+ if (IS_ERR(jpeg->clk_jdec))
+ return PTR_ERR(jpeg->clk_jdec);
+
+ jpeg->clk_jdec_smi = devm_clk_get(jpeg->dev, "jpgdec-smi");
+ return PTR_ERR_OR_ZERO(jpeg->clk_jdec_smi);
+}
+
+static int mtk_jpeg_probe(struct platform_device *pdev)
+{
+ struct mtk_jpeg_dev *jpeg;
+ struct resource *res;
+ int dec_irq;
+ int ret;
+
+ jpeg = devm_kzalloc(&pdev->dev, sizeof(*jpeg), GFP_KERNEL);
+ if (!jpeg)
+ return -ENOMEM;
+
+ mutex_init(&jpeg->lock);
+ spin_lock_init(&jpeg->hw_lock);
+ jpeg->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ jpeg->dec_reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(jpeg->dec_reg_base)) {
+ ret = PTR_ERR(jpeg->dec_reg_base);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ dec_irq = platform_get_irq(pdev, 0);
+ if (!res || dec_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get dec_irq %d.\n", dec_irq);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dec_irq, mtk_jpeg_dec_irq, 0,
+ pdev->name, jpeg);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request dec_irq %d (%d)\n",
+ dec_irq, ret);
+ ret = -EINVAL;
+ goto err_req_irq;
+ }
+
+ ret = mtk_jpeg_clk_init(jpeg);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init clk, err %d\n", ret);
+ goto err_clk_init;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ ret = -EINVAL;
+ goto err_dev_register;
+ }
+
+ jpeg->m2m_dev = v4l2_m2m_init(&mtk_jpeg_m2m_ops);
+ if (IS_ERR(jpeg->m2m_dev)) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpeg->m2m_dev);
+ goto err_m2m_init;
+ }
+
+ jpeg->dec_vdev = video_device_alloc();
+ if (!jpeg->dec_vdev) {
+ ret = -ENOMEM;
+ goto err_dec_vdev_alloc;
+ }
+ snprintf(jpeg->dec_vdev->name, sizeof(jpeg->dec_vdev->name),
+ "%s-dec", MTK_JPEG_NAME);
+ jpeg->dec_vdev->fops = &mtk_jpeg_fops;
+ jpeg->dec_vdev->ioctl_ops = &mtk_jpeg_ioctl_ops;
+ jpeg->dec_vdev->minor = -1;
+ jpeg->dec_vdev->release = video_device_release;
+ jpeg->dec_vdev->lock = &jpeg->lock;
+ jpeg->dec_vdev->v4l2_dev = &jpeg->v4l2_dev;
+ jpeg->dec_vdev->vfl_dir = VFL_DIR_M2M;
+ jpeg->dec_vdev->device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ ret = video_register_device(jpeg->dec_vdev, VFL_TYPE_GRABBER, 3);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_vdev_register;
+ }
+
+ video_set_drvdata(jpeg->dec_vdev, jpeg);
+ v4l2_info(&jpeg->v4l2_dev,
+ "decoder device registered as /dev/video%d (%d,%d)\n",
+ jpeg->dec_vdev->num, VIDEO_MAJOR, jpeg->dec_vdev->minor);
+
+ platform_set_drvdata(pdev, jpeg);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+err_dec_vdev_register:
+ video_device_release(jpeg->dec_vdev);
+
+err_dec_vdev_alloc:
+ v4l2_m2m_release(jpeg->m2m_dev);
+
+err_m2m_init:
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
+err_dev_register:
+
+err_clk_init:
+
+err_req_irq:
+
+ return ret;
+}
+
+static int mtk_jpeg_remove(struct platform_device *pdev)
+{
+ struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(jpeg->dec_vdev);
+ video_device_release(jpeg->dec_vdev);
+ v4l2_m2m_release(jpeg->m2m_dev);
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_jpeg_pm_suspend(struct device *dev)
+{
+ struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
+
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+ mtk_jpeg_clk_off(jpeg);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_jpeg_pm_resume(struct device *dev)
+{
+ struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
+
+ mtk_jpeg_clk_on(jpeg);
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_jpeg_suspend(struct device *dev)
+{
+ int ret;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = mtk_jpeg_pm_suspend(dev);
+ return ret;
+}
+
+static __maybe_unused int mtk_jpeg_resume(struct device *dev)
+{
+ int ret;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = mtk_jpeg_pm_resume(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mtk_jpeg_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_jpeg_suspend, mtk_jpeg_resume)
+ SET_RUNTIME_PM_OPS(mtk_jpeg_pm_suspend, mtk_jpeg_pm_resume, NULL)
+};
+
+static const struct of_device_id mtk_jpeg_match[] = {
+ {
+ .compatible = "mediatek,mt8173-jpgdec",
+ .data = NULL,
+ },
+ {
+ .compatible = "mediatek,mt2701-jpgdec",
+ .data = NULL,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_jpeg_match);
+
+static struct platform_driver mtk_jpeg_driver = {
+ .probe = mtk_jpeg_probe,
+ .remove = mtk_jpeg_remove,
+ .driver = {
+ .name = MTK_JPEG_NAME,
+ .of_match_table = mtk_jpeg_match,
+ .pm = &mtk_jpeg_pm_ops,
+ },
+};
+
+module_platform_driver(mtk_jpeg_driver);
+
+MODULE_DESCRIPTION("MediaTek JPEG codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
new file mode 100644
index 000000000..1a6cdfd4e
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_CORE_H
+#define _MTK_JPEG_CORE_H
+
+#include <linux/interrupt.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+
+#define MTK_JPEG_NAME "mtk-jpeg"
+
+#define MTK_JPEG_FMT_FLAG_DEC_OUTPUT BIT(0)
+#define MTK_JPEG_FMT_FLAG_DEC_CAPTURE BIT(1)
+
+#define MTK_JPEG_FMT_TYPE_OUTPUT 1
+#define MTK_JPEG_FMT_TYPE_CAPTURE 2
+
+#define MTK_JPEG_MIN_WIDTH 32
+#define MTK_JPEG_MIN_HEIGHT 32
+#define MTK_JPEG_MAX_WIDTH 8192
+#define MTK_JPEG_MAX_HEIGHT 8192
+
+#define MTK_JPEG_DEFAULT_SIZEIMAGE (1 * 1024 * 1024)
+
+enum mtk_jpeg_ctx_state {
+ MTK_JPEG_INIT = 0,
+ MTK_JPEG_RUNNING,
+ MTK_JPEG_SOURCE_CHANGE,
+};
+
+/**
+ * struct mtk_jpeg_dev - JPEG IP abstraction
+ * @lock: the mutex protecting this structure
+ * @hw_lock: spinlock protecting the hw device resource
+ * @workqueue: decode work queue
+ * @dev: JPEG device
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @alloc_ctx: videobuf2 memory allocator's context
+ * @dec_vdev: video device node for decoder mem2mem mode
+ * @dec_reg_base: JPEG registers mapping
+ * @clk_jdec: JPEG hw working clock
+ * @clk_jdec_smi: JPEG SMI bus clock
+ * @larb: SMI device
+ */
+struct mtk_jpeg_dev {
+ struct mutex lock;
+ spinlock_t hw_lock;
+ struct workqueue_struct *workqueue;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ void *alloc_ctx;
+ struct video_device *dec_vdev;
+ void __iomem *dec_reg_base;
+ struct clk *clk_jdec;
+ struct clk *clk_jdec_smi;
+ struct device *larb;
+};
+
+/**
+ * struct mtk_jpeg_fmt - driver's internal color format data
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @h_sample: horizontal sample count of plane in 4 * 4 pixel image
+ * @v_sample: vertical sample count of plane in 4 * 4 pixel image
+ * @colplanes: number of color planes (1 for packed formats)
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
+ * @flags: flags describing format applicability
+ */
+struct mtk_jpeg_fmt {
+ u32 fourcc;
+ int h_sample[VIDEO_MAX_PLANES];
+ int v_sample[VIDEO_MAX_PLANES];
+ int colplanes;
+ int h_align;
+ int v_align;
+ u32 flags;
+};
+
+/**
+ * mtk_jpeg_q_data - parameters of one queue
+ * @fmt: driver-specific format of this queue
+ * @w: image width
+ * @h: image height
+ * @bytesperline: distance in bytes between the leftmost pixels in two adjacent
+ * lines
+ * @sizeimage: image buffer size in bytes
+ */
+struct mtk_jpeg_q_data {
+ struct mtk_jpeg_fmt *fmt;
+ u32 w;
+ u32 h;
+ u32 bytesperline[VIDEO_MAX_PLANES];
+ u32 sizeimage[VIDEO_MAX_PLANES];
+};
+
+/**
+ * mtk_jpeg_ctx - the device context data
+ * @jpeg: JPEG IP device for this context
+ * @out_q: source (output) queue information
+ * @cap_q: destination (capture) queue information
+ * @fh: V4L2 file handle
+ * @dec_param: parameters for HW decoding
+ * @state: state of the context
+ * @header_valid: set once the header has been parsed and found valid
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ */
+struct mtk_jpeg_ctx {
+ struct mtk_jpeg_dev *jpeg;
+ struct mtk_jpeg_q_data out_q;
+ struct mtk_jpeg_q_data cap_q;
+ struct v4l2_fh fh;
+ enum mtk_jpeg_ctx_state state;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+};
+
+#endif /* _MTK_JPEG_CORE_H */
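For reference, a small user-space sketch (not part of the driver) of the per-plane size arithmetic implied by the h_sample/v_sample convention documented in struct mtk_jpeg_fmt above: each plane contributes h_sample x v_sample samples to a 4x4 pixel block. The YUV420M factors used below ({4,2,2} x {4,2,2}) are assumptions chosen for illustration, not values copied from the driver's format table.

#include <stdio.h>

/* Sketch only: plane size from the 4x4-block sample counts described above. */
static unsigned int plane_size(unsigned int w, unsigned int h,
                               unsigned int h_sample, unsigned int v_sample)
{
        /* Each plane covers (h_sample * v_sample) / 16 of the pixel count. */
        return w * h * h_sample * v_sample / 16;
}

int main(void)
{
        /* Assumed YUV420M sampling: Y full size, Cb/Cr quarter size. */
        const unsigned int hs[3] = { 4, 2, 2 }, vs[3] = { 4, 2, 2 };
        unsigned int w = 1920, h = 1088, i;

        for (i = 0; i < 3; i++)
                printf("plane %u: %u bytes\n", i,
                       plane_size(w, h, hs[i], vs[i]));
        return 0; /* prints 2088960, 522240, 522240 */
}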
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c
new file mode 100644
index 000000000..77b4cc6a8
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <media/videobuf2-core.h>
+
+#include "mtk_jpeg_hw.h"
+
+#define MTK_JPEG_DUNUM_MASK(val) (((val) - 1) & 0x3)
+
+enum mtk_jpeg_color {
+ MTK_JPEG_COLOR_420 = 0x00221111,
+ MTK_JPEG_COLOR_422 = 0x00211111,
+ MTK_JPEG_COLOR_444 = 0x00111111,
+ MTK_JPEG_COLOR_422V = 0x00121111,
+ MTK_JPEG_COLOR_422X2 = 0x00412121,
+ MTK_JPEG_COLOR_422VX2 = 0x00222121,
+ MTK_JPEG_COLOR_400 = 0x00110000
+};
+
+static inline int mtk_jpeg_verify_align(u32 val, int align, u32 reg)
+{
+ if (val & (align - 1)) {
+ pr_err("mtk-jpeg: reg 0x%x written without %d-byte alignment\n", reg, align);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mtk_jpeg_decide_format(struct mtk_jpeg_dec_param *param)
+{
+ param->src_color = (param->sampling_w[0] << 20) |
+ (param->sampling_h[0] << 16) |
+ (param->sampling_w[1] << 12) |
+ (param->sampling_h[1] << 8) |
+ (param->sampling_w[2] << 4) |
+ (param->sampling_h[2]);
+
+ param->uv_brz_w = 0;
+ switch (param->src_color) {
+ case MTK_JPEG_COLOR_444:
+ param->uv_brz_w = 1;
+ param->dst_fourcc = V4L2_PIX_FMT_YUV422M;
+ break;
+ case MTK_JPEG_COLOR_422X2:
+ case MTK_JPEG_COLOR_422:
+ param->dst_fourcc = V4L2_PIX_FMT_YUV422M;
+ break;
+ case MTK_JPEG_COLOR_422V:
+ case MTK_JPEG_COLOR_422VX2:
+ param->uv_brz_w = 1;
+ param->dst_fourcc = V4L2_PIX_FMT_YUV420M;
+ break;
+ case MTK_JPEG_COLOR_420:
+ param->dst_fourcc = V4L2_PIX_FMT_YUV420M;
+ break;
+ case MTK_JPEG_COLOR_400:
+ param->dst_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ default:
+ param->dst_fourcc = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mtk_jpeg_calc_mcu(struct mtk_jpeg_dec_param *param)
+{
+ u32 factor_w, factor_h;
+ u32 i, comp, blk;
+
+ factor_w = 2 + param->sampling_w[0];
+ factor_h = 2 + param->sampling_h[0];
+ param->mcu_w = (param->pic_w + (1 << factor_w) - 1) >> factor_w;
+ param->mcu_h = (param->pic_h + (1 << factor_h) - 1) >> factor_h;
+ param->total_mcu = param->mcu_w * param->mcu_h;
+ param->unit_num = ((param->pic_w + 7) >> 3) * ((param->pic_h + 7) >> 3);
+ param->blk_num = 0;
+ for (i = 0; i < MTK_JPEG_COMP_MAX; i++) {
+ param->blk_comp[i] = 0;
+ if (i >= param->comp_num)
+ continue;
+ param->blk_comp[i] = param->sampling_w[i] *
+ param->sampling_h[i];
+ param->blk_num += param->blk_comp[i];
+ }
+
+ param->membership = 0;
+ for (i = 0, blk = 0, comp = 0; i < MTK_JPEG_BLOCK_MAX; i++) {
+ if (i < param->blk_num && comp < param->comp_num) {
+ u32 tmp;
+
+ tmp = (0x04 + (comp & 0x3));
+ param->membership |= tmp << (i * 3);
+ if (++blk == param->blk_comp[comp]) {
+ comp++;
+ blk = 0;
+ }
+ } else {
+ param->membership |= 7 << (i * 3);
+ }
+ }
+}
+
+static void mtk_jpeg_calc_dma_group(struct mtk_jpeg_dec_param *param)
+{
+ u32 factor_mcu = 3;
+
+ if (param->src_color == MTK_JPEG_COLOR_444 &&
+ param->dst_fourcc == V4L2_PIX_FMT_YUV422M)
+ factor_mcu = 4;
+ else if (param->src_color == MTK_JPEG_COLOR_422V &&
+ param->dst_fourcc == V4L2_PIX_FMT_YUV420M)
+ factor_mcu = 4;
+ else if (param->src_color == MTK_JPEG_COLOR_422X2 &&
+ param->dst_fourcc == V4L2_PIX_FMT_YUV422M)
+ factor_mcu = 2;
+ else if (param->src_color == MTK_JPEG_COLOR_400 ||
+ (param->src_color & 0x0FFFF) == 0)
+ factor_mcu = 4;
+
+ param->dma_mcu = 1 << factor_mcu;
+ param->dma_group = param->mcu_w / param->dma_mcu;
+ param->dma_last_mcu = param->mcu_w % param->dma_mcu;
+ if (param->dma_last_mcu)
+ param->dma_group++;
+ else
+ param->dma_last_mcu = param->dma_mcu;
+}
+
+static int mtk_jpeg_calc_dst_size(struct mtk_jpeg_dec_param *param)
+{
+ u32 i, padding_w;
+ u32 ds_row_h[3];
+ u32 brz_w[3];
+
+ brz_w[0] = 0;
+ brz_w[1] = param->uv_brz_w;
+ brz_w[2] = brz_w[1];
+
+ for (i = 0; i < param->comp_num; i++) {
+ if (brz_w[i] > 3)
+ return -1;
+
+ padding_w = param->mcu_w * MTK_JPEG_DCTSIZE *
+ param->sampling_w[i];
+ /* output format is 420/422 */
+ param->comp_w[i] = padding_w >> brz_w[i];
+ param->comp_w[i] = mtk_jpeg_align(param->comp_w[i],
+ MTK_JPEG_DCTSIZE);
+ param->img_stride[i] = i ? mtk_jpeg_align(param->comp_w[i], 16)
+ : mtk_jpeg_align(param->comp_w[i], 32);
+ ds_row_h[i] = (MTK_JPEG_DCTSIZE * param->sampling_h[i]);
+ }
+ param->dec_w = param->img_stride[0];
+ param->dec_h = ds_row_h[0] * param->mcu_h;
+
+ for (i = 0; i < MTK_JPEG_COMP_MAX; i++) {
+ /* They must be equal in frame mode. */
+ param->mem_stride[i] = param->img_stride[i];
+ param->comp_size[i] = param->mem_stride[i] * ds_row_h[i] *
+ param->mcu_h;
+ }
+
+ param->y_size = param->comp_size[0];
+ param->uv_size = param->comp_size[1];
+ param->dec_size = param->y_size + (param->uv_size << 1);
+
+ return 0;
+}
+
+int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param)
+{
+ if (mtk_jpeg_decide_format(param))
+ return -1;
+
+ mtk_jpeg_calc_mcu(param);
+ mtk_jpeg_calc_dma_group(param);
+ if (mtk_jpeg_calc_dst_size(param))
+ return -2;
+
+ return 0;
+}
+
+u32 mtk_jpeg_dec_get_int_status(void __iomem *base)
+{
+ u32 ret;
+
+ ret = readl(base + JPGDEC_REG_INTERRUPT_STATUS) & BIT_INQST_MASK_ALLIRQ;
+ if (ret)
+ writel(ret, base + JPGDEC_REG_INTERRUPT_STATUS);
+
+ return ret;
+}
+
+u32 mtk_jpeg_dec_enum_result(u32 irq_result)
+{
+ if (irq_result & BIT_INQST_MASK_EOF)
+ return MTK_JPEG_DEC_RESULT_EOF_DONE;
+ if (irq_result & BIT_INQST_MASK_PAUSE)
+ return MTK_JPEG_DEC_RESULT_PAUSE;
+ if (irq_result & BIT_INQST_MASK_UNDERFLOW)
+ return MTK_JPEG_DEC_RESULT_UNDERFLOW;
+ if (irq_result & BIT_INQST_MASK_OVERFLOW)
+ return MTK_JPEG_DEC_RESULT_OVERFLOW;
+ if (irq_result & BIT_INQST_MASK_ERROR_BS)
+ return MTK_JPEG_DEC_RESULT_ERROR_BS;
+
+ return MTK_JPEG_DEC_RESULT_ERROR_UNKNOWN;
+}
+
+void mtk_jpeg_dec_start(void __iomem *base)
+{
+ writel(0, base + JPGDEC_REG_TRIG);
+}
+
+static void mtk_jpeg_dec_soft_reset(void __iomem *base)
+{
+ writel(0x0000FFFF, base + JPGDEC_REG_INTERRUPT_STATUS);
+ writel(0x00, base + JPGDEC_REG_RESET);
+ writel(0x01, base + JPGDEC_REG_RESET);
+}
+
+static void mtk_jpeg_dec_hard_reset(void __iomem *base)
+{
+ writel(0x00, base + JPGDEC_REG_RESET);
+ writel(0x10, base + JPGDEC_REG_RESET);
+}
+
+void mtk_jpeg_dec_reset(void __iomem *base)
+{
+ mtk_jpeg_dec_soft_reset(base);
+ mtk_jpeg_dec_hard_reset(base);
+}
+
+static void mtk_jpeg_dec_set_brz_factor(void __iomem *base, u8 yscale_w,
+ u8 yscale_h, u8 uvscale_w, u8 uvscale_h)
+{
+ u32 val;
+
+ val = (uvscale_h << 12) | (uvscale_w << 8) |
+ (yscale_h << 4) | yscale_w;
+ writel(val, base + JPGDEC_REG_BRZ_FACTOR);
+}
+
+static void mtk_jpeg_dec_set_dst_bank0(void __iomem *base, u32 addr_y,
+ u32 addr_u, u32 addr_v)
+{
+ mtk_jpeg_verify_align(addr_y, 16, JPGDEC_REG_DEST_ADDR0_Y);
+ writel(addr_y, base + JPGDEC_REG_DEST_ADDR0_Y);
+ mtk_jpeg_verify_align(addr_u, 16, JPGDEC_REG_DEST_ADDR0_U);
+ writel(addr_u, base + JPGDEC_REG_DEST_ADDR0_U);
+ mtk_jpeg_verify_align(addr_v, 16, JPGDEC_REG_DEST_ADDR0_V);
+ writel(addr_v, base + JPGDEC_REG_DEST_ADDR0_V);
+}
+
+static void mtk_jpeg_dec_set_dst_bank1(void __iomem *base, u32 addr_y,
+ u32 addr_u, u32 addr_v)
+{
+ writel(addr_y, base + JPGDEC_REG_DEST_ADDR1_Y);
+ writel(addr_u, base + JPGDEC_REG_DEST_ADDR1_U);
+ writel(addr_v, base + JPGDEC_REG_DEST_ADDR1_V);
+}
+
+static void mtk_jpeg_dec_set_mem_stride(void __iomem *base, u32 stride_y,
+ u32 stride_uv)
+{
+ writel((stride_y & 0xFFFF), base + JPGDEC_REG_STRIDE_Y);
+ writel((stride_uv & 0xFFFF), base + JPGDEC_REG_STRIDE_UV);
+}
+
+static void mtk_jpeg_dec_set_img_stride(void __iomem *base, u32 stride_y,
+ u32 stride_uv)
+{
+ writel((stride_y & 0xFFFF), base + JPGDEC_REG_IMG_STRIDE_Y);
+ writel((stride_uv & 0xFFFF), base + JPGDEC_REG_IMG_STRIDE_UV);
+}
+
+static void mtk_jpeg_dec_set_pause_mcu_idx(void __iomem *base, u32 idx)
+{
+ writel(idx & 0x0003FFFFFF, base + JPGDEC_REG_PAUSE_MCU_NUM);
+}
+
+static void mtk_jpeg_dec_set_dec_mode(void __iomem *base, u32 mode)
+{
+ writel(mode & 0x03, base + JPGDEC_REG_OPERATION_MODE);
+}
+
+static void mtk_jpeg_dec_set_bs_write_ptr(void __iomem *base, u32 ptr)
+{
+ mtk_jpeg_verify_align(ptr, 16, JPGDEC_REG_FILE_BRP);
+ writel(ptr, base + JPGDEC_REG_FILE_BRP);
+}
+
+static void mtk_jpeg_dec_set_bs_info(void __iomem *base, u32 addr, u32 size)
+{
+ mtk_jpeg_verify_align(addr, 16, JPGDEC_REG_FILE_ADDR);
+ mtk_jpeg_verify_align(size, 128, JPGDEC_REG_FILE_TOTAL_SIZE);
+ writel(addr, base + JPGDEC_REG_FILE_ADDR);
+ writel(size, base + JPGDEC_REG_FILE_TOTAL_SIZE);
+}
+
+static void mtk_jpeg_dec_set_comp_id(void __iomem *base, u32 id_y, u32 id_u,
+ u32 id_v)
+{
+ u32 val;
+
+ val = ((id_y & 0x00FF) << 24) | ((id_u & 0x00FF) << 16) |
+ ((id_v & 0x00FF) << 8);
+ writel(val, base + JPGDEC_REG_COMP_ID);
+}
+
+static void mtk_jpeg_dec_set_total_mcu(void __iomem *base, u32 num)
+{
+ writel(num - 1, base + JPGDEC_REG_TOTAL_MCU_NUM);
+}
+
+static void mtk_jpeg_dec_set_comp0_du(void __iomem *base, u32 num)
+{
+ writel(num - 1, base + JPGDEC_REG_COMP0_DATA_UNIT_NUM);
+}
+
+static void mtk_jpeg_dec_set_du_membership(void __iomem *base, u32 member,
+ u32 gmc, u32 isgray)
+{
+ if (isgray)
+ member = 0x3FFFFFFC;
+ member |= (isgray << 31) | (gmc << 30);
+ writel(member, base + JPGDEC_REG_DU_CTRL);
+}
+
+static void mtk_jpeg_dec_set_q_table(void __iomem *base, u32 id0, u32 id1,
+ u32 id2)
+{
+ u32 val;
+
+ val = ((id0 & 0x0f) << 8) | ((id1 & 0x0f) << 4) | ((id2 & 0x0f) << 0);
+ writel(val, base + JPGDEC_REG_QT_ID);
+}
+
+static void mtk_jpeg_dec_set_dma_group(void __iomem *base, u32 mcu_group,
+ u32 group_num, u32 last_mcu)
+{
+ u32 val;
+
+ val = (((mcu_group - 1) & 0x00FF) << 16) |
+ (((group_num - 1) & 0x007F) << 8) |
+ ((last_mcu - 1) & 0x00FF);
+ writel(val, base + JPGDEC_REG_WDMA_CTRL);
+}
+
+static void mtk_jpeg_dec_set_sampling_factor(void __iomem *base, u32 comp_num,
+ u32 y_w, u32 y_h, u32 u_w,
+ u32 u_h, u32 v_w, u32 v_h)
+{
+ u32 val;
+ u32 y_wh = (MTK_JPEG_DUNUM_MASK(y_w) << 2) | MTK_JPEG_DUNUM_MASK(y_h);
+ u32 u_wh = (MTK_JPEG_DUNUM_MASK(u_w) << 2) | MTK_JPEG_DUNUM_MASK(u_h);
+ u32 v_wh = (MTK_JPEG_DUNUM_MASK(v_w) << 2) | MTK_JPEG_DUNUM_MASK(v_h);
+
+ if (comp_num == 1)
+ val = 0;
+ else
+ val = (y_wh << 8) | (u_wh << 4) | v_wh;
+ writel(val, base + JPGDEC_REG_DU_NUM);
+}
+
+void mtk_jpeg_dec_set_config(void __iomem *base,
+ struct mtk_jpeg_dec_param *config,
+ struct mtk_jpeg_bs *bs,
+ struct mtk_jpeg_fb *fb)
+{
+ mtk_jpeg_dec_set_brz_factor(base, 0, 0, config->uv_brz_w, 0);
+ mtk_jpeg_dec_set_dec_mode(base, 0);
+ mtk_jpeg_dec_set_comp0_du(base, config->unit_num);
+ mtk_jpeg_dec_set_total_mcu(base, config->total_mcu);
+ mtk_jpeg_dec_set_bs_info(base, bs->str_addr, bs->size);
+ mtk_jpeg_dec_set_bs_write_ptr(base, bs->end_addr);
+ mtk_jpeg_dec_set_du_membership(base, config->membership, 1,
+ (config->comp_num == 1) ? 1 : 0);
+ mtk_jpeg_dec_set_comp_id(base, config->comp_id[0], config->comp_id[1],
+ config->comp_id[2]);
+ mtk_jpeg_dec_set_q_table(base, config->qtbl_num[0],
+ config->qtbl_num[1], config->qtbl_num[2]);
+ mtk_jpeg_dec_set_sampling_factor(base, config->comp_num,
+ config->sampling_w[0],
+ config->sampling_h[0],
+ config->sampling_w[1],
+ config->sampling_h[1],
+ config->sampling_w[2],
+ config->sampling_h[2]);
+ mtk_jpeg_dec_set_mem_stride(base, config->mem_stride[0],
+ config->mem_stride[1]);
+ mtk_jpeg_dec_set_img_stride(base, config->img_stride[0],
+ config->img_stride[1]);
+ mtk_jpeg_dec_set_dst_bank0(base, fb->plane_addr[0],
+ fb->plane_addr[1], fb->plane_addr[2]);
+ mtk_jpeg_dec_set_dst_bank1(base, 0, 0, 0);
+ mtk_jpeg_dec_set_dma_group(base, config->dma_mcu, config->dma_group,
+ config->dma_last_mcu);
+ mtk_jpeg_dec_set_pause_mcu_idx(base, config->total_mcu);
+}
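A minimal sketch, outside the driver, of how mtk_jpeg_decide_format() above packs the per-component sampling factors into src_color; the 4:2:0 factors used as input are illustrative only. The packed word for a baseline 4:2:0 image matches MTK_JPEG_COLOR_420 (0x00221111), for which the driver selects V4L2_PIX_FMT_YUV422M's 4:2:0 counterpart, V4L2_PIX_FMT_YUV420M.

#include <stdio.h>

int main(void)
{
        /* Example sampling factors for 4:2:0: Y 2x2, Cb 1x1, Cr 1x1. */
        unsigned int sampling_w[3] = { 2, 1, 1 };
        unsigned int sampling_h[3] = { 2, 1, 1 };
        unsigned int src_color;

        /* Same packing as mtk_jpeg_decide_format(). */
        src_color = (sampling_w[0] << 20) | (sampling_h[0] << 16) |
                    (sampling_w[1] << 12) | (sampling_h[1] << 8) |
                    (sampling_w[2] << 4) | sampling_h[2];

        printf("src_color = 0x%08X\n", src_color); /* 0x00221111 */
        return 0;
}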
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h
new file mode 100644
index 000000000..37152a630
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_HW_H
+#define _MTK_JPEG_HW_H
+
+#include <media/videobuf2-core.h>
+
+#include "mtk_jpeg_core.h"
+#include "mtk_jpeg_reg.h"
+
+enum {
+ MTK_JPEG_DEC_RESULT_EOF_DONE = 0,
+ MTK_JPEG_DEC_RESULT_PAUSE = 1,
+ MTK_JPEG_DEC_RESULT_UNDERFLOW = 2,
+ MTK_JPEG_DEC_RESULT_OVERFLOW = 3,
+ MTK_JPEG_DEC_RESULT_ERROR_BS = 4,
+ MTK_JPEG_DEC_RESULT_ERROR_UNKNOWN = 6
+};
+
+struct mtk_jpeg_dec_param {
+ u32 pic_w;
+ u32 pic_h;
+ u32 dec_w;
+ u32 dec_h;
+ u32 src_color;
+ u32 dst_fourcc;
+ u32 mcu_w;
+ u32 mcu_h;
+ u32 total_mcu;
+ u32 unit_num;
+ u32 comp_num;
+ u32 comp_id[MTK_JPEG_COMP_MAX];
+ u32 sampling_w[MTK_JPEG_COMP_MAX];
+ u32 sampling_h[MTK_JPEG_COMP_MAX];
+ u32 qtbl_num[MTK_JPEG_COMP_MAX];
+ u32 blk_num;
+ u32 blk_comp[MTK_JPEG_COMP_MAX];
+ u32 membership;
+ u32 dma_mcu;
+ u32 dma_group;
+ u32 dma_last_mcu;
+ u32 img_stride[MTK_JPEG_COMP_MAX];
+ u32 mem_stride[MTK_JPEG_COMP_MAX];
+ u32 comp_w[MTK_JPEG_COMP_MAX];
+ u32 comp_size[MTK_JPEG_COMP_MAX];
+ u32 y_size;
+ u32 uv_size;
+ u32 dec_size;
+ u8 uv_brz_w;
+};
+
+static inline u32 mtk_jpeg_align(u32 val, u32 align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+struct mtk_jpeg_bs {
+ dma_addr_t str_addr;
+ dma_addr_t end_addr;
+ size_t size;
+};
+
+struct mtk_jpeg_fb {
+ dma_addr_t plane_addr[MTK_JPEG_COMP_MAX];
+ size_t size;
+};
+
+int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param);
+u32 mtk_jpeg_dec_get_int_status(void __iomem *dec_reg_base);
+u32 mtk_jpeg_dec_enum_result(u32 irq_result);
+void mtk_jpeg_dec_set_config(void __iomem *base,
+ struct mtk_jpeg_dec_param *config,
+ struct mtk_jpeg_bs *bs,
+ struct mtk_jpeg_fb *fb);
+void mtk_jpeg_dec_reset(void __iomem *dec_reg_base);
+void mtk_jpeg_dec_start(void __iomem *dec_reg_base);
+
+#endif /* _MTK_JPEG_HW_H */
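As a standalone illustration (not driver code) of the round-up performed by mtk_jpeg_align() above for power-of-two alignments:

#include <assert.h>

static unsigned int align_up(unsigned int val, unsigned int align)
{
        /* Same expression as mtk_jpeg_align(): round up to a power of two. */
        return (val + align - 1) & ~(align - 1);
}

int main(void)
{
        assert(align_up(100, 16) == 112);
        assert(align_up(1920, 32) == 1920); /* already aligned: unchanged */
        assert(align_up(1930, 32) == 1952);
        return 0;
}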
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c
new file mode 100644
index 000000000..38868547f
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+
+#include "mtk_jpeg_parse.h"
+
+#define TEM 0x01
+#define SOF0 0xc0
+#define RST 0xd0
+#define SOI 0xd8
+#define EOI 0xd9
+
+struct mtk_jpeg_stream {
+ u8 *addr;
+ u32 size;
+ u32 curr;
+};
+
+static int read_byte(struct mtk_jpeg_stream *stream)
+{
+ if (stream->curr >= stream->size)
+ return -1;
+ return stream->addr[stream->curr++];
+}
+
+static int read_word_be(struct mtk_jpeg_stream *stream, u32 *word)
+{
+ u32 temp;
+ int byte;
+
+ byte = read_byte(stream);
+ if (byte == -1)
+ return -1;
+ temp = byte << 8;
+ byte = read_byte(stream);
+ if (byte == -1)
+ return -1;
+ *word = (u32)byte | temp;
+
+ return 0;
+}
+
+static void read_skip(struct mtk_jpeg_stream *stream, long len)
+{
+ if (len <= 0)
+ return;
+ while (len--)
+ read_byte(stream);
+}
+
+static bool mtk_jpeg_do_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
+ u32 src_size)
+{
+ bool notfound = true;
+ struct mtk_jpeg_stream stream;
+
+ stream.addr = src_addr_va;
+ stream.size = src_size;
+ stream.curr = 0;
+
+ while (notfound) {
+ int i, length, byte;
+ u32 word;
+
+ byte = read_byte(&stream);
+ if (byte == -1)
+ return false;
+ if (byte != 0xff)
+ continue;
+ do
+ byte = read_byte(&stream);
+ while (byte == 0xff);
+ if (byte == -1)
+ return false;
+ if (byte == 0)
+ continue;
+
+ length = 0;
+ switch (byte) {
+ case SOF0:
+ /* length */
+ if (read_word_be(&stream, &word))
+ break;
+
+ /* precision */
+ if (read_byte(&stream) == -1)
+ break;
+
+ if (read_word_be(&stream, &word))
+ break;
+ param->pic_h = word;
+
+ if (read_word_be(&stream, &word))
+ break;
+ param->pic_w = word;
+
+ param->comp_num = read_byte(&stream);
+ if (param->comp_num != 1 && param->comp_num != 3)
+ break;
+
+ for (i = 0; i < param->comp_num; i++) {
+ param->comp_id[i] = read_byte(&stream);
+ if (param->comp_id[i] == -1)
+ break;
+
+ /* sampling */
+ byte = read_byte(&stream);
+ if (byte == -1)
+ break;
+ param->sampling_w[i] = (byte >> 4) & 0x0F;
+ param->sampling_h[i] = byte & 0x0F;
+
+ param->qtbl_num[i] = read_byte(&stream);
+ if (param->qtbl_num[i] == -1)
+ break;
+ }
+
+ notfound = !(i == param->comp_num);
+ break;
+ case RST ... RST + 7:
+ case SOI:
+ case EOI:
+ case TEM:
+ break;
+ default:
+ if (read_word_be(&stream, &word))
+ break;
+ length = (long)word - 2;
+ read_skip(&stream, length);
+ break;
+ }
+ }
+
+ return !notfound;
+}
+
+bool mtk_jpeg_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
+ u32 src_size)
+{
+ if (!mtk_jpeg_do_parse(param, src_addr_va, src_size))
+ return false;
+ if (mtk_jpeg_dec_fill_param(param))
+ return false;
+
+ return true;
+}
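For reference, a hand-built SOF0 fragment showing the byte layout the parser above walks: marker, segment length, precision, height, width, component count, then one (id, sampling, quantization-table) triplet per component. The buffer is illustrative only and not a complete JPEG bitstream.

#include <stdio.h>

int main(void)
{
        static const unsigned char sof0[] = {
                0xFF, 0xC0,             /* SOF0 marker */
                0x00, 0x11,             /* segment length = 17 */
                0x08,                   /* sample precision: 8 bit */
                0x04, 0x38,             /* pic_h = 1080 */
                0x07, 0x80,             /* pic_w = 1920 */
                0x03,                   /* comp_num = 3 (YCbCr) */
                0x01, 0x22, 0x00,       /* Y:  id 1, sampling 2x2, qtbl 0 */
                0x02, 0x11, 0x01,       /* Cb: id 2, sampling 1x1, qtbl 1 */
                0x03, 0x11, 0x01,       /* Cr: id 3, sampling 1x1, qtbl 1 */
        };
        unsigned int pic_h = (sof0[5] << 8) | sof0[6];
        unsigned int pic_w = (sof0[7] << 8) | sof0[8];
        unsigned int comp_num = sof0[9];

        printf("pic %ux%u, %u components\n", pic_w, pic_h, comp_num);
        return 0;
}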
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h
new file mode 100644
index 000000000..5d92340ea
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_PARSE_H
+#define _MTK_JPEG_PARSE_H
+
+#include "mtk_jpeg_hw.h"
+
+bool mtk_jpeg_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
+ u32 src_size);
+
+#endif /* _MTK_JPEG_PARSE_H */
+
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h
new file mode 100644
index 000000000..fc490d62b
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_REG_H
+#define _MTK_JPEG_REG_H
+
+#define MTK_JPEG_COMP_MAX 3
+#define MTK_JPEG_BLOCK_MAX 10
+#define MTK_JPEG_DCTSIZE 8
+
+#define BIT_INQST_MASK_ERROR_BS 0x20
+#define BIT_INQST_MASK_PAUSE 0x10
+#define BIT_INQST_MASK_OVERFLOW 0x04
+#define BIT_INQST_MASK_UNDERFLOW 0x02
+#define BIT_INQST_MASK_EOF 0x01
+#define BIT_INQST_MASK_ALLIRQ 0x37
+
+#define JPGDEC_REG_RESET 0x0090
+#define JPGDEC_REG_BRZ_FACTOR 0x00F8
+#define JPGDEC_REG_DU_NUM 0x00FC
+#define JPGDEC_REG_DEST_ADDR0_Y 0x0140
+#define JPGDEC_REG_DEST_ADDR0_U 0x0144
+#define JPGDEC_REG_DEST_ADDR0_V 0x0148
+#define JPGDEC_REG_DEST_ADDR1_Y 0x014C
+#define JPGDEC_REG_DEST_ADDR1_U 0x0150
+#define JPGDEC_REG_DEST_ADDR1_V 0x0154
+#define JPGDEC_REG_STRIDE_Y 0x0158
+#define JPGDEC_REG_STRIDE_UV 0x015C
+#define JPGDEC_REG_IMG_STRIDE_Y 0x0160
+#define JPGDEC_REG_IMG_STRIDE_UV 0x0164
+#define JPGDEC_REG_WDMA_CTRL 0x016C
+#define JPGDEC_REG_PAUSE_MCU_NUM 0x0170
+#define JPGDEC_REG_OPERATION_MODE 0x017C
+#define JPGDEC_REG_FILE_ADDR 0x0200
+#define JPGDEC_REG_COMP_ID 0x020C
+#define JPGDEC_REG_TOTAL_MCU_NUM 0x0210
+#define JPGDEC_REG_COMP0_DATA_UNIT_NUM 0x0224
+#define JPGDEC_REG_DU_CTRL 0x023C
+#define JPGDEC_REG_TRIG 0x0240
+#define JPGDEC_REG_FILE_BRP 0x0248
+#define JPGDEC_REG_FILE_TOTAL_SIZE 0x024C
+#define JPGDEC_REG_QT_ID 0x0270
+#define JPGDEC_REG_INTERRUPT_STATUS 0x0274
+#define JPGDEC_REG_STATUS 0x0278
+
+#endif /* _MTK_JPEG_REG_H */
diff --git a/drivers/media/platform/mtk-mdp/Makefile b/drivers/media/platform/mtk-mdp/Makefile
new file mode 100644
index 000000000..5982d65c9
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+mtk-mdp-y += mtk_mdp_core.o
+mtk-mdp-y += mtk_mdp_comp.o
+mtk-mdp-y += mtk_mdp_m2m.o
+mtk-mdp-y += mtk_mdp_regs.o
+mtk-mdp-y += mtk_mdp_vpu.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp.o
+
+ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
new file mode 100644
index 000000000..03aba03a2
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_comp.h"
+
+
+static const char * const mtk_mdp_comp_stem[MTK_MDP_COMP_TYPE_MAX] = {
+ "mdp_rdma",
+ "mdp_rsz",
+ "mdp_wdma",
+ "mdp_wrot",
+};
+
+struct mtk_mdp_comp_match {
+ enum mtk_mdp_comp_type type;
+ int alias_id;
+};
+
+static const struct mtk_mdp_comp_match mtk_mdp_matches[MTK_MDP_COMP_ID_MAX] = {
+ { MTK_MDP_RDMA, 0 },
+ { MTK_MDP_RDMA, 1 },
+ { MTK_MDP_RSZ, 0 },
+ { MTK_MDP_RSZ, 1 },
+ { MTK_MDP_RSZ, 2 },
+ { MTK_MDP_WDMA, 0 },
+ { MTK_MDP_WROT, 0 },
+ { MTK_MDP_WROT, 1 },
+};
+
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+ enum mtk_mdp_comp_type comp_type)
+{
+ int id = of_alias_get_id(node, mtk_mdp_comp_stem[comp_type]);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_matches); i++) {
+ if (comp_type == mtk_mdp_matches[i].type &&
+ id == mtk_mdp_matches[i].alias_id)
+ return i;
+ }
+
+ dev_err(dev, "Failed to get id. type: %d, id: %d\n", comp_type, id);
+
+ return -EINVAL;
+}
+
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ int i, err;
+
+ if (comp->larb_dev) {
+ err = mtk_smi_larb_get(comp->larb_dev);
+ if (err)
+ dev_err(dev,
+ "failed to get larb, err %d. type:%d id:%d\n",
+ err, comp->type, comp->id);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ if (IS_ERR(comp->clk[i]))
+ continue;
+ err = clk_prepare_enable(comp->clk[i]);
+ if (err)
+ dev_err(dev,
+ "failed to enable clock, err %d. type:%d id:%d i:%d\n",
+ err, comp->type, comp->id, i);
+ }
+}
+
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ if (IS_ERR(comp->clk[i]))
+ continue;
+ clk_disable_unprepare(comp->clk[i]);
+ }
+
+ if (comp->larb_dev)
+ mtk_smi_larb_put(comp->larb_dev);
+}
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+ struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id)
+{
+ struct device_node *larb_node;
+ struct platform_device *larb_pdev;
+ int i;
+
+ if (comp_id < 0 || comp_id >= MTK_MDP_COMP_ID_MAX) {
+ dev_err(dev, "Invalid comp_id %d\n", comp_id);
+ return -EINVAL;
+ }
+
+ comp->dev_node = of_node_get(node);
+ comp->id = comp_id;
+ comp->type = mtk_mdp_matches[comp_id].type;
+ comp->regs = of_iomap(node, 0);
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ comp->clk[i] = of_clk_get(node, i);
+
+ /* Only RDMA needs two clocks */
+ if (comp->type != MTK_MDP_RDMA)
+ break;
+ }
+
+ /* Only DMA capable components need the LARB property */
+ comp->larb_dev = NULL;
+ if (comp->type != MTK_MDP_RDMA &&
+ comp->type != MTK_MDP_WDMA &&
+ comp->type != MTK_MDP_WROT)
+ return 0;
+
+ larb_node = of_parse_phandle(node, "mediatek,larb", 0);
+ if (!larb_node) {
+ dev_err(dev,
+ "Missing mediatek,larb phandle in %pOF node\n", node);
+ return -EINVAL;
+ }
+
+ larb_pdev = of_find_device_by_node(larb_node);
+ if (!larb_pdev) {
+ dev_warn(dev, "Waiting for larb device %pOF\n", larb_node);
+ of_node_put(larb_node);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(larb_node);
+
+ comp->larb_dev = &larb_pdev->dev;
+
+ return 0;
+}
+
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ of_node_put(comp->dev_node);
+}
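A user-space sketch of the (type, alias id) to component-id lookup performed by mtk_mdp_comp_get_id() above. The table mirrors mtk_mdp_matches[], so for example a resizer node whose alias resolves to id 1 maps to index 3, i.e. MTK_MDP_COMP_RSZ1; the alias value here is hypothetical.

#include <stdio.h>

enum comp_type { RDMA, RSZ, WDMA, WROT };

/* Mirrors mtk_mdp_matches[]: the array index doubles as the component id. */
static const struct { enum comp_type type; int alias_id; } matches[] = {
        { RDMA, 0 }, { RDMA, 1 },
        { RSZ, 0 },  { RSZ, 1 },  { RSZ, 2 },
        { WDMA, 0 },
        { WROT, 0 }, { WROT, 1 },
};

static int get_comp_id(enum comp_type type, int alias_id)
{
        unsigned int i;

        for (i = 0; i < sizeof(matches) / sizeof(matches[0]); i++)
                if (matches[i].type == type && matches[i].alias_id == alias_id)
                        return i;
        return -1; /* the driver returns -EINVAL here */
}

int main(void)
{
        printf("RSZ alias 1 -> comp id %d\n", get_comp_id(RSZ, 1)); /* 3 */
        return 0;
}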
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
new file mode 100644
index 000000000..63b3983ef
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_COMP_H__
+#define __MTK_MDP_COMP_H__
+
+/**
+ * enum mtk_mdp_comp_type - the MDP component
+ * @MTK_MDP_RDMA: Read DMA
+ * @MTK_MDP_RSZ: Resizer
+ * @MTK_MDP_WDMA: Write DMA
+ * @MTK_MDP_WROT: Write DMA with rotation
+ */
+enum mtk_mdp_comp_type {
+ MTK_MDP_RDMA,
+ MTK_MDP_RSZ,
+ MTK_MDP_WDMA,
+ MTK_MDP_WROT,
+ MTK_MDP_COMP_TYPE_MAX,
+};
+
+enum mtk_mdp_comp_id {
+ MTK_MDP_COMP_RDMA0,
+ MTK_MDP_COMP_RDMA1,
+ MTK_MDP_COMP_RSZ0,
+ MTK_MDP_COMP_RSZ1,
+ MTK_MDP_COMP_RSZ2,
+ MTK_MDP_COMP_WDMA,
+ MTK_MDP_COMP_WROT0,
+ MTK_MDP_COMP_WROT1,
+ MTK_MDP_COMP_ID_MAX,
+};
+
+/**
+ * struct mtk_mdp_comp - the MDP's function component data
+ * @dev_node: component device node
+ * @clk: clocks required for component
+ * @regs: Mapped address of component registers.
+ * @larb_dev: SMI device required for component
+ * @type: component type
+ * @id: component ID
+ */
+struct mtk_mdp_comp {
+ struct device_node *dev_node;
+ struct clk *clk[2];
+ void __iomem *regs;
+ struct device *larb_dev;
+ enum mtk_mdp_comp_type type;
+ enum mtk_mdp_comp_id id;
+};
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+ struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id);
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp);
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+ enum mtk_mdp_comp_type comp_type);
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp);
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp);
+
+
+#endif /* __MTK_MDP_COMP_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
new file mode 100644
index 000000000..3deb0549b
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_vpu.h"
+
+/* MDP debug log level (0-3). 3 shows all the logs. */
+int mtk_mdp_dbg_level;
+EXPORT_SYMBOL(mtk_mdp_dbg_level);
+
+module_param(mtk_mdp_dbg_level, int, 0644);
+
+static const struct of_device_id mtk_mdp_comp_dt_ids[] = {
+ {
+ .compatible = "mediatek,mt8173-mdp-rdma",
+ .data = (void *)MTK_MDP_RDMA
+ }, {
+ .compatible = "mediatek,mt8173-mdp-rsz",
+ .data = (void *)MTK_MDP_RSZ
+ }, {
+ .compatible = "mediatek,mt8173-mdp-wdma",
+ .data = (void *)MTK_MDP_WDMA
+ }, {
+ .compatible = "mediatek,mt8173-mdp-wrot",
+ .data = (void *)MTK_MDP_WROT
+ },
+ { },
+};
+
+static const struct of_device_id mtk_mdp_of_ids[] = {
+ { .compatible = "mediatek,mt8173-mdp", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mtk_mdp_of_ids);
+
+static void mtk_mdp_clock_on(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_clock_on(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_clock_off(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_clock_off(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_wdt_worker(struct work_struct *work)
+{
+ struct mtk_mdp_dev *mdp =
+ container_of(work, struct mtk_mdp_dev, wdt_work);
+ struct mtk_mdp_ctx *ctx;
+
+ mtk_mdp_err("Watchdog timeout");
+
+ list_for_each_entry(ctx, &mdp->ctx_list, list) {
+ mtk_mdp_dbg(0, "[%d] Changing to error state", ctx->id);
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_CTX_ERROR);
+ }
+}
+
+static void mtk_mdp_reset_handler(void *priv)
+{
+ struct mtk_mdp_dev *mdp = priv;
+
+ queue_work(mdp->wdt_wq, &mdp->wdt_work);
+}
+
+static int mtk_mdp_probe(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node, *parent;
+ int i, ret = 0;
+
+ mdp = devm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return -ENOMEM;
+
+ mdp->id = pdev->id;
+ mdp->pdev = pdev;
+ INIT_LIST_HEAD(&mdp->ctx_list);
+
+ mutex_init(&mdp->lock);
+ mutex_init(&mdp->vpulock);
+
+ /* Old dts had the components as child nodes */
+ node = of_get_next_child(dev->of_node, NULL);
+ if (node) {
+ of_node_put(node);
+ parent = dev->of_node;
+ dev_warn(dev, "device tree is out of date\n");
+ } else {
+ parent = dev->of_node->parent;
+ }
+
+ /* Iterate over sibling MDP function blocks */
+ for_each_child_of_node(parent, node) {
+ const struct of_device_id *of_id;
+ enum mtk_mdp_comp_type comp_type;
+ int comp_id;
+ struct mtk_mdp_comp *comp;
+
+ of_id = of_match_node(mtk_mdp_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+
+ if (!of_device_is_available(node)) {
+ dev_err(dev, "Skipping disabled component %pOF\n",
+ node);
+ continue;
+ }
+
+ comp_type = (enum mtk_mdp_comp_type)of_id->data;
+ comp_id = mtk_mdp_comp_get_id(dev, node, comp_type);
+ if (comp_id < 0) {
+ dev_warn(dev, "Skipping unknown component %pOF\n",
+ node);
+ continue;
+ }
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ ret = -ENOMEM;
+ goto err_comp;
+ }
+ mdp->comp[comp_id] = comp;
+
+ ret = mtk_mdp_comp_init(dev, node, comp, comp_id);
+ if (ret)
+ goto err_comp;
+ }
+
+ mdp->job_wq = create_singlethread_workqueue(MTK_MDP_MODULE_NAME);
+ if (!mdp->job_wq) {
+ dev_err(&pdev->dev, "unable to alloc job workqueue\n");
+ ret = -ENOMEM;
+ goto err_alloc_job_wq;
+ }
+
+ mdp->wdt_wq = create_singlethread_workqueue("mdp_wdt_wq");
+ if (!mdp->wdt_wq) {
+ dev_err(&pdev->dev, "unable to alloc wdt workqueue\n");
+ ret = -ENOMEM;
+ goto err_alloc_wdt_wq;
+ }
+ INIT_WORK(&mdp->wdt_work, mtk_mdp_wdt_worker);
+
+ ret = v4l2_device_register(dev, &mdp->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ ret = -EINVAL;
+ goto err_dev_register;
+ }
+
+ ret = mtk_mdp_register_m2m_device(mdp);
+ if (ret) {
+ v4l2_err(&mdp->v4l2_dev, "Failed to init mem2mem device\n");
+ goto err_m2m_register;
+ }
+
+ mdp->vpu_dev = vpu_get_plat_device(pdev);
+ vpu_wdt_reg_handler(mdp->vpu_dev, mtk_mdp_reset_handler, mdp,
+ VPU_RST_MDP);
+
+ platform_set_drvdata(pdev, mdp);
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ pm_runtime_enable(dev);
+ dev_dbg(dev, "mdp-%d registered successfully\n", mdp->id);
+
+ return 0;
+
+err_m2m_register:
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+err_dev_register:
+ destroy_workqueue(mdp->wdt_wq);
+
+err_alloc_wdt_wq:
+ destroy_workqueue(mdp->job_wq);
+
+err_alloc_job_wq:
+
+err_comp:
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_deinit(dev, mdp->comp[i]);
+
+ dev_dbg(dev, "err %d\n", ret);
+ return ret;
+}
+
+static int mtk_mdp_remove(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+ int i;
+
+ pm_runtime_disable(&pdev->dev);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ mtk_mdp_unregister_m2m_device(mdp);
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+ flush_workqueue(mdp->job_wq);
+ destroy_workqueue(mdp->job_wq);
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_deinit(&pdev->dev, mdp->comp[i]);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_suspend(struct device *dev)
+{
+ struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+ mtk_mdp_clock_off(mdp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_resume(struct device *dev)
+{
+ struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+ mtk_mdp_clock_on(mdp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mtk_mdp_pm_suspend(dev);
+}
+
+static int __maybe_unused mtk_mdp_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mtk_mdp_pm_resume(dev);
+}
+
+static const struct dev_pm_ops mtk_mdp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_mdp_suspend, mtk_mdp_resume)
+ SET_RUNTIME_PM_OPS(mtk_mdp_pm_suspend, mtk_mdp_pm_resume, NULL)
+};
+
+static struct platform_driver mtk_mdp_driver = {
+ .probe = mtk_mdp_probe,
+ .remove = mtk_mdp_remove,
+ .driver = {
+ .name = MTK_MDP_MODULE_NAME,
+ .pm = &mtk_mdp_pm_ops,
+ .of_match_table = mtk_mdp_of_ids,
+ }
+};
+
+module_platform_driver(mtk_mdp_driver);
+
+MODULE_AUTHOR("Houlong Wei <houlong.wei@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek image processor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
new file mode 100644
index 000000000..ad1cff306
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_CORE_H__
+#define __MTK_MDP_CORE_H__
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_mdp_vpu.h"
+#include "mtk_mdp_comp.h"
+
+
+#define MTK_MDP_MODULE_NAME "mtk-mdp"
+
+#define MTK_MDP_SHUTDOWN_TIMEOUT ((100*HZ)/1000) /* 100ms */
+#define MTK_MDP_MAX_CTRL_NUM 10
+
+#define MTK_MDP_FMT_FLAG_OUTPUT BIT(0)
+#define MTK_MDP_FMT_FLAG_CAPTURE BIT(1)
+
+#define MTK_MDP_VPU_INIT BIT(0)
+#define MTK_MDP_SRC_FMT BIT(1)
+#define MTK_MDP_DST_FMT BIT(2)
+#define MTK_MDP_CTX_ERROR BIT(5)
+
+/**
+ * struct mtk_mdp_pix_align - alignment of image
+ * @org_w: source alignment of width
+ * @org_h: source alignment of height
+ * @target_w: dst alignment of width
+ * @target_h: dst alignment of height
+ */
+struct mtk_mdp_pix_align {
+ u16 org_w;
+ u16 org_h;
+ u16 target_w;
+ u16 target_h;
+};
+
+/**
+ * struct mtk_mdp_fmt - the driver's internal color format data
+ * @pixelformat: the fourcc code for this format, 0 if not applicable
+ * @num_planes: number of physically non-contiguous data planes
+ * @num_comp: number of logical data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @row_depth: per plane driver's private 'number of bits per pixel per row'
+ * @flags: flags indicating which operation mode format applies to
+ * MTK_MDP_FMT_FLAG_OUTPUT is used in OUTPUT stream
+ * MTK_MDP_FMT_FLAG_CAPTURE is used in CAPTURE stream
+ * @align: pointer to a pixel alignment struct, NULL if using default value
+ */
+struct mtk_mdp_fmt {
+ u32 pixelformat;
+ u16 num_planes;
+ u16 num_comp;
+ u8 depth[VIDEO_MAX_PLANES];
+ u8 row_depth[VIDEO_MAX_PLANES];
+ u32 flags;
+ struct mtk_mdp_pix_align *align;
+};
+
+/**
+ * struct mtk_mdp_addr - the image processor physical address set
+ * @addr: address of planes
+ */
+struct mtk_mdp_addr {
+ dma_addr_t addr[MTK_MDP_MAX_NUM_PLANE];
+};
+
+/* struct mtk_mdp_ctrls - the image processor control set
+ * @rotate: rotation degree
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @global_alpha: the alpha value of current frame
+ */
+struct mtk_mdp_ctrls {
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *global_alpha;
+};
+
+/**
+ * struct mtk_mdp_frame - source/target frame properties
+ * @width: SRC : SRCIMG_WIDTH, DST : OUTPUTDMA_WHOLE_IMG_WIDTH
+ * @height: SRC : SRCIMG_HEIGHT, DST : OUTPUTDMA_WHOLE_IMG_HEIGHT
+ * @crop: cropped(source)/scaled(destination) size
+ * @payload: image size in bytes (w x h x bpp)
+ * @pitch: bytes per line of image in memory
+ * @addr: image frame buffer physical addresses
+ * @fmt: color format pointer
+ * @alpha: frame's alpha value
+ */
+struct mtk_mdp_frame {
+ u32 width;
+ u32 height;
+ struct v4l2_rect crop;
+ unsigned long payload[VIDEO_MAX_PLANES];
+ unsigned int pitch[VIDEO_MAX_PLANES];
+ struct mtk_mdp_addr addr;
+ const struct mtk_mdp_fmt *fmt;
+ u8 alpha;
+};
+
+/**
+ * struct mtk_mdp_variant - image processor variant information
+ * @pix_max: maximum limit of image size
+ * @pix_min: minimum limit of image size
+ * @pix_align: alignment of image
+ * @h_scale_up_max: maximum scale-up in horizontal
+ * @v_scale_up_max: maximum scale-up in vertical
+ * @h_scale_down_max: maximum scale-down in horizontal
+ * @v_scale_down_max: maximum scale-down in vertical
+ */
+struct mtk_mdp_variant {
+ struct mtk_mdp_pix_limit *pix_max;
+ struct mtk_mdp_pix_limit *pix_min;
+ struct mtk_mdp_pix_align *pix_align;
+ u16 h_scale_up_max;
+ u16 v_scale_up_max;
+ u16 h_scale_down_max;
+ u16 v_scale_down_max;
+};
+
+/**
+ * struct mtk_mdp_dev - abstraction for image processor entity
+ * @lock: the mutex protecting this data structure
+ * @vpulock: the mutex protecting the communication with VPU
+ * @pdev: pointer to the image processor platform device
+ * @variant: the IP variant information
+ * @id: image processor device index (0..MTK_MDP_MAX_DEVS)
+ * @comp: MDP function components
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx_list: list of struct mtk_mdp_ctx
+ * @vdev: video device for image processor driver
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @job_wq: processor work queue
+ * @vpu_dev: VPU platform device
+ * @ctx_num: counter of active MTK MDP context
+ * @id_counter: An integer id given to the next opened context
+ * @wdt_wq: work queue for VPU watchdog
+ * @wdt_work: worker for VPU watchdog
+ */
+struct mtk_mdp_dev {
+ struct mutex lock;
+ struct mutex vpulock;
+ struct platform_device *pdev;
+ struct mtk_mdp_variant *variant;
+ u16 id;
+ struct mtk_mdp_comp *comp[MTK_MDP_COMP_ID_MAX];
+ struct v4l2_m2m_dev *m2m_dev;
+ struct list_head ctx_list;
+ struct video_device *vdev;
+ struct v4l2_device v4l2_dev;
+ struct workqueue_struct *job_wq;
+ struct platform_device *vpu_dev;
+ int ctx_num;
+ unsigned long id_counter;
+ struct workqueue_struct *wdt_wq;
+ struct work_struct wdt_work;
+};
+
+/**
+ * mtk_mdp_ctx - the device context data
+ * @list: link to ctx_list of mtk_mdp_dev
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @id: index of the context that this structure describes
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of user configuration
+ * Protected by slock
+ * @rotation: rotates the image by specified angle
+ * @hflip: mirror the picture horizontally
+ * @vflip: mirror the picture vertically
+ * @mdp_dev: the image processor device this context applies to
+ * @m2m_ctx: memory-to-memory device context
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @ctrls: image processor control set
+ * @ctrls_rdy: true if the control handler is initialized
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @quant: enum v4l2_quantization, colorspace quantization
+ * @vpu: VPU instance
+ * @slock: the mutex protecting mtk_mdp_ctx.state
+ * @work: worker for image processing
+ */
+struct mtk_mdp_ctx {
+ struct list_head list;
+ struct mtk_mdp_frame s_frame;
+ struct mtk_mdp_frame d_frame;
+ u32 flags;
+ u32 state;
+ int id;
+ int rotation;
+ u32 hflip:1;
+ u32 vflip:1;
+ struct mtk_mdp_dev *mdp_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct mtk_mdp_ctrls ctrls;
+ bool ctrls_rdy;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+
+ struct mtk_mdp_vpu vpu;
+ struct mutex slock;
+ struct work_struct work;
+};
+
+extern int mtk_mdp_dbg_level;
+
+#if defined(DEBUG)
+
+#define mtk_mdp_dbg(level, fmt, args...) \
+ do { \
+ if (mtk_mdp_dbg_level >= level) \
+ pr_info("[MTK_MDP] level=%d %s(),%d: " fmt "\n", \
+ level, __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mtk_mdp_err(fmt, args...) \
+ pr_err("[MTK_MDP][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
+ ##args)
+
+
+#define mtk_mdp_dbg_enter() mtk_mdp_dbg(3, "+")
+#define mtk_mdp_dbg_leave() mtk_mdp_dbg(3, "-")
+
+#else
+
+#define mtk_mdp_dbg(level, fmt, args...) {}
+#define mtk_mdp_err(fmt, args...)
+#define mtk_mdp_dbg_enter()
+#define mtk_mdp_dbg_leave()
+
+#endif
+
+#endif /* __MTK_MDP_CORE_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
new file mode 100644
index 000000000..78e2cc0de
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_IPI_H__
+#define __MTK_MDP_IPI_H__
+
+#define MTK_MDP_MAX_NUM_PLANE 3
+
+enum mdp_ipi_msgid {
+ AP_MDP_INIT = 0xd000,
+ AP_MDP_DEINIT = 0xd001,
+ AP_MDP_PROCESS = 0xd002,
+
+ VPU_MDP_INIT_ACK = 0xe000,
+ VPU_MDP_DEINIT_ACK = 0xe001,
+ VPU_MDP_PROCESS_ACK = 0xe002
+};
+
+#pragma pack(push, 4)
+
+/**
+ * struct mdp_ipi_init - for AP_MDP_INIT
+ * @msg_id : AP_MDP_INIT
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ */
+struct mdp_ipi_init {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+};
+
+/**
+ * struct mdp_ipi_comm - for AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @msg_id : AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ */
+struct mdp_ipi_comm {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct mdp_ipi_comm_ack - for VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @msg_id : VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ * @status : VPU execution result
+ */
+struct mdp_ipi_comm_ack {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+ uint32_t vpu_inst_addr;
+ int32_t status;
+};
+
+/**
+ * struct mdp_config - configured for source/destination image
+ * @x : left
+ * @y : top
+ * @w : width
+ * @h : height
+ * @w_stride : bytes in horizontal
+ * @h_stride : bytes in vertical
+ * @crop_x : cropped left
+ * @crop_y : cropped top
+ * @crop_w : cropped width
+ * @crop_h : cropped height
+ * @format : color format
+ */
+struct mdp_config {
+ int32_t x;
+ int32_t y;
+ int32_t w;
+ int32_t h;
+ int32_t w_stride;
+ int32_t h_stride;
+ int32_t crop_x;
+ int32_t crop_y;
+ int32_t crop_w;
+ int32_t crop_h;
+ int32_t format;
+};
+
+struct mdp_buffer {
+ uint64_t addr_mva[MTK_MDP_MAX_NUM_PLANE];
+ int32_t plane_size[MTK_MDP_MAX_NUM_PLANE];
+ int32_t plane_num;
+};
+
+struct mdp_config_misc {
+ int32_t orientation; /* 0, 90, 180, 270 */
+ int32_t hflip; /* 1 will enable the flip */
+ int32_t vflip; /* 1 will enable the flip */
+ int32_t alpha; /* global alpha */
+};
+
+struct mdp_process_vsi {
+ struct mdp_config src_config;
+ struct mdp_buffer src_buffer;
+ struct mdp_config dst_config;
+ struct mdp_buffer dst_buffer;
+ struct mdp_config_misc misc;
+};
+
+#pragma pack(pop)
+
+#endif /* __MTK_MDP_IPI_H__ */
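A standalone sketch of the effect of the #pragma pack(push, 4) wrapper above: on a typical LP64 host the uint64_t member would otherwise force 8-byte alignment and pad struct mdp_ipi_comm to 24 bytes, whereas the packed layout stays at 20 bytes. The sizes assume a common 64-bit ABI and are given for illustration; the presumed intent of the pragma is a fixed layout shared with the VPU firmware.

#include <stdint.h>
#include <stdio.h>

struct comm_natural {           /* default (natural) alignment */
        uint32_t msg_id;
        uint32_t ipi_id;
        uint64_t ap_inst;
        uint32_t vpu_inst_addr;
};

#pragma pack(push, 4)
struct comm_packed {            /* same members, packed to 4 bytes */
        uint32_t msg_id;
        uint32_t ipi_id;
        uint64_t ap_inst;
        uint32_t vpu_inst_addr;
};
#pragma pack(pop)

int main(void)
{
        printf("natural: %zu bytes, packed: %zu bytes\n",
               sizeof(struct comm_natural), sizeof(struct comm_packed));
        return 0; /* typically: natural 24 bytes, packed 20 bytes */
}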
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
new file mode 100644
index 000000000..ceffc31cc
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_mdp_regs.h"
+#include "mtk_vpu.h"
+
+
+/**
+ * struct mtk_mdp_pix_limit - image pixel size limits
+ * @org_w: source pixel width
+ * @org_h: source pixel height
+ * @target_rot_dis_w: pixel dst scaled width when the rotator is off
+ * @target_rot_dis_h: pixel dst scaled height when the rotator is off
+ * @target_rot_en_w: pixel dst scaled width when the rotator is on
+ * @target_rot_en_h: pixel dst scaled height when the rotator is on
+ */
+struct mtk_mdp_pix_limit {
+ u16 org_w;
+ u16 org_h;
+ u16 target_rot_dis_w;
+ u16 target_rot_dis_h;
+ u16 target_rot_en_w;
+ u16 target_rot_en_h;
+};
+
+static struct mtk_mdp_pix_align mtk_mdp_size_align = {
+ .org_w = 16,
+ .org_h = 16,
+ .target_w = 2,
+ .target_h = 2,
+};
+
+static const struct mtk_mdp_fmt mtk_mdp_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_MT21C,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .align = &mtk_mdp_size_align,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .num_comp = 3,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .num_comp = 3,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_max = {
+ .target_rot_dis_w = 4096,
+ .target_rot_dis_h = 4096,
+ .target_rot_en_w = 4096,
+ .target_rot_en_h = 4096,
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_min = {
+ .org_w = 16,
+ .org_h = 16,
+ .target_rot_dis_w = 16,
+ .target_rot_dis_h = 16,
+ .target_rot_en_w = 16,
+ .target_rot_en_h = 16,
+};
+
+/* align size for normal raster scan pixel format */
+static struct mtk_mdp_pix_align mtk_mdp_rs_align = {
+ .org_w = 2,
+ .org_h = 2,
+ .target_w = 2,
+ .target_h = 2,
+};
+
+static struct mtk_mdp_variant mtk_mdp_default_variant = {
+ .pix_max = &mtk_mdp_size_max,
+ .pix_min = &mtk_mdp_size_min,
+ .pix_align = &mtk_mdp_rs_align,
+ .h_scale_up_max = 32,
+ .v_scale_up_max = 32,
+ .h_scale_down_max = 32,
+ .v_scale_down_max = 128,
+};
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt(u32 pixelformat, u32 type)
+{
+ u32 i, flag;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+ MTK_MDP_FMT_FLAG_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+ if (!(mtk_mdp_formats[i].flags & flag))
+ continue;
+ if (mtk_mdp_formats[i].pixelformat == pixelformat)
+ return &mtk_mdp_formats[i];
+ }
+ return NULL;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt_by_index(u32 index, u32 type)
+{
+ u32 i, flag, num = 0;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+ MTK_MDP_FMT_FLAG_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+ if (!(mtk_mdp_formats[i].flags & flag))
+ continue;
+ if (index == num)
+ return &mtk_mdp_formats[i];
+ num++;
+ }
+ return NULL;
+}
+
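+/*
+ * Clamp and align *w/*h to [wmin..wmax]/[hmin..hmax] using power-of-two
+ * alignment steps. If v4l_bound_align_image() rounded a dimension down,
+ * bump it back up by one step as long as that stays within the maximum,
+ * so within the limits the result is not smaller than the requested size.
+ */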
+static void mtk_mdp_bound_align_image(u32 *w, unsigned int wmin,
+ unsigned int wmax, unsigned int align_w,
+ u32 *h, unsigned int hmin,
+ unsigned int hmax, unsigned int align_h)
+{
+ int org_w, org_h, step_w, step_h;
+ int walign, halign;
+
+ org_w = *w;
+ org_h = *h;
+ walign = ffs(align_w) - 1;
+ halign = ffs(align_h) - 1;
+ v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+
+ step_w = 1 << walign;
+ step_h = 1 << halign;
+ if (*w < org_w && (*w + step_w) <= wmax)
+ *w += step_w;
+ if (*h < org_h && (*h + step_h) <= hmax)
+ *h += step_h;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_try_fmt_mplane(struct mtk_mdp_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct mtk_mdp_fmt *fmt;
+ u32 max_w, max_h, align_w, align_h;
+ u32 min_w, min_h, org_w, org_h;
+ int i;
+
+ fmt = mtk_mdp_find_fmt(pix_mp->pixelformat, f->type);
+ if (!fmt)
+ fmt = mtk_mdp_find_fmt_by_index(0, f->type);
+ if (!fmt) {
+ dev_dbg(&ctx->mdp_dev->pdev->dev,
+			"pixel format 0x%X invalid\n",
+ pix_mp->pixelformat);
+ return NULL;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = fmt->pixelformat;
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quant;
+ }
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+
+ max_w = variant->pix_max->target_rot_dis_w;
+ max_h = variant->pix_max->target_rot_dis_h;
+
+ if (fmt->align == NULL) {
+ /* use default alignment */
+ align_w = variant->pix_align->org_w;
+ align_h = variant->pix_align->org_h;
+ } else {
+ align_w = fmt->align->org_w;
+ align_h = fmt->align->org_h;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ min_w = variant->pix_min->org_w;
+ min_h = variant->pix_min->org_h;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+
+ mtk_mdp_dbg(2, "[%d] type:%d, wxh:%ux%u, align:%ux%u, max:%ux%u",
+ ctx->id, f->type, pix_mp->width, pix_mp->height,
+ align_w, align_h, max_w, max_h);
+	/*
+	 * Remember the requested size so we can report whether it had to be
+	 * adjusted to fit the hardware limits.
+	 */
+ org_w = pix_mp->width;
+ org_h = pix_mp->height;
+
+ mtk_mdp_bound_align_image(&pix_mp->width, min_w, max_w, align_w,
+ &pix_mp->height, min_h, max_h, align_h);
+
+ if (org_w != pix_mp->width || org_h != pix_mp->height)
+ mtk_mdp_dbg(1, "[%d] size change:%ux%u to %ux%u", ctx->id,
+ org_w, org_h, pix_mp->width, pix_mp->height);
+ pix_mp->num_planes = fmt->num_planes;
+
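+	/*
+	 * bytesperline is derived from row_depth[] and sizeimage from
+	 * depth[]; a larger sizeimage supplied by user space is kept as-is.
+	 */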
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ int bpl = (pix_mp->width * fmt->row_depth[i]) / 8;
+ int sizeimage = (pix_mp->width * pix_mp->height *
+ fmt->depth[i]) / 8;
+
+ pix_mp->plane_fmt[i].bytesperline = bpl;
+ if (pix_mp->plane_fmt[i].sizeimage < sizeimage)
+ pix_mp->plane_fmt[i].sizeimage = sizeimage;
+ memset(pix_mp->plane_fmt[i].reserved, 0,
+ sizeof(pix_mp->plane_fmt[i].reserved));
+ mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%u (%u)", ctx->id,
+ i, bpl, pix_mp->plane_fmt[i].sizeimage, sizeimage);
+ }
+
+ return fmt;
+}
+
+static struct mtk_mdp_frame *mtk_mdp_ctx_get_frame(struct mtk_mdp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->s_frame;
+ return &ctx->d_frame;
+}
+
+static void mtk_mdp_check_crop_change(u32 new_w, u32 new_h, u32 *w, u32 *h)
+{
+ if (new_w != *w || new_h != *h) {
+ mtk_mdp_dbg(1, "size change:%dx%d to %dx%d",
+ *w, *h, new_w, new_h);
+
+ *w = new_w;
+ *h = new_h;
+ }
+}
+
+static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
+ struct v4l2_rect *r)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ u32 align_w, align_h, new_w, new_h;
+ u32 min_w, min_h, max_w, max_h;
+
+ if (r->top < 0 || r->left < 0) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+
+ mtk_mdp_dbg(2, "[%d] type:%d, set wxh:%dx%d", ctx->id, type,
+ r->width, r->height);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, type);
+ max_w = frame->width;
+ max_h = frame->height;
+ new_w = r->width;
+ new_h = r->height;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ align_w = 1;
+ align_h = 1;
+ min_w = 64;
+ min_h = 32;
+ } else {
+ align_w = variant->pix_align->target_w;
+ align_h = variant->pix_align->target_h;
+ if (ctx->ctrls.rotate->val == 90 ||
+ ctx->ctrls.rotate->val == 270) {
+ max_w = frame->height;
+ max_h = frame->width;
+ min_w = variant->pix_min->target_rot_en_w;
+ min_h = variant->pix_min->target_rot_en_h;
+ new_w = r->height;
+ new_h = r->width;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+ }
+
+ mtk_mdp_dbg(2, "[%d] align:%dx%d, min:%dx%d, new:%dx%d", ctx->id,
+ align_w, align_h, min_w, min_h, new_w, new_h);
+
+ mtk_mdp_bound_align_image(&new_w, min_w, max_w, align_w,
+ &new_h, min_h, max_h, align_h);
+
+ if (!V4L2_TYPE_IS_OUTPUT(type) &&
+ (ctx->ctrls.rotate->val == 90 ||
+ ctx->ctrls.rotate->val == 270))
+ mtk_mdp_check_crop_change(new_h, new_w,
+ &r->width, &r->height);
+ else
+ mtk_mdp_check_crop_change(new_w, new_h,
+ &r->width, &r->height);
+
+	/* adjust left/top if cropping rectangle is out of bounds */
+	/* Need to add code to align the left value to a multiple of 2 */
+ if (r->left + new_w > max_w)
+ r->left = max_w - new_w;
+ if (r->top + new_h > max_h)
+ r->top = max_h - new_h;
+
+ if (r->left & 1)
+ r->left -= 1;
+
+ mtk_mdp_dbg(2, "[%d] crop l,t,w,h:%d,%d,%d,%d, max:%dx%d", ctx->id,
+ r->left, r->top, r->width,
+ r->height, max_w, max_h);
+ return 0;
+}
+
+static inline struct mtk_mdp_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_mdp_ctx, fh);
+}
+
+static inline struct mtk_mdp_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_mdp_ctx, ctrl_handler);
+}
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state)
+{
+ mutex_lock(&ctx->slock);
+ ctx->state |= state;
+ mutex_unlock(&ctx->slock);
+}
+
+static void mtk_mdp_ctx_state_lock_clear(struct mtk_mdp_ctx *ctx, u32 state)
+{
+ mutex_lock(&ctx->slock);
+ ctx->state &= ~state;
+ mutex_unlock(&ctx->slock);
+}
+
+static bool mtk_mdp_ctx_state_is_set(struct mtk_mdp_ctx *ctx, u32 mask)
+{
+ bool ret;
+
+ mutex_lock(&ctx->slock);
+ ret = (ctx->state & mask) == mask;
+ mutex_unlock(&ctx->slock);
+ return ret;
+}
+
+static void mtk_mdp_set_frame_size(struct mtk_mdp_frame *frame, int width,
+ int height)
+{
+ frame->width = width;
+ frame->height = height;
+ frame->crop.width = width;
+ frame->crop.height = height;
+ frame->crop.left = 0;
+ frame->crop.top = 0;
+}
+
+static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_mdp_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
+ if (ret < 0)
+ mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
+ ctx->id, ret);
+
+ return 0;
+}
+
+static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ else
+ return v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+}
+
+static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct mtk_mdp_ctx *ctx = q->drv_priv;
+ struct vb2_buffer *vb;
+
+ vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+ while (vb != NULL) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
+ vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+ }
+
+ pm_runtime_put(&ctx->mdp_dev->pdev->dev);
+}
+
+/* The color format (num_planes) must already be configured. */
+static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx,
+ struct vb2_buffer *vb,
+ struct mtk_mdp_frame *frame,
+ struct mtk_mdp_addr *addr)
+{
+ u32 pix_size, planes, i;
+
+ pix_size = frame->width * frame->height;
+ planes = min_t(u32, frame->fmt->num_planes, ARRAY_SIZE(addr->addr));
+ for (i = 0; i < planes; i++)
+ addr->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
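+	/*
+	 * For the single-plane YVU420 format the chroma planes live in the
+	 * same buffer: Cr follows the luma plane and Cb follows Cr at a
+	 * quarter of the luma size.
+	 */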
+ if (planes == 1) {
+ if (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) {
+ addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
+ addr->addr[2] = (dma_addr_t)(addr->addr[1] +
+ (pix_size >> 2));
+ } else {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Invalid pixelformat:0x%x\n",
+ frame->fmt->pixelformat);
+ }
+ }
+ mtk_mdp_dbg(3, "[%d] planes:%d, size:%d, addr:%p,%p,%p",
+ ctx->id, planes, pix_size, (void *)addr->addr[0],
+ (void *)addr->addr[1], (void *)addr->addr[2]);
+}
+
+static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *s_frame, *d_frame;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+
+ src_vbuf = to_vb2_v4l2_buffer(src_vb);
+ dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+ dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+}
+
+static void mtk_mdp_process_done(void *priv, int vb_state)
+{
+ struct mtk_mdp_dev *mdp = priv;
+ struct mtk_mdp_ctx *ctx;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vbuf = NULL, *dst_vbuf = NULL;
+
+ ctx = v4l2_m2m_get_curr_priv(mdp->m2m_dev);
+ if (!ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ src_vbuf = to_vb2_v4l2_buffer(src_vb);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+
+ dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+ dst_vbuf->timecode = src_vbuf->timecode;
+ dst_vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vbuf->flags |= src_vbuf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_vbuf, vb_state);
+ v4l2_m2m_buf_done(dst_vbuf, vb_state);
+ v4l2_m2m_job_finish(ctx->mdp_dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void mtk_mdp_m2m_worker(struct work_struct *work)
+{
+ struct mtk_mdp_ctx *ctx =
+ container_of(work, struct mtk_mdp_ctx, work);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ int ret;
+
+ if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_CTX_ERROR)) {
+ dev_err(&mdp->pdev->dev, "ctx is in error state");
+ goto worker_end;
+ }
+
+ mtk_mdp_m2m_get_bufs(ctx);
+
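+	/* Program the per-frame parameters into the shared VSI structure used by the VPU. */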
+ mtk_mdp_hw_set_input_addr(ctx, &ctx->s_frame.addr);
+ mtk_mdp_hw_set_output_addr(ctx, &ctx->d_frame.addr);
+
+ mtk_mdp_hw_set_in_size(ctx);
+ mtk_mdp_hw_set_in_image_format(ctx);
+
+ mtk_mdp_hw_set_out_size(ctx);
+ mtk_mdp_hw_set_out_image_format(ctx);
+
+ mtk_mdp_hw_set_rotation(ctx);
+ mtk_mdp_hw_set_global_alpha(ctx);
+
+ ret = mtk_mdp_vpu_process(&ctx->vpu);
+ if (ret) {
+ dev_err(&mdp->pdev->dev, "processing failed: %d", ret);
+ goto worker_end;
+ }
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+worker_end:
+ mtk_mdp_process_done(mdp, buf_state);
+}
+
+static void mtk_mdp_m2m_device_run(void *priv)
+{
+ struct mtk_mdp_ctx *ctx = priv;
+
+ queue_work(ctx->mdp_dev->job_wq, &ctx->work);
+}
+
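+/*
+ * Plane sizes come from frame->payload[], which S_FMT filled with the
+ * negotiated sizeimage values.
+ */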
+static int mtk_mdp_m2m_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_mdp_frame *frame;
+ int i;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, vq->type);
+ *num_planes = frame->fmt->num_planes;
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ sizes[i] = frame->payload[i];
+ mtk_mdp_dbg(2, "[%d] type:%d, planes:%d, buffers:%d, size:%u,%u",
+ ctx->id, vq->type, *num_planes, *num_buffers,
+ sizes[0], sizes[1]);
+ return 0;
+}
+
+static int mtk_mdp_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_mdp_frame *frame;
+ int i;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, vb->vb2_queue->type);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+ }
+
+ return 0;
+}
+
+static void mtk_mdp_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static const struct vb2_ops mtk_mdp_m2m_qops = {
+ .queue_setup = mtk_mdp_m2m_queue_setup,
+ .buf_prepare = mtk_mdp_m2m_buf_prepare,
+ .buf_queue = mtk_mdp_m2m_buf_queue,
+ .stop_streaming = mtk_mdp_m2m_stop_streaming,
+ .start_streaming = mtk_mdp_m2m_start_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+ strlcpy(cap->driver, MTK_MDP_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, mdp->pdev->name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:mt8173", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int mtk_mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f, u32 type)
+{
+ const struct mtk_mdp_fmt *fmt;
+
+ fmt = mtk_mdp_find_fmt_by_index(f->index, type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixelformat;
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+}
+
+static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ int i;
+
+ mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+ pix_mp = &f->fmt.pix_mp;
+
+ pix_mp->width = frame->width;
+ pix_mp->height = frame->height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = frame->fmt->pixelformat;
+ pix_mp->num_planes = frame->fmt->num_planes;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quant;
+ mtk_mdp_dbg(2, "[%d] wxh:%dx%d", ctx->id,
+ pix_mp->width, pix_mp->height);
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ pix_mp->plane_fmt[i].bytesperline = (frame->width *
+ frame->fmt->row_depth[i]) / 8;
+ pix_mp->plane_fmt[i].sizeimage = (frame->width *
+ frame->height * frame->fmt->depth[i]) / 8;
+
+ mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%d", ctx->id, i,
+ pix_mp->plane_fmt[i].bytesperline,
+ pix_mp->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+ if (!mtk_mdp_try_fmt_mplane(ctx, f))
+ return -EINVAL;
+ return 0;
+}
+
+static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *vq;
+ struct mtk_mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ const struct mtk_mdp_fmt *fmt;
+ int i;
+
+ mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+ fmt = mtk_mdp_try_fmt_mplane(ctx, f);
+ if (!fmt) {
+ mtk_mdp_err("[%d] try_fmt failed, type:%d", ctx->id, f->type);
+ return -EINVAL;
+ }
+ frame->fmt = fmt;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_info(&ctx->mdp_dev->pdev->dev, "queue %d busy", f->type);
+ return -EBUSY;
+ }
+
+ pix_mp = &f->fmt.pix_mp;
+ for (i = 0; i < frame->fmt->num_planes; i++) {
+ frame->payload[i] = pix_mp->plane_fmt[i].sizeimage;
+ frame->pitch[i] = pix_mp->plane_fmt[i].bytesperline;
+ }
+
+ mtk_mdp_set_frame_size(frame, pix_mp->width, pix_mp->height);
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quant = pix_mp->quantization;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_SRC_FMT);
+ else
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_DST_FMT);
+
+ mtk_mdp_dbg(2, "[%d] type:%d, frame:%dx%d", ctx->id, f->type,
+ frame->width, frame->height);
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+ if (reqbufs->count == 0) {
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_SRC_FMT);
+ else
+ mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_DST_FMT);
+ }
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int mtk_mdp_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+	/* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_SRC_FMT))
+ return -EINVAL;
+ } else if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT)) {
+ return -EINVAL;
+ }
+
+ if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_VPU_INIT)) {
+ ret = mtk_mdp_vpu_init(&ctx->vpu);
+ if (ret < 0) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "vpu init failed %d\n",
+ ret);
+ return -EINVAL;
+ }
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_VPU_INIT);
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static inline bool mtk_mdp_is_target_compose(u32 target)
+{
+ if (target == V4L2_SEL_TGT_COMPOSE_DEFAULT
+ || target == V4L2_SEL_TGT_COMPOSE_BOUNDS
+ || target == V4L2_SEL_TGT_COMPOSE)
+ return true;
+ return false;
+}
+
+static inline bool mtk_mdp_is_target_crop(u32 target)
+{
+ if (target == V4L2_SEL_TGT_CROP_DEFAULT
+ || target == V4L2_SEL_TGT_CROP_BOUNDS
+ || target == V4L2_SEL_TGT_CROP)
+ return true;
+ return false;
+}
+
+static int mtk_mdp_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (mtk_mdp_is_target_compose(s->target))
+ valid = true;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (mtk_mdp_is_target_crop(s->target))
+ valid = true;
+ }
+ if (!valid) {
+ mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+ s->target);
+ return -EINVAL;
+ }
+
+ frame = mtk_mdp_ctx_get_frame(ctx, s->type);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = frame->crop.left;
+ s->r.top = frame->crop.top;
+ s->r.width = frame->crop.width;
+ s->r.height = frame->crop.height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
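+/*
+ * Reject scaling ratios outside the variant limits. The check uses integer
+ * division, and a 90/270 degree rotation swaps the destination width and
+ * height before the comparison.
+ */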
+static int mtk_mdp_check_scaler_ratio(struct mtk_mdp_variant *var, int src_w,
+ int src_h, int dst_w, int dst_h, int rot)
+{
+ int tmp_w, tmp_h;
+
+ if (rot == 90 || rot == 270) {
+ tmp_w = dst_h;
+ tmp_h = dst_w;
+ } else {
+ tmp_w = dst_w;
+ tmp_h = dst_h;
+ }
+
+ if ((src_w / tmp_w) > var->h_scale_down_max ||
+ (src_h / tmp_h) > var->v_scale_down_max ||
+ (tmp_w / src_w) > var->h_scale_up_max ||
+ (tmp_h / src_h) > var->v_scale_up_max)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_rect new_r;
+ struct mtk_mdp_variant *variant = ctx->mdp_dev->variant;
+ int ret;
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (s->target == V4L2_SEL_TGT_COMPOSE)
+ valid = true;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (s->target == V4L2_SEL_TGT_CROP)
+ valid = true;
+ }
+ if (!valid) {
+ mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+ s->target);
+ return -EINVAL;
+ }
+
+ new_r = s->r;
+ ret = mtk_mdp_try_crop(ctx, s->type, &new_r);
+ if (ret)
+ return ret;
+
+ if (mtk_mdp_is_target_crop(s->target))
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ /* Check to see if scaling ratio is within supported range */
+ if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT)) {
+ if (V4L2_TYPE_IS_OUTPUT(s->type)) {
+ ret = mtk_mdp_check_scaler_ratio(variant, new_r.width,
+ new_r.height, ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->ctrls.rotate->val);
+ } else {
+ ret = mtk_mdp_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height, new_r.width,
+ new_r.height, ctx->ctrls.rotate->val);
+ }
+
+ if (ret) {
+ dev_info(&ctx->mdp_dev->pdev->dev,
+ "Out of scaler range");
+ return -EINVAL;
+ }
+ }
+
+ s->r = new_r;
+ frame->crop = new_r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mtk_mdp_m2m_ioctl_ops = {
+ .vidioc_querycap = mtk_mdp_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_out,
+ .vidioc_g_fmt_vid_cap_mplane = mtk_mdp_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mtk_mdp_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = mtk_mdp_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mtk_mdp_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mtk_mdp_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mtk_mdp_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = mtk_mdp_m2m_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = mtk_mdp_m2m_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = mtk_mdp_m2m_g_selection,
+ .vidioc_s_selection = mtk_mdp_m2m_s_selection
+};
+
+static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_mdp_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &mtk_mdp_m2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = &ctx->mdp_dev->pdev->dev;
+ src_vq->lock = &ctx->mdp_dev->lock;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &mtk_mdp_m2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+ dst_vq->lock = &ctx->mdp_dev->lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int mtk_mdp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_mdp_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ u32 state = MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ if (mtk_mdp_ctx_state_is_set(ctx, state)) {
+ ret = mtk_mdp_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height,
+ ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->ctrls.rotate->val);
+
+ if (ret)
+ return -EINVAL;
+ }
+
+ ctx->rotation = ctrl->val;
+ break;
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mtk_mdp_ctrl_ops = {
+ .s_ctrl = mtk_mdp_s_ctrl,
+};
+
+static int mtk_mdp_ctrls_create(struct mtk_mdp_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, MTK_MDP_MAX_CTRL_NUM);
+
+ ctx->ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctx->ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.global_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, 255, 1, 0);
+ ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Failed to create control handlers\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void mtk_mdp_set_default_params(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_frame *frame;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ frame->fmt = mtk_mdp_find_fmt_by_index(0,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ frame->width = mdp->variant->pix_min->org_w;
+ frame->height = mdp->variant->pix_min->org_h;
+ frame->payload[0] = frame->width * frame->height;
+ frame->payload[1] = frame->payload[0] / 2;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ frame->fmt = mtk_mdp_find_fmt_by_index(0,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ frame->width = mdp->variant->pix_min->target_rot_dis_w;
+ frame->height = mdp->variant->pix_min->target_rot_dis_h;
+ frame->payload[0] = frame->width * frame->height;
+ frame->payload[1] = frame->payload[0] / 2;
+}
+
+static int mtk_mdp_m2m_open(struct file *file)
+{
+ struct mtk_mdp_dev *mdp = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct mtk_mdp_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&mdp->lock)) {
+ ret = -ERESTARTSYS;
+ goto err_lock;
+ }
+
+ mutex_init(&ctx->slock);
+ ctx->id = mdp->id_counter++;
+ v4l2_fh_init(&ctx->fh, vfd);
+ file->private_data = &ctx->fh;
+ ret = mtk_mdp_ctrls_create(ctx);
+ if (ret)
+ goto error_ctrls;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+
+ ctx->mdp_dev = mdp;
+ mtk_mdp_set_default_params(ctx);
+
+ INIT_WORK(&ctx->work, mtk_mdp_m2m_worker);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx,
+ mtk_mdp_m2m_queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ dev_err(&mdp->pdev->dev, "Failed to initialize m2m context");
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_m2m_ctx;
+ }
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+ if (mdp->ctx_num++ == 0) {
+ ret = vpu_load_firmware(mdp->vpu_dev);
+ if (ret < 0) {
+ dev_err(&mdp->pdev->dev,
+ "vpu_load_firmware failed %d\n", ret);
+ goto err_load_vpu;
+ }
+
+ ret = mtk_mdp_vpu_register(mdp->pdev);
+ if (ret < 0) {
+ dev_err(&mdp->pdev->dev,
+ "mdp_vpu register failed %d\n", ret);
+ goto err_load_vpu;
+ }
+ }
+
+ list_add(&ctx->list, &mdp->ctx_list);
+ mutex_unlock(&mdp->lock);
+
+ mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+ return 0;
+
+err_load_vpu:
+ mdp->ctx_num--;
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+error_m2m_ctx:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+error_ctrls:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&mdp->lock);
+err_lock:
+ kfree(ctx);
+
+ return ret;
+}
+
+static int mtk_mdp_m2m_release(struct file *file)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+ flush_workqueue(mdp->job_wq);
+ mutex_lock(&mdp->lock);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mtk_mdp_vpu_deinit(&ctx->vpu);
+ mdp->ctx_num--;
+ list_del_init(&ctx->list);
+
+ mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+ mutex_unlock(&mdp->lock);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_mdp_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_mdp_m2m_open,
+ .release = mtk_mdp_m2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
+ .device_run = mtk_mdp_m2m_device_run,
+};
+
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int ret;
+
+ mdp->variant = &mtk_mdp_default_variant;
+ mdp->vdev = video_device_alloc();
+ if (!mdp->vdev) {
+ dev_err(dev, "failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_video_alloc;
+ }
+ mdp->vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ mdp->vdev->fops = &mtk_mdp_m2m_fops;
+ mdp->vdev->ioctl_ops = &mtk_mdp_m2m_ioctl_ops;
+ mdp->vdev->release = video_device_release;
+ mdp->vdev->lock = &mdp->lock;
+ mdp->vdev->vfl_dir = VFL_DIR_M2M;
+ mdp->vdev->v4l2_dev = &mdp->v4l2_dev;
+ snprintf(mdp->vdev->name, sizeof(mdp->vdev->name), "%s:m2m",
+ MTK_MDP_MODULE_NAME);
+ video_set_drvdata(mdp->vdev, mdp);
+
+ mdp->m2m_dev = v4l2_m2m_init(&mtk_mdp_m2m_ops);
+ if (IS_ERR(mdp->m2m_dev)) {
+ dev_err(dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(mdp->m2m_dev);
+ goto err_m2m_init;
+ }
+
+ ret = video_register_device(mdp->vdev, VFL_TYPE_GRABBER, 2);
+ if (ret) {
+ dev_err(dev, "failed to register video device\n");
+ goto err_vdev_register;
+ }
+
+ v4l2_info(&mdp->v4l2_dev, "driver registered as /dev/video%d",
+ mdp->vdev->num);
+ return 0;
+
+err_vdev_register:
+ v4l2_m2m_release(mdp->m2m_dev);
+err_m2m_init:
+ video_device_release(mdp->vdev);
+err_video_alloc:
+
+ return ret;
+}
+
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp)
+{
+ video_unregister_device(mdp->vdev);
+ v4l2_m2m_release(mdp->m2m_dev);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
new file mode 100644
index 000000000..45afd3655
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_M2M_H__
+#define __MTK_MDP_M2M_H__
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state);
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp);
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp);
+
+#endif /* __MTK_MDP_M2M_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
new file mode 100644
index 000000000..86d57f380
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_regs.h"
+
+
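+/*
+ * Pack a color format descriptor into one 32-bit value: video id, plane
+ * count, co-plane flag, horizontal/vertical subsampling, bit depth, group,
+ * swap flag and a unique id each occupy their own bit field.
+ */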
+#define MDP_COLORFMT_PACK(VIDEO, PLANE, COPLANE, HF, VF, BITS, GROUP, SWAP, ID)\
+ (((VIDEO) << 27) | ((PLANE) << 24) | ((COPLANE) << 22) |\
+ ((HF) << 20) | ((VF) << 18) | ((BITS) << 8) | ((GROUP) << 6) |\
+ ((SWAP) << 5) | ((ID) << 0))
+
+enum MDP_COLOR_ENUM {
+ MDP_COLOR_UNKNOWN = 0,
+ MDP_COLOR_NV12 = MDP_COLORFMT_PACK(0, 2, 1, 1, 1, 8, 1, 0, 12),
+ MDP_COLOR_I420 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 0, 8),
+ MDP_COLOR_YV12 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 1, 8),
+	/* MediaTek proprietary format */
+ MDP_COLOR_420_MT21 = MDP_COLORFMT_PACK(5, 2, 1, 1, 1, 256, 1, 0, 12),
+};
+
+static int32_t mtk_mdp_map_color_format(int v4l2_format)
+{
+ switch (v4l2_format) {
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV12:
+ return MDP_COLOR_NV12;
+ case V4L2_PIX_FMT_MT21C:
+ return MDP_COLOR_420_MT21;
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YUV420:
+ return MDP_COLOR_I420;
+ case V4L2_PIX_FMT_YVU420:
+ return MDP_COLOR_YV12;
+ }
+
+ mtk_mdp_err("Unknown format 0x%x", v4l2_format);
+
+ return MDP_COLOR_UNKNOWN;
+}
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr)
+{
+ struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+ src_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr)
+{
+ struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+ dst_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *frame = &ctx->s_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->src_config;
+
+ /* Set input pixel offset */
+ config->crop_x = frame->crop.left;
+ config->crop_y = frame->crop.top;
+
+ /* Set input cropped size */
+ config->crop_w = frame->crop.width;
+ config->crop_h = frame->crop.height;
+
+ /* Set input original size */
+ config->x = 0;
+ config->y = 0;
+ config->w = frame->width;
+ config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx)
+{
+ unsigned int i;
+ struct mtk_mdp_frame *frame = &ctx->s_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->src_config;
+ struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+
+ src_buf->plane_num = frame->fmt->num_comp;
+ config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+ config->w_stride = 0; /* MDP will calculate it by color format. */
+ config->h_stride = 0; /* MDP will calculate it by color format. */
+
+ for (i = 0; i < src_buf->plane_num; i++)
+ src_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *frame = &ctx->d_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+
+ config->crop_x = frame->crop.left;
+ config->crop_y = frame->crop.top;
+ config->crop_w = frame->crop.width;
+ config->crop_h = frame->crop.height;
+ config->x = 0;
+ config->y = 0;
+ config->w = frame->width;
+ config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx)
+{
+ unsigned int i;
+ struct mtk_mdp_frame *frame = &ctx->d_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+ struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+
+ dst_buf->plane_num = frame->fmt->num_comp;
+ config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+ config->w_stride = 0; /* MDP will calculate it by color format. */
+ config->h_stride = 0; /* MDP will calculate it by color format. */
+ for (i = 0; i < dst_buf->plane_num; i++)
+ dst_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx)
+{
+ struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+ misc->orientation = ctx->ctrls.rotate->val;
+ misc->hflip = ctx->ctrls.hflip->val;
+ misc->vflip = ctx->ctrls.vflip->val;
+}
+
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx)
+{
+ struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+ misc->alpha = ctx->ctrls.global_alpha->val;
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
new file mode 100644
index 000000000..42bd057e7
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_REGS_H__
+#define __MTK_MDP_REGS_H__
+
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx);
+
+
+#endif /* __MTK_MDP_REGS_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
new file mode 100644
index 000000000..4893825aa
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_vpu.h"
+#include "mtk_vpu.h"
+
+
+static inline struct mtk_mdp_ctx *vpu_to_ctx(struct mtk_mdp_vpu *vpu)
+{
+ return container_of(vpu, struct mtk_mdp_ctx, vpu);
+}
+
+static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg)
+{
+ struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+ (unsigned long)msg->ap_inst;
+
+ /* mapping VPU address to kernel virtual address */
+ vpu->vsi = (struct mdp_process_vsi *)
+ vpu_mapping_dm_addr(vpu->pdev, msg->vpu_inst_addr);
+ vpu->inst_addr = msg->vpu_inst_addr;
+}
+
+static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ unsigned int msg_id = *(unsigned int *)data;
+ struct mdp_ipi_comm_ack *msg = (struct mdp_ipi_comm_ack *)data;
+ struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+ (unsigned long)msg->ap_inst;
+ struct mtk_mdp_ctx *ctx;
+
+ vpu->failure = msg->status;
+ if (!vpu->failure) {
+ switch (msg_id) {
+ case VPU_MDP_INIT_ACK:
+ mtk_mdp_vpu_handle_init_ack(data);
+ break;
+ case VPU_MDP_DEINIT_ACK:
+ case VPU_MDP_PROCESS_ACK:
+ break;
+ default:
+ ctx = vpu_to_ctx(vpu);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "handle unknown ipi msg:0x%x\n",
+ msg_id);
+ break;
+ }
+ } else {
+ ctx = vpu_to_ctx(vpu);
+ mtk_mdp_dbg(0, "[%d]:msg 0x%x, failure:%d", ctx->id,
+ msg_id, vpu->failure);
+ }
+}
+
+int mtk_mdp_vpu_register(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+ int err;
+
+ err = vpu_ipi_register(mdp->vpu_dev, IPI_MDP,
+ mtk_mdp_vpu_ipi_handler, "mdp_vpu", NULL);
+ if (err)
+ dev_err(&mdp->pdev->dev,
+ "vpu_ipi_registration fail status=%d\n", err);
+
+ return err;
+}
+
+static int mtk_mdp_vpu_send_msg(void *msg, int len, struct mtk_mdp_vpu *vpu,
+ int id)
+{
+ struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+ int err;
+
+ if (!vpu->pdev) {
+ mtk_mdp_dbg(1, "[%d]:vpu pdev is NULL", ctx->id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->mdp_dev->vpulock);
+ err = vpu_ipi_send(vpu->pdev, (enum ipi_id)id, msg, len);
+ if (err)
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "vpu_ipi_send fail status %d\n", err);
+ mutex_unlock(&ctx->mdp_dev->vpulock);
+
+ return err;
+}
+
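+/* Common helper for the DEINIT/PROCESS commands sent to the co-processor. */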
+static int mtk_mdp_vpu_send_ap_ipi(struct mtk_mdp_vpu *vpu, uint32_t msg_id)
+{
+ int err;
+ struct mdp_ipi_comm msg;
+
+ msg.msg_id = msg_id;
+ msg.ipi_id = IPI_MDP;
+ msg.vpu_inst_addr = vpu->inst_addr;
+ msg.ap_inst = (unsigned long)vpu;
+ err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+ if (!err && vpu->failure)
+ err = -EINVAL;
+
+ return err;
+}
+
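+/*
+ * Send AP_MDP_INIT to the co-processor; the VPU replies with
+ * VPU_MDP_INIT_ACK, whose handler maps the shared VSI structure into
+ * kernel virtual address space.
+ */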
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu)
+{
+ int err;
+ struct mdp_ipi_init msg;
+ struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+
+ vpu->pdev = ctx->mdp_dev->vpu_dev;
+
+ msg.msg_id = AP_MDP_INIT;
+ msg.ipi_id = IPI_MDP;
+ msg.ap_inst = (unsigned long)vpu;
+ err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+ if (!err && vpu->failure)
+ err = -EINVAL;
+
+ return err;
+}
+
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu)
+{
+ return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_DEINIT);
+}
+
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu)
+{
+ return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_PROCESS);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
new file mode 100644
index 000000000..df4bddaa4
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_VPU_H__
+#define __MTK_MDP_VPU_H__
+
+#include "mtk_mdp_ipi.h"
+
+
+/**
+ * struct mtk_mdp_vpu - VPU instance for MDP
+ * @pdev : pointer to the VPU platform device
+ * @inst_addr : VPU MDP instance address
+ * @failure : VPU execution result status
+ * @vsi : VPU shared information
+ */
+struct mtk_mdp_vpu {
+ struct platform_device *pdev;
+ uint32_t inst_addr;
+ int32_t failure;
+ struct mdp_process_vsi *vsi;
+};
+
+int mtk_mdp_vpu_register(struct platform_device *pdev);
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu);
+
+#endif /* __MTK_MDP_VPU_H__ */
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
new file mode 100644
index 000000000..37b94b555
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
+ mtk-vcodec-enc.o \
+ mtk-vcodec-common.o
+
+mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
+ vdec/vdec_vp8_if.o \
+ vdec/vdec_vp9_if.o \
+ mtk_vcodec_dec_drv.o \
+ vdec_drv_if.o \
+ vdec_vpu_if.o \
+ mtk_vcodec_dec.o \
+ mtk_vcodec_dec_pm.o \
+
+
+mtk-vcodec-enc-y := venc/venc_vp8_if.o \
+ venc/venc_h264_if.o \
+ mtk_vcodec_enc.o \
+ mtk_vcodec_enc_drv.o \
+ mtk_vcodec_enc_pm.o \
+ venc_drv_if.o \
+ venc_vpu_if.o \
+
+
+mtk-vcodec-common-y := mtk_vcodec_intr.o \
+ mtk_vcodec_util.o\
+
+ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
new file mode 100644
index 000000000..0c8a8b4c4
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -0,0 +1,1511 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec_pm.h"
+
+#define OUT_FMT_IDX 0
+#define CAP_FMT_IDX 3
+
+#define MTK_VDEC_MIN_W 64U
+#define MTK_VDEC_MIN_H 64U
+#define DFT_CFG_WIDTH MTK_VDEC_MIN_W
+#define DFT_CFG_HEIGHT MTK_VDEC_MIN_H
+
+static struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MT21C,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+
+static struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[MTK_Q_DATA_SRC];
+
+ return &ctx->q_data[MTK_Q_DATA_DST];
+}
+
+/*
+ * This function tries to clean all display buffers; the buffers are returned
+ * in display order.
+ * Note that buffers returned from the codec driver may still be on the
+ * driver's reference list.
+ */
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_fb *disp_frame_buffer = NULL;
+ struct mtk_video_dec_buf *dstbuf;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_DISP_FRAME_BUFFER,
+ &disp_frame_buffer)) {
+ mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
+ ctx->id);
+ return NULL;
+ }
+
+ if (disp_frame_buffer == NULL) {
+ mtk_v4l2_debug(3, "No display frame buffer");
+ return NULL;
+ }
+
+ dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
+ ctx->picinfo.y_bs_sz);
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
+ ctx->picinfo.c_bs_sz);
+
+ dstbuf->ready_to_display = true;
+
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to done_list %d",
+ ctx->id, disp_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2);
+
+ v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+ ctx->decoded_frame_cnt++;
+ }
+ mutex_unlock(&ctx->lock);
+ return &dstbuf->vb.vb2_buf;
+}
+
+/*
+ * This function tries to clean all capture buffers that the codec driver no
+ * longer uses as reference buffers.
+ * In this case, re-queue the buffer to vb2 if user space has already returned
+ * it to v4l2, or if it is just the output of a previous sps/pps/resolution
+ * change decode; do nothing if user space still owns the buffer.
+ */
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_video_dec_buf *dstbuf;
+ struct vdec_fb *free_frame_buffer = NULL;
+
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_FREE_FRAME_BUFFER,
+ &free_frame_buffer)) {
+ mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+ return NULL;
+ }
+ if (free_frame_buffer == NULL) {
+ mtk_v4l2_debug(3, " No free frame buffer");
+ return NULL;
+ }
+
+ mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p",
+ ctx->id, free_frame_buffer);
+
+ dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ if ((dstbuf->queued_in_vb2) &&
+ (dstbuf->queued_in_v4l2) &&
+ (free_frame_buffer->status == FB_ST_FREE)) {
+			/*
+			 * After decoding sps/pps or a non-display buffer, we
+			 * don't need to return the capture buffer to user
+			 * space; just re-queue it to the vb2 queue. This
+			 * avoids the overhead of dequeuing/queuing an unused
+			 * capture buffer. In this case, queued_in_vb2 = true.
+			 */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue %d",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ } else if ((dstbuf->queued_in_vb2 == false) &&
+ (dstbuf->queued_in_v4l2 == true)) {
+			/*
+			 * If the buffer is in the v4l2 driver but not yet in
+			 * the vb2 queue, and we got it from the free list,
+			 * the codec driver no longer uses it as a reference
+			 * buffer. Queue it to vb2 so that the work thread can
+			 * later pick it up for decoding. In this case,
+			 * queued_in_vb2 = false means the buffer is not the
+			 * output of a previous decode.
+			 */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ dstbuf->queued_in_vb2 = true;
+ } else {
+			/*
+			 * The codec driver no longer references this capture
+			 * buffer and it is not in the v4l2 driver either, so
+			 * there is nothing to do; just log it for debugging
+			 * the buffer flow. When user space queues this buffer
+			 * again, it can go directly to the vb2 queue.
+			 */
+ mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2,
+ dstbuf->queued_in_v4l2);
+ }
+ dstbuf->used = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return &dstbuf->vb.vb2_buf;
+}
+
+static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_buffer *framptr;
+
+ do {
+ framptr = get_display_buffer(ctx);
+ } while (framptr);
+}
+
+static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_buffer *framptr;
+
+ do {
+ framptr = get_free_buffer(ctx);
+ } while (framptr);
+}
+
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ mtk_v4l2_debug(1, "[%d]", ctx->id);
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+ int ret = 0;
+
+ ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+}
+
+static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int dpbsize = 0;
+ int ret;
+
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_PIC_INFO,
+ &ctx->last_decoded_picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
+ return -EINVAL;
+ }
+
+ if (ctx->last_decoded_picinfo.pic_w == 0 ||
+ ctx->last_decoded_picinfo.pic_h == 0 ||
+ ctx->last_decoded_picinfo.buf_w == 0 ||
+ ctx->last_decoded_picinfo.buf_h == 0) {
+ mtk_v4l2_err("Cannot get correct pic info");
+ return -EINVAL;
+ }
+
+ if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
+ (ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
+ return 0;
+
+ mtk_v4l2_debug(1,
+ "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+ ctx->id, ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+
+ ctx->dpb_size = dpbsize;
+
+ return ret;
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
+ decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct mtk_vcodec_mem buf;
+ struct vdec_fb *pfb;
+ bool res_chg = false;
+ int ret;
+ struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf == NULL) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (dst_buf == NULL) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ return;
+ }
+
+ src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
+ src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+ dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+ dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
+ pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;
+
+ pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
+ pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
+ pfb->status = 0;
+ mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+
+ mtk_v4l2_debug(3,
+ "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+ dst_buf->index, pfb,
+ pfb->base_y.va, &pfb->base_y.dma_addr,
+ &pfb->base_c.dma_addr, pfb->base_y.size);
+
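+	/*
+	 * An empty "lastframe" buffer is the flush marker: drain the decoder,
+	 * return the capture buffer with zero payload and the
+	 * V4L2_BUF_FLAG_LAST flag set, then finish the job.
+	 */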
+ if (src_buf_info->lastframe) {
+ mtk_v4l2_debug(1, "Got empty flush input buffer.");
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+ /* update dst buf status */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = false;
+ mutex_unlock(&ctx->lock);
+
+ vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ clean_display_buffer(ctx);
+ vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+ dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
+ clean_free_buffer(ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ return;
+ }
+ buf.va = vb2_plane_vaddr(src_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ buf.size = (size_t)src_buf->planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
+ ctx->id, src_buf->index);
+ return;
+ }
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+ dst_buf_info->vb.vb2_buf.timestamp
+ = src_buf_info->vb.vb2_buf.timestamp;
+ dst_buf_info->vb.timecode
+ = src_buf_info->vb.timecode;
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = true;
+ mutex_unlock(&ctx->lock);
+ src_buf_info->used = true;
+
+ ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
+
+ if (ret) {
+ mtk_v4l2_err(
+ " <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id,
+ src_buf->index,
+ buf.size,
+ src_buf_info->vb.vb2_buf.timestamp,
+ dst_buf->index,
+ ret, res_chg);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ src_buf_info->error = true;
+ mutex_unlock(&ctx->lock);
+ }
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
+ } else if (res_chg == false) {
+ /*
+ * we only return src buffer with VB2_BUF_STATE_DONE
+ * when decode success without resolution change
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+ }
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ if (!ret && res_chg) {
+ mtk_vdec_pic_info_update(ctx);
+ /*
+		 * On encountering a resolution change in the stream,
+		 * the driver must first process and decode all the
+		 * remaining buffers from before the resolution change
+		 * point, so flush the decoder here.
+ */
+ mtk_vdec_flush_decoder(ctx);
+ /*
+		 * Once all buffers containing decoded frames from
+		 * before the resolution change point are ready to be
+		 * dequeued on the CAPTURE queue, the driver sends a
+		 * V4L2_EVENT_SOURCE_CHANGE event with source change
+		 * type V4L2_EVENT_SRC_CH_RESOLUTION.
+ */
+ mtk_vdec_queue_res_chg_event(ctx);
+ }
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
+
+static int vidioc_try_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *cmd)
+{
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ case V4L2_DEC_CMD_START:
+ if (cmd->flags != 0) {
+ mtk_v4l2_err("cmd->flags=%u", cmd->flags);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+static int vidioc_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *cmd)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *src_vq, *dst_vq;
+ int ret;
+
+ ret = vidioc_try_decoder_cmd(file, priv, cmd);
+ if (ret)
+ return ret;
+
+ mtk_v4l2_debug(1, "decoder cmd=%u", cmd->cmd);
+ dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!vb2_is_streaming(src_vq)) {
+ mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
+ return 0;
+ }
+ if (!vb2_is_streaming(dst_vq)) {
+ mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
+ return 0;
+ }
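+		/*
+		 * Queue the dummy empty flush buffer; mtk_vdec_worker()
+		 * treats it as the drain marker (lastframe).
+		 */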
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb);
+ v4l2_m2m_try_schedule(ctx->m2m_ctx);
+ break;
+
+ case V4L2_DEC_CMD_START:
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx)
+{
+ mutex_unlock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx)
+{
+ mutex_lock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx)
+{
+ vdec_if_deinit(ctx);
+ ctx->state = MTK_STATE_FREE;
+}
+
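+/*
+ * Set up the m2m context, the decode worker and default colorimetry, then
+ * initialize both queues with the default DFT_CFG_WIDTH x DFT_CFG_HEIGHT
+ * resolution and formats.
+ */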
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_q_data *q_data;
+
+ ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+ INIT_WORK(&ctx->decode_work, mtk_vdec_worker);
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+
+ q_data->sizeimage[0] = DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
+ q_data->bytesperline[0] = 0;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->coded_width = DFT_CFG_WIDTH;
+ q_data->coded_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+
+ v4l_bound_align_image(&q_data->coded_width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W, 4,
+ &q_data->coded_height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H, 5, 6);
+
+ q_data->sizeimage[0] = q_data->coded_width * q_data->coded_height;
+ q_data->bytesperline[0] = q_data->coded_width;
+ q_data->sizeimage[1] = q_data->sizeimage[0] / 2;
+ q_data->bytesperline[1] = q_data->coded_width;
+}
+
+static int vidioc_vdec_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on DQBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, MTK_VCODEC_DEC_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
+ strlcpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
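+/*
+ * vidioc_try_fmt() clamps and 64-aligns the requested CAPTURE resolution and
+ * derives the per-plane sizeimage/bytesperline from it; for OUTPUT only a
+ * single bitstream plane with bytesperline = 0 is reported.
+ */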
+static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ int i;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pix_fmt_mp->num_planes = 1;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ int tmp_w, tmp_h;
+
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W);
+
+ /*
+		 * Find the next closest rectangle with width, height and
+		 * size aligned to 64.
+		 * Note: this only sets a default value; the value the HW
+		 * actually needs is only available once the ctx reaches
+		 * MTK_STATE_HEADER.
+ */
+ tmp_w = pix_fmt_mp->width;
+ tmp_h = pix_fmt_mp->height;
+ v4l_bound_align_image(&pix_fmt_mp->width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W, 6,
+ &pix_fmt_mp->height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H, 6, 9);
+
+ if (pix_fmt_mp->width < tmp_w &&
+ (pix_fmt_mp->width + 64) <= MTK_VDEC_MAX_W)
+ pix_fmt_mp->width += 64;
+ if (pix_fmt_mp->height < tmp_h &&
+ (pix_fmt_mp->height + 64) <= MTK_VDEC_MAX_H)
+ pix_fmt_mp->height += 64;
+
+ mtk_v4l2_debug(0,
+ "before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d",
+ tmp_w, tmp_h, pix_fmt_mp->width,
+ pix_fmt_mp->height,
+ pix_fmt_mp->width * pix_fmt_mp->height);
+
+ pix_fmt_mp->num_planes = fmt->num_planes;
+ pix_fmt_mp->plane_fmt[0].sizeimage =
+ pix_fmt_mp->width * pix_fmt_mp->height;
+ pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
+
+ if (pix_fmt_mp->num_planes == 2) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2;
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->width;
+ }
+ }
+
+ for (i = 0; i < pix_fmt_mp->num_planes; i++)
+ memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
+ sizeof(pix_fmt_mp->plane_fmt[0].reserved));
+
+ pix_fmt_mp->flags = 0;
+ memset(&pix_fmt_mp->reserved, 0x0, sizeof(pix_fmt_mp->reserved));
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_vdec_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_vdec_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+
+ if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+ mtk_v4l2_err("sizeimage of output format must be given");
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_vdec_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_q_data *q_data;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.pic_w;
+ s->r.height = ctx->picinfo.pic_h;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.buf_w;
+ s->r.height = ctx->picinfo.buf_h;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (vdec_if_get_param(ctx, GET_PARAM_CROP_INFO, &(s->r))) {
+			/* set to default value if header info is not ready yet */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ctx->state < MTK_STATE_HEADER) {
+		/* set to default value if header info is not ready yet */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int vidioc_vdec_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.pic_w;
+ s->r.height = ctx->picinfo.pic_h;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_vdec_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct mtk_q_data *q_data;
+ int ret = 0;
+ struct mtk_video_fmt *fmt;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ q_data = mtk_vdec_get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix_mp = &f->fmt.pix_mp;
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
+ mtk_v4l2_err("out_q_ctx buffers already requested");
+ ret = -EBUSY;
+ }
+
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
+ mtk_v4l2_err("cap_q_ctx buffers already requested");
+ ret = -EBUSY;
+ }
+
+ fmt = mtk_vdec_find_format(f);
+ if (fmt == NULL) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f->fmt.pix.pixelformat =
+ mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ f->fmt.pix.pixelformat =
+ mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+ }
+
+ q_data->fmt = fmt;
+ vidioc_try_fmt(f, q_data->fmt);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ q_data->sizeimage[0] = pix_mp->plane_fmt[0].sizeimage;
+ q_data->coded_width = pix_mp->width;
+ q_data->coded_height = pix_mp->height;
+
+ ctx->colorspace = f->fmt.pix_mp.colorspace;
+ ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->quantization = f->fmt.pix_mp.quantization;
+ ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ if (ctx->state == MTK_STATE_FREE) {
+ ret = vdec_if_init(ctx, q_data->fmt->fourcc);
+ if (ret) {
+ mtk_v4l2_err("[%d]: vdec_if_init() fail ret=%d",
+ ctx->id, ret);
+ return -EINVAL;
+ }
+ ctx->state = MTK_STATE_INIT;
+ }
+ }
+
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ int i = 0;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
+ if (fsize->pixel_format != mtk_vdec_framesizes[i].fourcc)
+ continue;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = mtk_vdec_framesizes[i].stepwise;
+ if (!(ctx->dev->dec_capability &
+ VCODEC_CAPABILITY_4K_DISABLED)) {
+ mtk_v4l2_debug(3, "4K is enabled");
+ fsize->stepwise.max_width =
+ VCODEC_DEC_4K_CODED_WIDTH;
+ fsize->stepwise.max_height =
+ VCODEC_DEC_4K_CODED_HEIGHT;
+ }
+ mtk_v4l2_debug(1, "%x, %d %d %d %d %d %d",
+ ctx->dev->dec_capability,
+ fsize->stepwise.min_width,
+ fsize->stepwise.max_width,
+ fsize->stepwise.step_width,
+ fsize->stepwise.min_height,
+ fsize->stepwise.max_height,
+ fsize->stepwise.step_height);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+{
+ struct mtk_video_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (output_queue && (mtk_video_formats[i].type != MTK_FMT_DEC))
+ continue;
+ if (!output_queue &&
+ (mtk_video_formats[i].type != MTK_FMT_FRAME))
+ continue;
+
+ if (j == f->index)
+ break;
+ ++j;
+ }
+
+ if (i == NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &mtk_video_formats[i];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int vidioc_vdec_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false);
+}
+
+static int vidioc_vdec_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true);
+}
+
+static int vidioc_vdec_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq) {
+ mtk_v4l2_err("no vb2 queue for type=%d", f->type);
+ return -EINVAL;
+ }
+
+ q_data = mtk_vdec_get_q_data(ctx, f->type);
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ pix_mp->xfer_func = ctx->xfer_func;
+
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ (ctx->state >= MTK_STATE_HEADER)) {
+ /* Until STREAMOFF is called on the CAPTURE queue
+ * (acknowledging the event), the driver operates as if
+ * the resolution hasn't changed yet.
+		 * So we just return the current picinfo here, and update
+		 * picinfo in the stop_streaming hook.
+ */
+ q_data->sizeimage[0] = ctx->picinfo.y_bs_sz +
+ ctx->picinfo.y_len_sz;
+ q_data->sizeimage[1] = ctx->picinfo.c_bs_sz +
+ ctx->picinfo.c_len_sz;
+ q_data->bytesperline[0] = ctx->last_decoded_picinfo.buf_w;
+ q_data->bytesperline[1] = ctx->last_decoded_picinfo.buf_w;
+ q_data->coded_width = ctx->picinfo.buf_w;
+ q_data->coded_height = ctx->picinfo.buf_h;
+
+ /*
+		 * Width and height are set to the dimensions of the
+		 * decoded video; the buffer itself is bigger, and
+		 * further processing stages should crop to this
+		 * rectangle.
+ */
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
+
+ /*
+ * Set pixelformat to the format in which mt vcodec
+ * outputs the decoded frame
+ */
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+ pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /*
+		 * This is run on the OUTPUT queue.
+		 * The buffer contains a compressed bitstream, so width
+		 * and height have no meaning.
+		 * Assign values here only to pass the v4l2-compliance test.
+ */
+ pix_mp->width = q_data->visible_width;
+ pix_mp->height = q_data->visible_height;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ } else {
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+ pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+ mtk_v4l2_debug(1, "[%d] type=%d state=%d Format information could not be read, not ready yet!",
+ ctx->id, f->type, ctx->state);
+ }
+
+ return 0;
+}
+
+static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_q_data *q_data;
+ unsigned int i;
+
+ q_data = mtk_vdec_get_q_data(ctx, vq->type);
+
+ if (q_data == NULL) {
+ mtk_v4l2_err("vq->type=%d err\n", vq->type);
+ return -EINVAL;
+ }
+
+ if (*nplanes) {
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+ }
+ } else {
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ *nplanes = 2;
+ else
+ *nplanes = 1;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+ }
+
+ mtk_v4l2_debug(1,
+ "[%d]\t type = %d, get %d plane(s), %d buffer(s) of size 0x%x 0x%x ",
+ ctx->id, vq->type, *nplanes, *nbuffers,
+ sizes[0], sizes[1]);
+
+ return 0;
+}
+
+static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_q_data *q_data;
+ int i;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d",
+ ctx->id, vb->vb2_queue->type, vb->index);
+
+ q_data = mtk_vdec_get_q_data(ctx, vb->vb2_queue->type);
+
+ for (i = 0; i < q_data->fmt->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
+ i, vb2_plane_size(vb, i),
+ q_data->sizeimage[i]);
+ }
+ }
+
+ return 0;
+}
+
+static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_buffer *src_buf;
+ struct mtk_vcodec_mem src_mem;
+ bool res_chg = false;
+ int ret = 0;
+ unsigned int dpbsize = 1;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
+ struct mtk_video_dec_buf *buf = NULL;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
+ ctx->id, vb->vb2_queue->type,
+ vb->index, vb);
+ /*
+ * check if this buffer is ready to be used after decode
+ */
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ mutex_lock(&ctx->lock);
+ if (buf->used == false) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ buf->queued_in_vb2 = true;
+ buf->queued_in_v4l2 = true;
+ buf->ready_to_display = false;
+ } else {
+ buf->queued_in_vb2 = false;
+ buf->queued_in_v4l2 = true;
+ buf->ready_to_display = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+
+ if (ctx->state != MTK_STATE_INIT) {
+ mtk_v4l2_debug(3, "[%d] already init driver %d",
+ ctx->id, ctx->state);
+ return;
+ }
+
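+	/*
+	 * In MTK_STATE_INIT, feed the first bitstream buffer to the decoder
+	 * (with fb == NULL) to parse the header and obtain the picture info
+	 * and DPB size before moving to MTK_STATE_HEADER.
+	 */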
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ mtk_v4l2_err("No src buffer");
+ return;
+ }
+ vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ if (buf->lastframe) {
+ /* This shouldn't happen. Just in case. */
+ mtk_v4l2_err("Invalid flush buffer.");
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ return;
+ }
+
+ src_mem.va = vb2_plane_vaddr(src_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src_mem.size = (size_t)src_buf->planes[0].bytesused;
+ mtk_v4l2_debug(2,
+ "[%d] buf id=%d va=%p dma=%pad size=%zx",
+ ctx->id, src_buf->index,
+ src_mem.va, &src_mem.dma_addr,
+ src_mem.size);
+
+ ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
+ if (ret || !res_chg) {
+ /*
+		 * fb == NULL means we only parse the SPS/PPS header or
+		 * resolution info in src_mem. Decode can fail if there
+		 * is no SPS header or picture info in the bitstream.
+ */
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.",
+ ctx->id);
+ ctx->state = MTK_STATE_ABORT;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ } else {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_DONE);
+ }
+ mtk_v4l2_debug(ret ? 0 : 1,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->index,
+ src_mem.size, ret, res_chg);
+ return;
+ }
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
+ return;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.y_bs_sz +
+ ctx->picinfo.y_len_sz;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
+ ctx->picinfo.c_bs_sz +
+ ctx->picinfo.c_len_sz;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] = ctx->picinfo.buf_w;
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+
+ ctx->dpb_size = dpbsize;
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+
+ mtk_vdec_queue_res_chg_event(ctx);
+}
+
+static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct mtk_video_dec_buf *buf;
+ bool buf_error;
+
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ mutex_lock(&ctx->lock);
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ buf->queued_in_v4l2 = false;
+ buf->queued_in_vb2 = false;
+ }
+ buf_error = buf->error;
+ mutex_unlock(&ctx->lock);
+
+ if (buf_error) {
+ mtk_v4l2_err("Unrecoverable error on buffer.");
+ ctx->state = MTK_STATE_ABORT;
+ }
+}
+
+static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+ struct vb2_v4l2_buffer, vb2_buf);
+ struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
+ struct mtk_video_dec_buf, vb);
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ buf->used = false;
+ buf->ready_to_display = false;
+ buf->queued_in_v4l2 = false;
+ } else {
+ buf->lastframe = false;
+ }
+
+ return 0;
+}
+
+static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (ctx->state == MTK_STATE_FLUSH)
+ ctx->state = MTK_STATE_HEADER;
+
+ return 0;
+}
+
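+/*
+ * stop_streaming returns all still-queued buffers; on the CAPTURE side it
+ * also flushes the decoder and commits last_decoded_picinfo as the new
+ * picinfo.
+ */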
+static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+{
+ struct vb2_buffer *src_buf = NULL, *dst_buf = NULL;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
+ ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
+ struct vb2_v4l2_buffer *vb2_v4l2 =
+ to_vb2_v4l2_buffer(src_buf);
+ struct mtk_video_dec_buf *buf_info = container_of(
+ vb2_v4l2, struct mtk_video_dec_buf, vb);
+ if (!buf_info->lastframe)
+ v4l2_m2m_buf_done(vb2_v4l2,
+ VB2_BUF_STATE_ERROR);
+ }
+ return;
+ }
+
+ if (ctx->state >= MTK_STATE_HEADER) {
+
+ /* Until STREAMOFF is called on the CAPTURE queue
+ * (acknowledging the event), the driver operates
+ * as if the resolution hasn't changed yet, i.e.
+		 * VIDIOC_G_FMT etc. return the previous resolution.
+		 * So we update picinfo here.
+ */
+ ctx->picinfo = ctx->last_decoded_picinfo;
+
+ mtk_v4l2_debug(2,
+ "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+ ctx->id, ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ mtk_vdec_flush_decoder(ctx);
+ }
+ ctx->state = MTK_STATE_FLUSH;
+
+ while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+ vb2_set_plane_payload(dst_buf, 0, 0);
+ vb2_set_plane_payload(dst_buf, 1, 0);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ }
+
+}
+
+static void m2mops_vdec_device_run(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_dev *dev = ctx->dev;
+
+ queue_work(dev->decode_workqueue, &ctx->decode_work);
+}
+
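+/*
+ * job_ready only allows scheduling once the header has been parsed
+ * (MTK_STATE_HEADER) and no resolution change is pending.
+ */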
+static int m2mops_vdec_job_ready(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ if (ctx->state == MTK_STATE_ABORT)
+ return 0;
+
+ if ((ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w) ||
+ (ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h))
+ return 0;
+
+ if (ctx->state != MTK_STATE_HEADER)
+ return 0;
+
+ return 1;
+}
+
+static void m2mops_vdec_job_abort(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+
+ ctx->state = MTK_STATE_ABORT;
+}
+
+static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MTK_STATE_HEADER) {
+ ctrl->val = ctx->dpb_size;
+ } else {
+ mtk_v4l2_debug(0, "Seqinfo not ready");
+ ctrl->val = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+ .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
+};
+
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
+ &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+ 0, 32, 1, 1);
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl,
+ &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
+
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control failed %d",
+ ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+ return 0;
+}
+
+const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
+ .device_run = m2mops_vdec_device_run,
+ .job_ready = m2mops_vdec_job_ready,
+ .job_abort = m2mops_vdec_job_abort,
+};
+
+static const struct vb2_ops mtk_vdec_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .buf_queue = vb2ops_vdec_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+};
+
+const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_qbuf = vidioc_vdec_qbuf,
+ .vidioc_dqbuf = vidioc_vdec_dqbuf,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_vdec_g_fmt,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_vdec_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_vdec_enum_fmt_vid_out_mplane,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_querycap = vidioc_vdec_querycap,
+ .vidioc_subscribe_event = vidioc_vdec_subscribe_evt,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_selection = vidioc_vdec_g_selection,
+ .vidioc_s_selection = vidioc_vdec_s_selection,
+
+ .vidioc_decoder_cmd = vidioc_decoder_cmd,
+ .vidioc_try_decoder_cmd = vidioc_try_decoder_cmd,
+};
+
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ int ret = 0;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+ src_vq->ops = &mtk_vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret) {
+ mtk_v4l2_err("Failed to initialize videobuf2 queue(output)");
+ return ret;
+ }
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+ dst_vq->ops = &mtk_vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ mtk_v4l2_err("Failed to initialize videobuf2 queue(capture)");
+ }
+
+ return ret;
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
new file mode 100644
index 000000000..dc4fc1df6
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_H_
+#define _MTK_VCODEC_DEC_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define VCODEC_CAPABILITY_4K_DISABLED 0x10
+#define VCODEC_DEC_4K_CODED_WIDTH 4096U
+#define VCODEC_DEC_4K_CODED_HEIGHT 2304U
+#define MTK_VDEC_MAX_W 2048U
+#define MTK_VDEC_MAX_H 1088U
+
+#define MTK_VDEC_IRQ_STATUS_DEC_SUCCESS 0x10000
+
+/**
+ * struct vdec_fb - decoder frame buffer
+ * @base_y : Y plane memory info
+ * @base_c : C plane memory info
+ * @status : frame buffer status (vdec_fb_status)
+ */
+struct vdec_fb {
+ struct mtk_vcodec_mem base_y;
+ struct mtk_vcodec_mem base_c;
+ unsigned int status;
+};
+
+/**
+ * struct mtk_video_dec_buf - Private data related to each VB2 buffer.
+ * @vb:	VB2 buffer
+ * @list:	link list
+ * @used:	Capture buffer contains decoded frame data and is kept in
+ *			the codec data structure
+ * @ready_to_display:	Capture buffer is not displayed yet
+ * @queued_in_vb2:	Capture buffer is queued in vb2
+ * @queued_in_v4l2:	Capture buffer is in the v4l2 driver, but not in the
+ *			vb2 queue yet
+ * @lastframe:		Input buffer is the last buffer - EOS
+ * @error:		An unrecoverable error occurred on this buffer.
+ * @frame_buffer:	Decode status and buffer information of the capture buffer
+ *
+ * Note: this status information helps us track and debug buffer state
+ */
+struct mtk_video_dec_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+
+ bool used;
+ bool ready_to_display;
+ bool queued_in_vb2;
+ bool queued_in_v4l2;
+ bool lastframe;
+ bool error;
+ struct vdec_fb frame_buffer;
+};
+
+extern const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops;
+extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
+
+
+/*
+ * mtk_vdec_lock/mtk_vdec_unlock are used by a ctx instance to
+ * get/release the lock before/after accessing the decoder hw.
+ * mtk_vdec_lock takes the decoder hw lock and sets curr_ctx to
+ * the ctx instance that holds the lock.
+ */
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx);
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+
+
+#endif /* _MTK_VCODEC_DEC_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
new file mode 100644
index 000000000..4334b7394
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+#define VDEC_HW_ACTIVE 0x10
+#define VDEC_IRQ_CFG 0x11
+#define VDEC_IRQ_CLR 0x10
+#define VDEC_IRQ_CFG_REG 0xa4
+
+module_param(mtk_v4l2_dbg_level, int, 0644);
+module_param(mtk_vcodec_dbg, bool, 0644);
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct mtk_vcodec_ctx *ctx)
+{
+ ctx->int_cond = 1;
+ wake_up_interruptible(&ctx->queue);
+}
+
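+/*
+ * Decode-done interrupt: read the status from the VDEC_MISC block, clear the
+ * interrupt and wake up the context waiting in the decode path.
+ */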
+static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ u32 cg_status = 0;
+ unsigned int dec_done_status = 0;
+ void __iomem *vdec_misc_addr = dev->reg_base[VDEC_MISC] +
+ VDEC_IRQ_CFG_REG;
+
+ ctx = mtk_vcodec_get_curr_ctx(dev);
+
+ /* check if HW active or not */
+ cg_status = readl(dev->reg_base[0]);
+ if ((cg_status & VDEC_HW_ACTIVE) != 0) {
+ mtk_v4l2_err("DEC ISR, VDEC active is not 0x0 (0x%08x)",
+ cg_status);
+ return IRQ_HANDLED;
+ }
+
+ dec_done_status = readl(vdec_misc_addr);
+ ctx->irq_status = dec_done_status;
+ if ((dec_done_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS) !=
+ MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+ return IRQ_HANDLED;
+
+ /* clear interrupt */
+ writel((readl(vdec_misc_addr) | VDEC_IRQ_CFG),
+ dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+ writel((readl(vdec_misc_addr) & ~VDEC_IRQ_CLR),
+ dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+
+ wake_up_ctx(ctx);
+
+ mtk_v4l2_debug(3,
+ "mtk_vcodec_dec_irq_handler :wake up ctx %d, dec_done_status=%x",
+ ctx->id, dec_done_status);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_vcodec_dec_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_err("Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+		mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
+				ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int fops_vcodec_open(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = NULL;
+ struct mtk_video_dec_buf *mtk_buf = NULL;
+ int ret = 0;
+ struct vb2_queue *src_vq;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ mtk_buf = kzalloc(sizeof(*mtk_buf), GFP_KERNEL);
+ if (!mtk_buf) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->dev_mutex);
+ ctx->empty_flush_buf = mtk_buf;
+ ctx->id = dev->id_counter++;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+ ctx->dev = dev;
+ init_waitqueue_head(&ctx->queue);
+ mutex_init(&ctx->lock);
+
+ ctx->type = MTK_INST_DECODER;
+ ret = mtk_vcodec_dec_ctrls_setup(ctx);
+ if (ret) {
+ mtk_v4l2_err("Failed to setup mt vcodec controls");
+ goto err_ctrls_setup;
+ }
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
+ &mtk_vcodec_dec_queue_init);
+ if (IS_ERR((__force void *)ctx->m2m_ctx)) {
+ ret = PTR_ERR((__force void *)ctx->m2m_ctx);
+ mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
+ ret);
+ goto err_m2m_ctx_init;
+ }
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ctx->empty_flush_buf->vb.vb2_buf.vb2_queue = src_vq;
+ ctx->empty_flush_buf->lastframe = true;
+ mtk_vcodec_dec_set_default_params(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh)) {
+ mtk_vcodec_dec_pw_on(&dev->pm);
+ /*
+ * vpu_load_firmware checks if it was loaded already and
+ * does nothing in that case
+ */
+ ret = vpu_load_firmware(dev->vpu_plat_dev);
+ if (ret < 0) {
+ /*
+			 * vpu_load_firmware returns 0 if the firmware was
+			 * downloaded successfully, otherwise it failed.
+ */
+ mtk_v4l2_err("vpu_load_firmware failed!");
+ goto err_load_fw;
+ }
+
+ dev->dec_capability =
+ vpu_get_vdec_hw_capa(dev->vpu_plat_dev);
+ mtk_v4l2_debug(0, "decoder capability %x", dev->dec_capability);
+ }
+
+ list_add(&ctx->list, &dev->ctx_list);
+
+ mutex_unlock(&dev->dev_mutex);
+ mtk_v4l2_debug(0, "%s decoder [%d]", dev_name(&dev->plat_dev->dev),
+ ctx->id);
+ return ret;
+
+ /* Deinit when failure occurred */
+err_load_fw:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_m2m_ctx_init:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_ctrls_setup:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx->empty_flush_buf);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int fops_vcodec_release(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+
+ mtk_v4l2_debug(0, "[%d] decoder", ctx->id);
+ mutex_lock(&dev->dev_mutex);
+
+ /*
+ * Call v4l2_m2m_ctx_release before mtk_vcodec_dec_release. First, it
+ * makes sure the worker thread is not running after vdec_if_deinit.
+ * Second, the decoder will be flushed and all the buffers will be
+ * returned in stop_streaming.
+ */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ mtk_vcodec_dec_release(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh))
+ mtk_vcodec_dec_pw_off(&dev->pm);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ list_del_init(&ctx->list);
+ kfree(ctx->empty_flush_buf);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_vcodec_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_vcodec_open,
+ .release = fops_vcodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mtk_vcodec_probe(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev;
+ struct video_device *vfd_dec;
+ struct resource *res;
+ int i, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev->ctx_list);
+ dev->plat_dev = pdev;
+
+ dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
+ if (dev->vpu_plat_dev == NULL) {
+		mtk_v4l2_err("[VPU] vpu device is not ready");
+ return -EPROBE_DEFER;
+ }
+
+ vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_dec_reset_handler,
+ dev, VPU_RST_DEC);
+
+ ret = mtk_vcodec_init_dec_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get mt vcodec clock source");
+ return ret;
+ }
+
+ for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "get memory resource failed.");
+ ret = -ENXIO;
+ goto err_res;
+ }
+ dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((__force void *)dev->reg_base[i])) {
+ ret = PTR_ERR((__force void *)dev->reg_base[i]);
+ goto err_res;
+ }
+ mtk_v4l2_debug(2, "reg[%d] base=%p", i, dev->reg_base[i]);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get irq resource");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ dev->dec_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, dev->dec_irq,
+ mtk_vcodec_dec_irq_handler, 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install dev->dec_irq %d (%d)",
+ dev->dec_irq,
+ ret);
+ goto err_res;
+ }
+
+ disable_irq(dev->dec_irq);
+ mutex_init(&dev->dec_mutex);
+ mutex_init(&dev->dev_mutex);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+ "[/MTK_V4L2_VDEC]");
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ mtk_v4l2_err("v4l2_device_register err=%d", ret);
+ goto err_res;
+ }
+
+ init_waitqueue_head(&dev->queue);
+
+ vfd_dec = video_device_alloc();
+ if (!vfd_dec) {
+ mtk_v4l2_err("Failed to allocate video device");
+ ret = -ENOMEM;
+ goto err_dec_alloc;
+ }
+ vfd_dec->fops = &mtk_vcodec_fops;
+ vfd_dec->ioctl_ops = &mtk_vdec_ioctl_ops;
+ vfd_dec->release = video_device_release;
+ vfd_dec->lock = &dev->dev_mutex;
+ vfd_dec->v4l2_dev = &dev->v4l2_dev;
+ vfd_dec->vfl_dir = VFL_DIR_M2M;
+ vfd_dec->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_STREAMING;
+
+ snprintf(vfd_dec->name, sizeof(vfd_dec->name), "%s",
+ MTK_VCODEC_DEC_NAME);
+ video_set_drvdata(vfd_dec, dev);
+ dev->vfd_dec = vfd_dec;
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev_dec = v4l2_m2m_init(&mtk_vdec_m2m_ops);
+ if (IS_ERR((__force void *)dev->m2m_dev_dec)) {
+ mtk_v4l2_err("Failed to init mem2mem dec device");
+ ret = PTR_ERR((__force void *)dev->m2m_dev_dec);
+ goto err_dec_mem_init;
+ }
+
+ dev->decode_workqueue =
+ alloc_ordered_workqueue(MTK_VCODEC_DEC_NAME,
+ WQ_MEM_RECLAIM | WQ_FREEZABLE);
+ if (!dev->decode_workqueue) {
+ mtk_v4l2_err("Failed to create decode workqueue");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ mtk_v4l2_err("Failed to register video device");
+ goto err_dec_reg;
+ }
+
+ mtk_v4l2_debug(0, "decoder registered as /dev/video%d",
+ vfd_dec->num);
+
+ return 0;
+
+err_dec_reg:
+ destroy_workqueue(dev->decode_workqueue);
+err_event_workq:
+ v4l2_m2m_release(dev->m2m_dev_dec);
+err_dec_mem_init:
+ video_unregister_device(vfd_dec);
+err_dec_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_res:
+ mtk_vcodec_release_dec_pm(dev);
+ return ret;
+}
+
+static const struct of_device_id mtk_vcodec_match[] = {
+ {.compatible = "mediatek,mt8173-vcodec-dec",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_vcodec_match);
+
+static int mtk_vcodec_dec_remove(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+
+ flush_workqueue(dev->decode_workqueue);
+ destroy_workqueue(dev->decode_workqueue);
+ if (dev->m2m_dev_dec)
+ v4l2_m2m_release(dev->m2m_dev_dec);
+
+ if (dev->vfd_dec)
+ video_unregister_device(dev->vfd_dec);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ mtk_vcodec_release_dec_pm(dev);
+ return 0;
+}
+
+static struct platform_driver mtk_vcodec_dec_driver = {
+ .probe = mtk_vcodec_probe,
+ .remove = mtk_vcodec_dec_remove,
+ .driver = {
+ .name = MTK_VCODEC_DEC_NAME,
+ .of_match_table = mtk_vcodec_match,
+ },
+};
+
+module_platform_driver(mtk_vcodec_dec_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec V4L2 decoder driver");
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
new file mode 100644
index 000000000..3f64119e8
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+ struct mtk_vcodec_pm *pm;
+ int ret = 0;
+
+ pdev = mtkdev->plat_dev;
+ pm = &mtkdev->pm;
+ pm->mtkdev = mtkdev;
+ node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0);
+ if (!node) {
+ mtk_v4l2_err("of_parse_phandle mediatek,larb fail!");
+ return -1;
+ }
+
+ pdev = of_find_device_by_node(node);
+ if (WARN_ON(!pdev)) {
+ of_node_put(node);
+ return -1;
+ }
+ pm->larbvdec = &pdev->dev;
+ pdev = mtkdev->plat_dev;
+ pm->dev = &pdev->dev;
+
+ pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll");
+ if (IS_ERR(pm->vcodecpll)) {
+ mtk_v4l2_err("devm_clk_get vcodecpll fail");
+ ret = PTR_ERR(pm->vcodecpll);
+ }
+
+ pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2");
+ if (IS_ERR(pm->univpll_d2)) {
+ mtk_v4l2_err("devm_clk_get univpll_d2 fail");
+ ret = PTR_ERR(pm->univpll_d2);
+ }
+
+ pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel");
+ if (IS_ERR(pm->clk_cci400_sel)) {
+ mtk_v4l2_err("devm_clk_get clk_cci400_sel fail");
+ ret = PTR_ERR(pm->clk_cci400_sel);
+ }
+
+ pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel");
+ if (IS_ERR(pm->vdec_sel)) {
+ mtk_v4l2_err("devm_clk_get vdec_sel fail");
+ ret = PTR_ERR(pm->vdec_sel);
+ }
+
+ pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll");
+ if (IS_ERR(pm->vdecpll)) {
+ mtk_v4l2_err("devm_clk_get vdecpll fail");
+ ret = PTR_ERR(pm->vdecpll);
+ }
+
+ pm->vencpll = devm_clk_get(&pdev->dev, "vencpll");
+ if (IS_ERR(pm->vencpll)) {
+ mtk_v4l2_err("devm_clk_get vencpll fail");
+ ret = PTR_ERR(pm->vencpll);
+ }
+
+ pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
+ if (IS_ERR(pm->venc_lt_sel)) {
+ mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
+ ret = PTR_ERR(pm->venc_lt_sel);
+ }
+
+ pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src");
+ if (IS_ERR(pm->vdec_bus_clk_src)) {
+ mtk_v4l2_err("devm_clk_get vdec_bus_clk_src");
+ ret = PTR_ERR(pm->vdec_bus_clk_src);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ return ret;
+}
+
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
+{
+ pm_runtime_disable(dev->pm.dev);
+ put_device(dev->pm.larbvdec);
+}
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
+}
+
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_put_sync(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_put_sync fail %d", ret);
+}
+
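+/*
+ * Enable the decoder clock tree: set vcodecpll to 1.482 GHz and vencpll to
+ * 800 MHz, enable the PLLs and muxes, reparent venc_lt_sel to
+ * vdec_bus_clk_src, clk_cci400_sel to univpll_d2 and vdec_sel to vdecpll,
+ * then power on the VDEC SMI larb.
+ */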
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000);
+ if (ret)
+ mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret);
+
+ ret = clk_set_rate(pm->vencpll, 800 * 1000000);
+ if (ret)
+ mtk_v4l2_err("clk_set_rate vencpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vcodecpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vencpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vdec_bus_clk_src);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->venc_lt_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->univpll_d2);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret);
+
+ ret = clk_prepare_enable(pm->clk_cci400_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->vdecpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vdec_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->vdec_sel, pm->vdecpll);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret);
+
+ ret = mtk_smi_larb_get(pm->larbvdec);
+ if (ret)
+ mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret);
+
+}
+
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
+{
+ mtk_smi_larb_put(pm->larbvdec);
+ clk_disable_unprepare(pm->vdec_sel);
+ clk_disable_unprepare(pm->vdecpll);
+ clk_disable_unprepare(pm->univpll_d2);
+ clk_disable_unprepare(pm->clk_cci400_sel);
+ clk_disable_unprepare(pm->venc_lt_sel);
+ clk_disable_unprepare(pm->vdec_bus_clk_src);
+ clk_disable_unprepare(pm->vencpll);
+ clk_disable_unprepare(pm->vcodecpll);
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
new file mode 100644
index 000000000..86a782535
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_PM_H_
+#define _MTK_VCODEC_DEC_PM_H_
+
+#include "mtk_vcodec_drv.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
+
+#endif /* _MTK_VCODEC_DEC_PM_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
new file mode 100644
index 000000000..3cffb381a
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -0,0 +1,388 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_DRV_H_
+#define _MTK_VCODEC_DRV_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include "mtk_vcodec_util.h"
+
+#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
+#define MTK_VCODEC_DEC_NAME "mtk-vcodec-dec"
+#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
+#define MTK_PLATFORM_STR "platform:mt8173"
+
+#define MTK_VCODEC_MAX_PLANES 3
+#define MTK_V4L2_BENCHMARK 0
+#define WAIT_INTR_TIMEOUT_MS 1000
+
+/**
+ * enum mtk_hw_reg_idx - MTK hw register base index
+ */
+enum mtk_hw_reg_idx {
+ VDEC_SYS,
+ VDEC_MISC,
+ VDEC_LD,
+ VDEC_TOP,
+ VDEC_CM,
+ VDEC_AD,
+ VDEC_AV,
+ VDEC_PP,
+ VDEC_HWD,
+ VDEC_HWQ,
+ VDEC_HWB,
+ VDEC_HWG,
+ NUM_MAX_VDEC_REG_BASE,
+ /* h264 encoder */
+ VENC_SYS = NUM_MAX_VDEC_REG_BASE,
+ /* vp8 encoder */
+ VENC_LT_SYS,
+ NUM_MAX_VCODEC_REG_BASE
+};
+
+/**
+ * enum mtk_instance_type - The type of an MTK Vcodec instance.
+ */
+enum mtk_instance_type {
+ MTK_INST_DECODER = 0,
+ MTK_INST_ENCODER = 1,
+};
+
+/**
+ * enum mtk_instance_state - The state of an MTK Vcodec instance.
+ * @MTK_STATE_FREE - default state when instance is created
+ * @MTK_STATE_INIT - vcodec instance is initialized
+ * @MTK_STATE_HEADER - vdec had sps/pps header parsed or venc
+ * had sps/pps header encoded
+ * @MTK_STATE_FLUSH - vdec is flushing. Only used by decoder
+ * @MTK_STATE_ABORT - vcodec should be aborted
+ */
+enum mtk_instance_state {
+ MTK_STATE_FREE = 0,
+ MTK_STATE_INIT = 1,
+ MTK_STATE_HEADER = 2,
+ MTK_STATE_FLUSH = 3,
+ MTK_STATE_ABORT = 4,
+};
+
+/**
+ * enum mtk_encode_param - General encoding parameter type flags
+ */
+enum mtk_encode_param {
+ MTK_ENCODE_PARAM_NONE = 0,
+ MTK_ENCODE_PARAM_BITRATE = (1 << 0),
+ MTK_ENCODE_PARAM_FRAMERATE = (1 << 1),
+ MTK_ENCODE_PARAM_INTRA_PERIOD = (1 << 2),
+ MTK_ENCODE_PARAM_FORCE_INTRA = (1 << 3),
+ MTK_ENCODE_PARAM_GOP_SIZE = (1 << 4),
+};
+
+enum mtk_fmt_type {
+ MTK_FMT_DEC = 0,
+ MTK_FMT_ENC = 1,
+ MTK_FMT_FRAME = 2,
+};
+
+/**
+ * struct mtk_video_fmt - Structure used to store information about pixelformats
+ */
+struct mtk_video_fmt {
+ u32 fourcc;
+ enum mtk_fmt_type type;
+ u32 num_planes;
+};
+
+/**
+ * struct mtk_codec_framesizes - Structure used to store information about
+ * framesizes
+ */
+struct mtk_codec_framesizes {
+ u32 fourcc;
+ struct v4l2_frmsize_stepwise stepwise;
+};
+
+/**
+ * enum mtk_q_type - Type of queue
+ */
+enum mtk_q_type {
+ MTK_Q_DATA_SRC = 0,
+ MTK_Q_DATA_DST = 1,
+};
+
+/**
+ * struct mtk_q_data - Structure used to store information about queue
+ */
+struct mtk_q_data {
+ unsigned int visible_width;
+ unsigned int visible_height;
+ unsigned int coded_width;
+ unsigned int coded_height;
+ enum v4l2_field field;
+ unsigned int bytesperline[MTK_VCODEC_MAX_PLANES];
+ unsigned int sizeimage[MTK_VCODEC_MAX_PLANES];
+ struct mtk_video_fmt *fmt;
+};
+
+/**
+ * struct mtk_enc_params - General encoding parameters
+ * @bitrate: target bitrate in bits per second
+ * @num_b_frame: number of b frames between p-frame
+ * @rc_frame: frame based rate control
+ * @rc_mb: macroblock based rate control
+ * @seq_hdr_mode: H.264 sequence header is encoded separately or joined
+ * with the first frame
+ * @intra_period: I frame period
+ * @gop_size: group of picture size, it's used as the intra frame period
+ * @framerate_num:	  frame rate numerator. ex: framerate_num=30 and
+ *			  framerate_denom=1 means FPS is 30
+ * @framerate_denom:	  frame rate denominator. ex: framerate_num=30 and
+ *			  framerate_denom=1 means FPS is 30
+ * @h264_max_qp: Max value for H.264 quantization parameter
+ * @h264_profile: V4L2 defined H.264 profile
+ * @h264_level: V4L2 defined H.264 level
+ * @force_intra: force/insert intra frame
+ */
+struct mtk_enc_params {
+ unsigned int bitrate;
+ unsigned int num_b_frame;
+ unsigned int rc_frame;
+ unsigned int rc_mb;
+ unsigned int seq_hdr_mode;
+ unsigned int intra_period;
+ unsigned int gop_size;
+ unsigned int framerate_num;
+ unsigned int framerate_denom;
+ unsigned int h264_max_qp;
+ unsigned int h264_profile;
+ unsigned int h264_level;
+ unsigned int force_intra;
+};
+
+/**
+ * struct mtk_vcodec_pm - Power management data structure
+ */
+struct mtk_vcodec_pm {
+ struct clk *vdec_bus_clk_src;
+ struct clk *vencpll;
+
+ struct clk *vcodecpll;
+ struct clk *univpll_d2;
+ struct clk *clk_cci400_sel;
+ struct clk *vdecpll;
+ struct clk *vdec_sel;
+ struct clk *vencpll_d2;
+ struct clk *venc_sel;
+ struct clk *univpll1_d2;
+ struct clk *venc_lt_sel;
+ struct device *larbvdec;
+ struct device *larbvenc;
+ struct device *larbvenclt;
+ struct device *dev;
+ struct mtk_vcodec_dev *mtkdev;
+};
+
+/**
+ * struct vdec_pic_info - picture size information
+ * @pic_w: picture width
+ * @pic_h: picture height
+ * @buf_w: picture buffer width (64 aligned up from pic_w)
+ * @buf_h: picture buffer height (64 aligned up from pic_h)
+ * @y_bs_sz: Y bitstream size
+ * @c_bs_sz: CbCr bitstream size
+ * @y_len_sz: additional size required to store decompress information for y
+ * plane
+ * @c_len_sz: additional size required to store decompress information for cbcr
+ * plane
+ * E.g. if the picture size is 176x144,
+ * the buffer size will be aligned up to 176x160.
+ */
+struct vdec_pic_info {
+ unsigned int pic_w;
+ unsigned int pic_h;
+ unsigned int buf_w;
+ unsigned int buf_h;
+ unsigned int y_bs_sz;
+ unsigned int c_bs_sz;
+ unsigned int y_len_sz;
+ unsigned int c_len_sz;
+};
+
+/**
+ * struct mtk_vcodec_ctx - Context (instance) private data.
+ *
+ * @type: type of the instance - decoder or encoder
+ * @dev: pointer to the mtk_vcodec_dev of the device
+ * @list: link to ctx_list of mtk_vcodec_dev
+ * @fh: struct v4l2_fh
+ * @m2m_ctx: pointer to the v4l2_m2m_ctx of the context
+ * @q_data: store information of input and output queue
+ * of the context
+ * @id: index of the context that this structure describes
+ * @state: state of the context
+ * @param_change: bitmask indicating which encode parameters changed
+ * @enc_params: encoding parameters
+ * @dec_if: hooked decoder driver interface
+ * @enc_if: hooked encoder driver interface
+ * @drv_handle: driver handle for specific decode/encode instance
+ *
+ * @picinfo: store picture info after header parsing
+ * @dpb_size: store dpb count after header parsing
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of the last interrupt
+ * @queue: waitqueue that can be used to wait for this context to
+ * finish
+ * @irq_status: irq status
+ *
+ * @ctrl_hdl: handler for v4l2 framework
+ * @decode_work: worker for the decoding
+ * @encode_work: worker for the encoding
+ * @last_decoded_picinfo: picture information obtained from the latest decode
+ * @empty_flush_buf: a fake size-0 capture buffer that indicates flush
+ *
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @lock: protects variables accessed by the V4L2 threads and the worker
+ * thread, such as mtk_video_dec_buf.
+ */
+struct mtk_vcodec_ctx {
+ enum mtk_instance_type type;
+ struct mtk_vcodec_dev *dev;
+ struct list_head list;
+
+ struct v4l2_fh fh;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct mtk_q_data q_data[2];
+ int id;
+ enum mtk_instance_state state;
+ enum mtk_encode_param param_change;
+ struct mtk_enc_params enc_params;
+
+ const struct vdec_common_if *dec_if;
+ const struct venc_common_if *enc_if;
+ unsigned long drv_handle;
+
+ struct vdec_pic_info picinfo;
+ int dpb_size;
+
+ int int_cond;
+ int int_type;
+ wait_queue_head_t queue;
+ unsigned int irq_status;
+
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct work_struct decode_work;
+ struct work_struct encode_work;
+ struct vdec_pic_info last_decoded_picinfo;
+ struct mtk_video_dec_buf *empty_flush_buf;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+
+ int decoded_frame_cnt;
+ struct mutex lock;
+
+};
+
+/**
+ * struct mtk_vcodec_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_dec: Video device for decoder
+ * @vfd_enc: Video device for encoder.
+ *
+ * @m2m_dev_dec: m2m device for decoder
+ * @m2m_dev_enc: m2m device for encoder.
+ * @plat_dev: platform device
+ * @vpu_plat_dev: mtk vpu platform device
+ * @ctx_list: list of struct mtk_vcodec_ctx
+ * @irqlock: protect data access by irq handler and work thread
+ * @curr_ctx: The context that is waiting for codec hardware
+ *
+ * @reg_base: Mapped address of MTK Vcodec registers.
+ *
+ * @id_counter: used to identify current opened instance
+ *
+ * @encode_workqueue: encode work queue
+ *
+ * @int_cond: used to identify whether an interrupt condition happened
+ * @int_type: used to identify what kind of interrupt condition happened
+ * @dev_mutex: video_device lock
+ * @queue: waitqueue for waiting for completion of device commands
+ *
+ * @dec_irq: decoder irq resource
+ * @enc_irq: h264 encoder irq resource
+ * @enc_lt_irq: vp8 encoder irq resource
+ *
+ * @dec_mutex: decoder hardware lock
+ * @enc_mutex: encoder hardware lock.
+ *
+ * @pm: power management control
+ * @dec_capability: used to identify decode capability, ex: 4k
+ * @enc_capability: used to identify encode capability
+ */
+struct mtk_vcodec_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_dec;
+ struct video_device *vfd_enc;
+
+ struct v4l2_m2m_dev *m2m_dev_dec;
+ struct v4l2_m2m_dev *m2m_dev_enc;
+ struct platform_device *plat_dev;
+ struct platform_device *vpu_plat_dev;
+ struct list_head ctx_list;
+ spinlock_t irqlock;
+ struct mtk_vcodec_ctx *curr_ctx;
+ void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+
+ unsigned long id_counter;
+
+ struct workqueue_struct *decode_workqueue;
+ struct workqueue_struct *encode_workqueue;
+ int int_cond;
+ int int_type;
+ struct mutex dev_mutex;
+ wait_queue_head_t queue;
+
+ int dec_irq;
+ int enc_irq;
+ int enc_lt_irq;
+
+ struct mutex dec_mutex;
+ struct mutex enc_mutex;
+
+ struct mtk_vcodec_pm pm;
+ unsigned int dec_capability;
+ unsigned int enc_capability;
+};
+
+static inline struct mtk_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_vcodec_ctx, fh);
+}
+
+static inline struct mtk_vcodec_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_vcodec_ctx, ctrl_hdl);
+}
+
+#endif /* _MTK_VCODEC_DRV_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
new file mode 100644
index 000000000..6ad408514
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -0,0 +1,1355 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_enc.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "venc_drv_if.h"
+
+#define MTK_VENC_MIN_W 160U
+#define MTK_VENC_MIN_H 128U
+#define MTK_VENC_MAX_W 1920U
+#define MTK_VENC_MAX_H 1088U
+#define DFT_CFG_WIDTH MTK_VENC_MIN_W
+#define DFT_CFG_HEIGHT MTK_VENC_MIN_H
+#define MTK_MAX_CTRLS_HINT 20
+#define OUT_FMT_IDX 0
+#define CAP_FMT_IDX 4
+
+
+static void mtk_venc_worker(struct work_struct *work);
+
+static struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_ENC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_ENC,
+ .num_planes = 1,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+
+static const struct mtk_codec_framesizes mtk_venc_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
+ MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
+ MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_venc_framesizes)
+
+static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mtk_enc_params *p = &ctx->enc_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_BITRATE val = %d",
+ ctrl->val);
+ p->bitrate = ctrl->val;
+ ctx->param_change |= MTK_ENCODE_PARAM_BITRATE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_B_FRAMES val = %d",
+ ctrl->val);
+ p->num_b_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE val = %d",
+ ctrl->val);
+ p->rc_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_MAX_QP val = %d",
+ ctrl->val);
+ p->h264_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_HEADER_MODE val = %d",
+ ctrl->val);
+ p->seq_hdr_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE val = %d",
+ ctrl->val);
+ p->rc_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_PROFILE val = %d",
+ ctrl->val);
+ p->h264_profile = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_LEVEL val = %d",
+ ctrl->val);
+ p->h264_level = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_I_PERIOD val = %d",
+ ctrl->val);
+ p->intra_period = ctrl->val;
+ ctx->param_change |= MTK_ENCODE_PARAM_INTRA_PERIOD;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_GOP_SIZE val = %d",
+ ctrl->val);
+ p->gop_size = ctrl->val;
+ ctx->param_change |= MTK_ENCODE_PARAM_GOP_SIZE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME");
+ p->force_intra = 1;
+ ctx->param_change |= MTK_ENCODE_PARAM_FORCE_INTRA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_enc_ctrl_ops = {
+ .s_ctrl = vidioc_venc_s_ctrl,
+};
+
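+/* Enumerate supported formats for one side of the mem2mem device: raw
+ * frame formats (MTK_FMT_FRAME) on the OUTPUT queue and compressed
+ * bitstream formats (MTK_FMT_ENC) on the CAPTURE queue.
+ */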
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+{
+ struct mtk_video_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (output_queue && mtk_video_formats[i].type != MTK_FMT_FRAME)
+ continue;
+ if (!output_queue && mtk_video_formats[i].type != MTK_FMT_ENC)
+ continue;
+
+ if (j == f->index) {
+ fmt = &mtk_video_formats[i];
+ f->pixelformat = fmt->fourcc;
+ memset(f->reserved, 0, sizeof(f->reserved));
+ return 0;
+ }
+ ++j;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ int i = 0;
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
+ if (fsize->pixel_format != mtk_venc_framesizes[i].fourcc)
+ continue;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = mtk_venc_framesizes[i].stepwise;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true);
+}
+
+static int vidioc_venc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, MTK_VCODEC_ENC_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
+ strlcpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vidioc_venc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ ctx->enc_params.framerate_num =
+ a->parm.output.timeperframe.denominator;
+ ctx->enc_params.framerate_denom =
+ a->parm.output.timeperframe.numerator;
+ ctx->param_change |= MTK_ENCODE_PARAM_FRAMERATE;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+
+ return 0;
+}
+
+static int vidioc_venc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe.denominator =
+ ctx->enc_params.framerate_num;
+ a->parm.output.timeperframe.numerator =
+ ctx->enc_params.framerate_denom;
+
+ return 0;
+}
+
+static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[MTK_Q_DATA_SRC];
+
+ return &ctx->q_data[MTK_Q_DATA_DST];
+}
+
+static struct mtk_video_fmt *mtk_venc_find_format(struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+/* V4L2 specification suggests the driver corrects the format struct if any of
+ * the dimensions is unsupported
+ */
+static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ int i;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pix_fmt_mp->num_planes = 1;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ int tmp_w, tmp_h;
+
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W);
+
+ /* Find the next closest rectangle with the width aligned to 16,
+ * the height aligned to 32 and the overall size aligned to 64.
+ */
+ tmp_w = pix_fmt_mp->width;
+ tmp_h = pix_fmt_mp->height;
+ v4l_bound_align_image(&pix_fmt_mp->width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W, 4,
+ &pix_fmt_mp->height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H, 5, 6);
+
+ if (pix_fmt_mp->width < tmp_w &&
+ (pix_fmt_mp->width + 16) <= MTK_VENC_MAX_W)
+ pix_fmt_mp->width += 16;
+ if (pix_fmt_mp->height < tmp_h &&
+ (pix_fmt_mp->height + 32) <= MTK_VENC_MAX_H)
+ pix_fmt_mp->height += 32;
+
+ mtk_v4l2_debug(0,
+ "before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d %d",
+ tmp_w, tmp_h, pix_fmt_mp->width,
+ pix_fmt_mp->height,
+ pix_fmt_mp->plane_fmt[0].sizeimage,
+ pix_fmt_mp->plane_fmt[1].sizeimage);
+
+ pix_fmt_mp->num_planes = fmt->num_planes;
+ pix_fmt_mp->plane_fmt[0].sizeimage =
+ pix_fmt_mp->width * pix_fmt_mp->height +
+ ((ALIGN(pix_fmt_mp->width, 16) * 2) * 16);
+ pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
+
+ if (pix_fmt_mp->num_planes == 2) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2 +
+ (ALIGN(pix_fmt_mp->width, 16) * 16);
+ pix_fmt_mp->plane_fmt[2].sizeimage = 0;
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->width;
+ pix_fmt_mp->plane_fmt[2].bytesperline = 0;
+ } else if (pix_fmt_mp->num_planes == 3) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ pix_fmt_mp->plane_fmt[2].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 4 +
+ ((ALIGN(pix_fmt_mp->width, 16) / 2) * 16);
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->plane_fmt[2].bytesperline =
+ pix_fmt_mp->width / 2;
+ }
+ }
+
+ for (i = 0; i < pix_fmt_mp->num_planes; i++)
+ memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
+ sizeof(pix_fmt_mp->plane_fmt[0].reserved));
+
+ pix_fmt_mp->flags = 0;
+ memset(&pix_fmt_mp->reserved, 0x0,
+ sizeof(pix_fmt_mp->reserved));
+
+ return 0;
+}
+
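+/* Translate the negotiated source pixel format and the current encode
+ * parameters of the context into a venc_enc_param, which the caller
+ * passes to venc_if_set_param().
+ */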
+static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx,
+ struct venc_enc_param *param)
+{
+ struct mtk_q_data *q_data_src = &ctx->q_data[MTK_Q_DATA_SRC];
+ struct mtk_enc_params *enc_params = &ctx->enc_params;
+
+ switch (q_data_src->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_I420;
+ break;
+ case V4L2_PIX_FMT_YVU420M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_YV12;
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_NV21;
+ break;
+ default:
+ mtk_v4l2_err("Unsupport fourcc =%d", q_data_src->fmt->fourcc);
+ break;
+ }
+ param->h264_profile = enc_params->h264_profile;
+ param->h264_level = enc_params->h264_level;
+
+ /* Config visible resolution */
+ param->width = q_data_src->visible_width;
+ param->height = q_data_src->visible_height;
+ /* Config coded resolution */
+ param->buf_width = q_data_src->coded_width;
+ param->buf_height = q_data_src->coded_height;
+ param->frm_rate = enc_params->framerate_num /
+ enc_params->framerate_denom;
+ param->intra_period = enc_params->intra_period;
+ param->gop_size = enc_params->gop_size;
+ param->bitrate = enc_params->bitrate;
+
+ mtk_v4l2_debug(0,
+ "fmt 0x%x, P/L %d/%d, w/h %d/%d, buf %d/%d, fps/bps %d/%d, gop %d, i_period %d",
+ param->input_yuv_fmt, param->h264_profile,
+ param->h264_level, param->width, param->height,
+ param->buf_width, param->buf_height,
+ param->frm_rate, param->bitrate,
+ param->gop_size, param->intra_period);
+}
+
+static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+ int i, ret;
+ struct mtk_video_fmt *fmt;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq) {
+ mtk_v4l2_err("fail to get vq");
+ return -EINVAL;
+ }
+
+ if (vb2_is_busy(vq)) {
+ mtk_v4l2_err("queue busy");
+ return -EBUSY;
+ }
+
+ q_data = mtk_venc_get_q_data(ctx, f->type);
+ if (!q_data) {
+ mtk_v4l2_err("fail to get q data");
+ return -EINVAL;
+ }
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+
+ q_data->fmt = fmt;
+ ret = vidioc_try_fmt(f, q_data->fmt);
+ if (ret)
+ return ret;
+
+ q_data->coded_width = f->fmt.pix_mp.width;
+ q_data->coded_height = f->fmt.pix_mp.height;
+ q_data->field = f->fmt.pix_mp.field;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ struct v4l2_plane_pix_format *plane_fmt;
+
+ plane_fmt = &f->fmt.pix_mp.plane_fmt[i];
+ q_data->bytesperline[i] = plane_fmt->bytesperline;
+ q_data->sizeimage[i] = plane_fmt->sizeimage;
+ }
+
+ if (ctx->state == MTK_STATE_FREE) {
+ ret = venc_if_init(ctx, q_data->fmt->fourcc);
+ if (ret) {
+ mtk_v4l2_err("venc_if_init failed=%d, codec type=%x",
+ ret, q_data->fmt->fourcc);
+ return -EBUSY;
+ }
+ ctx->state = MTK_STATE_INIT;
+ }
+
+ return 0;
+}
+
+static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+ int ret, i;
+ struct mtk_video_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq) {
+ mtk_v4l2_err("fail to get vq");
+ return -EINVAL;
+ }
+
+ if (vb2_is_busy(vq)) {
+ mtk_v4l2_err("queue busy");
+ return -EBUSY;
+ }
+
+ q_data = mtk_venc_get_q_data(ctx, f->type);
+ if (!q_data) {
+ mtk_v4l2_err("fail to get q data");
+ return -EINVAL;
+ }
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W);
+
+ q_data->visible_width = f->fmt.pix_mp.width;
+ q_data->visible_height = f->fmt.pix_mp.height;
+ q_data->fmt = fmt;
+ ret = vidioc_try_fmt(f, q_data->fmt);
+ if (ret)
+ return ret;
+
+ q_data->coded_width = f->fmt.pix_mp.width;
+ q_data->coded_height = f->fmt.pix_mp.height;
+
+ q_data->field = f->fmt.pix_mp.field;
+ ctx->colorspace = f->fmt.pix_mp.colorspace;
+ ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->quantization = f->fmt.pix_mp.quantization;
+ ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ struct v4l2_plane_pix_format *plane_fmt;
+
+ plane_fmt = &f->fmt.pix_mp.plane_fmt[i];
+ q_data->bytesperline[i] = plane_fmt->bytesperline;
+ q_data->sizeimage[i] = plane_fmt->sizeimage;
+ }
+
+ return 0;
+}
+
+static int vidioc_venc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = mtk_venc_get_q_data(ctx, f->type);
+
+ pix->width = q_data->coded_width;
+ pix->height = q_data->coded_height;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->field = q_data->field;
+ pix->num_planes = q_data->fmt->num_planes;
+ for (i = 0; i < pix->num_planes; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ memset(&(pix->plane_fmt[i].reserved[0]), 0x0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+
+ pix->flags = 0;
+ pix->colorspace = ctx->colorspace;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ pix->xfer_func = ctx->xfer_func;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+ f->fmt.pix_mp.colorspace = ctx->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix_mp.quantization = ctx->quantization;
+ f->fmt.pix_mp.xfer_func = ctx->xfer_func;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+ if (!f->fmt.pix_mp.colorspace) {
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_venc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_q_data *q_data;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ q_data = mtk_venc_get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = q_data->coded_width;
+ s->r.height = q_data->coded_height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_venc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_q_data *q_data;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ q_data = mtk_venc_get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* Only support crop from (0,0) */
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = min(s->r.width, q_data->coded_width);
+ s->r.height = min(s->r.height, q_data->coded_height);
+ q_data->visible_width = s->r.width;
+ q_data->visible_height = s->r.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_venc_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_venc_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = vidioc_venc_qbuf,
+ .vidioc_dqbuf = vidioc_venc_dqbuf,
+
+ .vidioc_querycap = vidioc_venc_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_s_parm = vidioc_venc_s_parm,
+ .vidioc_g_parm = vidioc_venc_g_parm,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_venc_s_fmt_cap,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_venc_s_fmt_out,
+
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_venc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_venc_g_fmt,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_g_selection = vidioc_venc_g_selection,
+ .vidioc_s_selection = vidioc_venc_s_selection,
+};
+
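+/* vb2 queue_setup: report the number of planes and minimum plane sizes
+ * derived from the currently set format, or validate the caller-provided
+ * plane sizes against them.
+ */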
+static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_q_data *q_data;
+ unsigned int i;
+
+ q_data = mtk_venc_get_q_data(ctx, vq->type);
+
+ if (q_data == NULL)
+ return -EINVAL;
+
+ if (*nplanes) {
+ for (i = 0; i < *nplanes; i++)
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+ } else {
+ *nplanes = q_data->fmt->num_planes;
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+ }
+
+ return 0;
+}
+
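+/* Reject buffers whose planes are smaller than the sizeimage required by
+ * the currently set format.
+ */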
+static int vb2ops_venc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_q_data *q_data;
+ int i;
+
+ q_data = mtk_venc_get_q_data(ctx, vb->vb2_queue->type);
+
+ for (i = 0; i < q_data->fmt->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
+ i, vb2_plane_size(vb, i),
+ q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
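+/* When an OUTPUT (raw frame) buffer is queued, latch any pending runtime
+ * parameter changes onto that buffer so the worker applies them right
+ * before this frame is encoded.
+ */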
+static void vb2ops_venc_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 =
+ container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+
+ struct mtk_video_enc_buf *mtk_buf =
+ container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+
+ if ((vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ (ctx->param_change != MTK_ENCODE_PARAM_NONE)) {
+ mtk_v4l2_debug(1, "[%d] Before id=%d encode parameter change %x",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ ctx->param_change);
+ mtk_buf->param_change = ctx->param_change;
+ mtk_buf->enc_params = ctx->enc_params;
+ ctx->param_change = MTK_ENCODE_PARAM_NONE;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct venc_enc_param param;
+ int ret;
+ int i;
+
+ /* Once the state has turned into MTK_STATE_ABORT, stop_streaming
+ * is needed to clear it.
+ */
+ if ((ctx->state == MTK_STATE_ABORT) || (ctx->state == MTK_STATE_FREE)) {
+ ret = -EIO;
+ goto err_set_param;
+ }
+
+ /* Do the initialization only once start_streaming has been called on both queues */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (!vb2_start_streaming_called(&ctx->m2m_ctx->cap_q_ctx.q))
+ return 0;
+ } else {
+ if (!vb2_start_streaming_called(&ctx->m2m_ctx->out_q_ctx.q))
+ return 0;
+ }
+
+ mtk_venc_set_param(ctx, &param);
+ ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
+ if (ret) {
+ mtk_v4l2_err("venc_if_set_param failed=%d", ret);
+ ctx->state = MTK_STATE_ABORT;
+ goto err_set_param;
+ }
+ ctx->param_change = MTK_ENCODE_PARAM_NONE;
+
+ if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) &&
+ (ctx->enc_params.seq_hdr_mode !=
+ V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)) {
+ ret = venc_if_set_param(ctx,
+ VENC_SET_PARAM_PREPEND_HEADER,
+ NULL);
+ if (ret) {
+ mtk_v4l2_err("venc_if_set_param failed=%d", ret);
+ ctx->state = MTK_STATE_ABORT;
+ goto err_set_param;
+ }
+ ctx->state = MTK_STATE_HEADER;
+ }
+
+ return 0;
+
+err_set_param:
+ for (i = 0; i < q->num_buffers; ++i) {
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+ mtk_v4l2_debug(0, "[%d] id=%d, type=%d, %d -> VB2_BUF_STATE_QUEUED",
+ ctx->id, i, q->type,
+ (int)q->bufs[i]->state);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+
+ return ret;
+}
+
+static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_buffer *src_buf, *dst_buf;
+ int ret;
+
+ mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+ dst_buf->planes[0].bytesused = 0;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ }
+ } else {
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ }
+
+ if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) ||
+ (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) {
+ mtk_v4l2_debug(1, "[%d]-> q type %d out=%d cap=%d",
+ ctx->id, q->type,
+ vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q),
+ vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q));
+ return;
+ }
+
+ /* Release the encoder if both streams are stopped. */
+ ret = venc_if_deinit(ctx);
+ if (ret)
+ mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+ ctx->state = MTK_STATE_FREE;
+}
+
+static const struct vb2_ops mtk_venc_vb2_ops = {
+ .queue_setup = vb2ops_venc_queue_setup,
+ .buf_prepare = vb2ops_venc_buf_prepare,
+ .buf_queue = vb2ops_venc_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_venc_start_streaming,
+ .stop_streaming = vb2ops_venc_stop_streaming,
+};
+
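+/* Encode the stream headers (H.264 SPS/PPS) into the next capture
+ * buffer. The buffer is removed from the capture queue and returned to
+ * userspace as soon as the header has been generated.
+ */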
+static int mtk_venc_encode_header(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ int ret;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+ struct mtk_vcodec_mem bs_buf;
+ struct venc_done_result enc_result;
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (!dst_buf) {
+ mtk_v4l2_debug(1, "No dst buffer");
+ return -EINVAL;
+ }
+
+ bs_buf.va = vb2_plane_vaddr(dst_buf, 0);
+ bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ bs_buf.size = (size_t)dst_buf->planes[0].length;
+
+ mtk_v4l2_debug(1,
+ "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
+ ctx->id,
+ dst_buf->index, bs_buf.va,
+ (u64)bs_buf.dma_addr,
+ bs_buf.size);
+
+ ret = venc_if_encode(ctx,
+ VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
+ NULL, &bs_buf, &enc_result);
+
+ if (ret) {
+ dst_buf->planes[0].bytesused = 0;
+ ctx->state = MTK_STATE_ABORT;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ mtk_v4l2_err("venc_if_encode failed=%d", ret);
+ return -EINVAL;
+ }
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf) {
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+ } else {
+ mtk_v4l2_err("No timestamp for the header buffer.");
+ }
+
+ ctx->state = MTK_STATE_HEADER;
+ dst_buf->planes[0].bytesused = enc_result.bs_size;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), VB2_BUF_STATE_DONE);
+
+ return 0;
+}
+
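+/* Apply the runtime parameter changes latched onto the next source
+ * buffer (bitrate, frame rate, GOP size, forced intra frame) through the
+ * encoder interface before that buffer is encoded.
+ */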
+static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
+{
+ struct venc_enc_param enc_prm;
+ struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ struct vb2_v4l2_buffer *vb2_v4l2 =
+ container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ struct mtk_video_enc_buf *mtk_buf =
+ container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+
+ int ret = 0;
+
+ memset(&enc_prm, 0, sizeof(enc_prm));
+ if (mtk_buf->param_change == MTK_ENCODE_PARAM_NONE)
+ return 0;
+
+ if (mtk_buf->param_change & MTK_ENCODE_PARAM_BITRATE) {
+ enc_prm.bitrate = mtk_buf->enc_params.bitrate;
+ mtk_v4l2_debug(1, "[%d] id=%d, change param br=%d",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ enc_prm.bitrate);
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_ADJUST_BITRATE,
+ &enc_prm);
+ }
+ if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FRAMERATE) {
+ enc_prm.frm_rate = mtk_buf->enc_params.framerate_num /
+ mtk_buf->enc_params.framerate_denom;
+ mtk_v4l2_debug(1, "[%d] id=%d, change param fr=%d",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ enc_prm.frm_rate);
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_ADJUST_FRAMERATE,
+ &enc_prm);
+ }
+ if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_GOP_SIZE) {
+ enc_prm.gop_size = mtk_buf->enc_params.gop_size;
+ mtk_v4l2_debug(1, "change param intra period=%d",
+ enc_prm.gop_size);
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_GOP_SIZE,
+ &enc_prm);
+ }
+ if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FORCE_INTRA) {
+ mtk_v4l2_debug(1, "[%d] id=%d, change param force I=%d",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ mtk_buf->enc_params.force_intra);
+ if (mtk_buf->enc_params.force_intra)
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_FORCE_INTRA,
+ NULL);
+ }
+
+ mtk_buf->param_change = MTK_ENCODE_PARAM_NONE;
+
+ if (ret) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_err("venc_if_set_param %d failed=%d",
+ mtk_buf->param_change, ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * v4l2_m2m_streamoff() holds dev_mutex and waits for mtk_venc_worker()
+ * to call v4l2_m2m_job_finish().
+ * If mtk_venc_worker() tries to acquire dev_mutex, it will deadlock.
+ * So this function must not try to acquire dev->dev_mutex.
+ * This means v4l2 ioctls and mtk_venc_worker() can run at the same time.
+ * mtk_venc_worker() should be carefully implemented to avoid bugs.
+ */
+static void mtk_venc_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
+ encode_work);
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct venc_frm_buf frm_buf;
+ struct mtk_vcodec_mem bs_buf;
+ struct venc_done_result enc_result;
+ int ret, i;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+
+ /* Check dst_buf: it may already have been removed in device_run
+ * to store the encoded header, so check for it here and call
+ * job_finish to prevent recursion.
+ */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (!dst_buf) {
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
+ return;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ memset(&frm_buf, 0, sizeof(frm_buf));
+ for (i = 0; i < src_buf->num_planes ; i++) {
+ frm_buf.fb_addr[i].va = vb2_plane_vaddr(src_buf, i);
+ frm_buf.fb_addr[i].dma_addr =
+ vb2_dma_contig_plane_dma_addr(src_buf, i);
+ frm_buf.fb_addr[i].size =
+ (size_t)src_buf->planes[i].length;
+ }
+ bs_buf.va = vb2_plane_vaddr(dst_buf, 0);
+ bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ bs_buf.size = (size_t)dst_buf->planes[0].length;
+
+ mtk_v4l2_debug(2,
+ "Framebuf VA=%p PA=%llx Size=0x%zx;VA=%p PA=0x%llx Size=0x%zx;VA=%p PA=0x%llx Size=%zu",
+ frm_buf.fb_addr[0].va,
+ (u64)frm_buf.fb_addr[0].dma_addr,
+ frm_buf.fb_addr[0].size,
+ frm_buf.fb_addr[1].va,
+ (u64)frm_buf.fb_addr[1].dma_addr,
+ frm_buf.fb_addr[1].size,
+ frm_buf.fb_addr[2].va,
+ (u64)frm_buf.fb_addr[2].dma_addr,
+ frm_buf.fb_addr[2].size);
+
+ ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
+ &frm_buf, &bs_buf, &enc_result);
+
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+
+ if (enc_result.is_key_frm)
+ dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
+
+ if (ret) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ dst_buf->planes[0].bytesused = 0;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ mtk_v4l2_err("venc_if_encode failed=%d", ret);
+ } else {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_DONE);
+ dst_buf->planes[0].bytesused = enc_result.bs_size;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_DONE);
+ mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
+ enc_result.bs_size);
+ }
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
+
+ mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
+ src_buf->index, dst_buf->index, ret,
+ enc_result.bs_size);
+}
+
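+/* m2m device_run: for H.264, encode the SPS/PPS header first if it has
+ * not been produced yet; otherwise apply pending parameter changes. The
+ * actual frame encoding is deferred to the encode workqueue.
+ */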
+static void m2mops_venc_device_run(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+
+ if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) &&
+ (ctx->state != MTK_STATE_HEADER)) {
+ /* encode h264 sps/pps header */
+ mtk_venc_encode_header(ctx);
+ queue_work(ctx->dev->encode_workqueue, &ctx->encode_work);
+ return;
+ }
+
+ mtk_venc_param_change(ctx);
+ queue_work(ctx->dev->encode_workqueue, &ctx->encode_work);
+}
+
+static int m2mops_venc_job_ready(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ if (ctx->state == MTK_STATE_ABORT || ctx->state == MTK_STATE_FREE) {
+ mtk_v4l2_debug(3, "[%d]Not ready: state=0x%x.",
+ ctx->id, ctx->state);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void m2mops_venc_job_abort(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+
+ ctx->state = MTK_STATE_ABORT;
+}
+
+const struct v4l2_m2m_ops mtk_venc_m2m_ops = {
+ .device_run = m2mops_venc_device_run,
+ .job_ready = m2mops_venc_job_ready,
+ .job_abort = m2mops_venc_job_abort,
+};
+
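+/* Initialize a freshly opened context with default formats: an NV12M
+ * OUTPUT queue and an H.264 CAPTURE queue at the minimum supported
+ * resolution.
+ */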
+void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_q_data *q_data;
+
+ ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+ INIT_WORK(&ctx->encode_work, mtk_venc_worker);
+
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->coded_width = DFT_CFG_WIDTH;
+ q_data->coded_height = DFT_CFG_HEIGHT;
+ q_data->field = V4L2_FIELD_NONE;
+
+ q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+
+ v4l_bound_align_image(&q_data->coded_width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W, 4,
+ &q_data->coded_height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H, 5, 6);
+
+ if (q_data->coded_width < DFT_CFG_WIDTH &&
+ (q_data->coded_width + 16) <= MTK_VENC_MAX_W)
+ q_data->coded_width += 16;
+ if (q_data->coded_height < DFT_CFG_HEIGHT &&
+ (q_data->coded_height + 32) <= MTK_VENC_MAX_H)
+ q_data->coded_height += 32;
+
+ q_data->sizeimage[0] =
+ q_data->coded_width * q_data->coded_height +
+ ((ALIGN(q_data->coded_width, 16) * 2) * 16);
+ q_data->bytesperline[0] = q_data->coded_width;
+ q_data->sizeimage[1] =
+ (q_data->coded_width * q_data->coded_height) / 2 +
+ (ALIGN(q_data->coded_width, 16) * 16);
+ q_data->bytesperline[1] = q_data->coded_width;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->coded_width = DFT_CFG_WIDTH;
+ q_data->coded_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] = 0;
+
+}
+
+int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ const struct v4l2_ctrl_ops *ops = &mtk_vcodec_enc_ctrl_ops;
+ struct v4l2_ctrl_handler *handler = &ctx->ctrl_hdl;
+
+ v4l2_ctrl_handler_init(handler, MTK_MAX_CTRLS_HINT);
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE,
+ 1, 4000000, 1, 4000000);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ 0, 2, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 51, 1, 51);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ 0, 65535, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 0, 65535, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME,
+ 0, 0, 0, 0);
+ v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ 0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ 0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+ if (handler->error) {
+ mtk_v4l2_err("Init control handler fail %d",
+ handler->error);
+ return handler->error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+
+ return 0;
+}
+
+int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ int ret;
+
+ /* Note: VB2_USERPTR works with dma-contig because mt8173
+ * supports an iommu
+ * https://patchwork.kernel.org/patch/8335461/
+ * https://patchwork.kernel.org/patch/7596181/
+ */
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct mtk_video_enc_buf);
+ src_vq->ops = &mtk_venc_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &mtk_venc_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = &ctx->dev->plat_dev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
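+/* mtk_venc_lock()/mtk_venc_unlock() take and release the encoder
+ * hardware lock (enc_mutex), serializing access to the encoder hardware
+ * between contexts.
+ */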
+int mtk_venc_unlock(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_vcodec_dev *dev = ctx->dev;
+
+ mutex_unlock(&dev->enc_mutex);
+ return 0;
+}
+
+int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_vcodec_dev *dev = ctx->dev;
+
+ mutex_lock(&dev->enc_mutex);
+ return 0;
+}
+
+void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
+{
+ int ret = venc_if_deinit(ctx);
+
+ if (ret)
+ mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+ ctx->state = MTK_STATE_FREE;
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
new file mode 100644
index 000000000..d7a154a97
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
@@ -0,0 +1,58 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_ENC_H_
+#define _MTK_VCODEC_ENC_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define MTK_VENC_IRQ_STATUS_SPS 0x1
+#define MTK_VENC_IRQ_STATUS_PPS 0x2
+#define MTK_VENC_IRQ_STATUS_FRM 0x4
+#define MTK_VENC_IRQ_STATUS_DRAM 0x8
+#define MTK_VENC_IRQ_STATUS_PAUSE 0x10
+#define MTK_VENC_IRQ_STATUS_SWITCH 0x20
+
+#define MTK_VENC_IRQ_STATUS_OFFSET 0x05C
+#define MTK_VENC_IRQ_ACK_OFFSET 0x060
+
+/**
+ * struct mtk_video_enc_buf - Private data related to each VB2 buffer.
+ * @vb: Pointer to related VB2 buffer.
+ * @list: list the buffer is linked to
+ * @param_change: types of encode parameter changes applied before
+ * encoding this buffer
+ * @enc_params: encode parameters changed before encoding this buffer
+ */
+struct mtk_video_enc_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ u32 param_change;
+ struct mtk_enc_params enc_params;
+};
+
+extern const struct v4l2_ioctl_ops mtk_venc_ioctl_ops;
+extern const struct v4l2_m2m_ops mtk_venc_m2m_ops;
+
+int mtk_venc_unlock(struct mtk_vcodec_ctx *ctx);
+int mtk_venc_lock(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx);
+
+#endif /* _MTK_VCODEC_ENC_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
new file mode 100644
index 000000000..b95006a86
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -0,0 +1,428 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/pm_runtime.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_enc.h"
+#include "mtk_vcodec_enc_pm.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+module_param(mtk_v4l2_dbg_level, int, S_IRUGO | S_IWUSR);
+module_param(mtk_vcodec_dbg, bool, S_IRUGO | S_IWUSR);
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason)
+{
+ ctx->int_cond = 1;
+ ctx->int_type = reason;
+ wake_up_interruptible(&ctx->queue);
+}
+
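+/* Acknowledge the raised interrupt status bits by writing them back to
+ * the IRQ acknowledge register.
+ */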
+static void clean_irq_status(unsigned int irq_status, void __iomem *addr)
+{
+ if (irq_status & MTK_VENC_IRQ_STATUS_PAUSE)
+ writel(MTK_VENC_IRQ_STATUS_PAUSE, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_SWITCH)
+ writel(MTK_VENC_IRQ_STATUS_SWITCH, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_DRAM)
+ writel(MTK_VENC_IRQ_STATUS_DRAM, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_SPS)
+ writel(MTK_VENC_IRQ_STATUS_SPS, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_PPS)
+ writel(MTK_VENC_IRQ_STATUS_PPS, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_FRM)
+ writel(MTK_VENC_IRQ_STATUS_FRM, addr);
+}
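+
+/* IRQ handler for the main (H.264) encoder block: read and acknowledge
+ * the interrupt status, then wake up the context currently using the
+ * hardware (dev->curr_ctx).
+ */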
+static irqreturn_t mtk_vcodec_enc_irq_handler(int irq, void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ unsigned long flags;
+ void __iomem *addr;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ ctx = dev->curr_ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ mtk_v4l2_debug(1, "id=%d", ctx->id);
+ addr = dev->reg_base[VENC_SYS] + MTK_VENC_IRQ_ACK_OFFSET;
+
+ ctx->irq_status = readl(dev->reg_base[VENC_SYS] +
+ (MTK_VENC_IRQ_STATUS_OFFSET));
+
+ clean_irq_status(ctx->irq_status, addr);
+
+ wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_vcodec_enc_lt_irq_handler(int irq, void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ unsigned long flags;
+ void __iomem *addr;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ ctx = dev->curr_ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ mtk_v4l2_debug(1, "id=%d", ctx->id);
+ ctx->irq_status = readl(dev->reg_base[VENC_LT_SYS] +
+ (MTK_VENC_IRQ_STATUS_OFFSET));
+
+ addr = dev->reg_base[VENC_LT_SYS] + MTK_VENC_IRQ_ACK_OFFSET;
+
+ clean_irq_status(ctx->irq_status, addr);
+
+ wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED);
+ return IRQ_HANDLED;
+}
+
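+/* VPU watchdog timeout handler: mark every open context as aborted so
+ * that subsequent queue operations from userspace fail with -EIO.
+ */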
+static void mtk_vcodec_enc_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_debug(0, "Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
+ ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
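+/* Allocate and initialize a new encoder context: controls, m2m context,
+ * default formats and, on the first open, the VPU firmware.
+ */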
+static int fops_vcodec_open(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_lock(&dev->dev_mutex);
+ /*
+ * Use a simple counter to uniquely identify this context. It is
+ * only used for logging.
+ */
+ ctx->id = dev->id_counter++;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+ ctx->dev = dev;
+ init_waitqueue_head(&ctx->queue);
+
+ ctx->type = MTK_INST_ENCODER;
+ ret = mtk_vcodec_enc_ctrls_setup(ctx);
+ if (ret) {
+ mtk_v4l2_err("Failed to setup controls() (%d)",
+ ret);
+ goto err_ctrls_setup;
+ }
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_enc, ctx,
+ &mtk_vcodec_enc_queue_init);
+ if (IS_ERR((__force void *)ctx->m2m_ctx)) {
+ ret = PTR_ERR((__force void *)ctx->m2m_ctx);
+ mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
+ ret);
+ goto err_m2m_ctx_init;
+ }
+ mtk_vcodec_enc_set_default_params(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh)) {
+ /*
+ * vpu_load_firmware checks if it was loaded already and
+ * does nothing in that case
+ */
+ ret = vpu_load_firmware(dev->vpu_plat_dev);
+ if (ret < 0) {
+ /*
+ * vpu_load_firmware() returns 0 when the firmware was
+ * downloaded successfully and a negative error code
+ * otherwise.
+ */
+ mtk_v4l2_err("vpu_load_firmware failed!");
+ goto err_load_fw;
+ }
+
+ dev->enc_capability =
+ vpu_get_venc_hw_capa(dev->vpu_plat_dev);
+ mtk_v4l2_debug(0, "encoder capability %x", dev->enc_capability);
+ }
+
+ mtk_v4l2_debug(2, "Create instance [%d]@%p m2m_ctx=%p ",
+ ctx->id, ctx, ctx->m2m_ctx);
+
+ list_add(&ctx->list, &dev->ctx_list);
+
+ mutex_unlock(&dev->dev_mutex);
+ mtk_v4l2_debug(0, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
+ ctx->id);
+ return ret;
+
+ /* Deinit when failure occurred */
+err_load_fw:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_m2m_ctx_init:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_ctrls_setup:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int fops_vcodec_release(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+
+ mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ mtk_vcodec_enc_release(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ list_del_init(&ctx->list);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_vcodec_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_vcodec_open,
+ .release = fops_vcodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mtk_vcodec_probe(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev;
+ struct video_device *vfd_enc;
+ struct resource *res;
+ int i, j, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev->ctx_list);
+ dev->plat_dev = pdev;
+
+ dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
+ if (dev->vpu_plat_dev == NULL) {
+ mtk_v4l2_err("[VPU] vpu device in not ready");
+ return -EPROBE_DEFER;
+ }
+
+ vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_enc_reset_handler,
+ dev, VPU_RST_ENC);
+
+ ret = mtk_vcodec_init_enc_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get mt vcodec clock source!");
+ return ret;
+ }
+
+ for (i = VENC_SYS, j = 0; i < NUM_MAX_VCODEC_REG_BASE; i++, j++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, j);
+ dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((__force void *)dev->reg_base[i])) {
+ ret = PTR_ERR((__force void *)dev->reg_base[i]);
+ goto err_res;
+ }
+ mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[i]);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get irq resource");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ dev->enc_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, dev->enc_irq,
+ mtk_vcodec_enc_irq_handler,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install dev->enc_irq %d (%d)",
+ dev->enc_irq,
+ ret);
+ ret = -EINVAL;
+ goto err_res;
+ }
+
+ dev->enc_lt_irq = platform_get_irq(pdev, 1);
+ ret = devm_request_irq(&pdev->dev,
+ dev->enc_lt_irq, mtk_vcodec_enc_lt_irq_handler,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to install dev->enc_lt_irq %d (%d)",
+ dev->enc_lt_irq, ret);
+ ret = -EINVAL;
+ goto err_res;
+ }
+
+ disable_irq(dev->enc_irq);
+ disable_irq(dev->enc_lt_irq); /* VENC_LT */
+ mutex_init(&dev->enc_mutex);
+ mutex_init(&dev->dev_mutex);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+ "[MTK_V4L2_VENC]");
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ mtk_v4l2_err("v4l2_device_register err=%d", ret);
+ goto err_res;
+ }
+
+ init_waitqueue_head(&dev->queue);
+
+ /* allocate video device for encoder and register it */
+ vfd_enc = video_device_alloc();
+ if (!vfd_enc) {
+ mtk_v4l2_err("Failed to allocate video device");
+ ret = -ENOMEM;
+ goto err_enc_alloc;
+ }
+ vfd_enc->fops = &mtk_vcodec_fops;
+ vfd_enc->ioctl_ops = &mtk_venc_ioctl_ops;
+ vfd_enc->release = video_device_release;
+ vfd_enc->lock = &dev->dev_mutex;
+ vfd_enc->v4l2_dev = &dev->v4l2_dev;
+ vfd_enc->vfl_dir = VFL_DIR_M2M;
+ vfd_enc->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_STREAMING;
+
+ snprintf(vfd_enc->name, sizeof(vfd_enc->name), "%s",
+ MTK_VCODEC_ENC_NAME);
+ video_set_drvdata(vfd_enc, dev);
+ dev->vfd_enc = vfd_enc;
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev_enc = v4l2_m2m_init(&mtk_venc_m2m_ops);
+ if (IS_ERR((__force void *)dev->m2m_dev_enc)) {
+ mtk_v4l2_err("Failed to init mem2mem enc device");
+ ret = PTR_ERR((__force void *)dev->m2m_dev_enc);
+ goto err_enc_mem_init;
+ }
+
+ dev->encode_workqueue =
+ alloc_ordered_workqueue(MTK_VCODEC_ENC_NAME,
+ WQ_MEM_RECLAIM |
+ WQ_FREEZABLE);
+ if (!dev->encode_workqueue) {
+ mtk_v4l2_err("Failed to create encode workqueue");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ ret = video_register_device(vfd_enc, VFL_TYPE_GRABBER, 1);
+ if (ret) {
+ mtk_v4l2_err("Failed to register video device");
+ goto err_enc_reg;
+ }
+
+ mtk_v4l2_debug(0, "encoder registered as /dev/video%d",
+ vfd_enc->num);
+
+ return 0;
+
+err_enc_reg:
+ destroy_workqueue(dev->encode_workqueue);
+err_event_workq:
+ v4l2_m2m_release(dev->m2m_dev_enc);
+err_enc_mem_init:
+ video_unregister_device(vfd_enc);
+err_enc_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_res:
+ mtk_vcodec_release_enc_pm(dev);
+ return ret;
+}
+
+static const struct of_device_id mtk_vcodec_enc_match[] = {
+ {.compatible = "mediatek,mt8173-vcodec-enc",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
+
+static int mtk_vcodec_enc_remove(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+
+ mtk_v4l2_debug_enter();
+ flush_workqueue(dev->encode_workqueue);
+ destroy_workqueue(dev->encode_workqueue);
+ if (dev->m2m_dev_enc)
+ v4l2_m2m_release(dev->m2m_dev_enc);
+
+ if (dev->vfd_enc)
+ video_unregister_device(dev->vfd_enc);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ mtk_vcodec_release_enc_pm(dev);
+ return 0;
+}
+
+static struct platform_driver mtk_vcodec_enc_driver = {
+ .probe = mtk_vcodec_probe,
+ .remove = mtk_vcodec_enc_remove,
+ .driver = {
+ .name = MTK_VCODEC_ENC_NAME,
+ .of_match_table = mtk_vcodec_enc_match,
+ },
+};
+
+module_platform_driver(mtk_vcodec_enc_driver);
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec V4L2 encoder driver");
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
new file mode 100644
index 000000000..7c025045e
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -0,0 +1,139 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_enc_pm.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+
+int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct mtk_vcodec_pm *pm;
+ int ret = 0;
+
+ pdev = mtkdev->plat_dev;
+ pm = &mtkdev->pm;
+ memset(pm, 0, sizeof(struct mtk_vcodec_pm));
+ pm->mtkdev = mtkdev;
+ pm->dev = &pdev->dev;
+ dev = &pdev->dev;
+
+ node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
+ if (!node) {
+ mtk_v4l2_err("no mediatek,larb found");
+ return -ENODEV;
+ }
+ pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (!pdev) {
+ mtk_v4l2_err("no mediatek,larb device found");
+ return -ENODEV;
+ }
+ pm->larbvenc = &pdev->dev;
+
+ node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
+ if (!node) {
+ mtk_v4l2_err("no mediatek,larb found");
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (!pdev) {
+ mtk_v4l2_err("no mediatek,larb device found");
+ return -ENODEV;
+ }
+
+ pm->larbvenclt = &pdev->dev;
+ pdev = mtkdev->plat_dev;
+ pm->dev = &pdev->dev;
+
+ pm->vencpll_d2 = devm_clk_get(&pdev->dev, "venc_sel_src");
+ if (IS_ERR(pm->vencpll_d2)) {
+ mtk_v4l2_err("devm_clk_get vencpll_d2 fail");
+ ret = PTR_ERR(pm->vencpll_d2);
+ }
+
+ pm->venc_sel = devm_clk_get(&pdev->dev, "venc_sel");
+ if (IS_ERR(pm->venc_sel)) {
+ mtk_v4l2_err("devm_clk_get venc_sel fail");
+ ret = PTR_ERR(pm->venc_sel);
+ }
+
+ pm->univpll1_d2 = devm_clk_get(&pdev->dev, "venc_lt_sel_src");
+ if (IS_ERR(pm->univpll1_d2)) {
+ mtk_v4l2_err("devm_clk_get univpll1_d2 fail");
+ ret = PTR_ERR(pm->univpll1_d2);
+ }
+
+ pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
+ if (IS_ERR(pm->venc_lt_sel)) {
+ mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
+ ret = PTR_ERR(pm->venc_lt_sel);
+ }
+
+ return ret;
+}
+
+void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev)
+{
+}
+
+
+void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+	ret = clk_prepare_enable(pm->venc_sel);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable venc_sel fail %d", ret);
+
+	ret = clk_set_parent(pm->venc_sel, pm->vencpll_d2);
+	if (ret)
+		mtk_v4l2_err("clk_set_parent venc_sel fail %d", ret);
+
+	ret = clk_prepare_enable(pm->venc_lt_sel);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret);
+
+	ret = clk_set_parent(pm->venc_lt_sel, pm->univpll1_d2);
+	if (ret)
+		mtk_v4l2_err("clk_set_parent venc_lt_sel fail %d", ret);
+
+ ret = mtk_smi_larb_get(pm->larbvenc);
+ if (ret)
+ mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret);
+
+ ret = mtk_smi_larb_get(pm->larbvenclt);
+ if (ret)
+ mtk_v4l2_err("mtk_smi_larb_get larb4 fail %d", ret);
+
+}
+
+void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm)
+{
+ mtk_smi_larb_put(pm->larbvenc);
+ mtk_smi_larb_put(pm->larbvenclt);
+ clk_disable_unprepare(pm->venc_lt_sel);
+ clk_disable_unprepare(pm->venc_sel);
+}
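+
+/*
+ * Illustrative call sequence (a sketch, not part of the encoder flow shown
+ * here): hardware access is expected to be bracketed by these helpers, e.g.
+ *
+ *	mtk_vcodec_enc_clock_on(&dev->pm);
+ *	... program the VENC/VENC_LT registers and trigger the job ...
+ *	mtk_vcodec_enc_clock_off(&dev->pm);
+ *
+ * clock_on prepares and enables the two mux clocks, reparents them to the
+ * PLL sources obtained in mtk_vcodec_init_enc_pm() and takes the two SMI
+ * larb references; clock_off drops the larb references and disables the
+ * clocks again.
+ */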
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
new file mode 100644
index 000000000..f32167138
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
@@ -0,0 +1,26 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_ENC_PM_H_
+#define _MTK_VCODEC_ENC_PM_H_
+
+#include "mtk_vcodec_drv.h"
+
+int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *dev);
+void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *dev);
+
+void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
+
+#endif /* _MTK_VCODEC_ENC_PM_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
new file mode 100644
index 000000000..113b2097f
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
@@ -0,0 +1,53 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/errno.h>
+#include <linux/wait.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+
+int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx, int command,
+ unsigned int timeout_ms)
+{
+ wait_queue_head_t *waitqueue;
+ long timeout_jiff, ret;
+ int status = 0;
+
+ waitqueue = (wait_queue_head_t *)&ctx->queue;
+ timeout_jiff = msecs_to_jiffies(timeout_ms);
+
+ ret = wait_event_interruptible_timeout(*waitqueue,
+ ctx->int_cond,
+ timeout_jiff);
+
+ if (!ret) {
+ status = -1; /* timeout */
+ mtk_v4l2_err("[%d] cmd=%d, ctx->type=%d, wait_event_interruptible_timeout time=%ums out %d %d!",
+				ctx->id, command, ctx->type, timeout_ms,
+ ctx->int_cond, ctx->int_type);
+ } else if (-ERESTARTSYS == ret) {
+ mtk_v4l2_err("[%d] cmd=%d, ctx->type=%d, wait_event_interruptible_timeout interrupted by a signal %d %d",
+				ctx->id, command, ctx->type, ctx->int_cond,
+ ctx->int_type);
+ status = -1;
+ }
+
+ ctx->int_cond = 0;
+ ctx->int_type = 0;
+
+ return status;
+}
+EXPORT_SYMBOL(mtk_vcodec_wait_for_done_ctx);
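+
+/*
+ * Typical pairing (a sketch inferred from the helper above, not code taken
+ * from elsewhere in this patch): the interrupt handler records the event
+ * and wakes the waiter, e.g.
+ *
+ *	ctx->int_cond = 1;
+ *	ctx->int_type = MTK_INST_IRQ_RECEIVED;
+ *	wake_up_interruptible(&ctx->queue);
+ *
+ * while the decode/encode path blocks on it:
+ *
+ *	err = mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ *					   WAIT_INTR_TIMEOUT_MS);
+ *
+ * where a non-zero return value (-1) covers both the timeout and the
+ * interrupted-by-signal cases.
+ */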
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
new file mode 100644
index 000000000..12131855b
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
@@ -0,0 +1,26 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_INTR_H_
+#define _MTK_VCODEC_INTR_H_
+
+#define MTK_INST_IRQ_RECEIVED 0x1
+
+struct mtk_vcodec_ctx;
+
+/* timeout is ms */
+int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *data, int command,
+ unsigned int timeout_ms);
+
+#endif /* _MTK_VCODEC_INTR_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
new file mode 100644
index 000000000..0c28d0b99
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -0,0 +1,120 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+/* For encoder, this will enable logs in venc/ */
+bool mtk_vcodec_dbg;
+EXPORT_SYMBOL(mtk_vcodec_dbg);
+
+/* The log level of v4l2 encoder or decoder driver.
+ * That is, files under mtk-vcodec/.
+ */
+int mtk_v4l2_dbg_level;
+EXPORT_SYMBOL(mtk_v4l2_dbg_level);
+
+void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
+ unsigned int reg_idx)
+{
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+
+ if (!data || reg_idx >= NUM_MAX_VCODEC_REG_BASE) {
+ mtk_v4l2_err("Invalid arguments, reg_idx=%d", reg_idx);
+ return NULL;
+ }
+ return ctx->dev->reg_base[reg_idx];
+}
+EXPORT_SYMBOL(mtk_vcodec_get_reg_addr);
+
+int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem)
+{
+ unsigned long size = mem->size;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+ struct device *dev = &ctx->dev->plat_dev->dev;
+
+ mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+
+ if (!mem->va) {
+ mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
+ size);
+ return -ENOMEM;
+ }
+
+ memset(mem->va, 0, size);
+
+ mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
+ mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
+ (unsigned long)mem->dma_addr);
+ mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
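+
+/*
+ * Usage sketch (illustrative only): the caller fills in the requested size,
+ * lets the helper allocate zeroed coherent DMA memory and frees it again
+ * with mtk_vcodec_mem_free() below, e.g.
+ *
+ *	struct mtk_vcodec_mem buf = { .size = 4096 };
+ *
+ *	if (mtk_vcodec_mem_alloc(ctx, &buf))
+ *		return -ENOMEM;
+ *	... use buf.va and buf.dma_addr ...
+ *	mtk_vcodec_mem_free(ctx, &buf);
+ */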
+
+void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem)
+{
+ unsigned long size = mem->size;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+ struct device *dev = &ctx->dev->plat_dev->dev;
+
+ if (!mem->va) {
+ mtk_v4l2_err("%s dma_free size=%ld failed!", dev_name(dev),
+ size);
+ return;
+ }
+
+ mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
+ mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
+ (unsigned long)mem->dma_addr);
+ mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+
+ dma_free_coherent(dev, size, mem->va, mem->dma_addr);
+ mem->va = NULL;
+ mem->dma_addr = 0;
+ mem->size = 0;
+}
+EXPORT_SYMBOL(mtk_vcodec_mem_free);
+
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+ struct mtk_vcodec_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->curr_ctx = ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+EXPORT_SYMBOL(mtk_vcodec_set_curr_ctx);
+
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
+{
+ unsigned long flags;
+ struct mtk_vcodec_ctx *ctx;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ ctx = dev->curr_ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ctx;
+}
+EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec driver");
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
new file mode 100644
index 000000000..06c254f5c
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -0,0 +1,89 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_UTIL_H_
+#define _MTK_VCODEC_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+struct mtk_vcodec_mem {
+ size_t size;
+ void *va;
+ dma_addr_t dma_addr;
+};
+
+struct mtk_vcodec_ctx;
+struct mtk_vcodec_dev;
+
+extern int mtk_v4l2_dbg_level;
+extern bool mtk_vcodec_dbg;
+
+
+#define mtk_v4l2_err(fmt, args...) \
+ pr_err("[MTK_V4L2][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
+ ##args)
+
+#define mtk_vcodec_err(h, fmt, args...) \
+ pr_err("[MTK_VCODEC][ERROR][%d]: %s() " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)h->ctx)->id, __func__, ##args)
+
+
+#if defined(DEBUG)
+
+#define mtk_v4l2_debug(level, fmt, args...) \
+ do { \
+ if (mtk_v4l2_dbg_level >= level) \
+ pr_info("[MTK_V4L2] level=%d %s(),%d: " fmt "\n",\
+ level, __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mtk_v4l2_debug_enter() mtk_v4l2_debug(3, "+")
+#define mtk_v4l2_debug_leave() mtk_v4l2_debug(3, "-")
+
+#define mtk_vcodec_debug(h, fmt, args...) \
+ do { \
+ if (mtk_vcodec_dbg) \
+ pr_info("[MTK_VCODEC][%d]: %s() " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)h->ctx)->id, \
+ __func__, ##args); \
+ } while (0)
+
+#define mtk_vcodec_debug_enter(h) mtk_vcodec_debug(h, "+")
+#define mtk_vcodec_debug_leave(h) mtk_vcodec_debug(h, "-")
+
+#else
+
+#define mtk_v4l2_debug(level, fmt, args...) {}
+#define mtk_v4l2_debug_enter() {}
+#define mtk_v4l2_debug_leave() {}
+
+#define mtk_vcodec_debug(h, fmt, args...) {}
+#define mtk_vcodec_debug_enter(h) {}
+#define mtk_vcodec_debug_leave(h) {}
+
+#endif
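+
+/*
+ * Example (behaviour of the macros above): with DEBUG defined and
+ * mtk_v4l2_dbg_level set to 2, mtk_v4l2_debug(2, ...) is printed while
+ * mtk_v4l2_debug(3, ...) stays silent; mtk_vcodec_debug() is gated by the
+ * mtk_vcodec_dbg flag instead. Without DEBUG all of the debug macros
+ * compile away to empty statements.
+ */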
+
+void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
+ unsigned int reg_idx);
+int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem);
+void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem);
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+ struct mtk_vcodec_ctx *ctx);
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev);
+
+#endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
new file mode 100644
index 000000000..aa3ce4189
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+#define NAL_NON_IDR_SLICE 0x01
+#define NAL_IDR_SLICE 0x05
+#define NAL_H264_PPS 0x08
+#define NAL_TYPE(value) ((value) & 0x1F)
+
+#define BUF_PREDICTION_SZ (32 * 1024)
+
+#define MB_UNIT_LEN 16
+
+/* motion vector size (bytes) for every macro block */
+#define HW_MB_STORE_SZ 64
+
+#define H264_MAX_FB_NUM 17
+#define HDR_PARSING_BUF_SZ 1024
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct h264_fb {
+ uint64_t vdec_fb_va;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ int32_t poc;
+ uint32_t reserved;
+};
+
+/**
+ * struct h264_ring_fb_list - ring frame buffer list
+ * @fb_list : frame buffer array
+ * @read_idx : read index
+ * @write_idx : write index
+ * @count : buffer count in list
+ * @reserved : for 8 bytes alignment
+ */
+struct h264_ring_fb_list {
+ struct h264_fb fb_list[H264_MAX_FB_NUM];
+ unsigned int read_idx;
+ unsigned int write_idx;
+ unsigned int count;
+ unsigned int reserved;
+};
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resolution change happened
+ * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
+ * @reserved : for 8 bytes alignment
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+ uint32_t dpb_sz;
+ uint32_t resolution_changed;
+ uint32_t realloc_mv_buf;
+ uint32_t reserved;
+ uint64_t bs_dma;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by VPU and then mapped to Host
+ * in vpu_dec_init() and freed in vpu_dec_deinit()
+ * by VPU.
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @hdr_buf : Header parsing buffer (AP-W, VPU-R)
+ * @pred_buf_dma : HW working prediction buffer dma address (AP-W, VPU-R)
+ * @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
+ * @list_free : free frame buffer ring list (AP-W/R, VPU-W)
+ * @list_disp : display frame buffer ring list (AP-R, VPU-W)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ */
+struct vdec_h264_vsi {
+ unsigned char hdr_buf[HDR_PARSING_BUF_SZ];
+ uint64_t pred_buf_dma;
+ uint64_t mv_buf_dma[H264_MAX_FB_NUM];
+ struct h264_ring_fb_list list_free;
+ struct h264_ring_fb_list list_disp;
+ struct vdec_h264_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+};
+
+/**
+ * struct vdec_h264_inst - h264 decoder instance
+ * @num_nalu : how many NALUs have been decoded
+ * @ctx : pointer to mtk_vcodec_ctx
+ * @pred_buf : HW working prediction buffer
+ * @mv_buf : HW working motion vector buffer
+ * @vpu : VPU instance
+ * @vsi : VPU shared information
+ */
+struct vdec_h264_inst {
+ unsigned int num_nalu;
+ struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_mem pred_buf;
+ struct mtk_vcodec_mem mv_buf[H264_MAX_FB_NUM];
+ struct vdec_vpu_inst vpu;
+ struct vdec_h264_vsi *vsi;
+};
+
+static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
+{
+ return HW_MB_STORE_SZ * (width/MB_UNIT_LEN) * (height/MB_UNIT_LEN);
+}
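+
+/*
+ * For instance (illustrative arithmetic only): a 1920x1088 buffer gives
+ * (1920/16) * (1088/16) = 120 * 68 macroblocks, so each motion vector
+ * buffer is 64 * 120 * 68 = 522240 bytes.
+ */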
+
+static int allocate_predication_buf(struct vdec_h264_inst *inst)
+{
+ int err = 0;
+
+ inst->pred_buf.size = BUF_PREDICTION_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ return err;
+ }
+
+ inst->vsi->pred_buf_dma = inst->pred_buf.dma_addr;
+ return 0;
+}
+
+static void free_predication_buf(struct vdec_h264_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = NULL;
+
+ mtk_vcodec_debug_enter(inst);
+
+ inst->vsi->pred_buf_dma = 0;
+ mem = &inst->pred_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+}
+
+static int alloc_mv_buf(struct vdec_h264_inst *inst, struct vdec_pic_info *pic)
+{
+ int i;
+ int err;
+ struct mtk_vcodec_mem *mem = NULL;
+ unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
+
+ for (i = 0; i < H264_MAX_FB_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ mem->size = buf_sz;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate mv buf");
+ return err;
+ }
+ inst->vsi->mv_buf_dma[i] = mem->dma_addr;
+ }
+
+ return 0;
+}
+
+static void free_mv_buf(struct vdec_h264_inst *inst)
+{
+ int i;
+ struct mtk_vcodec_mem *mem = NULL;
+
+ for (i = 0; i < H264_MAX_FB_NUM; i++) {
+ inst->vsi->mv_buf_dma[i] = 0;
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ }
+}
+
+static int check_list_validity(struct vdec_h264_inst *inst, bool disp_list)
+{
+ struct h264_ring_fb_list *list;
+
+ list = disp_list ? &inst->vsi->list_disp : &inst->vsi->list_free;
+
+ if (list->count > H264_MAX_FB_NUM ||
+ list->read_idx >= H264_MAX_FB_NUM ||
+ list->write_idx >= H264_MAX_FB_NUM) {
+ mtk_vcodec_err(inst, "%s list err: cnt=%d r_idx=%d w_idx=%d",
+ disp_list ? "disp" : "free", list->count,
+ list->read_idx, list->write_idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void put_fb_to_free(struct vdec_h264_inst *inst, struct vdec_fb *fb)
+{
+ struct h264_ring_fb_list *list;
+
+ if (fb) {
+ if (check_list_validity(inst, false))
+ return;
+
+ list = &inst->vsi->list_free;
+ if (list->count == H264_MAX_FB_NUM) {
+ mtk_vcodec_err(inst, "[FB] put fb free_list full");
+ return;
+ }
+
+ mtk_vcodec_debug(inst, "[FB] put fb into free_list @(%p, %llx)",
+ fb->base_y.va, (u64)fb->base_y.dma_addr);
+
+ list->fb_list[list->write_idx].vdec_fb_va = (u64)(uintptr_t)fb;
+ list->write_idx = (list->write_idx == H264_MAX_FB_NUM - 1) ?
+ 0 : list->write_idx + 1;
+ list->count++;
+ }
+}
+
+static void get_pic_info(struct vdec_h264_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi->crop.left;
+ cr->top = inst->vsi->crop.top;
+ cr->width = inst->vsi->crop.width;
+ cr->height = inst->vsi->crop.height;
+
+ mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi->dec.dpb_sz;
+ mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_h264_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_h264_inst *inst = NULL;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_H264;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ inst->vsi = (struct vdec_h264_vsi *)inst->vpu.vsi;
+ err = allocate_predication_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+
+ *h_vdec = (unsigned long)inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_deinit(unsigned long h_vdec)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_predication_buf(inst);
+ free_mv_buf(inst);
+
+ kfree(inst);
+}
+
+static int find_start_code(unsigned char *data, unsigned int data_sz)
+{
+ if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
+ return 3;
+
+ if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 &&
+ data[3] == 1)
+ return 4;
+
+ return -1;
+}
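+
+/*
+ * Example (sketch of the return values above): the index of the byte that
+ * follows the Annex B start code is returned, so
+ *
+ *	00 00 01 65 ...    -> 3 (NAL header at data[3])
+ *	00 00 00 01 67 ... -> 4 (NAL header at data[4])
+ *
+ * and -1 when the buffer does not begin with a start code.
+ */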
+
+static int vdec_h264_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ int nal_start_idx = 0;
+ int err = 0;
+ unsigned int nal_start;
+ unsigned int nal_type;
+ unsigned char *buf;
+ unsigned int buf_sz;
+ unsigned int data[2];
+ uint64_t vdec_fb_va = (u64)(uintptr_t)fb;
+ uint64_t y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ uint64_t c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL)
+ return vpu_dec_reset(vpu);
+
+ buf = (unsigned char *)bs->va;
+ buf_sz = bs->size;
+ nal_start_idx = find_start_code(buf, buf_sz);
+ if (nal_start_idx < 0)
+ goto err_free_fb_out;
+
+ nal_start = buf[nal_start_idx];
+ nal_type = NAL_TYPE(buf[nal_start_idx]);
+ mtk_vcodec_debug(inst, "\n + NALU[%d] type %d +\n", inst->num_nalu,
+ nal_type);
+
+ if (nal_type == NAL_H264_PPS) {
+ buf_sz -= nal_start_idx;
+ if (buf_sz > HDR_PARSING_BUF_SZ) {
+ err = -EILSEQ;
+ goto err_free_fb_out;
+ }
+ memcpy(inst->vsi->hdr_buf, buf + nal_start_idx, buf_sz);
+ }
+
+ inst->vsi->dec.bs_dma = (uint64_t)bs->dma_addr;
+ inst->vsi->dec.y_fb_dma = y_fb_dma;
+ inst->vsi->dec.c_fb_dma = c_fb_dma;
+ inst->vsi->dec.vdec_fb_va = vdec_fb_va;
+
+ data[0] = buf_sz;
+ data[1] = nal_start;
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+ *res_chg = inst->vsi->dec.resolution_changed;
+ if (*res_chg) {
+ struct vdec_pic_info pic;
+
+ mtk_vcodec_debug(inst, "- resolution changed -");
+ get_pic_info(inst, &pic);
+
+ if (inst->vsi->dec.realloc_mv_buf) {
+ err = alloc_mv_buf(inst, &pic);
+ if (err)
+ goto err_free_fb_out;
+ }
+ }
+
+ if (nal_type == NAL_NON_IDR_SLICE || nal_type == NAL_IDR_SLICE) {
+ /* wait decoder done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+ if (err)
+ goto err_free_fb_out;
+
+ vpu_dec_end(vpu);
+ }
+
+ mtk_vcodec_debug(inst, "\n - NALU[%d] type=%d -\n", inst->num_nalu,
+ nal_type);
+ return 0;
+
+err_free_fb_out:
+ put_fb_to_free(inst, fb);
+ mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ return err;
+}
+
+static void vdec_h264_get_fb(struct vdec_h264_inst *inst,
+ struct h264_ring_fb_list *list,
+ bool disp_list, struct vdec_fb **out_fb)
+{
+ struct vdec_fb *fb;
+
+ if (check_list_validity(inst, disp_list))
+ return;
+
+ if (list->count == 0) {
+ mtk_vcodec_debug(inst, "[FB] there is no %s fb",
+ disp_list ? "disp" : "free");
+ *out_fb = NULL;
+ return;
+ }
+
+ fb = (struct vdec_fb *)
+ (uintptr_t)list->fb_list[list->read_idx].vdec_fb_va;
+ fb->status |= (disp_list ? FB_ST_DISPLAY : FB_ST_FREE);
+
+ *out_fb = fb;
+ mtk_vcodec_debug(inst, "[FB] get %s fb st=%d poc=%d %llx",
+ disp_list ? "disp" : "free",
+ fb->status, list->fb_list[list->read_idx].poc,
+ list->fb_list[list->read_idx].vdec_fb_va);
+
+ list->read_idx = (list->read_idx == H264_MAX_FB_NUM - 1) ?
+ 0 : list->read_idx + 1;
+ list->count--;
+}
+
+static int vdec_h264_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ vdec_h264_get_fb(inst, &inst->vsi->list_disp, true, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ vdec_h264_get_fb(inst, &inst->vsi->list_free, false, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct vdec_common_if vdec_h264_if = {
+ .init = vdec_h264_init,
+ .decode = vdec_h264_decode,
+ .get_param = vdec_h264_get_param,
+ .deinit = vdec_h264_deinit,
+};
+
+struct vdec_common_if *get_h264_dec_comm_if(void);
+
+struct vdec_common_if *get_h264_dec_comm_if(void)
+{
+ return &vdec_h264_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
new file mode 100644
index 000000000..3e84a761d
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -0,0 +1,633 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+/* Decoding picture buffer size (3 reference frames plus current frame) */
+#define VP8_DPB_SIZE 4
+
+/* HW working buffer size (bytes) */
+#define VP8_WORKING_BUF_SZ (45 * 4096)
+
+/* HW control register address */
+#define VP8_SEGID_DRAM_ADDR 0x3c
+#define VP8_HW_VLD_ADDR 0x93C
+#define VP8_HW_VLD_VALUE 0x940
+#define VP8_BSASET 0x100
+#define VP8_BSDSET 0x104
+#define VP8_RW_CKEN_SET 0x0
+#define VP8_RW_DCM_CON 0x18
+#define VP8_WO_VLD_SRST 0x108
+#define VP8_RW_MISC_SYS_SEL 0x84
+#define VP8_RW_MISC_SPEC_CON 0xC8
+#define VP8_WO_VLD_SRST 0x108
+#define VP8_RW_VP8_CTRL 0xA4
+#define VP8_RW_MISC_DCM_CON 0xEC
+#define VP8_RW_MISC_SRST 0xF4
+#define VP8_RW_MISC_FUNC_CON 0xCC
+
+#define VP8_MAX_FRM_BUF_NUM 5
+#define VP8_MAX_FRM_BUF_NODE_NUM (VP8_MAX_FRM_BUF_NUM * 2)
+
+/* required buffer size (bytes) to store decode information */
+#define VP8_HW_SEGMENT_DATA_SZ 272
+#define VP8_HW_SEGMENT_UINT 4
+
+#define VP8_DEC_TABLE_PROC_LOOP 96
+#define VP8_DEC_TABLE_UNIT 3
+#define VP8_DEC_TABLE_SZ 300
+#define VP8_DEC_TABLE_OFFSET 2
+#define VP8_DEC_TABLE_RW_UNIT 4
+
+/**
+ * struct vdec_vp8_dec_info - decode misc information
+ * @working_buf_dma : working buffer dma address
+ * @prev_y_dma : previous decoded frame buffer Y plane address
+ * @cur_y_fb_dma : current frame buffer Y plane dma address
+ * @cur_c_fb_dma : current frame buffer C plane dma address
+ * @bs_dma : bitstream dma address
+ * @bs_sz : bitstream size
+ * @resolution_changed: resolution change flag, 1 - changed, 0 - not changed
+ * @show_frame : display this frame or not
+ * @wait_key_frame : waiting for a key frame
+ */
+struct vdec_vp8_dec_info {
+ uint64_t working_buf_dma;
+ uint64_t prev_y_dma;
+ uint64_t cur_y_fb_dma;
+ uint64_t cur_c_fb_dma;
+ uint64_t bs_dma;
+ uint32_t bs_sz;
+ uint32_t resolution_changed;
+ uint32_t show_frame;
+ uint32_t wait_key_frame;
+};
+
+/**
+ * struct vdec_vp8_vsi - VPU shared information
+ * @dec : decoding information
+ * @pic : picture information
+ * @dec_table : decoder coefficient table
+ * @segment_buf : segmentation buffer
+ * @load_data : flag to indicate reload decode data
+ */
+struct vdec_vp8_vsi {
+ struct vdec_vp8_dec_info dec;
+ struct vdec_pic_info pic;
+ uint32_t dec_table[VP8_DEC_TABLE_SZ];
+ uint32_t segment_buf[VP8_HW_SEGMENT_DATA_SZ][VP8_HW_SEGMENT_UINT];
+ uint32_t load_data;
+};
+
+/**
+ * struct vdec_vp8_hw_reg_base - HW register base
+ * @sys : base address for sys
+ * @misc : base address for misc
+ * @ld : base address for ld
+ * @top : base address for top
+ * @cm : base address for cm
+ * @hwd : base address for hwd
+ * @hwb : base address for hwb
+ */
+struct vdec_vp8_hw_reg_base {
+ void __iomem *sys;
+ void __iomem *misc;
+ void __iomem *ld;
+ void __iomem *top;
+ void __iomem *cm;
+ void __iomem *hwd;
+ void __iomem *hwb;
+};
+
+/**
+ * struct vdec_vp8_vpu_inst - VPU instance for VP8 decode
+ * @wq_hd : Wait queue to wait VPU message ack
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
+ * @failure : VPU execution result status 0 - success, others - fail
+ * @inst_addr : VPU decoder instance address
+ */
+struct vdec_vp8_vpu_inst {
+ wait_queue_head_t wq_hd;
+ int signaled;
+ int failure;
+ uint32_t inst_addr;
+};
+
+/* frame buffer (fb) list
+ * [available_fb_node_list] - decode fb nodes are initialized to 0 and
+ *                            populated into this list
+ * [fb_use_list]            - an fb is moved to this list after it is set
+ *                            for decode
+ * [fb_free_list]           - an fb that is no longer needed for reference
+ *                            is moved from [fb_use_list] to [fb_free_list];
+ *                            once the user removes an fb from
+ *                            [fb_free_list], it is circulated back to
+ *                            [available_fb_node_list]
+ * [fb_disp_list]           - an fb that is ready for display is moved to
+ *                            this list after decode; once the user removes
+ *                            an fb from [fb_disp_list], it is circulated
+ *                            back to [available_fb_node_list]
+ */
+
+/**
+ * struct vdec_vp8_inst - VP8 decoder instance
+ * @cur_fb : current frame buffer
+ * @dec_fb : decode frame buffer node
+ * @available_fb_node_list : list to store available frame buffer node
+ * @fb_use_list : list to store frame buffer in use
+ * @fb_free_list : list to store free frame buffer
+ * @fb_disp_list : list to store display ready frame buffer
+ * @working_buf : HW decoder working buffer
+ * @reg_base : HW register base address
+ * @frm_cnt : decode frame count
+ * @ctx : V4L2 context
+ * @vpu : VPU instance for decoder
+ * @vsi : VPU share information
+ */
+struct vdec_vp8_inst {
+ struct vdec_fb *cur_fb;
+ struct vdec_fb_node dec_fb[VP8_MAX_FRM_BUF_NODE_NUM];
+ struct list_head available_fb_node_list;
+ struct list_head fb_use_list;
+ struct list_head fb_free_list;
+ struct list_head fb_disp_list;
+ struct mtk_vcodec_mem working_buf;
+ struct vdec_vp8_hw_reg_base reg_base;
+ unsigned int frm_cnt;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_vpu_inst vpu;
+ struct vdec_vp8_vsi *vsi;
+};
+
+static void get_hw_reg_base(struct vdec_vp8_inst *inst)
+{
+ inst->reg_base.top = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_TOP);
+ inst->reg_base.cm = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_CM);
+ inst->reg_base.hwd = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWD);
+ inst->reg_base.sys = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_SYS);
+ inst->reg_base.misc = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_MISC);
+ inst->reg_base.ld = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_LD);
+ inst->reg_base.hwb = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWB);
+}
+
+static void write_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 seg_id_addr;
+ u32 val;
+ void __iomem *cm = inst->reg_base.cm;
+ struct vdec_vp8_vsi *vsi = inst->vsi;
+
+ seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+ for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+ val = (1 << 16) + ((seg_id_addr + i) << 2) + j;
+ writel(val, cm + VP8_HW_VLD_ADDR);
+
+ val = vsi->segment_buf[i][j];
+ writel(val, cm + VP8_HW_VLD_VALUE);
+ }
+ }
+}
+
+static void read_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 seg_id_addr;
+ u32 val;
+ void __iomem *cm = inst->reg_base.cm;
+ struct vdec_vp8_vsi *vsi = inst->vsi;
+
+ seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+ for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+ val = ((seg_id_addr + i) << 2) + j;
+ writel(val, cm + VP8_HW_VLD_ADDR);
+
+ val = readl(cm + VP8_HW_VLD_VALUE);
+ vsi->segment_buf[i][j] = val;
+ }
+ }
+}
+
+/* reset HW and enable HW read/write data function */
+static void enable_hw_rw_function(struct vdec_vp8_inst *inst)
+{
+ u32 val = 0;
+ void __iomem *sys = inst->reg_base.sys;
+ void __iomem *misc = inst->reg_base.misc;
+ void __iomem *ld = inst->reg_base.ld;
+ void __iomem *hwb = inst->reg_base.hwb;
+ void __iomem *hwd = inst->reg_base.hwd;
+
+ writel(0x1, sys + VP8_RW_CKEN_SET);
+ writel(0x101, ld + VP8_WO_VLD_SRST);
+ writel(0x101, hwb + VP8_WO_VLD_SRST);
+
+ writel(1, sys);
+ val = readl(misc + VP8_RW_MISC_SRST);
+ writel((val & 0xFFFFFFFE), misc + VP8_RW_MISC_SRST);
+
+ writel(0x1, misc + VP8_RW_MISC_SYS_SEL);
+ writel(0x17F, misc + VP8_RW_MISC_SPEC_CON);
+ writel(0x71201100, misc + VP8_RW_MISC_FUNC_CON);
+ writel(0x0, ld + VP8_WO_VLD_SRST);
+ writel(0x0, hwb + VP8_WO_VLD_SRST);
+ writel(0x1, sys + VP8_RW_DCM_CON);
+ writel(0x1, misc + VP8_RW_MISC_DCM_CON);
+ writel(0x1, hwd + VP8_RW_VP8_CTRL);
+}
+
+static void store_dec_table(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 addr = 0, val = 0;
+ void __iomem *hwd = inst->reg_base.hwd;
+ u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+
+ for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+ writel(addr, hwd + VP8_BSASET);
+ for (j = 0; j < VP8_DEC_TABLE_UNIT ; j++) {
+ val = *p++;
+ writel(val, hwd + VP8_BSDSET);
+ }
+ addr += VP8_DEC_TABLE_RW_UNIT;
+ }
+}
+
+static void load_dec_table(struct vdec_vp8_inst *inst)
+{
+ int i;
+ u32 addr = 0;
+ u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+ void __iomem *hwd = inst->reg_base.hwd;
+
+ for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+ writel(addr, hwd + VP8_BSASET);
+ /* read total 11 bytes */
+ *p++ = readl(hwd + VP8_BSDSET);
+ *p++ = readl(hwd + VP8_BSDSET);
+ *p++ = readl(hwd + VP8_BSDSET) & 0xFFFFFF;
+ addr += VP8_DEC_TABLE_RW_UNIT;
+ }
+}
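+
+/*
+ * Note on the loop above: each iteration reads three 32-bit words from
+ * VP8_BSDSET but only 4 + 4 + 3 = 11 bytes are kept, because the last word
+ * is masked to its low 24 bits - that is what the "read total 11 bytes"
+ * comment refers to.
+ */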
+
+static void get_pic_info(struct vdec_vp8_inst *inst, struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void vp8_dec_finish(struct vdec_vp8_inst *inst)
+{
+ struct vdec_fb_node *node;
+ uint64_t prev_y_dma = inst->vsi->dec.prev_y_dma;
+
+ mtk_vcodec_debug(inst, "prev fb base dma=%llx", prev_y_dma);
+
+ /* put last decode ok frame to fb_free_list */
+ if (prev_y_dma != 0) {
+ list_for_each_entry(node, &inst->fb_use_list, list) {
+ struct vdec_fb *fb = (struct vdec_fb *)node->fb;
+
+ if (prev_y_dma == (uint64_t)fb->base_y.dma_addr) {
+ list_move_tail(&node->list,
+ &inst->fb_free_list);
+ break;
+ }
+ }
+ }
+
+ /* available_fb_node_list -> fb_use_list */
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = inst->cur_fb;
+ list_move_tail(&node->list, &inst->fb_use_list);
+
+ /* available_fb_node_list -> fb_disp_list */
+ if (inst->vsi->dec.show_frame) {
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = inst->cur_fb;
+ list_move_tail(&node->list, &inst->fb_disp_list);
+ }
+}
+
+static void move_fb_list_use_to_free(struct vdec_vp8_inst *inst)
+{
+ struct vdec_fb_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+ list_move_tail(&node->list, &inst->fb_free_list);
+}
+
+static void init_list(struct vdec_vp8_inst *inst)
+{
+ int i;
+
+ INIT_LIST_HEAD(&inst->available_fb_node_list);
+ INIT_LIST_HEAD(&inst->fb_use_list);
+ INIT_LIST_HEAD(&inst->fb_free_list);
+ INIT_LIST_HEAD(&inst->fb_disp_list);
+
+ for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+ INIT_LIST_HEAD(&inst->dec_fb[i].list);
+ inst->dec_fb[i].fb = NULL;
+ list_add_tail(&inst->dec_fb[i].list,
+ &inst->available_fb_node_list);
+ }
+}
+
+static void add_fb_to_free_list(struct vdec_vp8_inst *inst, void *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (fb) {
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_free_list);
+ }
+}
+
+static int alloc_working_buf(struct vdec_vp8_inst *inst)
+{
+ int err;
+ struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+ mem->size = VP8_WORKING_BUF_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "Cannot allocate working buffer");
+ return err;
+ }
+
+ inst->vsi->dec.working_buf_dma = (uint64_t)mem->dma_addr;
+ return 0;
+}
+
+static void free_working_buf(struct vdec_vp8_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ inst->vsi->dec.working_buf_dma = 0;
+}
+
+static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_vp8_inst *inst;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_VP8;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_vp8 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ inst->vsi = (struct vdec_vp8_vsi *)inst->vpu.vsi;
+ init_list(inst);
+ err = alloc_working_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ get_hw_reg_base(inst);
+ mtk_vcodec_debug(inst, "VP8 Instance >> %p", inst);
+
+ *h_vdec = (unsigned long)inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static int vdec_vp8_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+ struct vdec_vp8_dec_info *dec = &inst->vsi->dec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ unsigned char *bs_va;
+ unsigned int data;
+ int err = 0;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL) {
+ move_fb_list_use_to_free(inst);
+ return vpu_dec_reset(vpu);
+ }
+
+ y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx fb=%p",
+ inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
+
+ inst->cur_fb = fb;
+ dec->bs_dma = (unsigned long)bs->dma_addr;
+ dec->bs_sz = bs->size;
+ dec->cur_y_fb_dma = y_fb_dma;
+ dec->cur_c_fb_dma = c_fb_dma;
+
+ mtk_vcodec_debug(inst, "\n + FRAME[%d] +\n", inst->frm_cnt);
+
+ write_hw_segmentation_data(inst);
+ enable_hw_rw_function(inst);
+ store_dec_table(inst);
+
+ bs_va = (unsigned char *)bs->va;
+
+	/* retrieve width/height and scale info from header */
+ data = (*(bs_va + 9) << 24) | (*(bs_va + 8) << 16) |
+ (*(bs_va + 7) << 8) | *(bs_va + 6);
+ err = vpu_dec_start(vpu, &data, 1);
+ if (err) {
+ add_fb_to_free_list(inst, fb);
+ if (dec->wait_key_frame) {
+ mtk_vcodec_debug(inst, "wait key frame !");
+ return 0;
+ }
+
+ goto error;
+ }
+
+ if (dec->resolution_changed) {
+ mtk_vcodec_debug(inst, "- resolution_changed -");
+ *res_chg = true;
+ add_fb_to_free_list(inst, fb);
+ return 0;
+ }
+
+ /* wait decoder done interrupt */
+ mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+
+ if (inst->vsi->load_data)
+ load_dec_table(inst);
+
+ vp8_dec_finish(inst);
+ read_hw_segmentation_data(inst);
+
+ err = vpu_dec_end(vpu);
+ if (err)
+ goto error;
+
+ mtk_vcodec_debug(inst, "\n - FRAME[%d] - show=%d\n", inst->frm_cnt,
+ dec->show_frame);
+ inst->frm_cnt++;
+ *res_chg = false;
+ return 0;
+
+error:
+ mtk_vcodec_err(inst, "\n - FRAME[%d] - err=%d\n", inst->frm_cnt, err);
+ return err;
+}
+
+static void get_disp_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb;
+
+ node = list_first_entry_or_null(&inst->fb_disp_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_DISPLAY;
+ mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ fb = NULL;
+ mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void get_free_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb;
+
+ node = list_first_entry_or_null(&inst->fb_free_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_FREE;
+ mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ fb = NULL;
+ mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void get_crop_info(struct vdec_vp8_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = inst->vsi->pic.pic_w;
+ cr->height = inst->vsi->pic.pic_h;
+ mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp8_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ get_disp_fb(inst, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ get_free_fb(inst, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ *((unsigned int *)out) = VP8_DPB_SIZE;
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vdec_vp8_deinit(unsigned long h_vdec)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_working_buf(inst);
+ kfree(inst);
+}
+
+static struct vdec_common_if vdec_vp8_if = {
+ .init = vdec_vp8_init,
+ .decode = vdec_vp8_decode,
+ .get_param = vdec_vp8_get_param,
+ .deinit = vdec_vp8_deinit,
+};
+
+struct vdec_common_if *get_vp8_dec_comm_if(void);
+
+struct vdec_common_if *get_vp8_dec_comm_if(void)
+{
+ return &vdec_vp8_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
new file mode 100644
index 000000000..bc8349bc2
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -0,0 +1,1026 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Kai-Sean Yang <kai-sean.yang@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_vpu_if.h"
+
+#define VP9_SUPER_FRAME_BS_SZ 64
+#define MAX_VP9_DPB_SIZE 9
+
+#define REFS_PER_FRAME 3
+#define MAX_NUM_REF_FRAMES 8
+#define VP9_MAX_FRM_BUF_NUM 9
+#define VP9_MAX_FRM_BUF_NODE_NUM (VP9_MAX_FRM_BUF_NUM * 2)
+#define VP9_SEG_ID_SZ 0x12000
+
+/**
+ * struct vp9_dram_buf - contains buffer info for vpu
+ * @va : cpu address
+ * @pa : iova address
+ * @sz : buffer size
+ * @padding : for 64 bytes alignment
+ */
+struct vp9_dram_buf {
+ unsigned long va;
+ unsigned long pa;
+ unsigned int sz;
+ unsigned int padding;
+};
+
+/**
+ * struct vp9_fb_info - contains frame buffer info
+ * @fb : frmae buffer
+ * @fb : frame buffer
+ */
+struct vp9_fb_info {
+ struct vdec_fb *fb;
+ unsigned int reserved[32];
+};
+
+/**
+ * struct vp9_ref_cnt_buf - contains reference buffer information
+ * @buf : referenced frame buffer
+ * @ref_cnt : referenced frame buffer's reference count.
+ * When reference count=0, remove it from reference list
+ */
+struct vp9_ref_cnt_buf {
+ struct vp9_fb_info buf;
+ unsigned int ref_cnt;
+};
+
+/**
+ * struct vp9_ref_buf - contains current frame's reference buffer information
+ * @buf : reference buffer
+ * @idx : reference buffer index to frm_bufs
+ * @reserved : reserved field used by vpu
+ */
+struct vp9_ref_buf {
+ struct vp9_fb_info *buf;
+ unsigned int idx;
+ unsigned int reserved[6];
+};
+
+/**
+ * struct vp9_sf_ref_fb - contains super frame reference frame buffer info
+ * @fb : super frame reference frame buffer
+ * @used : this reference frame info entry is in use
+ * @padding : for 64 bytes size alignment
+ */
+struct vp9_sf_ref_fb {
+ struct vdec_fb fb;
+ int used;
+ int padding;
+};
+
+/*
+ * struct vdec_vp9_vsi - shared buffer between host and VPU firmware
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @sf_bs_buf : super frame backup buffer (AP-W, VPU-R)
+ * @sf_ref_fb : record super frame reference buffer information
+ * (AP-R/W, VPU-R/W)
+ * @sf_next_ref_fb_idx : next available super frame (AP-W, VPU-R)
+ * @sf_frm_cnt : super frame count, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_offset : super frame offset, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_sz : super frame size, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_idx : current super frame (AP-R, VPU-W)
+ * @sf_init : inform super frame info already parsed by vpu (AP-R, VPU-W)
+ * @fb : capture buffer (AP-W, VPU-R)
+ * @bs : bs buffer (AP-W, VPU-R)
+ * @cur_fb : current show capture buffer (AP-R/W, VPU-R/W)
+ * @pic_w : picture width (AP-R, VPU-W)
+ * @pic_h : picture height (AP-R, VPU-W)
+ * @buf_w : coded width (AP-R, VPU-W)
+ * @buf_h : coded height (AP-R, VPU-W)
+ * @buf_sz_y_bs : ufo compressed y plane size (AP-R, VPU-W)
+ * @buf_sz_c_bs : ufo compressed cbcr plane size (AP-R, VPU-W)
+ * @buf_len_sz_y : size used to store y plane ufo info (AP-R, VPU-W)
+ * @buf_len_sz_c : size used to store cbcr plane ufo info (AP-R, VPU-W)
+ *
+ * @profile : profile parsed from vpu (AP-R, VPU-W)
+ * @show_frame : display this frame or not (AP-R, VPU-W)
+ * @show_existing_frame : inform this frame is show existing frame
+ * (AP-R, VPU-W)
+ * @frm_to_show_idx : index to show frame (AP-R, VPU-W)
+ *
+ * @refresh_frm_flags : indicate when frame need to refine reference count
+ * (AP-R, VPU-W)
+ * @resolution_changed : resolution change in this frame (AP-R, VPU-W)
+ *
+ * @frm_bufs : maintain reference buffer info (AP-R/W, VPU-R/W)
+ * @ref_frm_map : maintain reference buffer map info (AP-R/W, VPU-R/W)
+ * @new_fb_idx : index to frm_bufs array (AP-R, VPU-W)
+ * @frm_num : decoded frame number, include sub-frame count (AP-R, VPU-W)
+ * @mv_buf : motion vector working buffer (AP-W, VPU-R)
+ * @frm_refs : maintain three reference buffer info (AP-R/W, VPU-R/W)
+ * @seg_id_buf : segmentation map working buffer (AP-W, VPU-R)
+ */
+struct vdec_vp9_vsi {
+ unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
+ struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+ int sf_next_ref_fb_idx;
+ unsigned int sf_frm_cnt;
+ unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_idx;
+ unsigned int sf_init;
+ struct vdec_fb fb;
+ struct mtk_vcodec_mem bs;
+ struct vdec_fb cur_fb;
+ unsigned int pic_w;
+ unsigned int pic_h;
+ unsigned int buf_w;
+ unsigned int buf_h;
+ unsigned int buf_sz_y_bs;
+ unsigned int buf_sz_c_bs;
+ unsigned int buf_len_sz_y;
+ unsigned int buf_len_sz_c;
+ unsigned int profile;
+ unsigned int show_frame;
+ unsigned int show_existing_frame;
+ unsigned int frm_to_show_idx;
+ unsigned int refresh_frm_flags;
+ unsigned int resolution_changed;
+
+ struct vp9_ref_cnt_buf frm_bufs[VP9_MAX_FRM_BUF_NUM];
+ int ref_frm_map[MAX_NUM_REF_FRAMES];
+ unsigned int new_fb_idx;
+ unsigned int frm_num;
+ struct vp9_dram_buf mv_buf;
+
+ struct vp9_ref_buf frm_refs[REFS_PER_FRAME];
+ struct vp9_dram_buf seg_id_buf;
+
+};
+
+/*
+ * struct vdec_vp9_inst - vp9 decode instance
+ * @mv_buf : working buffer for mv
+ * @seg_id_buf : working buffer for segmentation map
+ * @dec_fb : vdec_fb node to link fb to different fb_xxx_list
+ * @available_fb_node_list : current available vdec_fb node
+ * @fb_use_list : current used or referenced vdec_fb
+ * @fb_free_list : current available to free vdec_fb
+ * @fb_disp_list : current available to display vdec_fb
+ * @cur_fb : current frame buffer
+ * @ctx : current decode context
+ * @vpu : vpu instance information
+ * @vsi : shared buffer between host and VPU firmware
+ * @total_frm_cnt : total frame count; it does not include sub-frames in a
+ *                  super frame
+ * @mem : instance memory information
+ */
+struct vdec_vp9_inst {
+ struct mtk_vcodec_mem mv_buf;
+ struct mtk_vcodec_mem seg_id_buf;
+
+ struct vdec_fb_node dec_fb[VP9_MAX_FRM_BUF_NODE_NUM];
+ struct list_head available_fb_node_list;
+ struct list_head fb_use_list;
+ struct list_head fb_free_list;
+ struct list_head fb_disp_list;
+ struct vdec_fb *cur_fb;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_vpu_inst vpu;
+ struct vdec_vp9_vsi *vsi;
+ unsigned int total_frm_cnt;
+ struct mtk_vcodec_mem mem;
+};
+
+static bool vp9_is_sf_ref_fb(struct vdec_vp9_inst *inst, struct vdec_fb *fb)
+{
+ int i;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+ if (fb == &vsi->sf_ref_fb[i].fb)
+ return true;
+ }
+ return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
+ *inst, void *addr)
+{
+ struct vdec_fb *fb = NULL;
+ struct vdec_fb_node *node;
+
+ list_for_each_entry(node, &inst->fb_use_list, list) {
+ fb = (struct vdec_fb *)node->fb;
+ if (fb->base_y.va == addr) {
+ list_move_tail(&node->list,
+ &inst->available_fb_node_list);
+ break;
+ }
+ }
+ return fb;
+}
+
+static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (fb) {
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_free_list);
+ }
+ } else {
+ mtk_vcodec_debug(inst, "No free fb node");
+ }
+}
+
+static void vp9_free_sf_ref_fb(struct vdec_fb *fb)
+{
+ struct vp9_sf_ref_fb *sf_ref_fb =
+ container_of(fb, struct vp9_sf_ref_fb, fb);
+
+ sf_ref_fb->used = 0;
+}
+
+static void vp9_ref_cnt_fb(struct vdec_vp9_inst *inst, int *idx,
+ int new_idx)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ int ref_idx = *idx;
+
+ if (ref_idx >= 0 && vsi->frm_bufs[ref_idx].ref_cnt > 0) {
+ vsi->frm_bufs[ref_idx].ref_cnt--;
+
+ if (vsi->frm_bufs[ref_idx].ref_cnt == 0) {
+ if (!vp9_is_sf_ref_fb(inst,
+ vsi->frm_bufs[ref_idx].buf.fb)) {
+ struct vdec_fb *fb;
+
+ fb = vp9_rm_from_fb_use_list(inst,
+ vsi->frm_bufs[ref_idx].buf.fb->base_y.va);
+ vp9_add_to_fb_free_list(inst, fb);
+ } else
+ vp9_free_sf_ref_fb(
+ vsi->frm_bufs[ref_idx].buf.fb);
+ }
+ }
+
+ *idx = new_idx;
+ vsi->frm_bufs[new_idx].ref_cnt++;
+}
+
+static void vp9_free_all_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+ int i;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+ if (vsi->sf_ref_fb[i].fb.base_y.va) {
+ mtk_vcodec_mem_free(inst->ctx,
+ &vsi->sf_ref_fb[i].fb.base_y);
+ mtk_vcodec_mem_free(inst->ctx,
+ &vsi->sf_ref_fb[i].fb.base_c);
+ vsi->sf_ref_fb[i].used = 0;
+ }
+ }
+}
+
+/* For each sub-frame except the last one, the driver will dynamically
+ * allocate a reference buffer by calling vp9_get_sf_ref_fb().
+ * The last sub-frame will use the original fb provided by the
+ * vp9_dec_decode() interface.
+ */
+static int vp9_get_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+ int idx;
+ struct mtk_vcodec_mem *mem_basy_y;
+ struct mtk_vcodec_mem *mem_basy_c;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
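+	/* First pass: reuse an already-allocated slot that is not in use. */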
+ for (idx = 0;
+ idx < ARRAY_SIZE(vsi->sf_ref_fb);
+ idx++) {
+ if (vsi->sf_ref_fb[idx].fb.base_y.va &&
+ vsi->sf_ref_fb[idx].used == 0) {
+ return idx;
+ }
+ }
+
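+	/*
+	 * Second pass: take the first never-allocated slot and allocate its
+	 * luma/chroma buffers; fail if all slots have already been allocated.
+	 */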
+ for (idx = 0;
+ idx < ARRAY_SIZE(vsi->sf_ref_fb);
+ idx++) {
+ if (vsi->sf_ref_fb[idx].fb.base_y.va == NULL)
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(vsi->sf_ref_fb)) {
+ mtk_vcodec_err(inst, "List Full");
+ return -1;
+ }
+
+ mem_basy_y = &vsi->sf_ref_fb[idx].fb.base_y;
+ mem_basy_y->size = vsi->buf_sz_y_bs +
+ vsi->buf_len_sz_y;
+
+ if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_y)) {
+ mtk_vcodec_err(inst, "Cannot allocate sf_ref_buf y_buf");
+ return -1;
+ }
+
+ mem_basy_c = &vsi->sf_ref_fb[idx].fb.base_c;
+ mem_basy_c->size = vsi->buf_sz_c_bs +
+ vsi->buf_len_sz_c;
+
+ if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_c)) {
+ mtk_vcodec_err(inst, "Cannot allocate sf_ref_fb c_buf");
+ return -1;
+ }
+ vsi->sf_ref_fb[idx].used = 0;
+
+ return idx;
+}
+
+static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ int result;
+ struct mtk_vcodec_mem *mem;
+
+ unsigned int max_pic_w;
+ unsigned int max_pic_h;
+
+
+ if (!(inst->ctx->dev->dec_capability &
+ VCODEC_CAPABILITY_4K_DISABLED)) {
+ max_pic_w = VCODEC_DEC_4K_CODED_WIDTH;
+ max_pic_h = VCODEC_DEC_4K_CODED_HEIGHT;
+ } else {
+ max_pic_w = MTK_VDEC_MAX_W;
+ max_pic_h = MTK_VDEC_MAX_H;
+ }
+
+ if ((vsi->pic_w > max_pic_w) ||
+ (vsi->pic_h > max_pic_h)) {
+ mtk_vcodec_err(inst, "Invalid w/h %d/%d",
+ vsi->pic_w, vsi->pic_h);
+ return false;
+ }
+
+ mtk_vcodec_debug(inst, "BUF CHG(%d): w/h/sb_w/sb_h=%d/%d/%d/%d",
+ vsi->resolution_changed,
+ vsi->pic_w,
+ vsi->pic_h,
+ vsi->buf_w,
+ vsi->buf_h);
+
+ mem = &inst->mv_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
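+	/*
+	 * Size the motion-vector working buffer per 64x64 superblock of the
+	 * coded frame; the extra two blocks and the 36 * 16 bytes per block
+	 * look like hardware-required headroom and per-superblock MV storage
+	 * (assumption, the exact layout is not documented here).
+	 */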
+ mem->size = ((vsi->buf_w / 64) *
+ (vsi->buf_h / 64) + 2) * 36 * 16;
+ result = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (result) {
+ mem->size = 0;
+ mtk_vcodec_err(inst, "Cannot allocate mv_buf");
+ return false;
+ }
+ /* Set the va again */
+ vsi->mv_buf.va = (unsigned long)mem->va;
+ vsi->mv_buf.pa = (unsigned long)mem->dma_addr;
+ vsi->mv_buf.sz = (unsigned int)mem->size;
+
+
+ mem = &inst->seg_id_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ mem->size = VP9_SEG_ID_SZ;
+ result = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (result) {
+ mem->size = 0;
+ mtk_vcodec_err(inst, "Cannot allocate seg_id_buf");
+ return false;
+ }
+ /* Set the va again */
+ vsi->seg_id_buf.va = (unsigned long)mem->va;
+ vsi->seg_id_buf.pa = (unsigned long)mem->dma_addr;
+ vsi->seg_id_buf.sz = (unsigned int)mem->size;
+
+
+ vp9_free_all_sf_ref_fb(inst);
+ vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+ return true;
+}
+
+static bool vp9_add_to_fb_disp_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (!fb) {
+ mtk_vcodec_err(inst, "fb == NULL");
+ return false;
+ }
+
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_disp_list);
+ } else {
+ mtk_vcodec_err(inst, "No available fb node");
+ return false;
+ }
+
+ return true;
+}
+
+/* If any buffer updating is signaled it should be done here. */
+static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ struct vp9_fb_info *frm_to_show;
+ int ref_index = 0, mask;
+
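+	/*
+	 * Each set bit in refresh_frm_flags re-points the corresponding
+	 * reference-map slot at the newly decoded frame, releasing the old
+	 * frame's reference and taking one on new_fb_idx.
+	 */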
+ for (mask = vsi->refresh_frm_flags; mask; mask >>= 1) {
+ if (mask & 1)
+ vp9_ref_cnt_fb(inst, &vsi->ref_frm_map[ref_index],
+ vsi->new_fb_idx);
+ ++ref_index;
+ }
+
+ frm_to_show = &vsi->frm_bufs[vsi->new_fb_idx].buf;
+ vsi->frm_bufs[vsi->new_fb_idx].ref_cnt--;
+
+ if (frm_to_show->fb != inst->cur_fb) {
+		/* This frame shows an existing frame and produces no decode
+		 * output: copy frame data from frm_to_show to the current
+		 * CAPTURE buffer.
+		 */
+ if ((frm_to_show->fb != NULL) &&
+ (inst->cur_fb->base_y.size >=
+ frm_to_show->fb->base_y.size)) {
+ memcpy((void *)inst->cur_fb->base_y.va,
+ (void *)frm_to_show->fb->base_y.va,
+ vsi->buf_w *
+ vsi->buf_h);
+ memcpy((void *)inst->cur_fb->base_c.va,
+ (void *)frm_to_show->fb->base_c.va,
+ vsi->buf_w *
+ vsi->buf_h / 2);
+ } else {
+			/* After a resolution change, the current CAPTURE
+			 * buffer may be smaller than the frm_to_show buffer.
+			 */
+ if (frm_to_show->fb != NULL)
+ mtk_vcodec_err(inst,
+ "inst->cur_fb->base_y.size=%zu, frm_to_show->fb.base_y.size=%zu",
+ inst->cur_fb->base_y.size,
+ frm_to_show->fb->base_y.size);
+ }
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+ if (vsi->show_frame)
+ vp9_add_to_fb_disp_list(inst, inst->cur_fb);
+ }
+ } else {
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+ if (vsi->show_frame)
+ vp9_add_to_fb_disp_list(inst, frm_to_show->fb);
+ }
+ }
+
+	/* When ref_cnt == 0, move this fb to fb_free_list. The v4l2 driver
+	 * will clean fb_free_list.
+	 */
+ if (vsi->frm_bufs[vsi->new_fb_idx].ref_cnt == 0) {
+ if (!vp9_is_sf_ref_fb(
+ inst, vsi->frm_bufs[vsi->new_fb_idx].buf.fb)) {
+ struct vdec_fb *fb;
+
+ fb = vp9_rm_from_fb_use_list(inst,
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb->base_y.va);
+
+ vp9_add_to_fb_free_list(inst, fb);
+ } else {
+ vp9_free_sf_ref_fb(
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb);
+ }
+ }
+
+	/* If this is a super frame and it is not the last sub-frame, get the
+	 * next fb for sub-frame decoding.
+	 */
+ if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+ vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+}
+
+static bool vp9_wait_dec_end(struct vdec_vp9_inst *inst)
+{
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+
+ if (ctx->irq_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+ return true;
+ else
+ return false;
+}
+
+static struct vdec_vp9_inst *vp9_alloc_inst(struct mtk_vcodec_ctx *ctx)
+{
+ int result;
+ struct mtk_vcodec_mem mem;
+ struct vdec_vp9_inst *inst;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.size = sizeof(struct vdec_vp9_inst);
+ result = mtk_vcodec_mem_alloc(ctx, &mem);
+ if (result)
+ return NULL;
+
+ inst = mem.va;
+ inst->mem = mem;
+
+ return inst;
+}
+
+static void vp9_free_inst(struct vdec_vp9_inst *inst)
+{
+ struct mtk_vcodec_mem mem;
+
+ mem = inst->mem;
+ if (mem.va)
+ mtk_vcodec_mem_free(inst->ctx, &mem);
+}
+
+static bool vp9_decode_end_proc(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ bool ret = false;
+
+ if (!vsi->show_existing_frame) {
+ ret = vp9_wait_dec_end(inst);
+ if (!ret) {
+ mtk_vcodec_err(inst, "Decode failed, Decode Timeout @[%d]",
+ vsi->frm_num);
+ return false;
+ }
+
+ if (vpu_dec_end(&inst->vpu)) {
+ mtk_vcodec_err(inst, "vp9_dec_vpu_end failed");
+ return false;
+ }
+ mtk_vcodec_debug(inst, "Decode Ok @%d (%d/%d)", vsi->frm_num,
+ vsi->pic_w, vsi->pic_h);
+ } else {
+ mtk_vcodec_debug(inst, "Decode Ok @%d (show_existing_frame)",
+ vsi->frm_num);
+ }
+
+ vp9_swap_frm_bufs(inst);
+ vsi->frm_num++;
+ return true;
+}
+
+static bool vp9_is_last_sub_frm(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ if (vsi->sf_frm_cnt <= 0 || vsi->sf_frm_idx == vsi->sf_frm_cnt)
+ return true;
+
+ return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_disp_list(struct vdec_vp9_inst *inst)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb = NULL;
+
+ node = list_first_entry_or_null(&inst->fb_disp_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_DISPLAY;
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+ node->fb, fb->status);
+ } else
+ mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+
+ return fb;
+}
+
+static bool vp9_add_to_fb_use_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (!fb) {
+ mtk_vcodec_debug(inst, "fb == NULL");
+ return false;
+ }
+
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_use_list);
+ } else {
+ mtk_vcodec_err(inst, "No free fb node");
+ return false;
+ }
+ return true;
+}
+
+static void vp9_reset(struct vdec_vp9_inst *inst)
+{
+ struct vdec_fb_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+ list_move_tail(&node->list, &inst->fb_free_list);
+
+ vp9_free_all_sf_ref_fb(inst);
+ inst->vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+ if (vpu_dec_reset(&inst->vpu))
+ mtk_vcodec_err(inst, "vp9_dec_vpu_reset failed");
+
+ /* Set the va again, since vpu_dec_reset will clear mv_buf in vpu */
+ inst->vsi->mv_buf.va = (unsigned long)inst->mv_buf.va;
+ inst->vsi->mv_buf.pa = (unsigned long)inst->mv_buf.dma_addr;
+ inst->vsi->mv_buf.sz = (unsigned long)inst->mv_buf.size;
+
+ /* Set the va again, since vpu_dec_reset will clear seg_id_buf in vpu */
+ inst->vsi->seg_id_buf.va = (unsigned long)inst->seg_id_buf.va;
+ inst->vsi->seg_id_buf.pa = (unsigned long)inst->seg_id_buf.dma_addr;
+ inst->vsi->seg_id_buf.sz = (unsigned long)inst->seg_id_buf.size;
+
+}
+
+static void init_all_fb_lists(struct vdec_vp9_inst *inst)
+{
+ int i;
+
+ INIT_LIST_HEAD(&inst->available_fb_node_list);
+ INIT_LIST_HEAD(&inst->fb_use_list);
+ INIT_LIST_HEAD(&inst->fb_free_list);
+ INIT_LIST_HEAD(&inst->fb_disp_list);
+
+ for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+ INIT_LIST_HEAD(&inst->dec_fb[i].list);
+ inst->dec_fb[i].fb = NULL;
+ list_add_tail(&inst->dec_fb[i].list,
+ &inst->available_fb_node_list);
+ }
+}
+
+static void get_pic_info(struct vdec_vp9_inst *inst, struct vdec_pic_info *pic)
+{
+ pic->y_bs_sz = inst->vsi->buf_sz_y_bs;
+ pic->c_bs_sz = inst->vsi->buf_sz_c_bs;
+ pic->y_len_sz = inst->vsi->buf_len_sz_y;
+ pic->c_len_sz = inst->vsi->buf_len_sz_c;
+
+ pic->pic_w = inst->vsi->pic_w;
+ pic->pic_h = inst->vsi->pic_h;
+ pic->buf_w = inst->vsi->buf_w;
+ pic->buf_h = inst->vsi->buf_h;
+
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_disp_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+
+ *out_fb = vp9_rm_from_fb_disp_list(inst);
+ if (*out_fb)
+ (*out_fb)->status |= FB_ST_DISPLAY;
+}
+
+static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb = NULL;
+
+ node = list_first_entry_or_null(&inst->fb_free_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_FREE;
+ mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ }
+
+ *out_fb = fb;
+}
+
+static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
+				      struct vdec_vp9_vsi *vsi)
+{
+ if (vsi->sf_frm_idx >= VP9_MAX_FRM_BUF_NUM - 1) {
+ mtk_vcodec_err(inst, "Invalid vsi->sf_frm_idx=%u.",
+ vsi->sf_frm_idx);
+ return -EIO;
+ }
+ if (vsi->frm_to_show_idx >= VP9_MAX_FRM_BUF_NUM) {
+ mtk_vcodec_err(inst, "Invalid vsi->frm_to_show_idx=%u.",
+ vsi->frm_to_show_idx);
+ return -EIO;
+ }
+ if (vsi->new_fb_idx >= VP9_MAX_FRM_BUF_NUM) {
+ mtk_vcodec_err(inst, "Invalid vsi->new_fb_idx=%u.",
+ vsi->new_fb_idx);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void vdec_vp9_deinit(unsigned long h_vdec)
+{
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ struct mtk_vcodec_mem *mem;
+ int ret = 0;
+
+ ret = vpu_dec_deinit(&inst->vpu);
+ if (ret)
+ mtk_vcodec_err(inst, "vpu_dec_deinit failed");
+
+ mem = &inst->mv_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ mem = &inst->seg_id_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ vp9_free_all_sf_ref_fb(inst);
+ vp9_free_inst(inst);
+}
+
+static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_vp9_inst *inst;
+
+ inst = vp9_alloc_inst(ctx);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->total_frm_cnt = 0;
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_VP9;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ if (vpu_dec_init(&inst->vpu)) {
+ mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
+ goto err_deinit_inst;
+ }
+
+ inst->vsi = (struct vdec_vp9_vsi *)inst->vpu.vsi;
+ init_all_fb_lists(inst);
+
+ (*h_vdec) = (unsigned long)inst;
+ return 0;
+
+err_deinit_inst:
+ vp9_free_inst(inst);
+
+ return -EINVAL;
+}
+
+static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ int ret = 0;
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ u32 data[3];
+ int i;
+
+ *res_chg = false;
+
+ if ((bs == NULL) && (fb == NULL)) {
+ mtk_vcodec_debug(inst, "[EOS]");
+ vp9_reset(inst);
+ return ret;
+ }
+
+ if (bs == NULL) {
+ mtk_vcodec_err(inst, "bs == NULL");
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug(inst, "Input BS Size = %zu", bs->size);
+
+ while (1) {
+ struct vdec_fb *cur_fb = NULL;
+
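+		/*
+		 * The first three words of the bitstream are passed to the
+		 * VPU as the AP_IPIMSG_DEC_START header info (see
+		 * vdec_ap_ipi_dec_start; for VP9 they carry profile and
+		 * picture size information).
+		 */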
+ data[0] = *((unsigned int *)bs->va);
+ data[1] = *((unsigned int *)(bs->va + 4));
+ data[2] = *((unsigned int *)(bs->va + 8));
+
+ vsi->bs = *bs;
+
+ if (fb)
+ vsi->fb = *fb;
+
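+		/*
+		 * On the first pass, stash the tail of the bitstream in
+		 * sf_bs_buf so the VPU can parse the superframe index (VP9
+		 * appends it at the end of the stream); on later passes, move
+		 * the current sub-frame to the start of the buffer for
+		 * decoding.
+		 */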
+ if (!vsi->sf_init) {
+ unsigned int sf_bs_sz;
+ unsigned int sf_bs_off;
+ unsigned char *sf_bs_src;
+ unsigned char *sf_bs_dst;
+
+ sf_bs_sz = bs->size > VP9_SUPER_FRAME_BS_SZ ?
+ VP9_SUPER_FRAME_BS_SZ : bs->size;
+ sf_bs_off = VP9_SUPER_FRAME_BS_SZ - sf_bs_sz;
+ sf_bs_src = bs->va + bs->size - sf_bs_sz;
+ sf_bs_dst = vsi->sf_bs_buf + sf_bs_off;
+ memcpy(sf_bs_dst, sf_bs_src, sf_bs_sz);
+ } else {
+ if ((vsi->sf_frm_cnt > 0) &&
+ (vsi->sf_frm_idx < vsi->sf_frm_cnt)) {
+ unsigned int idx = vsi->sf_frm_idx;
+
+ memcpy((void *)bs->va,
+ (void *)(bs->va +
+ vsi->sf_frm_offset[idx]),
+ vsi->sf_frm_sz[idx]);
+ }
+ }
+ memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
+ ret = vpu_dec_start(&inst->vpu, data, 3);
+ if (ret) {
+ mtk_vcodec_err(inst, "vpu_dec_start failed");
+ goto DECODE_ERROR;
+ }
+
+ ret = validate_vsi_array_indexes(inst, vsi);
+ if (ret) {
+ mtk_vcodec_err(inst, "Invalid values from VPU.");
+ goto DECODE_ERROR;
+ }
+
+ if (vsi->resolution_changed) {
+ if (!vp9_alloc_work_buf(inst)) {
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+ }
+
+ if (vsi->sf_frm_cnt > 0) {
+ cur_fb = &vsi->sf_ref_fb[vsi->sf_next_ref_fb_idx].fb;
+
+ if (vsi->sf_frm_idx < vsi->sf_frm_cnt)
+ inst->cur_fb = cur_fb;
+ else
+ inst->cur_fb = fb;
+ } else {
+ inst->cur_fb = fb;
+ }
+
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb = inst->cur_fb;
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb))
+ vp9_add_to_fb_use_list(inst, inst->cur_fb);
+
+ mtk_vcodec_debug(inst, "[#pic %d]", vsi->frm_num);
+
+ if (vsi->show_existing_frame)
+ mtk_vcodec_debug(inst,
+ "drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+ if (vsi->show_existing_frame && (vsi->frm_to_show_idx <
+ VP9_MAX_FRM_BUF_NUM)) {
+ mtk_vcodec_err(inst,
+ "Skip Decode drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+ vp9_ref_cnt_fb(inst, &vsi->new_fb_idx,
+ vsi->frm_to_show_idx);
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+
+		/* The VPU assigns the buffer pointers in its own address
+		 * space; reassign them here.
+		 */
+ for (i = 0; i < ARRAY_SIZE(vsi->frm_refs); i++) {
+ unsigned int idx = vsi->frm_refs[i].idx;
+
+ vsi->frm_refs[i].buf = &vsi->frm_bufs[idx].buf;
+ }
+
+ if (vsi->resolution_changed) {
+ *res_chg = true;
+ mtk_vcodec_debug(inst, "VDEC_ST_RESOLUTION_CHANGED");
+
+ ret = 0;
+ goto DECODE_ERROR;
+ }
+
+ if (vp9_decode_end_proc(inst) != true) {
+ mtk_vcodec_err(inst, "vp9_decode_end_proc");
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+
+ if (vp9_is_last_sub_frm(inst))
+ break;
+
+ }
+ inst->total_frm_cnt++;
+
+DECODE_ERROR:
+ if (ret < 0)
+ vp9_add_to_fb_free_list(inst, fb);
+
+ return ret;
+}
+
+static void get_crop_info(struct vdec_vp9_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = inst->vsi->pic_w;
+ cr->height = inst->vsi->pic_h;
+ mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d\n",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp9_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ int ret = 0;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ get_disp_fb(inst, out);
+ break;
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ get_free_fb(inst, out);
+ break;
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+ case GET_PARAM_DPB_SIZE:
+ *((unsigned int *)out) = MAX_VP9_DPB_SIZE;
+ break;
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+ default:
+ mtk_vcodec_err(inst, "not supported param type %d", type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct vdec_common_if vdec_vp9_if = {
+ .init = vdec_vp9_init,
+ .decode = vdec_vp9_decode,
+ .get_param = vdec_vp9_get_param,
+ .deinit = vdec_vp9_deinit,
+};
+
+struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+struct vdec_common_if *get_vp9_dec_comm_if(void)
+{
+ return &vdec_vp9_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
new file mode 100644
index 000000000..7e4c1a92b
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_BASE_
+#define _VDEC_DRV_BASE_
+
+#include "mtk_vcodec_drv.h"
+
+#include "vdec_drv_if.h"
+
+struct vdec_common_if {
+ /**
+ * (*init)() - initialize decode driver
+ * @ctx : [in] mtk v4l2 context
+ * @h_vdec : [out] driver handle
+ */
+ int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec);
+
+ /**
+ * (*decode)() - trigger decode
+ * @h_vdec : [in] driver handle
+ * @bs : [in] input bitstream
+ * @fb : [in] frame buffer to store decoded frame
+ * @res_chg : [out] resolution change happen
+ */
+ int (*decode)(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg);
+
+ /**
+ * (*get_param)() - get driver's parameter
+ * @h_vdec : [in] driver handle
+ * @type : [in] input parameter type
+ * @out : [out] buffer to store query result
+ */
+ int (*get_param)(unsigned long h_vdec, enum vdec_get_param_type type,
+ void *out);
+
+ /**
+ * (*deinit)() - deinitialize driver.
+ * @h_vdec : [in] driver handle to be deinit
+ */
+ void (*deinit)(unsigned long h_vdec);
+};
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
new file mode 100644
index 000000000..5ffc468dd
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec.h"
+#include "vdec_drv_base.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vpu.h"
+
+const struct vdec_common_if *get_h264_dec_comm_if(void);
+const struct vdec_common_if *get_vp8_dec_comm_if(void);
+const struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+{
+ int ret = 0;
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_H264:
+ ctx->dec_if = get_h264_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_VP8:
+ ctx->dec_if = get_vp8_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_VP9:
+ ctx->dec_if = get_vp9_dec_comm_if();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mtk_vdec_lock(ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ ret = ctx->dec_if->init(ctx, &ctx->drv_handle);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ int ret = 0;
+
+ if (bs) {
+ if ((bs->dma_addr & 63) != 0) {
+ mtk_v4l2_err("bs dma_addr should 64 byte align");
+ return -EINVAL;
+ }
+ }
+
+ if (fb) {
+ if (((fb->base_y.dma_addr & 511) != 0) ||
+ ((fb->base_c.dma_addr & 511) != 0)) {
+ mtk_v4l2_err("frame buffer dma_addr should 512 byte align");
+ return -EINVAL;
+ }
+ }
+
+ if (ctx->drv_handle == 0)
+ return -EIO;
+
+ mtk_vdec_lock(ctx);
+
+ mtk_vcodec_set_curr_ctx(ctx->dev, ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ enable_irq(ctx->dev->dec_irq);
+ ret = ctx->dec_if->decode(ctx->drv_handle, bs, fb, res_chg);
+ disable_irq(ctx->dev->dec_irq);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vcodec_set_curr_ctx(ctx->dev, NULL);
+
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+ void *out)
+{
+ int ret = 0;
+
+ if (ctx->drv_handle == 0)
+ return -EIO;
+
+ mtk_vdec_lock(ctx);
+ ret = ctx->dec_if->get_param(ctx->drv_handle, type, out);
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
+{
+ if (ctx->drv_handle == 0)
+ return;
+
+ mtk_vdec_lock(ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ ctx->dec_if->deinit(ctx->drv_handle);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vdec_unlock(ctx);
+
+ ctx->drv_handle = 0;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
new file mode 100644
index 000000000..ded115448
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_IF_H_
+#define _VDEC_DRV_IF_H_
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_util.h"
+
+
+/**
+ * struct vdec_fb_status - decoder frame buffer status
+ * @FB_ST_NORMAL : initial state
+ * @FB_ST_DISPLAY : frame buffer is ready to be displayed
+ * @FB_ST_FREE : frame buffer is not used by decoder any more
+ */
+enum vdec_fb_status {
+ FB_ST_NORMAL = 0,
+ FB_ST_DISPLAY = (1 << 0),
+ FB_ST_FREE = (1 << 1)
+};
+
+/* For GET_PARAM_DISP_FRAME_BUFFER and GET_PARAM_FREE_FRAME_BUFFER,
+ * the caller does not own the returned buffer. The buffer will not be
+ * released before vdec_if_deinit.
+ * GET_PARAM_DISP_FRAME_BUFFER : get next displayable frame buffer,
+ * struct vdec_fb**
+ * GET_PARAM_FREE_FRAME_BUFFER : get non-referenced framebuffer, vdec_fb**
+ * GET_PARAM_PIC_INFO : get picture info, struct vdec_pic_info*
+ * GET_PARAM_CROP_INFO : get crop info, struct v4l2_crop*
+ * GET_PARAM_DPB_SIZE : get dpb size, unsigned int*
+ */
+enum vdec_get_param_type {
+ GET_PARAM_DISP_FRAME_BUFFER,
+ GET_PARAM_FREE_FRAME_BUFFER,
+ GET_PARAM_PIC_INFO,
+ GET_PARAM_CROP_INFO,
+ GET_PARAM_DPB_SIZE
+};
+
+/**
+ * struct vdec_fb_node - decoder frame buffer node
+ * @list : list to hold this node
+ * @fb : points to a frame buffer (vdec_fb); fb could point to a frame buffer
+ *	or a working buffer. This is for maintaining buffers in different states.
+ */
+struct vdec_fb_node {
+ struct list_head list;
+ struct vdec_fb *fb;
+};
+
+/**
+ * vdec_if_init() - initialize decode driver
+ * @ctx : [in] v4l2 context
+ * @fourcc : [in] video format fourcc, V4L2_PIX_FMT_H264/VP8/VP9..
+ */
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+
+/**
+ * vdec_if_deinit() - deinitialize decode driver
+ * @ctx : [in] v4l2 context
+ *
+ */
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
+
+/**
+ * vdec_if_decode() - trigger decode
+ * @ctx : [in] v4l2 context
+ * @bs : [in] input bitstream
+ * @fb : [in] frame buffer to store decoded frame; when NULL, it means parse
+ *	header only
+ * @res_chg : [out] resolution change happens if the current bs has a different
+ *	picture width/height
+ * Note: To flush the decoder when reaching EOF, set input bitstream as NULL.
+ *
+ * Return: 0 on success. -EIO on unrecoverable error.
+ */
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg);
+
+/**
+ * vdec_if_get_param() - get driver's parameter
+ * @ctx : [in] v4l2 context
+ * @type : [in] input parameter type
+ * @out : [out] buffer to store query result
+ */
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+ void *out);
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
new file mode 100644
index 000000000..5a8a629f4
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_IPI_MSG_H_
+#define _VDEC_IPI_MSG_H_
+
+/**
+ * enum vdec_ipi_msgid - message id between AP and VPU
+ * @AP_IPIMSG_XXX : AP to VPU cmd message id
+ * @VPU_IPIMSG_XXX_ACK : VPU ack AP cmd message id
+ */
+enum vdec_ipi_msgid {
+ AP_IPIMSG_DEC_INIT = 0xA000,
+ AP_IPIMSG_DEC_START = 0xA001,
+ AP_IPIMSG_DEC_END = 0xA002,
+ AP_IPIMSG_DEC_DEINIT = 0xA003,
+ AP_IPIMSG_DEC_RESET = 0xA004,
+
+ VPU_IPIMSG_DEC_INIT_ACK = 0xB000,
+ VPU_IPIMSG_DEC_START_ACK = 0xB001,
+ VPU_IPIMSG_DEC_END_ACK = 0xB002,
+ VPU_IPIMSG_DEC_DEINIT_ACK = 0xB003,
+ VPU_IPIMSG_DEC_RESET_ACK = 0xB004,
+};
+
+/**
+ * struct vdec_ap_ipi_cmd - generic AP to VPU ipi command format
+ * @msg_id : vdec_ipi_msgid
+ * @vpu_inst_addr : VPU decoder instance address
+ */
+struct vdec_ap_ipi_cmd {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct vdec_vpu_ipi_ack - generic VPU to AP ipi command format
+ * @msg_id : vdec_ipi_msgid
+ * @status : VPU execution result
+ * @ap_inst_addr : AP video decoder instance address
+ */
+struct vdec_vpu_ipi_ack {
+ uint32_t msg_id;
+ int32_t status;
+ uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_init - for AP_IPIMSG_DEC_INIT
+ * @msg_id : AP_IPIMSG_DEC_INIT
+ * @reserved : Reserved field
+ * @ap_inst_addr : AP video decoder instance address
+ */
+struct vdec_ap_ipi_init {
+ uint32_t msg_id;
+ uint32_t reserved;
+ uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_dec_start - for AP_IPIMSG_DEC_START
+ * @msg_id : AP_IPIMSG_DEC_START
+ * @vpu_inst_addr : VPU decoder instance address
+ * @data : Header info
+ * H264 decoder [0]:buf_sz [1]:nal_start
+ * VP8 decoder [0]:width/height
+ * VP9 decoder [0]:profile, [1][2] width/height
+ * @reserved : Reserved field
+ */
+struct vdec_ap_ipi_dec_start {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+ uint32_t data[3];
+ uint32_t reserved;
+};
+
+/**
+ * struct vdec_vpu_ipi_init_ack - for VPU_IPIMSG_DEC_INIT_ACK
+ * @msg_id : VPU_IPIMSG_DEC_INIT_ACK
+ * @status : VPU execution result
+ * @ap_inst_addr : AP vcodec_vpu_inst instance address
+ * @vpu_inst_addr : VPU decoder instance address
+ */
+struct vdec_vpu_ipi_init_ack {
+ uint32_t msg_id;
+ int32_t status;
+ uint64_t ap_inst_addr;
+ uint32_t vpu_inst_addr;
+};
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
new file mode 100644
index 000000000..1abd14e79
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_ipi_msg.h"
+#include "vdec_vpu_if.h"
+
+static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
+{
+ struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+ (unsigned long)msg->ap_inst_addr;
+
+ mtk_vcodec_debug(vpu, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
+
+ /* mapping VPU address to kernel virtual address */
+ /* the content in vsi is initialized to 0 in VPU */
+ vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+ vpu->inst_addr = msg->vpu_inst_addr;
+
+ mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+}
+
+/*
+ * This function runs in interrupt context and it means there's an IPI MSG
+ * from VPU.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct vdec_vpu_ipi_ack *msg = data;
+ struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+ (unsigned long)msg->ap_inst_addr;
+
+ mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+
+ if (msg->status == 0) {
+ switch (msg->msg_id) {
+ case VPU_IPIMSG_DEC_INIT_ACK:
+ handle_init_ack_msg(data);
+ break;
+
+ case VPU_IPIMSG_DEC_START_ACK:
+ case VPU_IPIMSG_DEC_END_ACK:
+ case VPU_IPIMSG_DEC_DEINIT_ACK:
+ case VPU_IPIMSG_DEC_RESET_ACK:
+ break;
+
+ default:
+ mtk_vcodec_err(vpu, "invalid msg=%X", msg->msg_id);
+ break;
+ }
+ }
+
+ mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
+ vpu->failure = msg->status;
+ vpu->signaled = 1;
+}
+
+static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
+{
+ int err;
+
+ mtk_vcodec_debug(vpu, "id=%X", *(uint32_t *)msg);
+
+ vpu->failure = 0;
+ vpu->signaled = 0;
+
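+	/*
+	 * vpu_ipi_send() is expected to return only after the ACK message has
+	 * been handled, so vpu->failure already reflects the VPU-side status
+	 * when it is returned below (assumption based on this usage).
+	 */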
+ err = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+ if (err) {
+ mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
+ vpu->id, *(uint32_t *)msg, err);
+ return err;
+ }
+
+ return vpu->failure;
+}
+
+static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
+{
+ struct vdec_ap_ipi_cmd msg;
+ int err = 0;
+
+ mtk_vcodec_debug(vpu, "+ id=%X", msg_id);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = msg_id;
+ msg.vpu_inst_addr = vpu->inst_addr;
+
+ err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
+ return err;
+}
+
+int vpu_dec_init(struct vdec_vpu_inst *vpu)
+{
+ struct vdec_ap_ipi_init msg;
+ int err;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ init_waitqueue_head(&vpu->wq);
+
+ err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
+ if (err != 0) {
+ mtk_vcodec_err(vpu, "vpu_ipi_register fail status=%d", err);
+ return err;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = AP_IPIMSG_DEC_INIT;
+ msg.ap_inst_addr = (unsigned long)vpu;
+
+ mtk_vcodec_debug(vpu, "vdec_inst=%p", vpu);
+
+ err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- ret=%d", err);
+ return err;
+}
+
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
+{
+ struct vdec_ap_ipi_dec_start msg;
+ int i;
+ int err = 0;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ if (len > ARRAY_SIZE(msg.data)) {
+ mtk_vcodec_err(vpu, "invalid len = %d\n", len);
+ return -EINVAL;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = AP_IPIMSG_DEC_START;
+ msg.vpu_inst_addr = vpu->inst_addr;
+
+ for (i = 0; i < len; i++)
+ msg.data[i] = data[i];
+
+ err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- ret=%d", err);
+ return err;
+}
+
+int vpu_dec_end(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_END);
+}
+
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_DEINIT);
+}
+
+int vpu_dec_reset(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_RESET);
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
new file mode 100644
index 000000000..cd37bb2a6
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_VPU_IF_H_
+#define _VDEC_VPU_IF_H_
+
+#include "mtk_vpu.h"
+
+/**
+ * struct vdec_vpu_inst - VPU instance for video codec
+ * @id : ipi id for each decoder
+ * @vsi : driver structure allocated by VPU side and shared to AP side
+ * for control and info share
+ * @failure : VPU execution result status, 0: success, others: fail
+ * @inst_addr : VPU decoder instance address
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
+ * @ctx : context for v4l2 layer integration
+ * @dev : platform device of VPU
+ * @wq : wait queue to wait for VPU message ack
+ * @handler : ipi handler for each decoder
+ */
+struct vdec_vpu_inst {
+ enum ipi_id id;
+ void *vsi;
+ int32_t failure;
+ uint32_t inst_addr;
+ unsigned int signaled;
+ struct mtk_vcodec_ctx *ctx;
+ struct platform_device *dev;
+ wait_queue_head_t wq;
+ ipi_handler_t handler;
+};
+
+/**
+ * vpu_dec_init - init decoder instance and allocate required resource in VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_init(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_start - start decoding, basically the function will be invoked once
+ * every frame.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ * @data: meta data to pass bitstream info to VPU decoder
+ * @len : meta data length
+ */
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len);
+
+/**
+ * vpu_dec_end - end decoding, basically the function will be invoked once
+ *		 when the HW decoding-done interrupt is received successfully.
+ *		 The decoder in the VPU will continue to do reference frame
+ *		 management
+ * and check if there is a new decoded frame available to display.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ */
+int vpu_dec_end(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_deinit - deinit decoder instance and resource freed in VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_reset - reset decoder; used to flush the decoder at end of stream
+ *		   or on seek. Remaining non-displayed frames will be pushed to
+ *		   display.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_reset(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len : length of ipi message
+ * @priv: callback private data which is passed by decoder when register.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
new file mode 100644
index 000000000..6cf31b366
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "../mtk_vcodec_drv.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_intr.h"
+#include "../mtk_vcodec_enc.h"
+#include "../mtk_vcodec_enc_pm.h"
+#include "../venc_drv_base.h"
+#include "../venc_ipi_msg.h"
+#include "../venc_vpu_if.h"
+#include "mtk_vpu.h"
+
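+/* H.264 start code followed by a NAL header with nal_unit_type 12 (filler). */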
+static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
+
+#define H264_FILLER_MARKER_SIZE ARRAY_SIZE(h264_filler_marker)
+#define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098
+
+/*
+ * enum venc_h264_vpu_work_buf - h264 encoder buffer index
+ */
+enum venc_h264_vpu_work_buf {
+ VENC_H264_VPU_WORK_BUF_RC_INFO,
+ VENC_H264_VPU_WORK_BUF_RC_CODE,
+ VENC_H264_VPU_WORK_BUF_REC_LUMA,
+ VENC_H264_VPU_WORK_BUF_REC_CHROMA,
+ VENC_H264_VPU_WORK_BUF_REF_LUMA,
+ VENC_H264_VPU_WORK_BUF_REF_CHROMA,
+ VENC_H264_VPU_WORK_BUF_MV_INFO_1,
+ VENC_H264_VPU_WORK_BUF_MV_INFO_2,
+ VENC_H264_VPU_WORK_BUF_SKIP_FRAME,
+ VENC_H264_VPU_WORK_BUF_MAX,
+};
+
+/*
+ * enum venc_h264_bs_mode - for bs_mode argument in h264_enc_vpu_encode
+ */
+enum venc_h264_bs_mode {
+ H264_BS_MODE_SPS,
+ H264_BS_MODE_PPS,
+ H264_BS_MODE_FRAME,
+};
+
+/*
+ * struct venc_h264_vpu_config - Structure for h264 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @input_fourcc: input fourcc
+ * @bitrate: target bitrate (in bps)
+ * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
+ * to be used for display purposes; must be smaller or equal to buffer
+ * size.
+ * @pic_h: picture height
+ * @buf_w: buffer width. Buffer size is stream resolution in pixels aligned to
+ * hardware requirements.
+ * @buf_h: buffer height
+ * @gop_size: group of picture size (idr frame)
+ * @intra_period: intra frame period
+ * @framerate: frame rate in fps
+ * @profile: as specified in standard
+ * @level: as specified in standard
+ * @wfd: WFD mode 1:on, 0:off
+ */
+struct venc_h264_vpu_config {
+ u32 input_fourcc;
+ u32 bitrate;
+ u32 pic_w;
+ u32 pic_h;
+ u32 buf_w;
+ u32 buf_h;
+ u32 gop_size;
+ u32 intra_period;
+ u32 framerate;
+ u32 profile;
+ u32 level;
+ u32 wfd;
+};
+
+/*
+ * struct venc_h264_vpu_buf - Structure for buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @iova: IO virtual address
+ * @vpua: VPU side memory addr which is used by RC_CODE
+ * @size: buffer size (in bytes)
+ */
+struct venc_h264_vpu_buf {
+ u32 iova;
+ u32 vpua;
+ u32 size;
+};
+
+/*
+ * struct venc_h264_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * This structure is allocated in VPU side and shared to AP side.
+ * @config: h264 encoder configuration
+ * @work_bufs: working buffer information in VPU side
+ * The work_bufs here is for storing the 'size' info shared to AP side.
+ * The similar item in struct venc_h264_inst is for memory allocation
+ * in AP side. The AP driver will copy the 'size' from here to the one in
+ * struct mtk_vcodec_mem, then invoke mtk_vcodec_mem_alloc to allocate
+ * the buffer. After that, bypass the 'dma_addr' to the 'iova' field here for
+ * register setting in VPU side.
+ */
+struct venc_h264_vsi {
+ struct venc_h264_vpu_config config;
+ struct venc_h264_vpu_buf work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
+};
+
+/*
+ * struct venc_h264_inst - h264 encoder AP driver instance
+ * @hw_base: h264 encoder hardware register base
+ * @work_bufs: working buffer
+ * @pps_buf: buffer to store the pps bitstream
+ * @work_buf_allocated: working buffer allocated flag
+ * @frm_cnt: encoded frame count
+ * @prepend_hdr: when the v4l2 layer sends the VENC_SET_PARAM_PREPEND_HEADER cmd
+ * through h264_enc_set_param interface, it will set this flag and prepend the
+ * sps/pps in h264_enc_encode function.
+ * @vpu_inst: VPU instance to exchange information between AP and VPU
+ * @vsi: driver structure allocated by VPU side and shared to AP side for
+ * control and info share
+ * @ctx: context for v4l2 layer integration
+ */
+struct venc_h264_inst {
+ void __iomem *hw_base;
+ struct mtk_vcodec_mem work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
+ struct mtk_vcodec_mem pps_buf;
+ bool work_buf_allocated;
+ unsigned int frm_cnt;
+ unsigned int prepend_hdr;
+ struct venc_vpu_inst vpu_inst;
+ struct venc_h264_vsi *vsi;
+ struct mtk_vcodec_ctx *ctx;
+};
+
+static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
+{
+ return readl(inst->hw_base + addr);
+}
+
+static unsigned int h264_get_profile(struct venc_h264_inst *inst,
+ unsigned int profile)
+{
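+	/* Map the V4L2 profile enum to the H.264 profile_idc value. */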
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return 66;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return 77;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return 100;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ mtk_vcodec_err(inst, "unsupported CONSTRAINED_BASELINE");
+ return 0;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ mtk_vcodec_err(inst, "unsupported EXTENDED");
+ return 0;
+ default:
+ mtk_vcodec_debug(inst, "unsupported profile %d", profile);
+ return 100;
+ }
+}
+
+static unsigned int h264_get_level(struct venc_h264_inst *inst,
+ unsigned int level)
+{
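+	/* Map the V4L2 level enum to the H.264 level_idc value (level * 10). */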
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ mtk_vcodec_err(inst, "unsupported 1B");
+ return 0;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ default:
+ mtk_vcodec_debug(inst, "unsupported level %d", level);
+ return 31;
+ }
+}
+
+static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
+{
+ int i;
+
+ mtk_vcodec_debug_enter(inst);
+
+ /* Except the SKIP_FRAME buffers,
+ * other buffers need to be freed by AP.
+ */
+ for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
+ if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME)
+ mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
+ }
+
+ mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
+
+ mtk_vcodec_debug_leave(inst);
+}
+
+static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
+{
+ int i;
+ int ret = 0;
+ struct venc_h264_vpu_buf *wb = inst->vsi->work_bufs;
+
+ mtk_vcodec_debug_enter(inst);
+
+ for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
+ /*
+ * This 'wb' structure is set by VPU side and shared to AP for
+ * buffer allocation and IO virtual addr mapping. For most of
+ * the buffers, AP will allocate the buffer according to 'size'
+ * field and store the IO virtual addr in 'iova' field. There
+ * are two exceptions:
+		 * (1) RC_CODE buffer: it is pre-allocated on the VPU side,
+		 * which saves its VPU addr in the 'vpua' field. The AP will
+		 * translate the VPU addr to the corresponding IO virtual addr
+		 * and store it in the 'iova' field for register setting on
+		 * the VPU side.
+		 * (2) SKIP_FRAME buffer: it is pre-allocated on the VPU side,
+		 * which saves its VPU addr in the 'vpua' field. The AP will
+		 * translate the VPU addr to the corresponding AP side virtual
+		 * address and memcpy from it into the bitstream buffer
+		 * assigned by the v4l2 layer.
+ */
+ inst->work_bufs[i].size = wb[i].size;
+ if (i == VENC_H264_VPU_WORK_BUF_SKIP_FRAME) {
+ inst->work_bufs[i].va = vpu_mapping_dm_addr(
+ inst->vpu_inst.dev, wb[i].vpua);
+ inst->work_bufs[i].dma_addr = 0;
+ } else {
+ ret = mtk_vcodec_mem_alloc(inst->ctx,
+ &inst->work_bufs[i]);
+ if (ret) {
+ mtk_vcodec_err(inst,
+ "cannot allocate buf %d", i);
+ goto err_alloc;
+ }
+ /*
+			 * This RC_CODE is pre-allocated by the VPU and exposed
+			 * via a VPU addr, so we need to memcpy it into the
+			 * newly allocated buffer whose IO virtual addr goes in
+			 * the 'iova' field for register setting on the VPU
+			 * side.
+ */
+ if (i == VENC_H264_VPU_WORK_BUF_RC_CODE) {
+ void *tmp_va;
+
+ tmp_va = vpu_mapping_dm_addr(inst->vpu_inst.dev,
+ wb[i].vpua);
+ memcpy(inst->work_bufs[i].va, tmp_va,
+ wb[i].size);
+ }
+ }
+ wb[i].iova = inst->work_bufs[i].dma_addr;
+
+ mtk_vcodec_debug(inst,
+ "work_buf[%d] va=0x%p iova=%pad size=%zu",
+ i, inst->work_bufs[i].va,
+ &inst->work_bufs[i].dma_addr,
+ inst->work_bufs[i].size);
+ }
+
+ /* the pps_buf is used by AP side only */
+ inst->pps_buf.size = 128;
+ ret = mtk_vcodec_mem_alloc(inst->ctx, &inst->pps_buf);
+ if (ret) {
+ mtk_vcodec_err(inst, "cannot allocate pps_buf");
+ goto err_alloc;
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+
+err_alloc:
+ h264_enc_free_work_buf(inst);
+
+ return ret;
+}
+
+static unsigned int h264_enc_wait_venc_done(struct venc_h264_inst *inst)
+{
+ unsigned int irq_status = 0;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
+
+ if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS)) {
+ irq_status = ctx->irq_status;
+ mtk_vcodec_debug(inst, "irq_status %x <-", irq_status);
+ }
+ return irq_status;
+}
+
+static int h264_encode_sps(struct venc_h264_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_SPS, NULL,
+ bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+ irq_status = h264_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_SPS) {
+ mtk_vcodec_err(inst, "expect irq status %d",
+ MTK_VENC_IRQ_STATUS_SPS);
+ return -EINVAL;
+ }
+
+ *bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
+ mtk_vcodec_debug(inst, "bs size %d <-", *bs_size);
+
+ return ret;
+}
+
+static int h264_encode_pps(struct venc_h264_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_PPS, NULL,
+ bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+ irq_status = h264_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_PPS) {
+ mtk_vcodec_err(inst, "expect irq status %d",
+ MTK_VENC_IRQ_STATUS_PPS);
+ return -EINVAL;
+ }
+
+ *bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
+ mtk_vcodec_debug(inst, "bs size %d <-", *bs_size);
+
+ return ret;
+}
+
+static int h264_encode_header(struct venc_h264_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int bs_size_sps;
+ unsigned int bs_size_pps;
+
+ ret = h264_encode_sps(inst, bs_buf, &bs_size_sps);
+ if (ret)
+ return ret;
+
+ ret = h264_encode_pps(inst, &inst->pps_buf, &bs_size_pps);
+ if (ret)
+ return ret;
+
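+	/* The PPS was encoded into a scratch buffer; append it after the SPS. */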
+ memcpy(bs_buf->va + bs_size_sps, inst->pps_buf.va, bs_size_pps);
+ *bs_size = bs_size_sps + bs_size_pps;
+
+ return ret;
+}
+
+static int h264_encode_frame(struct venc_h264_inst *inst,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME, frm_buf,
+ bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+ /*
+	 * Skip frame case: the skip frame buffer is composed by the VPU side
+	 * only; it does not trigger the HW, so skip the wait-for-interrupt
+	 * operation.
+ */
+ if (inst->vpu_inst.state == VEN_IPI_MSG_ENC_STATE_SKIP) {
+ *bs_size = inst->vpu_inst.bs_size;
+ memcpy(bs_buf->va,
+ inst->work_bufs[VENC_H264_VPU_WORK_BUF_SKIP_FRAME].va,
+ *bs_size);
+ ++inst->frm_cnt;
+ return ret;
+ }
+
+ irq_status = h264_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_FRM) {
+ mtk_vcodec_err(inst, "irq_status=%d failed", irq_status);
+ return -EIO;
+ }
+
+ *bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
+
+ ++inst->frm_cnt;
+ mtk_vcodec_debug(inst, "frm %d bs_size %d key_frm %d <-",
+ inst->frm_cnt, *bs_size, inst->vpu_inst.is_key_frm);
+
+ return ret;
+}
+
+static void h264_encode_filler(struct venc_h264_inst *inst, void *buf,
+ int size)
+{
+ unsigned char *p = buf;
+
+ if (size < H264_FILLER_MARKER_SIZE) {
+ mtk_vcodec_err(inst, "filler size too small %d", size);
+ return;
+ }
+
+ memcpy(p, h264_filler_marker, ARRAY_SIZE(h264_filler_marker));
+ size -= H264_FILLER_MARKER_SIZE;
+ p += H264_FILLER_MARKER_SIZE;
+ memset(p, 0xff, size);
+}
+
+static int h264_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+ inst->vpu_inst.ctx = ctx;
+ inst->vpu_inst.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu_inst.id = IPI_VENC_H264;
+ inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_SYS);
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_init(&inst->vpu_inst);
+
+ inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi;
+
+ mtk_vcodec_debug_leave(inst);
+
+ if (ret)
+ kfree(inst);
+ else
+ (*handle) = (unsigned long)inst;
+
+ return ret;
+}
+
+static int h264_enc_encode(unsigned long handle,
+ enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ mtk_vcodec_debug(inst, "opt %d ->", opt);
+
+ enable_irq(ctx->dev->enc_irq);
+
+ switch (opt) {
+ case VENC_START_OPT_ENCODE_SEQUENCE_HEADER: {
+ unsigned int bs_size_hdr;
+
+ ret = h264_encode_header(inst, bs_buf, &bs_size_hdr);
+ if (ret)
+ goto encode_err;
+
+ result->bs_size = bs_size_hdr;
+ result->is_key_frm = false;
+ break;
+ }
+
+ case VENC_START_OPT_ENCODE_FRAME: {
+ int hdr_sz;
+ int hdr_sz_ext;
+ int filler_sz = 0;
+ const int bs_alignment = 128;
+ struct mtk_vcodec_mem tmp_bs_buf;
+ unsigned int bs_size_hdr;
+ unsigned int bs_size_frm;
+
+ if (!inst->prepend_hdr) {
+ ret = h264_encode_frame(inst, frm_buf, bs_buf,
+ &result->bs_size);
+ if (ret)
+ goto encode_err;
+ result->is_key_frm = inst->vpu_inst.is_key_frm;
+ break;
+ }
+
+ mtk_vcodec_debug(inst, "h264_encode_frame prepend SPS/PPS");
+
+ ret = h264_encode_header(inst, bs_buf, &bs_size_hdr);
+ if (ret)
+ goto encode_err;
+
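+		/*
+		 * The frame bitstream seems to need a 128-byte aligned start
+		 * address (bs_alignment), so pad the gap after the headers
+		 * with a filler NAL; if the gap is too small for the filler
+		 * marker, pad one extra alignment unit.
+		 */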
+ hdr_sz = bs_size_hdr;
+ hdr_sz_ext = (hdr_sz & (bs_alignment - 1));
+ if (hdr_sz_ext) {
+ filler_sz = bs_alignment - hdr_sz_ext;
+ if (hdr_sz_ext + H264_FILLER_MARKER_SIZE > bs_alignment)
+ filler_sz += bs_alignment;
+ h264_encode_filler(inst, bs_buf->va + hdr_sz,
+ filler_sz);
+ }
+
+ tmp_bs_buf.va = bs_buf->va + hdr_sz + filler_sz;
+ tmp_bs_buf.dma_addr = bs_buf->dma_addr + hdr_sz + filler_sz;
+ tmp_bs_buf.size = bs_buf->size - (hdr_sz + filler_sz);
+
+ ret = h264_encode_frame(inst, frm_buf, &tmp_bs_buf,
+ &bs_size_frm);
+ if (ret)
+ goto encode_err;
+
+ result->bs_size = hdr_sz + filler_sz + bs_size_frm;
+
+ mtk_vcodec_debug(inst, "hdr %d filler %d frame %d bs %d",
+ hdr_sz, filler_sz, bs_size_frm,
+ result->bs_size);
+
+ inst->prepend_hdr = 0;
+ result->is_key_frm = inst->vpu_inst.is_key_frm;
+ break;
+ }
+
+ default:
+ mtk_vcodec_err(inst, "venc_start_opt %d not supported", opt);
+ ret = -EINVAL;
+ break;
+ }
+
+encode_err:
+
+ disable_irq(ctx->dev->enc_irq);
+ mtk_vcodec_debug(inst, "opt %d <-", opt);
+
+ return ret;
+}
+
+static int h264_enc_set_param(unsigned long handle,
+ enum venc_set_param_type type,
+ struct venc_enc_param *enc_prm)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+
+ mtk_vcodec_debug(inst, "->type=%d", type);
+
+ switch (type) {
+ case VENC_SET_PARAM_ENC:
+ inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi->config.bitrate = enc_prm->bitrate;
+ inst->vsi->config.pic_w = enc_prm->width;
+ inst->vsi->config.pic_h = enc_prm->height;
+ inst->vsi->config.buf_w = enc_prm->buf_width;
+ inst->vsi->config.buf_h = enc_prm->buf_height;
+ inst->vsi->config.gop_size = enc_prm->gop_size;
+ inst->vsi->config.framerate = enc_prm->frm_rate;
+ inst->vsi->config.intra_period = enc_prm->intra_period;
+ inst->vsi->config.profile =
+ h264_get_profile(inst, enc_prm->h264_profile);
+ inst->vsi->config.level =
+ h264_get_level(inst, enc_prm->h264_level);
+ inst->vsi->config.wfd = 0;
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ if (ret)
+ break;
+ if (inst->work_buf_allocated) {
+ h264_enc_free_work_buf(inst);
+ inst->work_buf_allocated = false;
+ }
+ ret = h264_enc_alloc_work_buf(inst);
+ if (ret)
+ break;
+ inst->work_buf_allocated = true;
+ break;
+
+ case VENC_SET_PARAM_PREPEND_HEADER:
+ inst->prepend_hdr = 1;
+ mtk_vcodec_debug(inst, "set prepend header mode");
+ break;
+
+ default:
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ break;
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+}
+
+static int h264_enc_deinit(unsigned long handle)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_deinit(&inst->vpu_inst);
+
+ if (inst->work_buf_allocated)
+ h264_enc_free_work_buf(inst);
+
+ mtk_vcodec_debug_leave(inst);
+ kfree(inst);
+
+ return ret;
+}
+
+static const struct venc_common_if venc_h264_if = {
+ .init = h264_enc_init,
+ .encode = h264_enc_encode,
+ .set_param = h264_enc_set_param,
+ .deinit = h264_enc_deinit,
+};
+
+const struct venc_common_if *get_h264_enc_comm_if(void);
+
+const struct venc_common_if *get_h264_enc_comm_if(void)
+{
+ return &venc_h264_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
new file mode 100644
index 000000000..957420dd6
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "../mtk_vcodec_drv.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_intr.h"
+#include "../mtk_vcodec_enc.h"
+#include "../mtk_vcodec_enc_pm.h"
+#include "../venc_drv_base.h"
+#include "../venc_ipi_msg.h"
+#include "../venc_vpu_if.h"
+#include "mtk_vpu.h"
+
+#define VENC_BITSTREAM_FRAME_SIZE 0x0098
+#define VENC_BITSTREAM_HEADER_LEN 0x00e8
+
+/* This ac_tag is vp8 frame tag. */
+#define MAX_AC_TAG_SIZE 10
+
+/*
+ * enum venc_vp8_vpu_work_buf - vp8 encoder buffer index
+ */
+enum venc_vp8_vpu_work_buf {
+ VENC_VP8_VPU_WORK_BUF_LUMA,
+ VENC_VP8_VPU_WORK_BUF_LUMA2,
+ VENC_VP8_VPU_WORK_BUF_LUMA3,
+ VENC_VP8_VPU_WORK_BUF_CHROMA,
+ VENC_VP8_VPU_WORK_BUF_CHROMA2,
+ VENC_VP8_VPU_WORK_BUF_CHROMA3,
+ VENC_VP8_VPU_WORK_BUF_MV_INFO,
+ VENC_VP8_VPU_WORK_BUF_BS_HEADER,
+ VENC_VP8_VPU_WORK_BUF_PROB_BUF,
+ VENC_VP8_VPU_WORK_BUF_RC_INFO,
+ VENC_VP8_VPU_WORK_BUF_RC_CODE,
+ VENC_VP8_VPU_WORK_BUF_RC_CODE2,
+ VENC_VP8_VPU_WORK_BUF_RC_CODE3,
+ VENC_VP8_VPU_WORK_BUF_MAX,
+};
+
+/*
+ * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @input_fourcc: input fourcc
+ * @bitrate: target bitrate (in bps)
+ * @pic_w: picture width. Picture size is the visible stream resolution, in
+ *         pixels, to be used for display purposes; it must be smaller than
+ *         or equal to the buffer size.
+ * @pic_h: picture height
+ * @buf_w: buffer width (with 16 alignment). Buffer size is stream resolution
+ * in pixels aligned to hardware requirements.
+ * @buf_h: buffer height (with 16 alignment)
+ * @gop_size: group of picture size (key frame)
+ * @framerate: frame rate in fps
+ * @ts_mode: temporal scalability mode (0: disable, 1: enable)
+ * support three temporal layers - 0: 7.5fps 1: 7.5fps 2: 15fps.
+ */
+struct venc_vp8_vpu_config {
+ u32 input_fourcc;
+ u32 bitrate;
+ u32 pic_w;
+ u32 pic_h;
+ u32 buf_w;
+ u32 buf_h;
+ u32 gop_size;
+ u32 framerate;
+ u32 ts_mode;
+};
+
+/*
+ * struct venc_vp8_vpu_buf - Structure for buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @iova: IO virtual address
+ * @vpua: VPU side memory addr which is used by RC_CODE
+ * @size: buffer size (in bytes)
+ */
+struct venc_vp8_vpu_buf {
+ u32 iova;
+ u32 vpua;
+ u32 size;
+};
+
+/*
+ * struct venc_vp8_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * This structure is allocated in VPU side and shared to AP side.
+ * @config: vp8 encoder configuration
+ * @work_bufs: working buffer information on the VPU side
+ * The work_bufs here stores the 'size' info shared with the AP side.
+ * The similar item in struct venc_vp8_inst is for memory allocation
+ * on the AP side. The AP driver copies the 'size' from here to the one in
+ * struct mtk_vcodec_mem, then invokes mtk_vcodec_mem_alloc to allocate
+ * the buffer. After that, it passes the 'dma_addr' back into the 'iova'
+ * field here for register setting on the VPU side.
+ */
+struct venc_vp8_vsi {
+ struct venc_vp8_vpu_config config;
+ struct venc_vp8_vpu_buf work_bufs[VENC_VP8_VPU_WORK_BUF_MAX];
+};
+
+/*
+ * struct venc_vp8_inst - vp8 encoder AP driver instance
+ * @hw_base: vp8 encoder hardware register base
+ * @work_bufs: working buffer
+ * @work_buf_allocated: working buffer allocated flag
+ * @frm_cnt: encoded frame count; used to decide when to insert an I-frame,
+ *           and reset when a force-intra command is received.
+ * @ts_mode: temporal scalability mode (0: disable, 1: enable)
+ * support three temporal layers - 0: 7.5fps 1: 7.5fps 2: 15fps.
+ * @vpu_inst: VPU instance to exchange information between AP and VPU
+ * @vsi: driver structure allocated by VPU side and shared to AP side for
+ * control and info share
+ * @ctx: context for v4l2 layer integration
+ */
+struct venc_vp8_inst {
+ void __iomem *hw_base;
+ struct mtk_vcodec_mem work_bufs[VENC_VP8_VPU_WORK_BUF_MAX];
+ bool work_buf_allocated;
+ unsigned int frm_cnt;
+ unsigned int ts_mode;
+ struct venc_vpu_inst vpu_inst;
+ struct venc_vp8_vsi *vsi;
+ struct mtk_vcodec_ctx *ctx;
+};
+
+static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
+{
+ return readl(inst->hw_base + addr);
+}
+
+static void vp8_enc_free_work_buf(struct venc_vp8_inst *inst)
+{
+ int i;
+
+ mtk_vcodec_debug_enter(inst);
+
+ /* Buffers need to be freed by AP. */
+ for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
+ if (inst->work_bufs[i].size == 0)
+ continue;
+ mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
+ }
+
+ mtk_vcodec_debug_leave(inst);
+}
+
+static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
+{
+ int i;
+ int ret = 0;
+ struct venc_vp8_vpu_buf *wb = inst->vsi->work_bufs;
+
+ mtk_vcodec_debug_enter(inst);
+
+ for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
+ if (wb[i].size == 0)
+ continue;
+		/*
+		 * This 'wb' structure is set by the VPU side and shared with
+		 * the AP for buffer allocation and IO virtual addr mapping.
+		 * For most of the buffers, the AP allocates the buffer
+		 * according to the 'size' field and stores the IO virtual
+		 * addr in the 'iova' field. The RC_CODEx buffers are
+		 * pre-allocated on the VPU side because they live in VPU
+		 * SRAM, and their VPU addr is saved in the 'vpua' field. The
+		 * AP translates the VPU addr to the corresponding IO virtual
+		 * addr and stores it in the 'iova' field.
+		 */
+ inst->work_bufs[i].size = wb[i].size;
+ ret = mtk_vcodec_mem_alloc(inst->ctx, &inst->work_bufs[i]);
+ if (ret) {
+ mtk_vcodec_err(inst,
+ "cannot alloc work_bufs[%d]", i);
+ goto err_alloc;
+ }
+		/*
+		 * The RC_CODEx data is pre-allocated by the VPU and stored at
+		 * a VPU addr, so memcpy is used to copy it into the buffer
+		 * behind the 'iova' field for register setting on the VPU
+		 * side.
+		 */
+ if (i == VENC_VP8_VPU_WORK_BUF_RC_CODE ||
+ i == VENC_VP8_VPU_WORK_BUF_RC_CODE2 ||
+ i == VENC_VP8_VPU_WORK_BUF_RC_CODE3) {
+ void *tmp_va;
+
+ tmp_va = vpu_mapping_dm_addr(inst->vpu_inst.dev,
+ wb[i].vpua);
+ memcpy(inst->work_bufs[i].va, tmp_va, wb[i].size);
+ }
+ wb[i].iova = inst->work_bufs[i].dma_addr;
+
+ mtk_vcodec_debug(inst,
+ "work_bufs[%d] va=0x%p,iova=%pad,size=%zu",
+ i, inst->work_bufs[i].va,
+ &inst->work_bufs[i].dma_addr,
+ inst->work_bufs[i].size);
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+
+err_alloc:
+ vp8_enc_free_work_buf(inst);
+
+ return ret;
+}
+
+static unsigned int vp8_enc_wait_venc_done(struct venc_vp8_inst *inst)
+{
+ unsigned int irq_status = 0;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
+
+ if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS)) {
+ irq_status = ctx->irq_status;
+ mtk_vcodec_debug(inst, "isr return %x", irq_status);
+ }
+ return irq_status;
+}
+
+/*
+ * Compose ac_tag, bitstream header and bitstream payload into
+ * one bitstream buffer.
+ */
+static int vp8_enc_compose_one_frame(struct venc_vp8_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ unsigned int not_key;
+ u32 bs_frm_size;
+ u32 bs_hdr_len;
+ unsigned int ac_tag_size;
+ u8 ac_tag[MAX_AC_TAG_SIZE];
+ u32 tag;
+
+ bs_frm_size = vp8_enc_read_reg(inst, VENC_BITSTREAM_FRAME_SIZE);
+ bs_hdr_len = vp8_enc_read_reg(inst, VENC_BITSTREAM_HEADER_LEN);
+
+ /* if a frame is key frame, not_key is 0 */
+ not_key = !inst->vpu_inst.is_key_frm;
+ tag = (bs_hdr_len << 5) | 0x10 | not_key;
+ ac_tag[0] = tag & 0xff;
+ ac_tag[1] = (tag >> 8) & 0xff;
+ ac_tag[2] = (tag >> 16) & 0xff;
+
+ /* key frame */
+ if (not_key == 0) {
+ ac_tag_size = MAX_AC_TAG_SIZE;
+ ac_tag[3] = 0x9d;
+ ac_tag[4] = 0x01;
+ ac_tag[5] = 0x2a;
+ ac_tag[6] = inst->vsi->config.pic_w;
+ ac_tag[7] = inst->vsi->config.pic_w >> 8;
+ ac_tag[8] = inst->vsi->config.pic_h;
+ ac_tag[9] = inst->vsi->config.pic_h >> 8;
+ } else {
+ ac_tag_size = 3;
+ }
+
+ if (bs_buf->size < bs_hdr_len + bs_frm_size + ac_tag_size) {
+ mtk_vcodec_err(inst, "bitstream buf size is too small(%zu)",
+ bs_buf->size);
+ return -EINVAL;
+ }
+
+ /*
+ * (1) The vp8 bitstream header and body are generated by the HW vp8
+ * encoder separately at the same time. We cannot know the bitstream
+ * header length in advance.
+ * (2) From the vp8 spec, there is no stuffing byte allowed between the
+ * ac tag, bitstream header and bitstream body.
+ */
+ memmove(bs_buf->va + bs_hdr_len + ac_tag_size,
+ bs_buf->va, bs_frm_size);
+ memcpy(bs_buf->va + ac_tag_size,
+ inst->work_bufs[VENC_VP8_VPU_WORK_BUF_BS_HEADER].va,
+ bs_hdr_len);
+ memcpy(bs_buf->va, ac_tag, ac_tag_size);
+ *bs_size = bs_frm_size + bs_hdr_len + ac_tag_size;
+
+ return 0;
+}
+
+static int vp8_enc_encode_frame(struct venc_vp8_inst *inst,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug(inst, "->frm_cnt=%d", inst->frm_cnt);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, 0, frm_buf, bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+ irq_status = vp8_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_FRM) {
+ mtk_vcodec_err(inst, "irq_status=%d failed", irq_status);
+ return -EIO;
+ }
+
+ if (vp8_enc_compose_one_frame(inst, bs_buf, bs_size)) {
+ mtk_vcodec_err(inst, "vp8_enc_compose_one_frame failed");
+ return -EINVAL;
+ }
+
+ inst->frm_cnt++;
+ mtk_vcodec_debug(inst, "<-size=%d key_frm=%d", *bs_size,
+ inst->vpu_inst.is_key_frm);
+
+ return ret;
+}
+
+static int vp8_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+ inst->vpu_inst.ctx = ctx;
+ inst->vpu_inst.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu_inst.id = IPI_VENC_VP8;
+ inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_LT_SYS);
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_init(&inst->vpu_inst);
+
+ inst->vsi = (struct venc_vp8_vsi *)inst->vpu_inst.vsi;
+
+ mtk_vcodec_debug_leave(inst);
+
+ if (ret)
+ kfree(inst);
+ else
+ (*handle) = (unsigned long)inst;
+
+ return ret;
+}
+
+static int vp8_enc_encode(unsigned long handle,
+ enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ mtk_vcodec_debug_enter(inst);
+
+ enable_irq(ctx->dev->enc_lt_irq);
+
+ switch (opt) {
+ case VENC_START_OPT_ENCODE_FRAME:
+ ret = vp8_enc_encode_frame(inst, frm_buf, bs_buf,
+ &result->bs_size);
+ if (ret)
+ goto encode_err;
+ result->is_key_frm = inst->vpu_inst.is_key_frm;
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "opt not support:%d", opt);
+ ret = -EINVAL;
+ break;
+ }
+
+encode_err:
+
+ disable_irq(ctx->dev->enc_lt_irq);
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+}
+
+static int vp8_enc_set_param(unsigned long handle,
+ enum venc_set_param_type type,
+ struct venc_enc_param *enc_prm)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
+
+ mtk_vcodec_debug(inst, "->type=%d", type);
+
+ switch (type) {
+ case VENC_SET_PARAM_ENC:
+ inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi->config.bitrate = enc_prm->bitrate;
+ inst->vsi->config.pic_w = enc_prm->width;
+ inst->vsi->config.pic_h = enc_prm->height;
+ inst->vsi->config.buf_w = enc_prm->buf_width;
+ inst->vsi->config.buf_h = enc_prm->buf_height;
+ inst->vsi->config.gop_size = enc_prm->gop_size;
+ inst->vsi->config.framerate = enc_prm->frm_rate;
+ inst->vsi->config.ts_mode = inst->ts_mode;
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ if (ret)
+ break;
+ if (inst->work_buf_allocated) {
+ vp8_enc_free_work_buf(inst);
+ inst->work_buf_allocated = false;
+ }
+ ret = vp8_enc_alloc_work_buf(inst);
+ if (ret)
+ break;
+ inst->work_buf_allocated = true;
+ break;
+
+ /*
+ * VENC_SET_PARAM_TS_MODE must be called before VENC_SET_PARAM_ENC
+ */
+ case VENC_SET_PARAM_TS_MODE:
+ inst->ts_mode = 1;
+ mtk_vcodec_debug(inst, "set ts_mode");
+ break;
+
+ default:
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ break;
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+}
+
+static int vp8_enc_deinit(unsigned long handle)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_deinit(&inst->vpu_inst);
+
+ if (inst->work_buf_allocated)
+ vp8_enc_free_work_buf(inst);
+
+ mtk_vcodec_debug_leave(inst);
+ kfree(inst);
+
+ return ret;
+}
+
+static const struct venc_common_if venc_vp8_if = {
+ .init = vp8_enc_init,
+ .encode = vp8_enc_encode,
+ .set_param = vp8_enc_set_param,
+ .deinit = vp8_enc_deinit,
+};
+
+const struct venc_common_if *get_vp8_enc_comm_if(void);
+
+const struct venc_common_if *get_vp8_enc_comm_if(void)
+{
+ return &venc_vp8_if;
+}
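
The ac_tag[] assembled by vp8_enc_compose_one_frame() above is the VP8 frame tag (3 bytes, extended to 10 bytes with the start code and picture size for key frames). The standalone sketch below reproduces only the bit packing visible in that function (frame-type bit, the 0x10 show_frame bit, and the bitstream header length in the upper bits) with one worked value; it is illustrative and not part of this driver.

/* Minimal sketch of the tag packing used in vp8_enc_compose_one_frame(). */
#include <stdint.h>
#include <stdio.h>

static void pack_vp8_tag(uint8_t out[3], uint32_t bs_hdr_len, int is_key_frm)
{
	/* bit 0: 0 for a key frame, bit 4: show_frame, bits 5+: header length */
	uint32_t tag = (bs_hdr_len << 5) | 0x10 | (is_key_frm ? 0 : 1);

	out[0] = tag & 0xff;
	out[1] = (tag >> 8) & 0xff;
	out[2] = (tag >> 16) & 0xff;
}

int main(void)
{
	uint8_t tag[3];

	/* key frame with a 0x123-byte bitstream header prints "70 24 00" */
	pack_vp8_tag(tag, 0x123, 1);
	printf("%02x %02x %02x\n", tag[0], tag[1], tag[2]);
	return 0;
}
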
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_base.h b/drivers/media/platform/mtk-vcodec/venc_drv_base.h
new file mode 100644
index 000000000..6308d44de
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_base.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_DRV_BASE_
+#define _VENC_DRV_BASE_
+
+#include "mtk_vcodec_drv.h"
+
+#include "venc_drv_if.h"
+
+struct venc_common_if {
+ /**
+ * (*init)() - initialize driver
+ * @ctx: [in] mtk v4l2 context
+ * @handle: [out] driver handle
+ */
+ int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *handle);
+
+ /**
+ * (*encode)() - trigger encode
+ * @handle: [in] driver handle
+ * @opt: [in] encode option
+ * @frm_buf: [in] frame buffer to store input frame
+ * @bs_buf: [in] bitstream buffer to store output bitstream
+ * @result: [out] encode result
+ */
+ int (*encode)(unsigned long handle, enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result);
+
+ /**
+ * (*set_param)() - set driver's parameter
+ * @handle: [in] driver handle
+ * @type: [in] parameter type
+ * @in: [in] buffer to store the parameter
+ */
+ int (*set_param)(unsigned long handle, enum venc_set_param_type type,
+ struct venc_enc_param *in);
+
+ /**
+ * (*deinit)() - deinitialize driver.
+ * @handle: [in] driver handle
+ */
+ int (*deinit)(unsigned long handle);
+};
+
+#endif
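
For illustration only: a hypothetical additional backend would plug into venc_common_if by filling the four callbacks and exposing an accessor, mirroring get_h264_enc_comm_if() and get_vp8_enc_comm_if(). Every myenc_* name below is a placeholder, not an identifier from this driver, and the callbacks are assumed to be implemented in the backend's own file.

/* Callback implementations would live in the hypothetical backend's .c file. */
int myenc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle);
int myenc_encode(unsigned long handle, enum venc_start_opt opt,
		 struct venc_frm_buf *frm_buf,
		 struct mtk_vcodec_mem *bs_buf,
		 struct venc_done_result *result);
int myenc_set_param(unsigned long handle, enum venc_set_param_type type,
		    struct venc_enc_param *in);
int myenc_deinit(unsigned long handle);

static const struct venc_common_if venc_myenc_if = {
	.init = myenc_init,
	.encode = myenc_encode,
	.set_param = myenc_set_param,
	.deinit = myenc_deinit,
};

const struct venc_common_if *get_myenc_comm_if(void)
{
	return &venc_myenc_if;
}
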
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.c b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
new file mode 100644
index 000000000..d02d5f1df
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "venc_drv_base.h"
+#include "venc_drv_if.h"
+
+#include "mtk_vcodec_enc.h"
+#include "mtk_vcodec_enc_pm.h"
+#include "mtk_vpu.h"
+
+const struct venc_common_if *get_h264_enc_comm_if(void);
+const struct venc_common_if *get_vp8_enc_comm_if(void);
+
+int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+{
+ int ret = 0;
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_VP8:
+ ctx->enc_if = get_vp8_enc_comm_if();
+ break;
+ case V4L2_PIX_FMT_H264:
+ ctx->enc_if = get_h264_enc_comm_if();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mtk_venc_lock(ctx);
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->init(ctx, (unsigned long *)&ctx->drv_handle);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ return ret;
+}
+
+int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
+ enum venc_set_param_type type, struct venc_enc_param *in)
+{
+ int ret = 0;
+
+ mtk_venc_lock(ctx);
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->set_param(ctx->drv_handle, type, in);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ return ret;
+}
+
+int venc_if_encode(struct mtk_vcodec_ctx *ctx,
+ enum venc_start_opt opt, struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ mtk_venc_lock(ctx);
+
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ ctx->dev->curr_ctx = ctx;
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
+ bs_buf, result);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ ctx->dev->curr_ctx = NULL;
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+
+ mtk_venc_unlock(ctx);
+ return ret;
+}
+
+int venc_if_deinit(struct mtk_vcodec_ctx *ctx)
+{
+ int ret = 0;
+
+ if (ctx->drv_handle == 0)
+ return 0;
+
+ mtk_venc_lock(ctx);
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->deinit(ctx->drv_handle);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ ctx->drv_handle = 0;
+
+ return ret;
+}
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.h b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
new file mode 100644
index 000000000..a6e7d32e5
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_DRV_IF_H_
+#define _VENC_DRV_IF_H_
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+
+/*
+ * enum venc_yuv_fmt - The type of input yuv format
+ * (VPU related: If you change the order, you must also update the VPU codes.)
+ * @VENC_YUV_FORMAT_I420: I420 YUV format
+ * @VENC_YUV_FORMAT_YV12: YV12 YUV format
+ * @VENC_YUV_FORMAT_NV12: NV12 YUV format
+ * @VENC_YUV_FORMAT_NV21: NV21 YUV format
+ */
+enum venc_yuv_fmt {
+ VENC_YUV_FORMAT_I420 = 3,
+ VENC_YUV_FORMAT_YV12 = 5,
+ VENC_YUV_FORMAT_NV12 = 6,
+ VENC_YUV_FORMAT_NV21 = 7,
+};
+
+/*
+ * enum venc_start_opt - encode frame option used in venc_if_encode()
+ * @VENC_START_OPT_ENCODE_SEQUENCE_HEADER: encode SPS/PPS for H264
+ * @VENC_START_OPT_ENCODE_FRAME: encode normal frame
+ */
+enum venc_start_opt {
+ VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
+ VENC_START_OPT_ENCODE_FRAME,
+};
+
+/*
+ * enum venc_set_param_type - The type of set parameter used in
+ * venc_if_set_param()
+ * (VPU related: If you change the order, you must also update the VPU codes.)
+ * @VENC_SET_PARAM_ENC: set encoder parameters
+ * @VENC_SET_PARAM_FORCE_INTRA: force an intra frame
+ * @VENC_SET_PARAM_ADJUST_BITRATE: adjust bitrate (in bps)
+ * @VENC_SET_PARAM_ADJUST_FRAMERATE: set frame rate
+ * @VENC_SET_PARAM_GOP_SIZE: set IDR interval
+ * @VENC_SET_PARAM_INTRA_PERIOD: set I frame interval
+ * @VENC_SET_PARAM_SKIP_FRAME: set H264 skip one frame
+ * @VENC_SET_PARAM_PREPEND_HEADER: set H264 prepend SPS/PPS before IDR
+ * @VENC_SET_PARAM_TS_MODE: set VP8 temporal scalability mode
+ */
+enum venc_set_param_type {
+ VENC_SET_PARAM_ENC,
+ VENC_SET_PARAM_FORCE_INTRA,
+ VENC_SET_PARAM_ADJUST_BITRATE,
+ VENC_SET_PARAM_ADJUST_FRAMERATE,
+ VENC_SET_PARAM_GOP_SIZE,
+ VENC_SET_PARAM_INTRA_PERIOD,
+ VENC_SET_PARAM_SKIP_FRAME,
+ VENC_SET_PARAM_PREPEND_HEADER,
+ VENC_SET_PARAM_TS_MODE,
+};
+
+/*
+ * struct venc_enc_param - encoder settings for VENC_SET_PARAM_ENC used in
+ * venc_if_set_param()
+ * @input_yuv_fmt: input yuv format
+ * @h264_profile: V4L2 defined H.264 profile
+ * @h264_level: V4L2 defined H.264 level
+ * @width: image width
+ * @height: image height
+ * @buf_width: buffer width
+ * @buf_height: buffer height
+ * @frm_rate: frame rate in fps
+ * @intra_period: intra frame period
+ * @bitrate: target bitrate in bps
+ * @gop_size: group of picture size
+ */
+struct venc_enc_param {
+ enum venc_yuv_fmt input_yuv_fmt;
+ unsigned int h264_profile;
+ unsigned int h264_level;
+ unsigned int width;
+ unsigned int height;
+ unsigned int buf_width;
+ unsigned int buf_height;
+ unsigned int frm_rate;
+ unsigned int intra_period;
+ unsigned int bitrate;
+ unsigned int gop_size;
+};
+
+/*
+ * struct venc_frm_buf - frame buffer information used in venc_if_encode()
+ * @fb_addr: plane frame buffer addresses
+ */
+struct venc_frm_buf {
+ struct mtk_vcodec_mem fb_addr[MTK_VCODEC_MAX_PLANES];
+};
+
+/*
+ * struct venc_done_result - encode result returned by venc_if_encode()
+ * @bs_size: output bitstream size
+ * @is_key_frm: output is key frame or not
+ */
+struct venc_done_result {
+ unsigned int bs_size;
+ bool is_key_frm;
+};
+
+/*
+ * venc_if_init - Create the driver handle
+ * @ctx: device context
+ * @fourcc: encoder input format
+ * Return: 0 if creating handle successfully, otherwise it is failed.
+ */
+int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+
+/*
+ * venc_if_deinit - Release the driver handle
+ * @ctx: device context
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int venc_if_deinit(struct mtk_vcodec_ctx *ctx);
+
+/*
+ * venc_if_set_param - Set parameter to driver
+ * @ctx: device context
+ * @type: parameter type
+ * @in: input parameter
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
+ enum venc_set_param_type type,
+ struct venc_enc_param *in);
+
+/*
+ * venc_if_encode - Encode one frame
+ * @ctx: device context
+ * @opt: encode frame option
+ * @frm_buf: input frame buffer information
+ * @bs_buf: output bitstream buffer information
+ * @result: encode result
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int venc_if_encode(struct mtk_vcodec_ctx *ctx,
+ enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result);
+
+#endif /* _VENC_DRV_IF_H_ */
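
A minimal sketch of the call order the V4L2 layer is expected to follow with this interface. It assumes ctx, prm, frm and bs were fully prepared by the caller (device opened, buffers DMA-mapped); error handling is trimmed, and the header-only encode step applies to the H.264 backend only.

static int venc_if_example(struct mtk_vcodec_ctx *ctx,
			   struct venc_enc_param *prm,
			   struct venc_frm_buf *frm,
			   struct mtk_vcodec_mem *bs)
{
	struct venc_done_result result;
	int ret;

	ret = venc_if_init(ctx, V4L2_PIX_FMT_H264);
	if (ret)
		return ret;

	/* For VP8, VENC_SET_PARAM_TS_MODE would have to be set before this. */
	ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, prm);
	if (ret)
		goto out;

	/* H.264 only: emit SPS/PPS; no input frame is consumed here. */
	ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
			     NULL, bs, &result);
	if (ret)
		goto out;

	ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
			     frm, bs, &result);
out:
	venc_if_deinit(ctx);
	return ret;
}
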
diff --git a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
new file mode 100644
index 000000000..4c869cb6f
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_IPI_MSG_H_
+#define _VENC_IPI_MSG_H_
+
+#define AP_IPIMSG_VENC_BASE 0xC000
+#define VPU_IPIMSG_VENC_BASE 0xD000
+
+/**
+ * enum venc_ipi_msg_id - message id between AP and VPU
+ * (ipi stands for inter-processor interrupt)
+ * @AP_IPIMSG_ENC_XXX: AP to VPU cmd message id
+ * @VPU_IPIMSG_ENC_XXX_DONE: VPU ack AP cmd message id
+ */
+enum venc_ipi_msg_id {
+ AP_IPIMSG_ENC_INIT = AP_IPIMSG_VENC_BASE,
+ AP_IPIMSG_ENC_SET_PARAM,
+ AP_IPIMSG_ENC_ENCODE,
+ AP_IPIMSG_ENC_DEINIT,
+
+ VPU_IPIMSG_ENC_INIT_DONE = VPU_IPIMSG_VENC_BASE,
+ VPU_IPIMSG_ENC_SET_PARAM_DONE,
+ VPU_IPIMSG_ENC_ENCODE_DONE,
+ VPU_IPIMSG_ENC_DEINIT_DONE,
+};
+
+/**
+ * struct venc_ap_ipi_msg_init - AP to VPU init cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_INIT)
+ * @reserved: reserved for future use. The VPU runs in 32-bit mode; without
+ *            this padding field, the struct size would differ between a
+ *            64-bit kernel and the VPU
+ * @venc_inst: AP encoder instance
+ * (struct venc_vp8_inst/venc_h264_inst *)
+ */
+struct venc_ap_ipi_msg_init {
+ uint32_t msg_id;
+ uint32_t reserved;
+ uint64_t venc_inst;
+};
+
+/**
+ * struct venc_ap_ipi_msg_set_param - AP to VPU set_param cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_SET_PARAM)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ * @param_id: parameter id (venc_set_param_type)
+ * @data_item: number of items in the data array
+ * @data[8]: data array to store the set parameters
+ */
+struct venc_ap_ipi_msg_set_param {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+ uint32_t param_id;
+ uint32_t data_item;
+ uint32_t data[8];
+};
+
+/**
+ * struct venc_ap_ipi_msg_enc - AP to VPU enc cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_ENCODE)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ * @bs_mode: bitstream mode for h264
+ * (H264_BS_MODE_SPS/H264_BS_MODE_PPS/H264_BS_MODE_FRAME)
+ * @input_addr: pointer to input image buffer plane
+ * @bs_addr: pointer to output bit stream buffer
+ * @bs_size: bit stream buffer size
+ */
+struct venc_ap_ipi_msg_enc {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+ uint32_t bs_mode;
+ uint32_t input_addr[3];
+ uint32_t bs_addr;
+ uint32_t bs_size;
+};
+
+/**
+ * struct venc_ap_ipi_msg_deinit - AP to VPU deinit cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_DEINIT)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ */
+struct venc_ap_ipi_msg_deinit {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * enum venc_ipi_msg_status - VPU ack AP cmd status
+ */
+enum venc_ipi_msg_status {
+ VENC_IPI_MSG_STATUS_OK,
+ VENC_IPI_MSG_STATUS_FAIL,
+};
+
+/**
+ * struct venc_vpu_ipi_msg_common - VPU ack AP cmd common structure
+ * @msg_id: message id (one of the VPU_IPIMSG_ENC_*_DONE ids)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ */
+struct venc_vpu_ipi_msg_common {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+};
+
+/**
+ * struct venc_vpu_ipi_msg_init - VPU ack AP init cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_INIT_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ * @reserved: reserved for future use. The VPU runs in 32-bit mode; without
+ *            this padding field, the struct size would differ between a
+ *            64-bit kernel and the VPU
+ */
+struct venc_vpu_ipi_msg_init {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+ uint32_t vpu_inst_addr;
+ uint32_t reserved;
+};
+
+/**
+ * struct venc_vpu_ipi_msg_set_param - VPU ack AP set_param cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_SET_PARAM_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ * @param_id: parameter id (venc_set_param_type)
+ * @data_item: number of items in the data array
+ * @data[6]: data array to store the return result
+ */
+struct venc_vpu_ipi_msg_set_param {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+ uint32_t param_id;
+ uint32_t data_item;
+ uint32_t data[6];
+};
+
+/**
+ * enum venc_ipi_msg_enc_state - Type of encode state
+ * @VEN_IPI_MSG_ENC_STATE_FRAME: one frame being encoded
+ * @VEN_IPI_MSG_ENC_STATE_PART: bit stream buffer full
+ * @VEN_IPI_MSG_ENC_STATE_SKIP: encoded skip frame
+ * @VEN_IPI_MSG_ENC_STATE_ERROR: an error was encountered
+ */
+enum venc_ipi_msg_enc_state {
+ VEN_IPI_MSG_ENC_STATE_FRAME,
+ VEN_IPI_MSG_ENC_STATE_PART,
+ VEN_IPI_MSG_ENC_STATE_SKIP,
+ VEN_IPI_MSG_ENC_STATE_ERROR,
+};
+
+/**
+ * struct venc_vpu_ipi_msg_enc - VPU ack AP enc cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_ENCODE_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ * @state: encode state (venc_ipi_msg_enc_state)
+ * @is_key_frm: whether the encoded frame is key frame
+ * @bs_size: encoded bitstream size
+ * @reserved: reserved for future use. The VPU runs in 32-bit mode; without
+ *            this padding field, the struct size would differ between a
+ *            64-bit kernel and the VPU
+ */
+struct venc_vpu_ipi_msg_enc {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+ uint32_t state;
+ uint32_t is_key_frm;
+ uint32_t bs_size;
+ uint32_t reserved;
+};
+
+/**
+ * struct venc_vpu_ipi_msg_deinit - VPU ack AP deinit cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_DEINIT_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ */
+struct venc_vpu_ipi_msg_deinit {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+};
+
+#endif /* _VENC_IPI_MSG_H_ */
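
The 'reserved' members noted in this header keep every 64-bit field at an explicit 8-byte offset, so the message layout stays identical between a 64-bit kernel and the 32-bit VPU firmware. A small compile-time sketch of that property follows; the struct definition is copied from this header under a different name, and the asserts assume the usual 4- or 8-byte alignment for uint64_t on either side.

#include <stddef.h>
#include <stdint.h>

struct venc_ap_ipi_msg_init_copy {
	uint32_t msg_id;
	uint32_t reserved;
	uint64_t venc_inst;
};

/* With the explicit padding, venc_inst sits at byte 8 and the struct is
 * 16 bytes whether uint64_t is 4- or 8-byte aligned. */
_Static_assert(offsetof(struct venc_ap_ipi_msg_init_copy, venc_inst) == 8,
	       "venc_inst must start at byte 8");
_Static_assert(sizeof(struct venc_ap_ipi_msg_init_copy) == 16,
	       "message must be 16 bytes");
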
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
new file mode 100644
index 000000000..0d882acf8
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_vpu.h"
+#include "venc_ipi_msg.h"
+#include "venc_vpu_if.h"
+
+static void handle_enc_init_msg(struct venc_vpu_inst *vpu, void *data)
+{
+ struct venc_vpu_ipi_msg_init *msg = data;
+
+ vpu->inst_addr = msg->vpu_inst_addr;
+ vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+}
+
+static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, void *data)
+{
+ struct venc_vpu_ipi_msg_enc *msg = data;
+
+ vpu->state = msg->state;
+ vpu->bs_size = msg->bs_size;
+ vpu->is_key_frm = msg->is_key_frm;
+}
+
+static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct venc_vpu_ipi_msg_common *msg = data;
+ struct venc_vpu_inst *vpu =
+ (struct venc_vpu_inst *)(unsigned long)msg->venc_inst;
+
+ mtk_vcodec_debug(vpu, "msg_id %x inst %p status %d",
+ msg->msg_id, vpu, msg->status);
+
+ switch (msg->msg_id) {
+ case VPU_IPIMSG_ENC_INIT_DONE:
+ handle_enc_init_msg(vpu, data);
+ break;
+ case VPU_IPIMSG_ENC_SET_PARAM_DONE:
+ break;
+ case VPU_IPIMSG_ENC_ENCODE_DONE:
+ handle_enc_encode_msg(vpu, data);
+ break;
+ case VPU_IPIMSG_ENC_DEINIT_DONE:
+ break;
+ default:
+ mtk_vcodec_err(vpu, "unknown msg id %x", msg->msg_id);
+ break;
+ }
+
+ vpu->signaled = 1;
+ vpu->failure = (msg->status != VENC_IPI_MSG_STATUS_OK);
+
+ mtk_vcodec_debug_leave(vpu);
+}
+
+static int vpu_enc_send_msg(struct venc_vpu_inst *vpu, void *msg,
+ int len)
+{
+ int status;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ if (!vpu->dev) {
+ mtk_vcodec_err(vpu, "inst dev is NULL");
+ return -EINVAL;
+ }
+
+ status = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+ if (status) {
+ mtk_vcodec_err(vpu, "vpu_ipi_send msg_id %x len %d fail %d",
+ *(uint32_t *)msg, len, status);
+ return -EINVAL;
+ }
+ if (vpu->failure)
+ return -EINVAL;
+
+ mtk_vcodec_debug_leave(vpu);
+
+ return 0;
+}
+
+int vpu_enc_init(struct venc_vpu_inst *vpu)
+{
+ int status;
+ struct venc_ap_ipi_msg_init out;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ init_waitqueue_head(&vpu->wq_hd);
+ vpu->signaled = 0;
+ vpu->failure = 0;
+
+ status = vpu_ipi_register(vpu->dev, vpu->id, vpu_enc_ipi_handler,
+ NULL, NULL);
+ if (status) {
+ mtk_vcodec_err(vpu, "vpu_ipi_register fail %d", status);
+ return -EINVAL;
+ }
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_INIT;
+ out.venc_inst = (unsigned long)vpu;
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_INIT fail");
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug_leave(vpu);
+
+ return 0;
+}
+
+int vpu_enc_set_param(struct venc_vpu_inst *vpu,
+ enum venc_set_param_type id,
+ struct venc_enc_param *enc_param)
+{
+ struct venc_ap_ipi_msg_set_param out;
+
+ mtk_vcodec_debug(vpu, "id %d ->", id);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_SET_PARAM;
+ out.vpu_inst_addr = vpu->inst_addr;
+ out.param_id = id;
+ switch (id) {
+ case VENC_SET_PARAM_ENC:
+ out.data_item = 0;
+ break;
+ case VENC_SET_PARAM_FORCE_INTRA:
+ out.data_item = 0;
+ break;
+ case VENC_SET_PARAM_ADJUST_BITRATE:
+ out.data_item = 1;
+ out.data[0] = enc_param->bitrate;
+ break;
+ case VENC_SET_PARAM_ADJUST_FRAMERATE:
+ out.data_item = 1;
+ out.data[0] = enc_param->frm_rate;
+ break;
+ case VENC_SET_PARAM_GOP_SIZE:
+ out.data_item = 1;
+ out.data[0] = enc_param->gop_size;
+ break;
+ case VENC_SET_PARAM_INTRA_PERIOD:
+ out.data_item = 1;
+ out.data[0] = enc_param->intra_period;
+ break;
+ case VENC_SET_PARAM_SKIP_FRAME:
+ out.data_item = 0;
+ break;
+ default:
+ mtk_vcodec_err(vpu, "id %d not supported", id);
+ return -EINVAL;
+ }
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu,
+ "AP_IPIMSG_ENC_SET_PARAM %d fail", id);
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug(vpu, "id %d <-", id);
+
+ return 0;
+}
+
+int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ struct venc_ap_ipi_msg_enc out;
+
+ mtk_vcodec_debug(vpu, "bs_mode %d ->", bs_mode);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_ENCODE;
+ out.vpu_inst_addr = vpu->inst_addr;
+ out.bs_mode = bs_mode;
+ if (frm_buf) {
+ if ((frm_buf->fb_addr[0].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[1].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[2].dma_addr % 16 == 0)) {
+ out.input_addr[0] = frm_buf->fb_addr[0].dma_addr;
+ out.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
+ out.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
+ } else {
+ mtk_vcodec_err(vpu, "dma_addr not align to 16");
+ return -EINVAL;
+ }
+ }
+ if (bs_buf) {
+ out.bs_addr = bs_buf->dma_addr;
+ out.bs_size = bs_buf->size;
+ }
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_ENCODE %d fail",
+ bs_mode);
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug(vpu, "bs_mode %d state %d size %d key_frm %d <-",
+ bs_mode, vpu->state, vpu->bs_size, vpu->is_key_frm);
+
+ return 0;
+}
+
+int vpu_enc_deinit(struct venc_vpu_inst *vpu)
+{
+ struct venc_ap_ipi_msg_deinit out;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_DEINIT;
+ out.vpu_inst_addr = vpu->inst_addr;
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_DEINIT fail");
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug_leave(vpu);
+
+ return 0;
+}
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
new file mode 100644
index 000000000..215d1e013
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_VPU_IF_H_
+#define _VENC_VPU_IF_H_
+
+#include "mtk_vpu.h"
+#include "venc_drv_if.h"
+
+/*
+ * struct venc_vpu_inst - encoder VPU driver instance
+ * @wq_hd: wait queue used to wait for VPU interrupt completion after a cmd
+ * @signaled: flag used to check whether the VPU interrupt has been handled
+ * @failure: flag indicating whether the VPU command failed
+ * @state: enum venc_ipi_msg_enc_state
+ * @bs_size: bitstream size for skip frame case usage
+ * @is_key_frm: key frame flag
+ * @inst_addr: VPU instance addr
+ * @vsi: driver structure allocated by VPU side and shared to AP side for
+ * control and info share
+ * @id: the id of inter-processor interrupt
+ * @ctx: context for v4l2 layer integration
+ * @dev: device for v4l2 layer integration
+ */
+struct venc_vpu_inst {
+ wait_queue_head_t wq_hd;
+ int signaled;
+ int failure;
+ int state;
+ int bs_size;
+ int is_key_frm;
+ unsigned int inst_addr;
+ void *vsi;
+ enum ipi_id id;
+ struct mtk_vcodec_ctx *ctx;
+ struct platform_device *dev;
+};
+
+int vpu_enc_init(struct venc_vpu_inst *vpu);
+int vpu_enc_set_param(struct venc_vpu_inst *vpu,
+ enum venc_set_param_type id,
+ struct venc_enc_param *param);
+int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size);
+int vpu_enc_deinit(struct venc_vpu_inst *vpu);
+
+#endif
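
A hedged sketch of how a VPU client driver ties the pieces above to the mtk_vpu API (vpu_get_plat_device(), vpu_load_firmware(), vpu_ipi_register(), vpu_wdt_reg_handler() and vpu_ipi_send(), as declared in mtk_vpu.h). IPI_EXAMPLE, VPU_RST_EXAMPLE and struct example_msg are placeholders for this sketch, not identifiers from the driver.

#include <linux/platform_device.h>

#include "mtk_vpu.h"

struct example_msg {
	u32 msg_id;
	u32 payload;
};

static void example_ipi_handler(void *data, unsigned int len, void *priv)
{
	/* Called in interrupt context when the VPU answers on this channel. */
}

static void example_wdt_reset(void *priv)
{
	/* Re-initialize client state after a VPU watchdog reset. */
}

static int example_vpu_client_setup(struct platform_device *client_pdev)
{
	struct platform_device *vpu_pdev;
	struct example_msg msg = { .msg_id = 0, .payload = 0 };
	int ret;

	/* Resolve the VPU device through the "mediatek,vpu" phandle. */
	vpu_pdev = vpu_get_plat_device(client_pdev);
	if (!vpu_pdev)
		return -ENODEV;

	ret = vpu_load_firmware(vpu_pdev);
	if (ret)
		return ret;

	ret = vpu_ipi_register(vpu_pdev, IPI_EXAMPLE, example_ipi_handler,
			       "example", NULL);
	if (ret)
		return ret;

	ret = vpu_wdt_reg_handler(vpu_pdev, example_wdt_reset, NULL,
				  VPU_RST_EXAMPLE);
	if (ret)
		return ret;

	/* Blocks until the VPU acks or the IPI timeout expires. */
	return vpu_ipi_send(vpu_pdev, IPI_EXAMPLE, &msg, sizeof(msg));
}
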
diff --git a/drivers/media/platform/mtk-vpu/Makefile b/drivers/media/platform/mtk-vpu/Makefile
new file mode 100644
index 000000000..58cc1b4bc
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/Makefile
@@ -0,0 +1,3 @@
+mtk-vpu-y += mtk_vpu.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu.o
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
new file mode 100644
index 000000000..9b57fb285
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -0,0 +1,966 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/dma-mapping.h>
+
+#include "mtk_vpu.h"
+
+/**
+ * VPU (video processor unit) is a tiny processor controlling video hardware
+ * related to video codecs, scaling and color format conversion. The VPU
+ * interfaces with the other blocks through shared memory and interrupts.
+ **/
+
+#define INIT_TIMEOUT_MS 2000U
+#define IPI_TIMEOUT_MS 2000U
+#define VPU_FW_VER_LEN 16
+
+/* maximum program/data TCM (Tightly-Coupled Memory) size */
+#define VPU_PTCM_SIZE (96 * SZ_1K)
+#define VPU_DTCM_SIZE (32 * SZ_1K)
+/* the offset to get data tcm address */
+#define VPU_DTCM_OFFSET 0x18000UL
+/* maximum dynamically allocated extended memory size */
+#define VPU_EXT_P_SIZE SZ_1M
+#define VPU_EXT_D_SIZE SZ_4M
+/* maximum binary firmware size */
+#define VPU_P_FW_SIZE (VPU_PTCM_SIZE + VPU_EXT_P_SIZE)
+#define VPU_D_FW_SIZE (VPU_DTCM_SIZE + VPU_EXT_D_SIZE)
+/* the size of share buffer between Host and VPU */
+#define SHARE_BUF_SIZE 48
+
+/* binary firmware name */
+#define VPU_P_FW "vpu_p.bin"
+#define VPU_D_FW "vpu_d.bin"
+
+#define VPU_RESET 0x0
+#define VPU_TCM_CFG 0x0008
+#define VPU_PMEM_EXT0_ADDR 0x000C
+#define VPU_PMEM_EXT1_ADDR 0x0010
+#define VPU_TO_HOST 0x001C
+#define VPU_DMEM_EXT0_ADDR 0x0014
+#define VPU_DMEM_EXT1_ADDR 0x0018
+#define HOST_TO_VPU 0x0024
+#define VPU_PC_REG 0x0060
+#define VPU_WDT_REG 0x0084
+
+/* vpu inter-processor communication interrupt */
+#define VPU_IPC_INT BIT(8)
+
+/**
+ * enum vpu_fw_type - VPU firmware type
+ *
+ * @P_FW: program firmware
+ * @D_FW: data firmware
+ *
+ */
+enum vpu_fw_type {
+ P_FW,
+ D_FW,
+};
+
+/**
+ * struct vpu_mem - VPU extended program/data memory information
+ *
+ * @va: the kernel virtual memory address of VPU extended memory
+ * @pa: the physical memory address of VPU extended memory
+ *
+ */
+struct vpu_mem {
+ void *va;
+ dma_addr_t pa;
+};
+
+/**
+ * struct vpu_regs - VPU TCM and configuration registers
+ *
+ * @tcm: the register for VPU Tightly-Coupled Memory
+ * @cfg: the register for VPU configuration
+ * @irq: the irq number for VPU interrupt
+ */
+struct vpu_regs {
+ void __iomem *tcm;
+ void __iomem *cfg;
+ int irq;
+};
+
+/**
+ * struct vpu_wdt_handler - VPU watchdog reset handler
+ *
+ * @reset_func: reset handler
+ * @priv: private data
+ */
+struct vpu_wdt_handler {
+ void (*reset_func)(void *);
+ void *priv;
+};
+
+/**
+ * struct vpu_wdt - VPU watchdog workqueue
+ *
+ * @handler: VPU watchdog reset handler
+ * @ws: workstruct for VPU watchdog
+ * @wq: workqueue for VPU watchdog
+ */
+struct vpu_wdt {
+ struct vpu_wdt_handler handler[VPU_RST_MAX];
+ struct work_struct ws;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct vpu_run - VPU initialization status
+ *
+ * @signaled: the signal of vpu initialization completed
+ * @fw_ver: VPU firmware version
+ * @dec_capability: decoder capability; not used for now, the value is
+ *		reserved for future use
+ * @enc_capability: encoder capability; not used for now, the value is
+ *		reserved for future use
+ * @wq: wait queue for VPU initialization status
+ */
+struct vpu_run {
+ u32 signaled;
+ char fw_ver[VPU_FW_VER_LEN];
+ unsigned int dec_capability;
+ unsigned int enc_capability;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct vpu_ipi_desc - VPU IPI descriptor
+ *
+ * @handler: IPI handler
+ * @name: the name of IPI handler
+ * @priv: the private data of IPI handler
+ */
+struct vpu_ipi_desc {
+ ipi_handler_t handler;
+ const char *name;
+ void *priv;
+};
+
+/**
+ * struct share_obj - DTCM (Data Tightly-Coupled Memory) buffer shared with
+ * AP and VPU
+ *
+ * @id: IPI id
+ * @len: share buffer length
+ * @share_buf: share buffer data
+ */
+struct share_obj {
+ s32 id;
+ u32 len;
+ unsigned char share_buf[SHARE_BUF_SIZE];
+};
+
+/**
+ * struct mtk_vpu - vpu driver data
+ * @extmem: VPU extended memory information
+ * @reg: VPU TCM and configuration registers
+ * @run: VPU initialization status
+ * @wdt: VPU watchdog workqueue
+ * @ipi_desc: VPU IPI descriptor
+ * @recv_buf: VPU DTCM share buffer for receiving. The
+ * receive buffer is only accessed in interrupt context.
+ * @send_buf: VPU DTCM share buffer for sending
+ * @dev: VPU struct device
+ * @clk: VPU clock on/off
+ * @fw_loaded: indicate VPU firmware loaded
+ * @enable_4GB: VPU 4GB mode on/off
+ * @vpu_mutex: protect mtk_vpu (except recv_buf) and ensure only
+ * one client to use VPU service at a time. For example,
+ * suppose a client is using VPU to decode VP8.
+ * If the other client wants to encode VP8,
+ * it has to wait until VP8 decode completes.
+ * @wdt_refcnt: WDT reference count to make sure the watchdog can be
+ * disabled if no other client is using VPU service
+ * @ack_wq: The wait queue for each codec and mdp. When sleeping
+ * processes wake up, they will check the condition
+ * "ipi_id_ack" to run the corresponding action or
+ * go back to sleep.
+ * @ipi_id_ack: The ACKs for registered IPI function sending
+ * interrupt to VPU
+ *
+ */
+struct mtk_vpu {
+ struct vpu_mem extmem[2];
+ struct vpu_regs reg;
+ struct vpu_run run;
+ struct vpu_wdt wdt;
+ struct vpu_ipi_desc ipi_desc[IPI_MAX];
+ struct share_obj *recv_buf;
+ struct share_obj *send_buf;
+ struct device *dev;
+ struct clk *clk;
+ bool fw_loaded;
+ bool enable_4GB;
+	struct mutex vpu_mutex; /* for protecting vpu data structures */
+ u32 wdt_refcnt;
+ wait_queue_head_t ack_wq;
+ bool ipi_id_ack[IPI_MAX];
+};
+
+static inline void vpu_cfg_writel(struct mtk_vpu *vpu, u32 val, u32 offset)
+{
+ writel(val, vpu->reg.cfg + offset);
+}
+
+static inline u32 vpu_cfg_readl(struct mtk_vpu *vpu, u32 offset)
+{
+ return readl(vpu->reg.cfg + offset);
+}
+
+static inline bool vpu_running(struct mtk_vpu *vpu)
+{
+ return vpu_cfg_readl(vpu, VPU_RESET) & BIT(0);
+}
+
+static void vpu_clock_disable(struct mtk_vpu *vpu)
+{
+ /* Disable VPU watchdog */
+ mutex_lock(&vpu->vpu_mutex);
+ if (!--vpu->wdt_refcnt)
+ vpu_cfg_writel(vpu,
+ vpu_cfg_readl(vpu, VPU_WDT_REG) & ~(1L << 31),
+ VPU_WDT_REG);
+ mutex_unlock(&vpu->vpu_mutex);
+
+ clk_disable(vpu->clk);
+}
+
+static int vpu_clock_enable(struct mtk_vpu *vpu)
+{
+ int ret;
+
+ ret = clk_enable(vpu->clk);
+ if (ret)
+ return ret;
+ /* Enable VPU watchdog */
+ mutex_lock(&vpu->vpu_mutex);
+ if (!vpu->wdt_refcnt++)
+ vpu_cfg_writel(vpu,
+ vpu_cfg_readl(vpu, VPU_WDT_REG) | (1L << 31),
+ VPU_WDT_REG);
+ mutex_unlock(&vpu->vpu_mutex);
+
+ return ret;
+}
+
+int vpu_ipi_register(struct platform_device *pdev,
+ enum ipi_id id, ipi_handler_t handler,
+ const char *name, void *priv)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct vpu_ipi_desc *ipi_desc;
+
+ if (!vpu) {
+		dev_err(&pdev->dev, "vpu device is not ready\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (id >= 0 && id < IPI_MAX && handler) {
+ ipi_desc = vpu->ipi_desc;
+ ipi_desc[id].name = name;
+ ipi_desc[id].handler = handler;
+ ipi_desc[id].priv = priv;
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "register vpu ipi id %d with invalid arguments\n",
+ id);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vpu_ipi_register);
+
+int vpu_ipi_send(struct platform_device *pdev,
+ enum ipi_id id, void *buf,
+ unsigned int len)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct share_obj *send_obj = vpu->send_buf;
+ unsigned long timeout;
+ int ret = 0;
+
+ if (id <= IPI_VPU_INIT || id >= IPI_MAX ||
+ len > sizeof(send_obj->share_buf) || !buf) {
+ dev_err(vpu->dev, "failed to send ipi message\n");
+ return -EINVAL;
+ }
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(vpu->dev, "failed to enable vpu clock\n");
+ return ret;
+ }
+ if (!vpu_running(vpu)) {
+ dev_err(vpu->dev, "vpu_ipi_send: VPU is not running\n");
+ ret = -EINVAL;
+ goto clock_disable;
+ }
+
+ mutex_lock(&vpu->vpu_mutex);
+
+ /* Wait until VPU receives the last command */
+ timeout = jiffies + msecs_to_jiffies(IPI_TIMEOUT_MS);
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_err(vpu->dev, "vpu_ipi_send: IPI timeout!\n");
+ ret = -EIO;
+ goto mut_unlock;
+ }
+ } while (vpu_cfg_readl(vpu, HOST_TO_VPU));
+
+ memcpy((void *)send_obj->share_buf, buf, len);
+ send_obj->len = len;
+ send_obj->id = id;
+
+ vpu->ipi_id_ack[id] = false;
+ /* send the command to VPU */
+ vpu_cfg_writel(vpu, 0x1, HOST_TO_VPU);
+
+ mutex_unlock(&vpu->vpu_mutex);
+
+ /* wait for VPU's ACK */
+ timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
+ ret = wait_event_timeout(vpu->ack_wq, vpu->ipi_id_ack[id], timeout);
+ vpu->ipi_id_ack[id] = false;
+ if (ret == 0) {
+ dev_err(vpu->dev, "vpu ipi %d ack time out !", id);
+ ret = -EIO;
+ goto clock_disable;
+ }
+ vpu_clock_disable(vpu);
+
+ return 0;
+
+mut_unlock:
+ mutex_unlock(&vpu->vpu_mutex);
+clock_disable:
+ vpu_clock_disable(vpu);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vpu_ipi_send);
+
+static void vpu_wdt_reset_func(struct work_struct *ws)
+{
+ struct vpu_wdt *wdt = container_of(ws, struct vpu_wdt, ws);
+ struct mtk_vpu *vpu = container_of(wdt, struct mtk_vpu, wdt);
+ struct vpu_wdt_handler *handler = wdt->handler;
+ int index, ret;
+
+ dev_info(vpu->dev, "vpu reset\n");
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(vpu->dev, "[VPU] wdt enables clock failed %d\n", ret);
+ return;
+ }
+ mutex_lock(&vpu->vpu_mutex);
+ vpu_cfg_writel(vpu, 0x0, VPU_RESET);
+ vpu->fw_loaded = false;
+ mutex_unlock(&vpu->vpu_mutex);
+ vpu_clock_disable(vpu);
+
+ for (index = 0; index < VPU_RST_MAX; index++) {
+ if (handler[index].reset_func) {
+ handler[index].reset_func(handler[index].priv);
+ dev_dbg(vpu->dev, "wdt handler func %d\n", index);
+ }
+ }
+}
+
+int vpu_wdt_reg_handler(struct platform_device *pdev,
+ void wdt_reset(void *),
+ void *priv, enum rst_id id)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct vpu_wdt_handler *handler;
+
+ if (!vpu) {
+		dev_err(&pdev->dev, "vpu device is not ready\n");
+ return -EPROBE_DEFER;
+ }
+
+ handler = vpu->wdt.handler;
+
+ if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
+ dev_dbg(vpu->dev, "wdt register id %d\n", id);
+ mutex_lock(&vpu->vpu_mutex);
+ handler[id].reset_func = wdt_reset;
+ handler[id].priv = priv;
+ mutex_unlock(&vpu->vpu_mutex);
+ return 0;
+ }
+
+ dev_err(vpu->dev, "register vpu wdt handler failed\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
+
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+ return vpu->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
+
+unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+ return vpu->run.enc_capability;
+}
+EXPORT_SYMBOL_GPL(vpu_get_venc_hw_capa);
+
+void *vpu_mapping_dm_addr(struct platform_device *pdev,
+ u32 dtcm_dmem_addr)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+ if (!dtcm_dmem_addr ||
+ (dtcm_dmem_addr > (VPU_DTCM_SIZE + VPU_EXT_D_SIZE))) {
+ dev_err(vpu->dev, "invalid virtual data memory address\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dtcm_dmem_addr < VPU_DTCM_SIZE)
+ return (__force void *)(dtcm_dmem_addr + vpu->reg.tcm +
+ VPU_DTCM_OFFSET);
+
+ return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE);
+}
+EXPORT_SYMBOL_GPL(vpu_mapping_dm_addr);
+
+struct platform_device *vpu_get_plat_device(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *vpu_node;
+ struct platform_device *vpu_pdev;
+
+ vpu_node = of_parse_phandle(dev->of_node, "mediatek,vpu", 0);
+ if (!vpu_node) {
+ dev_err(dev, "can't get vpu node\n");
+ return NULL;
+ }
+
+ vpu_pdev = of_find_device_by_node(vpu_node);
+ if (WARN_ON(!vpu_pdev)) {
+ dev_err(dev, "vpu pdev failed\n");
+ of_node_put(vpu_node);
+ return NULL;
+ }
+
+ return vpu_pdev;
+}
+EXPORT_SYMBOL_GPL(vpu_get_plat_device);
+
+/* load vpu program/data memory */
+static int load_requested_vpu(struct mtk_vpu *vpu,
+ const struct firmware *vpu_fw,
+ u8 fw_type)
+{
+ size_t tcm_size = fw_type ? VPU_DTCM_SIZE : VPU_PTCM_SIZE;
+ size_t fw_size = fw_type ? VPU_D_FW_SIZE : VPU_P_FW_SIZE;
+ char *fw_name = fw_type ? VPU_D_FW : VPU_P_FW;
+ size_t dl_size = 0;
+ size_t extra_fw_size = 0;
+ void *dest;
+ int ret;
+
+ ret = request_firmware(&vpu_fw, fw_name, vpu->dev);
+ if (ret < 0) {
+ dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, ret);
+ return ret;
+ }
+ dl_size = vpu_fw->size;
+ if (dl_size > fw_size) {
+ dev_err(vpu->dev, "fw %s size %zu is abnormal\n", fw_name,
+ dl_size);
+ release_firmware(vpu_fw);
+ return -EFBIG;
+ }
+ dev_dbg(vpu->dev, "Downloaded fw %s size: %zu.\n",
+ fw_name,
+ dl_size);
+ /* reset VPU */
+ vpu_cfg_writel(vpu, 0x0, VPU_RESET);
+
+ /* handle extended firmware size */
+ if (dl_size > tcm_size) {
+ dev_dbg(vpu->dev, "fw size %zu > limited fw size %zu\n",
+ dl_size, tcm_size);
+ extra_fw_size = dl_size - tcm_size;
+ dev_dbg(vpu->dev, "extra_fw_size %zu\n", extra_fw_size);
+ dl_size = tcm_size;
+ }
+ dest = (__force void *)vpu->reg.tcm;
+ if (fw_type == D_FW)
+ dest += VPU_DTCM_OFFSET;
+ memcpy(dest, vpu_fw->data, dl_size);
+ /* download to extended memory if need */
+ if (extra_fw_size > 0) {
+ dest = vpu->extmem[fw_type].va;
+ dev_dbg(vpu->dev, "download extended memory type %x\n",
+ fw_type);
+ memcpy(dest, vpu_fw->data + tcm_size, extra_fw_size);
+ }
+
+ release_firmware(vpu_fw);
+
+ return 0;
+}
+
+int vpu_load_firmware(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu;
+ struct device *dev = &pdev->dev;
+ struct vpu_run *run;
+ const struct firmware *vpu_fw = NULL;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev, "VPU platform device is invalid\n");
+ return -EINVAL;
+ }
+
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
+ mutex_lock(&vpu->vpu_mutex);
+ if (vpu->fw_loaded) {
+ mutex_unlock(&vpu->vpu_mutex);
+ return 0;
+ }
+ mutex_unlock(&vpu->vpu_mutex);
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(dev, "enable clock failed %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&vpu->vpu_mutex);
+
+ run->signaled = false;
+ dev_dbg(vpu->dev, "firmware request\n");
+	/* Downloading program firmware to device */
+ ret = load_requested_vpu(vpu, vpu_fw, P_FW);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request %s, %d\n", VPU_P_FW, ret);
+ goto OUT_LOAD_FW;
+ }
+
+ /* Downloading data firmware to device */
+ ret = load_requested_vpu(vpu, vpu_fw, D_FW);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request %s, %d\n", VPU_D_FW, ret);
+ goto OUT_LOAD_FW;
+ }
+
+ vpu->fw_loaded = true;
+ /* boot up vpu */
+ vpu_cfg_writel(vpu, 0x1, VPU_RESET);
+
+ ret = wait_event_interruptible_timeout(run->wq,
+ run->signaled,
+ msecs_to_jiffies(INIT_TIMEOUT_MS)
+ );
+ if (ret == 0) {
+ ret = -ETIME;
+ dev_err(dev, "wait vpu initialization timeout!\n");
+ goto OUT_LOAD_FW;
+ } else if (-ERESTARTSYS == ret) {
+ dev_err(dev, "wait vpu interrupted by a signal!\n");
+ goto OUT_LOAD_FW;
+ }
+
+ ret = 0;
+ dev_info(dev, "vpu is ready. Fw version %s\n", run->fw_ver);
+
+OUT_LOAD_FW:
+ mutex_unlock(&vpu->vpu_mutex);
+ vpu_clock_disable(vpu);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vpu_load_firmware);
+
+static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct mtk_vpu *vpu = (struct mtk_vpu *)priv;
+ struct vpu_run *run = (struct vpu_run *)data;
+
+ vpu->run.signaled = run->signaled;
+ strncpy(vpu->run.fw_ver, run->fw_ver, VPU_FW_VER_LEN);
+ vpu->run.dec_capability = run->dec_capability;
+ vpu->run.enc_capability = run->enc_capability;
+ wake_up_interruptible(&vpu->run.wq);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t vpu_debug_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[256];
+ unsigned int len;
+ unsigned int running, pc, vpu_to_host, host_to_vpu, wdt;
+ int ret;
+ struct device *dev = file->private_data;
+ struct mtk_vpu *vpu = dev_get_drvdata(dev);
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
+ return 0;
+ }
+
+ /* vpu register status */
+ running = vpu_running(vpu);
+ pc = vpu_cfg_readl(vpu, VPU_PC_REG);
+ wdt = vpu_cfg_readl(vpu, VPU_WDT_REG);
+ host_to_vpu = vpu_cfg_readl(vpu, HOST_TO_VPU);
+ vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
+ vpu_clock_disable(vpu);
+
+ if (running) {
+ len = snprintf(buf, sizeof(buf), "VPU is running\n\n"
+ "FW Version: %s\n"
+ "PC: 0x%x\n"
+ "WDT: 0x%x\n"
+ "Host to VPU: 0x%x\n"
+ "VPU to Host: 0x%x\n",
+ vpu->run.fw_ver, pc, wdt,
+ host_to_vpu, vpu_to_host);
+ } else {
+ len = snprintf(buf, sizeof(buf), "VPU not running\n");
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations vpu_debug_fops = {
+ .open = simple_open,
+ .read = vpu_debug_read,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static void vpu_free_ext_mem(struct mtk_vpu *vpu, u8 fw_type)
+{
+ struct device *dev = vpu->dev;
+ size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
+
+ dma_free_coherent(dev, fw_ext_size, vpu->extmem[fw_type].va,
+ vpu->extmem[fw_type].pa);
+}
+
+static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
+{
+ struct device *dev = vpu->dev;
+ size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
+ u32 vpu_ext_mem0 = fw_type ? VPU_DMEM_EXT0_ADDR : VPU_PMEM_EXT0_ADDR;
+ u32 vpu_ext_mem1 = fw_type ? VPU_DMEM_EXT1_ADDR : VPU_PMEM_EXT1_ADDR;
+ u32 offset_4gb = vpu->enable_4GB ? 0x40000000 : 0;
+
+ vpu->extmem[fw_type].va = dma_alloc_coherent(dev,
+ fw_ext_size,
+ &vpu->extmem[fw_type].pa,
+ GFP_KERNEL);
+ if (!vpu->extmem[fw_type].va) {
+		dev_err(dev, "Failed to allocate the extended memory\n");
+ return -ENOMEM;
+ }
+
+ /* Disable extend0. Enable extend1 */
+ vpu_cfg_writel(vpu, 0x1, vpu_ext_mem0);
+ vpu_cfg_writel(vpu, (vpu->extmem[fw_type].pa & 0xFFFFF000) + offset_4gb,
+ vpu_ext_mem1);
+
+ dev_info(dev, "%s extend memory phy=0x%llx virt=0x%p\n",
+ fw_type ? "Data" : "Program",
+ (unsigned long long)vpu->extmem[fw_type].pa,
+ vpu->extmem[fw_type].va);
+
+ return 0;
+}
+
+static void vpu_ipi_handler(struct mtk_vpu *vpu)
+{
+ struct share_obj *rcv_obj = vpu->recv_buf;
+ struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc;
+
+ if (rcv_obj->id < IPI_MAX && ipi_desc[rcv_obj->id].handler) {
+ ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf,
+ rcv_obj->len,
+ ipi_desc[rcv_obj->id].priv);
+ if (rcv_obj->id > IPI_VPU_INIT) {
+ vpu->ipi_id_ack[rcv_obj->id] = true;
+ wake_up(&vpu->ack_wq);
+ }
+ } else {
+ dev_err(vpu->dev, "No such ipi id = %d\n", rcv_obj->id);
+ }
+}
+
+static int vpu_ipi_init(struct mtk_vpu *vpu)
+{
+ /* Disable VPU to host interrupt */
+ vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
+
+ /* shared buffer initialization */
+ vpu->recv_buf = (__force struct share_obj *)(vpu->reg.tcm +
+ VPU_DTCM_OFFSET);
+ vpu->send_buf = vpu->recv_buf + 1;
+ memset(vpu->recv_buf, 0, sizeof(struct share_obj));
+ memset(vpu->send_buf, 0, sizeof(struct share_obj));
+
+ return 0;
+}
+
+static irqreturn_t vpu_irq_handler(int irq, void *priv)
+{
+ struct mtk_vpu *vpu = priv;
+ u32 vpu_to_host;
+ int ret;
+
+ /*
+ * Clock should have been enabled already.
+ * Enable again in case vpu_ipi_send times out
+ * and has disabled the clock.
+ */
+ ret = clk_enable(vpu->clk);
+ if (ret) {
+ dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
+ return IRQ_NONE;
+ }
+ vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
+ if (vpu_to_host & VPU_IPC_INT) {
+ vpu_ipi_handler(vpu);
+ } else {
+ dev_err(vpu->dev, "vpu watchdog timeout! 0x%x", vpu_to_host);
+ queue_work(vpu->wdt.wq, &vpu->wdt.ws);
+ }
+
+ /* VPU won't send another interrupt until we set VPU_TO_HOST to 0. */
+ vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
+ clk_disable(vpu->clk);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *vpu_debugfs;
+#endif
+static int mtk_vpu_probe(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu;
+ struct device *dev;
+ struct resource *res;
+ int ret = 0;
+
+ dev_dbg(&pdev->dev, "initialization\n");
+
+ dev = &pdev->dev;
+ vpu = devm_kzalloc(dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->dev = &pdev->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcm");
+ vpu->reg.tcm = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)vpu->reg.tcm))
+ return PTR_ERR((__force void *)vpu->reg.tcm);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_reg");
+ vpu->reg.cfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)vpu->reg.cfg))
+ return PTR_ERR((__force void *)vpu->reg.cfg);
+
+ /* Get VPU clock */
+ vpu->clk = devm_clk_get(dev, "main");
+ if (IS_ERR(vpu->clk)) {
+ dev_err(dev, "get vpu clock failed\n");
+ return PTR_ERR(vpu->clk);
+ }
+
+ platform_set_drvdata(pdev, vpu);
+
+ ret = clk_prepare(vpu->clk);
+ if (ret) {
+ dev_err(dev, "prepare vpu clock failed\n");
+ return ret;
+ }
+
+ /* VPU watchdog */
+ vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
+ if (!vpu->wdt.wq) {
+ dev_err(dev, "initialize wdt workqueue failed\n");
+ ret = -ENOMEM;
+ goto clk_unprepare;
+ }
+ INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
+ mutex_init(&vpu->vpu_mutex);
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(dev, "enable vpu clock failed\n");
+ goto workqueue_destroy;
+ }
+
+ dev_dbg(dev, "vpu ipi init\n");
+ ret = vpu_ipi_init(vpu);
+ if (ret) {
+ dev_err(dev, "Failed to init ipi\n");
+ goto disable_vpu_clk;
+ }
+
+ /* register vpu initialization IPI */
+ ret = vpu_ipi_register(pdev, IPI_VPU_INIT, vpu_init_ipi_handler,
+ "vpu_init", vpu);
+ if (ret) {
+ dev_err(dev, "Failed to register IPI_VPU_INIT\n");
+ goto vpu_mutex_destroy;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ vpu_debugfs = debugfs_create_file("mtk_vpu", S_IRUGO, NULL, (void *)dev,
+ &vpu_debug_fops);
+ if (!vpu_debugfs) {
+ ret = -ENOMEM;
+ goto cleanup_ipi;
+ }
+#endif
+
+ /* Set PTCM to 96K and DTCM to 32K */
+ vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG);
+
+ vpu->enable_4GB = !!(totalram_pages > (SZ_2G >> PAGE_SHIFT));
+ dev_info(dev, "4GB mode %u\n", vpu->enable_4GB);
+
+ if (vpu->enable_4GB) {
+ ret = of_reserved_mem_device_init(dev);
+ if (ret)
+ dev_info(dev, "init reserved memory failed\n");
+ /* continue to use dynamic allocation if failed */
+ }
+
+ ret = vpu_alloc_ext_mem(vpu, D_FW);
+ if (ret) {
+ dev_err(dev, "Allocate DM failed\n");
+ goto remove_debugfs;
+ }
+
+ ret = vpu_alloc_ext_mem(vpu, P_FW);
+ if (ret) {
+ dev_err(dev, "Allocate PM failed\n");
+ goto free_d_mem;
+ }
+
+ init_waitqueue_head(&vpu->run.wq);
+ init_waitqueue_head(&vpu->ack_wq);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "get IRQ resource failed.\n");
+ ret = -ENXIO;
+ goto free_p_mem;
+ }
+ vpu->reg.irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, vpu->reg.irq, vpu_irq_handler, 0,
+ pdev->name, vpu);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto free_p_mem;
+ }
+
+ vpu_clock_disable(vpu);
+ dev_dbg(dev, "initialization completed\n");
+
+ return 0;
+
+free_p_mem:
+ vpu_free_ext_mem(vpu, P_FW);
+free_d_mem:
+ vpu_free_ext_mem(vpu, D_FW);
+remove_debugfs:
+ of_reserved_mem_device_release(dev);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(vpu_debugfs);
+cleanup_ipi:
+#endif
+ memset(vpu->ipi_desc, 0, sizeof(struct vpu_ipi_desc) * IPI_MAX);
+vpu_mutex_destroy:
+ mutex_destroy(&vpu->vpu_mutex);
+disable_vpu_clk:
+ vpu_clock_disable(vpu);
+workqueue_destroy:
+ destroy_workqueue(vpu->wdt.wq);
+clk_unprepare:
+ clk_unprepare(vpu->clk);
+
+ return ret;
+}
+
+static const struct of_device_id mtk_vpu_match[] = {
+ {
+ .compatible = "mediatek,mt8173-vpu",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_vpu_match);
+
+static int mtk_vpu_remove(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(vpu_debugfs);
+#endif
+ if (vpu->wdt.wq) {
+ flush_workqueue(vpu->wdt.wq);
+ destroy_workqueue(vpu->wdt.wq);
+ }
+ vpu_free_ext_mem(vpu, P_FW);
+ vpu_free_ext_mem(vpu, D_FW);
+ mutex_destroy(&vpu->vpu_mutex);
+ clk_unprepare(vpu->clk);
+
+ return 0;
+}
+
+static struct platform_driver mtk_vpu_driver = {
+ .probe = mtk_vpu_probe,
+ .remove = mtk_vpu_remove,
+ .driver = {
+ .name = "mtk_vpu",
+ .of_match_table = mtk_vpu_match,
+ },
+};
+
+module_platform_driver(mtk_vpu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek Video Processor Unit driver");
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h
new file mode 100644
index 000000000..aec0268be
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h
@@ -0,0 +1,194 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VPU_H
+#define _MTK_VPU_H
+
+#include <linux/platform_device.h>
+
+/**
+ * VPU (video processor unit) is a tiny processor controlling video hardware
+ * related to video codecs, scaling and color format conversion.
+ * VPU interfaces with other blocks by shared memory and interrupts.
+ **/
+
+typedef void (*ipi_handler_t) (void *data,
+ unsigned int len,
+ void *priv);
+
+/**
+ * enum ipi_id - the id of inter-processor interrupt
+ *
+ * @IPI_VPU_INIT:	 The interrupt from vpu is to notify kernel
+ * VPU initialization completed.
+ * IPI_VPU_INIT is sent from VPU when firmware is
+ * loaded. AP doesn't need to send IPI_VPU_INIT
+ * command to VPU.
+ * For other IPI below, AP should send the request
+ * to VPU to trigger the interrupt.
+ * @IPI_VDEC_H264: The interrupt from vpu is to notify kernel to
+ *			 handle H264 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
+ * @IPI_VDEC_VP8:	 The interrupt from vpu is to notify kernel to
+ * handle VP8 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
+ * @IPI_VDEC_VP9: The interrupt from vpu is to notify kernel to
+ * handle VP9 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
+ * @IPI_VENC_H264: The interrupt from vpu is to notify kernel to
+ * handle H264 video encoder job, and vice versa.
+ * @IPI_VENC_VP8:	 The interrupt from vpu is to notify kernel to
+ *			 handle VP8 video encoder job, and vice versa.
+ * @IPI_MDP: The interrupt from vpu is to notify kernel to
+ * handle MDP (Media Data Path) job, and vice versa.
+ * @IPI_MAX: The maximum IPI number
+ */
+
+enum ipi_id {
+ IPI_VPU_INIT = 0,
+ IPI_VDEC_H264,
+ IPI_VDEC_VP8,
+ IPI_VDEC_VP9,
+ IPI_VENC_H264,
+ IPI_VENC_VP8,
+ IPI_MDP,
+ IPI_MAX,
+};
+
+/**
+ * enum rst_id - reset id to register reset function for VPU watchdog timeout
+ *
+ * @VPU_RST_ENC: encoder reset id
+ * @VPU_RST_DEC: decoder reset id
+ * @VPU_RST_MDP: MDP (Media Data Path) reset id
+ * @VPU_RST_MAX: maximum reset id
+ */
+enum rst_id {
+ VPU_RST_ENC,
+ VPU_RST_DEC,
+ VPU_RST_MDP,
+ VPU_RST_MAX,
+};
+
+/**
+ * vpu_ipi_register - register an ipi function
+ *
+ * @pdev: VPU platform device
+ * @id: IPI ID
+ * @handler: IPI handler
+ * @name: IPI name
+ * @priv: private data for IPI handler
+ *
+ * Register an ipi function to receive ipi interrupt from VPU.
+ *
+ * Return: 0 if the ipi handler is registered successfully, a negative error code otherwise.
+ */
+int vpu_ipi_register(struct platform_device *pdev, enum ipi_id id,
+ ipi_handler_t handler, const char *name, void *priv);
+
+/**
+ * vpu_ipi_send - send data from AP to vpu.
+ *
+ * @pdev: VPU platform device
+ * @id: IPI ID
+ * @buf: the data buffer
+ * @len: the data buffer length
+ *
+ * This function is thread-safe. When this function returns,
+ * VPU has received the data and starts the processing.
+ * When the processing completes, IPI handler registered
+ * by vpu_ipi_register will be called in interrupt context.
+ *
+ * Return: 0 if the data is sent successfully, a negative error code otherwise.
+ **/
+int vpu_ipi_send(struct platform_device *pdev,
+ enum ipi_id id, void *buf,
+ unsigned int len);
+
+/**
+ * vpu_get_plat_device - get VPU's platform device
+ *
+ * @pdev: the platform device of the module requesting VPU platform
+ * device for using VPU API.
+ *
+ * Return: VPU's platform device on success, NULL on failure.
+ **/
+struct platform_device *vpu_get_plat_device(struct platform_device *pdev);
+
+/**
+ * vpu_wdt_reg_handler - register a VPU watchdog handler
+ *
+ * @pdev: VPU platform device
+ * @vpu_wdt_reset_func: the callback reset function
+ * @priv:	the private data for the reset function
+ * @id:		reset id
+ *
+ * Register a handler that performs driver-specific recovery when the VPU is
+ * reset by the watchdog
+ *
+ * Return: 0 if the handler is added successfully, a negative error code otherwise.
+ *
+ **/
+int vpu_wdt_reg_handler(struct platform_device *pdev,
+ void vpu_wdt_reset_func(void *),
+ void *priv, enum rst_id id);
+
+/**
+ * vpu_get_vdec_hw_capa - get video decoder hardware capability
+ *
+ * @pdev: VPU platform device
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev);
+
+/**
+ * vpu_get_venc_hw_capa - get video encoder hardware capability
+ *
+ * @pdev: VPU platform device
+ *
+ * Return: video encoder hardware capability
+ **/
+unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev);
+
+/**
+ * vpu_load_firmware - download VPU firmware and boot it
+ *
+ * @pdev: VPU platform device
+ *
+ * Return: 0 if the firmware is downloaded and booted successfully,
+ *	   a negative error code otherwise
+ **/
+int vpu_load_firmware(struct platform_device *pdev);
+
+/**
+ * vpu_mapping_dm_addr - Mapping DTCM/DMEM to kernel virtual address
+ *
+ * @pdev: VPU platform device
+ * @dtcm_dmem_addr:	VPU's data memory address
+ *
+ * Mapping the VPU's DTCM (Data Tightly-Coupled Memory) /
+ * DMEM (Data Extended Memory) memory address to
+ * kernel virtual address.
+ *
+ * Return: Return ERR_PTR(-EINVAL) if mapping failed,
+ * otherwise the mapped kernel virtual address
+ **/
+void *vpu_mapping_dm_addr(struct platform_device *pdev,
+ u32 dtcm_dmem_addr);
+#endif /* _MTK_VPU_H */
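
The header above is the whole contract between the VPU and its client drivers (decoder, encoder, MDP). As a rough orientation only, the following sketch shows how a hypothetical decoder client might bind to the VPU through these exported calls; the my_vdec names, the struct layout and the "h264_dec" handler name are invented for illustration and are not part of this patch.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
#include "mtk_vpu.h"

struct my_vdec {
	struct platform_device *vpu_pdev;
	unsigned int capability;
	u32 last_msg[4];
};

/* Called in interrupt context when the VPU replies on IPI_VDEC_H264. */
static void my_vdec_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct my_vdec *vdec = priv;

	memcpy(vdec->last_msg, data,
	       min_t(unsigned int, len, sizeof(vdec->last_msg)));
}

static int my_vdec_bind_vpu(struct platform_device *pdev, struct my_vdec *vdec)
{
	struct platform_device *vpu_pdev;
	int ret;

	/* Resolve the VPU instance from the requesting platform device. */
	vpu_pdev = vpu_get_plat_device(pdev);
	if (!vpu_pdev)
		return -ENODEV;

	/* Register the handler before any IPI traffic can arrive. */
	ret = vpu_ipi_register(vpu_pdev, IPI_VDEC_H264, my_vdec_ipi_handler,
			       "h264_dec", vdec);
	if (ret)
		return ret;

	/* Download and boot the VPU firmware. */
	ret = vpu_load_firmware(vpu_pdev);
	if (ret)
		return ret;

	vdec->capability = vpu_get_vdec_hw_capa(vpu_pdev);
	vdec->vpu_pdev = vpu_pdev;

	/* Later: vpu_ipi_send(vpu_pdev, IPI_VDEC_H264, &msg, sizeof(msg)); */
	return 0;
}
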
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
new file mode 100644
index 000000000..f4be4c672
--- /dev/null
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -0,0 +1,991 @@
+/*
+ * Support eMMa-PrP through mem2mem framework.
+ *
+ * eMMa-PrP is a piece of HW that allows fetching buffers
+ * from one memory location and do several operations on
+ * them such as scaling or format conversion giving, as a result
+ * a new processed buffer in another memory location.
+ *
+ * Based on mem2mem_testdev.c by Pawel Osciak.
+ *
+ * Copyright (c) 2011 Vista Silicon S.L.
+ * Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
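
The comment above describes eMMa-PrP purely as a mem2mem converter, so it may help to see the shape of the user-space sequence a V4L2 client would run against this driver. The sketch below is illustrative only: the device node path, the emmaprp_configure name, the buffer count and the error handling are assumptions, and the usual mmap/QBUF/STREAMON/DQBUF steps are elided after the format negotiation. The pixel formats match the driver's table: packed YUYV on the OUTPUT queue, planar YUV420 on the CAPTURE queue.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Configure the m2m device: YUYV in on OUTPUT, YUV420 out on CAPTURE. */
static int emmaprp_configure(const char *devnode, unsigned int width,
			     unsigned int height)
{
	struct v4l2_format fmt;
	struct v4l2_requestbuffers req;
	int fd;

	fd = open(devnode, O_RDWR);
	if (fd < 0)
		return -1;

	/* OUTPUT queue: the frames fed to the hardware (packed YUYV). */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.width = width;
	fmt.fmt.pix.height = height;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		goto err;

	/* CAPTURE queue: the converted frames (planar YUV 4:2:0). */
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		goto err;

	/* Request MMAP buffers on both queues; QBUF/STREAMON follow as usual. */
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		goto err;

	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		goto err;

	return fd;

err:
	close(fd);
	return -1;
}
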
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/sizes.h>
+
+#define EMMAPRP_MODULE_NAME "mem2mem-emmaprp"
+
+MODULE_DESCRIPTION("Mem-to-mem device which supports eMMa-PrP present in mx2 SoCs");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.1");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 2040
+#define MAX_H 2046
+
+#define S_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN_YUV420 3 /* multiple of 8 */
+#define W_ALIGN_OTHERS 2 /* multiple of 4 */
+#define H_ALIGN 1 /* multiple of 2 */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-emmaprp"
+
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT SZ_16M
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/* EMMA PrP */
+#define PRP_CNTL 0x00
+#define PRP_INTR_CNTL 0x04
+#define PRP_INTRSTATUS 0x08
+#define PRP_SOURCE_Y_PTR 0x0c
+#define PRP_SOURCE_CB_PTR 0x10
+#define PRP_SOURCE_CR_PTR 0x14
+#define PRP_DEST_RGB1_PTR 0x18
+#define PRP_DEST_RGB2_PTR 0x1c
+#define PRP_DEST_Y_PTR 0x20
+#define PRP_DEST_CB_PTR 0x24
+#define PRP_DEST_CR_PTR 0x28
+#define PRP_SRC_FRAME_SIZE 0x2c
+#define PRP_DEST_CH1_LINE_STRIDE 0x30
+#define PRP_SRC_PIXEL_FORMAT_CNTL 0x34
+#define PRP_CH1_PIXEL_FORMAT_CNTL 0x38
+#define PRP_CH1_OUT_IMAGE_SIZE 0x3c
+#define PRP_CH2_OUT_IMAGE_SIZE 0x40
+#define PRP_SRC_LINE_STRIDE 0x44
+#define PRP_CSC_COEF_012 0x48
+#define PRP_CSC_COEF_345 0x4c
+#define PRP_CSC_COEF_678 0x50
+#define PRP_CH1_RZ_HORI_COEF1 0x54
+#define PRP_CH1_RZ_HORI_COEF2 0x58
+#define PRP_CH1_RZ_HORI_VALID 0x5c
+#define PRP_CH1_RZ_VERT_COEF1 0x60
+#define PRP_CH1_RZ_VERT_COEF2 0x64
+#define PRP_CH1_RZ_VERT_VALID 0x68
+#define PRP_CH2_RZ_HORI_COEF1 0x6c
+#define PRP_CH2_RZ_HORI_COEF2 0x70
+#define PRP_CH2_RZ_HORI_VALID 0x74
+#define PRP_CH2_RZ_VERT_COEF1 0x78
+#define PRP_CH2_RZ_VERT_COEF2 0x7c
+#define PRP_CH2_RZ_VERT_VALID 0x80
+
+#define PRP_CNTL_CH1EN (1 << 0)
+#define PRP_CNTL_CH2EN (1 << 1)
+#define PRP_CNTL_CSIEN (1 << 2)
+#define PRP_CNTL_DATA_IN_YUV420 (0 << 3)
+#define PRP_CNTL_DATA_IN_YUV422 (1 << 3)
+#define PRP_CNTL_DATA_IN_RGB16 (2 << 3)
+#define PRP_CNTL_DATA_IN_RGB32 (3 << 3)
+#define PRP_CNTL_CH1_OUT_RGB8 (0 << 5)
+#define PRP_CNTL_CH1_OUT_RGB16 (1 << 5)
+#define PRP_CNTL_CH1_OUT_RGB32 (2 << 5)
+#define PRP_CNTL_CH1_OUT_YUV422 (3 << 5)
+#define PRP_CNTL_CH2_OUT_YUV420 (0 << 7)
+#define PRP_CNTL_CH2_OUT_YUV422 (1 << 7)
+#define PRP_CNTL_CH2_OUT_YUV444 (2 << 7)
+#define PRP_CNTL_CH1_LEN (1 << 9)
+#define PRP_CNTL_CH2_LEN (1 << 10)
+#define PRP_CNTL_SKIP_FRAME (1 << 11)
+#define PRP_CNTL_SWRST (1 << 12)
+#define PRP_CNTL_CLKEN (1 << 13)
+#define PRP_CNTL_WEN (1 << 14)
+#define PRP_CNTL_CH1BYP (1 << 15)
+#define PRP_CNTL_IN_TSKIP(x) ((x) << 16)
+#define PRP_CNTL_CH1_TSKIP(x) ((x) << 19)
+#define PRP_CNTL_CH2_TSKIP(x) ((x) << 22)
+#define PRP_CNTL_INPUT_FIFO_LEVEL(x) ((x) << 25)
+#define PRP_CNTL_RZ_FIFO_LEVEL(x) ((x) << 27)
+#define PRP_CNTL_CH2B1EN (1 << 29)
+#define PRP_CNTL_CH2B2EN (1 << 30)
+#define PRP_CNTL_CH2FEN (1 << 31)
+
+#define PRP_SIZE_HEIGHT(x) (x)
+#define PRP_SIZE_WIDTH(x) ((x) << 16)
+
+/* IRQ Enable and status register */
+#define PRP_INTR_RDERR (1 << 0)
+#define PRP_INTR_CH1WERR (1 << 1)
+#define PRP_INTR_CH2WERR (1 << 2)
+#define PRP_INTR_CH1FC (1 << 3)
+#define PRP_INTR_CH2FC (1 << 5)
+#define PRP_INTR_LBOVF (1 << 7)
+#define PRP_INTR_CH2OVF (1 << 8)
+
+#define PRP_INTR_ST_RDERR (1 << 0)
+#define PRP_INTR_ST_CH1WERR (1 << 1)
+#define PRP_INTR_ST_CH2WERR (1 << 2)
+#define PRP_INTR_ST_CH2B2CI (1 << 3)
+#define PRP_INTR_ST_CH2B1CI (1 << 4)
+#define PRP_INTR_ST_CH1B2CI (1 << 5)
+#define PRP_INTR_ST_CH1B1CI (1 << 6)
+#define PRP_INTR_ST_LBOVF (1 << 7)
+#define PRP_INTR_ST_CH2OVF (1 << 8)
+
+struct emmaprp_fmt {
+ char *name;
+ u32 fourcc;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct emmaprp_fmt formats[] = {
+ {
+ .name = "YUV 4:2:0 Planar",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .types = MEM2MEM_CAPTURE,
+ },
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+
+/* Per-queue, driver-specific private data */
+struct emmaprp_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ struct emmaprp_fmt *fmt;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct emmaprp_fmt *find_format(struct v4l2_format *f)
+{
+ struct emmaprp_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct emmaprp_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ void __iomem *base_emma;
+ struct clk *clk_emma_ahb, *clk_emma_ipg;
+
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct emmaprp_ctx {
+ struct emmaprp_dev *dev;
+ /* Abort requested by m2m */
+ int aborting;
+ struct emmaprp_q_data q_data[2];
+ struct v4l2_m2m_ctx *m2m_ctx;
+};
+
+static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &(ctx->q_data[V4L2_M2M_SRC]);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &(ctx->q_data[V4L2_M2M_DST]);
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/*
+ * mem2mem callbacks
+ */
+static void emmaprp_job_abort(void *priv)
+{
+ struct emmaprp_ctx *ctx = priv;
+ struct emmaprp_dev *pcdev = ctx->dev;
+
+ ctx->aborting = 1;
+
+ dprintk(pcdev, "Aborting task\n");
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+}
+
+static inline void emmaprp_dump_regs(struct emmaprp_dev *pcdev)
+{
+ dprintk(pcdev,
+ "eMMa-PrP Registers:\n"
+ " SOURCE_Y_PTR = 0x%08X\n"
+ " SRC_FRAME_SIZE = 0x%08X\n"
+ " DEST_Y_PTR = 0x%08X\n"
+ " DEST_CR_PTR = 0x%08X\n"
+ " DEST_CB_PTR = 0x%08X\n"
+ " CH2_OUT_IMAGE_SIZE = 0x%08X\n"
+ " CNTL = 0x%08X\n",
+ readl(pcdev->base_emma + PRP_SOURCE_Y_PTR),
+ readl(pcdev->base_emma + PRP_SRC_FRAME_SIZE),
+ readl(pcdev->base_emma + PRP_DEST_Y_PTR),
+ readl(pcdev->base_emma + PRP_DEST_CR_PTR),
+ readl(pcdev->base_emma + PRP_DEST_CB_PTR),
+ readl(pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE),
+ readl(pcdev->base_emma + PRP_CNTL));
+}
+
+static void emmaprp_device_run(void *priv)
+{
+ struct emmaprp_ctx *ctx = priv;
+ struct emmaprp_q_data *s_q_data, *d_q_data;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct emmaprp_dev *pcdev = ctx->dev;
+ unsigned int s_width, s_height;
+ unsigned int d_width, d_height;
+ unsigned int d_size;
+ dma_addr_t p_in, p_out;
+ u32 tmp;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ s_width = s_q_data->width;
+ s_height = s_q_data->height;
+
+ d_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ d_width = d_q_data->width;
+ d_height = d_q_data->height;
+ d_size = d_width * d_height;
+
+ p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&pcdev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return;
+ }
+
+ /* Input frame parameters */
+ writel(p_in, pcdev->base_emma + PRP_SOURCE_Y_PTR);
+ writel(PRP_SIZE_WIDTH(s_width) | PRP_SIZE_HEIGHT(s_height),
+ pcdev->base_emma + PRP_SRC_FRAME_SIZE);
+
+ /* Output frame parameters */
+ writel(p_out, pcdev->base_emma + PRP_DEST_Y_PTR);
+ writel(p_out + d_size, pcdev->base_emma + PRP_DEST_CB_PTR);
+ writel(p_out + d_size + (d_size >> 2),
+ pcdev->base_emma + PRP_DEST_CR_PTR);
+ writel(PRP_SIZE_WIDTH(d_width) | PRP_SIZE_HEIGHT(d_height),
+ pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE);
+
+ /* IRQ configuration */
+ tmp = readl(pcdev->base_emma + PRP_INTR_CNTL);
+ writel(tmp | PRP_INTR_RDERR |
+ PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC,
+ pcdev->base_emma + PRP_INTR_CNTL);
+
+ emmaprp_dump_regs(pcdev);
+
+ /* Enable transfer */
+ tmp = readl(pcdev->base_emma + PRP_CNTL);
+ writel(tmp | PRP_CNTL_CH2_OUT_YUV420 |
+ PRP_CNTL_DATA_IN_YUV422 |
+ PRP_CNTL_CH2EN,
+ pcdev->base_emma + PRP_CNTL);
+}
+
+static irqreturn_t emmaprp_irq(int irq_emma, void *data)
+{
+ struct emmaprp_dev *pcdev = data;
+ struct emmaprp_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ unsigned long flags;
+ u32 irqst;
+
+ /* Check irq flags and clear irq */
+ irqst = readl(pcdev->base_emma + PRP_INTRSTATUS);
+ writel(irqst, pcdev->base_emma + PRP_INTRSTATUS);
+ dprintk(pcdev, "irqst = 0x%08x\n", irqst);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(pcdev->m2m_dev);
+ if (curr_ctx == NULL) {
+ pr_err("Instance released before the end of transaction\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!curr_ctx->aborting) {
+ if ((irqst & PRP_INTR_ST_RDERR) ||
+ (irqst & PRP_INTR_ST_CH2WERR)) {
+ pr_err("PrP bus error occurred, this transfer is probably corrupted\n");
+ writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
+ } else if (irqst & PRP_INTR_ST_CH2B1CI) { /* buffer ready */
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->flags &=
+ ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->timecode = src_vb->timecode;
+
+ spin_lock_irqsave(&pcdev->irqlock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&pcdev->irqlock, flags);
+ }
+ }
+
+ v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct emmaprp_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+ /* index-th format of type type found ? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct emmaprp_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
+ f->fmt.pix.bytesperline = q_data->width * 3 / 2;
+ else /* YUYV */
+ f->fmt.pix.bytesperline = q_data->width * 2;
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f)
+{
+ enum v4l2_field field;
+
+ if (!find_format(f))
+ return -EINVAL;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ f->fmt.pix.field = field;
+
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W,
+ W_ALIGN_YUV420, &f->fmt.pix.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+ } else {
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W,
+ W_ALIGN_OTHERS, &f->fmt.pix.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ }
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct emmaprp_fmt *fmt;
+ struct emmaprp_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct emmaprp_fmt *fmt;
+ struct emmaprp_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f);
+}
+
+static int vidioc_s_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
+{
+ struct emmaprp_q_data *q_data;
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = vidioc_try_fmt(f);
+ if (ret)
+ return ret;
+
+ q_data->fmt = find_format(f);
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ q_data->sizeimage = q_data->width * q_data->height * 3 / 2;
+ else /* YUYV */
+ q_data->sizeimage = q_data->width * q_data->height * 2;
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct emmaprp_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+
+/*
+ * Queue operations
+ */
+static int emmaprp_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct emmaprp_ctx *ctx = vb2_get_drv_priv(vq);
+ struct emmaprp_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ size = q_data->width * q_data->height * 3 / 2;
+ else
+ size = q_data->width * q_data->height * 2;
+
+ while (size * count > MEM2MEM_VID_MEM_LIMIT)
+ (count)--;
+
+ *nplanes = 1;
+ *nbuffers = count;
+ sizes[0] = size;
+
+ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
+
+static int emmaprp_buf_prepare(struct vb2_buffer *vb)
+{
+ struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct emmaprp_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev,
+ "%s data will not fit into plane(%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+ return 0;
+}
+
+static void emmaprp_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
+}
+
+static const struct vb2_ops emmaprp_qops = {
+ .queue_setup = emmaprp_queue_setup,
+ .buf_prepare = emmaprp_buf_prepare,
+ .buf_queue = emmaprp_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct emmaprp_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &emmaprp_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->lock = &ctx->dev->dev_mutex;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &emmaprp_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int emmaprp_open(struct file *file)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->dev = pcdev;
+
+ if (mutex_lock_interruptible(&pcdev->dev_mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->m2m_ctx)) {
+ int ret = PTR_ERR(ctx->m2m_ctx);
+
+ mutex_unlock(&pcdev->dev_mutex);
+ kfree(ctx);
+ return ret;
+ }
+
+ clk_prepare_enable(pcdev->clk_emma_ipg);
+ clk_prepare_enable(pcdev->clk_emma_ahb);
+ ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1];
+ ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+ mutex_unlock(&pcdev->dev_mutex);
+
+ dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+ return 0;
+}
+
+static int emmaprp_release(struct file *file)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx = file->private_data;
+
+ dprintk(pcdev, "Releasing instance %p\n", ctx);
+
+ mutex_lock(&pcdev->dev_mutex);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ mutex_unlock(&pcdev->dev_mutex);
+ kfree(ctx);
+
+ return 0;
+}
+
+static __poll_t emmaprp_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx = file->private_data;
+ __poll_t res;
+
+ mutex_lock(&pcdev->dev_mutex);
+ res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&pcdev->dev_mutex);
+ return res;
+}
+
+static int emmaprp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct emmaprp_dev *pcdev = video_drvdata(file);
+ struct emmaprp_ctx *ctx = file->private_data;
+ int ret;
+
+ if (mutex_lock_interruptible(&pcdev->dev_mutex))
+ return -ERESTARTSYS;
+ ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ mutex_unlock(&pcdev->dev_mutex);
+ return ret;
+}
+
+static const struct v4l2_file_operations emmaprp_fops = {
+ .owner = THIS_MODULE,
+ .open = emmaprp_open,
+ .release = emmaprp_release,
+ .poll = emmaprp_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = emmaprp_mmap,
+};
+
+static const struct video_device emmaprp_videodev = {
+ .name = MEM2MEM_NAME,
+ .fops = &emmaprp_fops,
+ .ioctl_ops = &emmaprp_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = emmaprp_device_run,
+ .job_abort = emmaprp_job_abort,
+};
+
+static int emmaprp_probe(struct platform_device *pdev)
+{
+ struct emmaprp_dev *pcdev;
+ struct video_device *vfd;
+ struct resource *res;
+ int irq, ret;
+
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev)
+ return -ENOMEM;
+
+ spin_lock_init(&pcdev->irqlock);
+
+ pcdev->clk_emma_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(pcdev->clk_emma_ipg))
+		return PTR_ERR(pcdev->clk_emma_ipg);
+
+ pcdev->clk_emma_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(pcdev->clk_emma_ahb))
+ return PTR_ERR(pcdev->clk_emma_ahb);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcdev->base_emma = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pcdev->base_emma))
+ return PTR_ERR(pcdev->base_emma);
+
+ ret = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&pcdev->dev_mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ *vfd = emmaprp_videodev;
+ vfd->lock = &pcdev->dev_mutex;
+ vfd->v4l2_dev = &pcdev->v4l2_dev;
+
+ video_set_drvdata(vfd, pcdev);
+ pcdev->vfd = vfd;
+ v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
+ " Device registered as /dev/video%d\n", vfd->num);
+
+ platform_set_drvdata(pdev, pcdev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto rel_vdev;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
+ dev_name(&pdev->dev), pcdev);
+ if (ret)
+ goto rel_vdev;
+
+ pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(pcdev->m2m_dev)) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(pcdev->m2m_dev);
+ goto rel_vdev;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
+ goto rel_m2m;
+ }
+
+ return 0;
+
+rel_m2m:
+ v4l2_m2m_release(pcdev->m2m_dev);
+rel_vdev:
+ video_device_release(vfd);
+unreg_dev:
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+
+ mutex_destroy(&pcdev->dev_mutex);
+
+ return ret;
+}
+
+static int emmaprp_remove(struct platform_device *pdev)
+{
+ struct emmaprp_dev *pcdev = platform_get_drvdata(pdev);
+
+ v4l2_info(&pcdev->v4l2_dev, "Removing " EMMAPRP_MODULE_NAME);
+
+ video_unregister_device(pcdev->vfd);
+ v4l2_m2m_release(pcdev->m2m_dev);
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+ mutex_destroy(&pcdev->dev_mutex);
+
+ return 0;
+}
+
+static struct platform_driver emmaprp_pdrv = {
+ .probe = emmaprp_probe,
+ .remove = emmaprp_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ },
+};
+module_platform_driver(emmaprp_pdrv);
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
new file mode 100644
index 000000000..4b5e55d41
--- /dev/null
+++ b/drivers/media/platform/omap/Kconfig
@@ -0,0 +1,18 @@
+config VIDEO_OMAP2_VOUT_VRFB
+ bool
+ default y
+ depends on VIDEO_OMAP2_VOUT && (OMAP2_VRFB || COMPILE_TEST)
+
+config VIDEO_OMAP2_VOUT
+ tristate "OMAP2/OMAP3 V4L2-Display driver"
+ depends on MMU
+ depends on FB_OMAP2 || (COMPILE_TEST && FB_OMAP2=n)
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || COMPILE_TEST
+ depends on VIDEO_V4L2
+ select VIDEOBUF_GEN
+ select VIDEOBUF_DMA_CONTIG
+ select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
+ select FRAME_VECTOR
+ default n
+ ---help---
+ V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap/Makefile b/drivers/media/platform/omap/Makefile
new file mode 100644
index 000000000..d80df41fd
--- /dev/null
+++ b/drivers/media/platform/omap/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the omap video device drivers.
+#
+
+# OMAP2/3 Display driver
+omap-vout-y += omap_vout.o omap_voutlib.o
+omap-vout-$(CONFIG_VIDEO_OMAP2_VOUT_VRFB) += omap_vout_vrfb.o
+obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
new file mode 100644
index 000000000..45511d24d
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -0,0 +1,2223 @@
+/*
+ * omap_vout.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Leveraged code from the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * History:
+ * 20-APR-2006 Khasim Modified VRFB based Rotation,
+ * The image data is always read from 0 degree
+ * view and written
+ * to the virtual space of desired rotation angle
+ * 4-DEC-2006 Jian Changed to support better memory management
+ *
+ * 17-Nov-2008 Hardik Changed driver to use video_ioctl2
+ *
+ * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/videodev2.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include <video/omapvrfb.h>
+#include <video/omapfb_dss.h>
+
+#include "omap_voutlib.h"
+#include "omap_voutdef.h"
+#include "omap_vout_vrfb.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
+MODULE_LICENSE("GPL");
+
+/* Driver Configuration macros */
+#define VOUT_NAME "omap_vout"
+
+enum omap_vout_channels {
+ OMAP_VIDEO1,
+ OMAP_VIDEO2,
+};
+
+static struct videobuf_queue_ops video_vbq_ops;
+/* Variables configurable through module params*/
+static u32 video1_numbuffers = 3;
+static u32 video2_numbuffers = 3;
+static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static bool vid1_static_vrfb_alloc;
+static bool vid2_static_vrfb_alloc;
+static bool debug;
+
+/* Module parameters */
+module_param(video1_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_numbuffers,
+ "Number of buffers to be allocated at init time for Video1 device.");
+
+module_param(video2_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_numbuffers,
+ "Number of buffers to be allocated at init time for Video2 device.");
+
+module_param(video1_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_bufsize,
+ "Size of the buffer to be allocated for video1 device");
+
+module_param(video2_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_bufsize,
+ "Size of the buffer to be allocated for video2 device");
+
+module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid1_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video1 device");
+
+module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid2_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video2 device");
+
+module_param(debug, bool, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+/* list of image formats supported by OMAP2 video pipelines */
+static const struct v4l2_fmtdesc omap_formats[] = {
+ {
+ /* Note: V4L2 defines RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
+ *
+ * We interpret RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
+ */
+ .description = "RGB565, le",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ /* Note: V4L2 defines RGB32 as: RGB-8-8-8-8 we use
+ * this for RGB24 unpack mode, the last 8 bits are ignored
+ * */
+ .description = "RGB32, le",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ },
+ {
+ /* Note: V4L2 defines RGB24 as: RGB-8-8-8 we use
+ * this for RGB24 packed mode
+ *
+ */
+ .description = "RGB24, le",
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ },
+ {
+ .description = "YUYV (YUV 4:2:2), packed",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .description = "UYVY, packed",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
+
+/*
+ * Try format
+ */
+static int omap_vout_try_format(struct v4l2_pix_format *pix)
+{
+ int ifmt, bpp = 0;
+
+ pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
+ (u32)VID_MAX_HEIGHT);
+ pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+ for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+ if (pix->pixelformat == omap_formats[ifmt].pixelformat)
+ break;
+ }
+
+ if (ifmt == NUM_OUTPUT_FORMATS)
+ ifmt = 0;
+
+ pix->pixelformat = omap_formats[ifmt].pixelformat;
+ pix->field = V4L2_FIELD_ANY;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ default:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = YUYV_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB565_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB24_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB32_BPP;
+ break;
+ }
+ pix->bytesperline = pix->width * bpp;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return bpp;
+}
+
+/*
+ * omap_vout_get_userptr: Convert user space virtual address to physical
+ * address.
+ */
+static int omap_vout_get_userptr(struct videobuf_buffer *vb, long virtp,
+ u32 *physp)
+{
+ struct frame_vector *vec;
+ int ret;
+
+ /* For kernel direct-mapped memory, take the easy way */
+ if (virtp >= PAGE_OFFSET) {
+ *physp = virt_to_phys((void *)virtp);
+ return 0;
+ }
+
+ vec = frame_vector_create(1);
+ if (!vec)
+ return -ENOMEM;
+
+ ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
+ if (ret != 1) {
+ frame_vector_destroy(vec);
+ return -EINVAL;
+ }
+ *physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
+ vb->priv = vec;
+
+ return 0;
+}
+
+/*
+ * Free the V4L2 buffers
+ */
+void omap_vout_free_buffers(struct omap_vout_device *vout)
+{
+ int i, numbuffers;
+
+	/* Determine how many buffers were allocated and free them */
+ numbuffers = (vout->vid) ? video2_numbuffers : video1_numbuffers;
+ vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;
+
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_virt_addr[i] = 0;
+ }
+}
+
+/*
+ * Convert V4L2 rotation to DSS rotation
+ * V4L2 understands 0, 90, 180 and 270.
+ * Convert these to 0, 1, 2 and 3 respectively for the DSS
+ */
+static int v4l2_rot_to_dss_rot(int v4l2_rotation,
+ enum dss_rotation *rotation, bool mirror)
+{
+ int ret = 0;
+
+ switch (v4l2_rotation) {
+ case 90:
+ *rotation = dss_rotation_90_degree;
+ break;
+ case 180:
+ *rotation = dss_rotation_180_degree;
+ break;
+ case 270:
+ *rotation = dss_rotation_270_degree;
+ break;
+ case 0:
+ *rotation = dss_rotation_0_degree;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int omap_vout_calculate_offset(struct omap_vout_device *vout)
+{
+ struct omapvideo_info *ovid;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int ps = 2, line_length = 0;
+
+ ovid = &vout->vid_info;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_calculate_vrfb_offset(vout);
+ } else {
+ vout->line_length = line_length = pix->width;
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat)
+ ps = 2;
+ else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat)
+ ps = 4;
+ else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat)
+ ps = 3;
+
+ vout->ps = ps;
+
+ *cropped_offset = (line_length * ps) *
+ crop->top + crop->left * ps;
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
+ __func__, vout->cropped_offset);
+
+ return 0;
+}
+
+/*
+ * Convert V4L2 pixel format to DSS pixel format
+ */
+static int video_mode_to_dss_mode(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_pix_format *pix = &vout->pix;
+ enum omap_color_mode mode;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ mode = OMAP_DSS_COLOR_YUV2;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mode = OMAP_DSS_COLOR_UYVY;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ mode = OMAP_DSS_COLOR_RGB16;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ mode = OMAP_DSS_COLOR_RGB24P;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ mode = (ovl->id == OMAP_DSS_VIDEO1) ?
+ OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ mode = OMAP_DSS_COLOR_RGBX32;
+ break;
+ default:
+ mode = -EINVAL;
+ break;
+ }
+ return mode;
+}
+
+/*
+ * Setup the overlay
+ */
+static int omapvid_setup_overlay(struct omap_vout_device *vout,
+ struct omap_overlay *ovl, int posx, int posy, int outw,
+ int outh, u32 addr)
+{
+ int ret = 0;
+ struct omap_overlay_info info;
+ int cropheight, cropwidth, pixwidth;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
+ (outw != vout->pix.width || outh != vout->pix.height)) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ vout->dss_mode = video_mode_to_dss_mode(vout);
+ if (vout->dss_mode == -EINVAL) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ /* Setup the input plane parameters according to
+ * rotation value selected.
+ */
+ if (is_rotation_90_or_270(vout)) {
+ cropheight = vout->crop.width;
+ cropwidth = vout->crop.height;
+ pixwidth = vout->pix.height;
+ } else {
+ cropheight = vout->crop.height;
+ cropwidth = vout->crop.width;
+ pixwidth = vout->pix.width;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+ info.paddr = addr;
+ info.width = cropwidth;
+ info.height = cropheight;
+ info.color_mode = vout->dss_mode;
+ info.mirror = vout->mirror;
+ info.pos_x = posx;
+ info.pos_y = posy;
+ info.out_width = outw;
+ info.out_height = outh;
+ info.global_alpha = vout->win.global_alpha;
+ if (!is_rotation_enabled(vout)) {
+ info.rotation = 0;
+ info.rotation_type = OMAP_DSS_ROT_DMA;
+ info.screen_width = pixwidth;
+ } else {
+ info.rotation = vout->rotation;
+ info.rotation_type = OMAP_DSS_ROT_VRFB;
+ info.screen_width = 2048;
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "%s enable=%d addr=%pad width=%d\n height=%d color_mode=%d\n"
+ "rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
+ "out_height=%d rotation_type=%d screen_width=%d\n", __func__,
+ ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
+ info.color_mode, info.rotation, info.mirror, info.pos_x,
+ info.pos_y, info.out_width, info.out_height, info.rotation_type,
+ info.screen_width);
+
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ goto setup_ovl_err;
+
+ return 0;
+
+setup_ovl_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
+ return ret;
+}
+
+/*
+ * Initialize the overlay structure
+ */
+static int omapvid_init(struct omap_vout_device *vout, u32 addr)
+{
+ int ret = 0, i;
+ struct v4l2_window *win;
+ struct omap_overlay *ovl;
+ int posx, posy, outw, outh;
+ struct omap_video_timings *timing;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ win = &vout->win;
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_dss_device *dssdev;
+
+ ovl = ovid->overlays[i];
+ dssdev = ovl->get_device(ovl);
+
+ if (!dssdev)
+ return -EINVAL;
+
+ timing = &dssdev->panel.timings;
+
+ outw = win->w.width;
+ outh = win->w.height;
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ /* Invert the height and width for 90
+ * and 270 degree rotation
+ */
+ swap(outw, outh);
+ posy = (timing->y_res - win->w.width) - win->w.left;
+ posx = win->w.top;
+ break;
+
+ case dss_rotation_180_degree:
+ posx = (timing->x_res - win->w.width) - win->w.left;
+ posy = (timing->y_res - win->w.height) - win->w.top;
+ break;
+
+ case dss_rotation_270_degree:
+ swap(outw, outh);
+ posy = win->w.left;
+ posx = (timing->x_res - win->w.height) - win->w.top;
+ break;
+
+ default:
+ posx = win->w.left;
+ posy = win->w.top;
+ break;
+ }
+
+ ret = omapvid_setup_overlay(vout, ovl, posx, posy,
+ outw, outh, addr);
+ if (ret)
+ goto omapvid_init_err;
+ }
+ return 0;
+
+omapvid_init_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
+ return ret;
+}
+
+/*
+ * Apply the changes and set the GO bit of the DSS
+ */
+static int omapvid_apply_changes(struct omap_vout_device *vout)
+{
+ int i;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_dss_device *dssdev;
+
+ ovl = ovid->overlays[i];
+ dssdev = ovl->get_device(ovl);
+ if (!dssdev)
+ return -EINVAL;
+ ovl->manager->apply(ovl->manager);
+ }
+
+ return 0;
+}
+
+static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
+ unsigned int irqstatus, struct timeval timevalue)
+{
+ u32 fid;
+
+ if (vout->first_int) {
+ vout->first_int = 0;
+ goto err;
+ }
+
+ if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+ fid = 1;
+ else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+ fid = 0;
+ else
+ goto err;
+
+ vout->field_id ^= 1;
+ if (fid != vout->field_id) {
+ if (fid == 0)
+ vout->field_id = fid;
+ } else if (0 == fid) {
+ if (vout->cur_frm == vout->next_frm)
+ goto err;
+
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ } else {
+ if (list_empty(&vout->dma_queue) ||
+ (vout->cur_frm != vout->next_frm))
+ goto err;
+ }
+
+ return vout->field_id;
+err:
+ return 0;
+}
+
+static void omap_vout_isr(void *arg, unsigned int irqstatus)
+{
+ int ret, fid, mgr_id;
+ u32 addr, irq;
+ struct omap_overlay *ovl;
+ struct timeval timevalue;
+ struct omapvideo_info *ovid;
+ struct omap_dss_device *cur_display;
+ struct omap_vout_device *vout = (struct omap_vout_device *)arg;
+
+ if (!vout->streaming)
+ return;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ mgr_id = ovl->manager->id;
+
+ /* get the display device attached to the overlay */
+ cur_display = ovl->get_device(ovl);
+
+ if (!cur_display)
+ return;
+
+ spin_lock(&vout->vbq_lock);
+ v4l2_get_timestamp(&timevalue);
+
+ switch (cur_display->type) {
+ case OMAP_DISPLAY_TYPE_DSI:
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DVI:
+ if (mgr_id == OMAP_DSS_CHANNEL_LCD)
+ irq = DISPC_IRQ_VSYNC;
+ else if (mgr_id == OMAP_DSS_CHANNEL_LCD2)
+ irq = DISPC_IRQ_VSYNC2;
+ else
+ goto vout_isr_err;
+
+ if (!(irqstatus & irq))
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_VENC:
+ fid = omapvid_handle_interlace_display(vout, irqstatus,
+ timevalue);
+ if (!fid)
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_HDMI:
+ if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
+ goto vout_isr_err;
+ break;
+ default:
+ goto vout_isr_err;
+ }
+
+ if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ }
+
+ vout->first_int = 0;
+ if (list_empty(&vout->dma_queue))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+
+ addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+ + vout->cropped_offset;
+
+	/* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret) {
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ goto vout_isr_err;
+ }
+
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+
+vout_isr_err:
+ spin_unlock(&vout->vbq_lock);
+}
+
+/* Video buffer call backs */
+
+/*
+ * Buffer setup function is called by videobuf layer when REQBUF ioctl is
+ * called. This is used to setup buffers and return size and count of
+ * buffers allocated. After this call, the videobuf layer will set up the
+ * buffer queue depending on the size and count of buffers
+ */
+static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ int startindex = 0, i, j;
+ u32 phy_addr = 0, virt_addr = 0;
+ struct omap_vout_device *vout = q->priv_data;
+ struct omapvideo_info *ovid = &vout->vid_info;
+ int vid_max_buf_size;
+
+ if (!vout)
+ return -EINVAL;
+
+ vid_max_buf_size = vout->vid == OMAP_VIDEO1 ? video1_bufsize :
+ video2_bufsize;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
+ return -EINVAL;
+
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
+ *count = startindex;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
+ return -ENOMEM;
+ }
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return 0;
+
+	/* Now allocate the V4L2 buffers */
+ *size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ /* Check the size of the buffer */
+ if (*size > vid_max_buf_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "buffer allocation mismatch [%u] [%u]\n",
+ *size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
+ for (i = startindex; i < *count; i++) {
+ vout->buffer_size = *size;
+
+ virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
+ &phy_addr);
+ if (!virt_addr) {
+ if (ovid->rotation_type == VOUT_ROT_NONE)
+ break;
+
+ if (!is_rotation_enabled(vout))
+ break;
+
+ /* Free the VRFB buffers if no space for V4L2 buffers */
+ for (j = i; j < *count; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ }
+ vout->buf_virt_addr[i] = virt_addr;
+ vout->buf_phy_addr[i] = phy_addr;
+ }
+ *count = vout->buffer_allocated = i;
+
+ return 0;
+}
+
+/*
+ * Free the V4L2 buffers that were allocated in addition to the default
+ * number of buffers
+ */
+static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
+{
+ int num_buffers = 0, i;
+
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ if (vout->buf_virt_addr[i])
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+}
+
+/*
+ * This function is called when the VIDIOC_QBUF ioctl is called.
+ * It prepares buffers before they are handed to the display. It converts
+ * the user space virtual address into a physical address if the userptr
+ * memory exchange mechanism is used. If rotation is enabled, it copies the
+ * entire buffer into VRFB memory space before giving it to the DSS.
+ */
+static int omap_vout_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct omap_vout_device *vout = q->priv_data;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vout->pix.width;
+ vb->height = vout->pix.height;
+ vb->size = vb->width * vb->height * vout->bpp;
+ vb->field = field;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ /* if user pointer memory mechanism is used, get the physical
+ * address of the buffer
+ */
+ if (V4L2_MEMORY_USERPTR == vb->memory) {
+ int ret;
+
+ if (0 == vb->baddr)
+ return -EINVAL;
+ /* Physical address */
+ ret = omap_vout_get_userptr(vb, vb->baddr,
+ (u32 *)&vout->queued_buf_addr[vb->i]);
+ if (ret < 0)
+ return ret;
+ } else {
+ unsigned long addr, dma_addr;
+ unsigned long size;
+
+ addr = (unsigned long) vout->buf_virt_addr[vb->i];
+ size = (unsigned long) vb->size;
+
+ dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "dma_map_single failed\n");
+
+ vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
+ }
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB)
+ return omap_vout_prepare_vrfb(vout, vb);
+ else
+ return 0;
+}
+
+/*
+ * Buffer queue function is called from the videobuf layer when the _QBUF
+ * ioctl is called. It is used to enqueue a buffer which is ready to be
+ * displayed.
+ */
+static void omap_vout_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ /* The driver also maintains a queue, so enqueue the buffer in the
+ * driver queue */
+ list_add_tail(&vb->queue, &vout->dma_queue);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * Buffer release function is called from the videobuf layer to release
+ * buffers which were already allocated
+ */
+static void omap_vout_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
+ struct frame_vector *vec = vb->priv;
+
+ put_vaddr_frames(vec);
+ frame_vector_destroy(vec);
+ }
+}
+
+/*
+ * File operations
+ */
+static __poll_t omap_vout_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ return videobuf_poll_stream(file, q, wait);
+}
+
+static void omap_vout_vm_open(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count++;
+}
+
+static void omap_vout_vm_close(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count--;
+}
+
+static const struct vm_operations_struct omap_vout_vm_ops = {
+ .open = omap_vout_vm_open,
+ .close = omap_vout_vm_close,
+};
+
+static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int i;
+ void *pos;
+ unsigned long start = vma->vm_start;
+ unsigned long size = (vma->vm_end - vma->vm_start);
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ " %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
+ vma->vm_pgoff, vma->vm_start, vma->vm_end);
+
+ /* look for the buffer to map */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+ continue;
+ if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ if (VIDEO_MAX_FRAME == i) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ return -EINVAL;
+ }
+ /* Check the size of the buffer */
+ if (size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "insufficient memory [%lu] [%u]\n",
+ size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
+ q->bufs[i]->baddr = vma->vm_start;
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &omap_vout_vm_ops;
+ vma->vm_private_data = (void *) vout;
+ pos = (void *)vout->buf_virt_addr[i];
+ vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
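+ /* Map the buffer into the user's address space one page at a time */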
+ while (size > 0) {
+ unsigned long pfn;
+ pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ vout->mmap_count++;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+
+ return 0;
+}
+
+static int omap_vout_release(struct file *file)
+{
+ unsigned int ret, i;
+ struct videobuf_queue *q;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = file->private_data;
+
+ if (!vout)
+ return 0;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+ ovid = &vout->vid_info;
+
+ q = &vout->vbq;
+ /* Disable all the overlay managers connected with this interface */
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_overlay *ovl = ovid->overlays[i];
+ struct omap_dss_device *dssdev = ovl->get_device(ovl);
+
+ if (dssdev)
+ ovl->disable(ovl);
+ }
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "Unable to apply changes\n");
+
+ /* Free all buffers */
+ omap_vout_free_extra_buffers(vout);
+
+ /* Free the VRFB buffers only if they were allocated
+ * during reqbufs. Don't free buffers allocated at init time
+ */
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ if (!vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
+ videobuf_mmap_free(q);
+
+ /* Even if applying the changes fails we should continue
+ freeing the allocated memory */
+ if (vout->streaming) {
+ u32 mask = 0;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+ vout->streaming = false;
+
+ videobuf_streamoff(q);
+ videobuf_queue_cancel(q);
+ }
+
+ if (vout->mmap_count != 0)
+ vout->mmap_count = 0;
+
+ vout->opened -= 1;
+ file->private_data = NULL;
+
+ if (vout->buffer_allocated)
+ videobuf_mmap_free(q);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return ret;
+}
+
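+/*
+ * File open: only a single open per video output device is supported.
+ * Initialise the videobuf queue used for this device.
+ */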
+static int omap_vout_open(struct file *file)
+{
+ struct videobuf_queue *q;
+ struct omap_vout_device *vout = NULL;
+
+ vout = video_drvdata(file);
+
+ if (vout == NULL)
+ return -ENODEV;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
+ /* for now, we only support single open */
+ if (vout->opened)
+ return -EBUSY;
+
+ vout->opened += 1;
+
+ file->private_data = vout;
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
+ video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+ video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+ video_vbq_ops.buf_release = omap_vout_buffer_release;
+ video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+ &vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vout, NULL);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return 0;
+}
+
+/*
+ * V4L2 ioctls
+ */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct omap_vout_device *vout = fh;
+
+ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+ cap->bus_info[0] = '\0';
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+
+ f->fmt.pix = vout->pix;
+ return 0;
+
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+ struct omap_dss_device *dssdev;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ dssdev = ovl->get_device(ovl);
+
+ if (!dssdev)
+ return -EINVAL;
+
+ timing = &dssdev->panel.timings;
+
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+
+ omap_vout_try_format(&f->fmt.pix);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret, bpp;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+ struct omap_dss_device *dssdev;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ dssdev = ovl->get_device(ovl);
+
+ /* get the display device attached to the overlay */
+ if (!dssdev) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+ timing = &dssdev->panel.timings;
+
+ /* We don't support RGB24-packed mode if VRFB rotation
+ * is enabled */
+ if ((is_rotation_enabled(vout)) &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+
+ /* get the framebuffer parameters */
+
+ if (is_rotation_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ /* changing to a smaller size is OK */
+
+ bpp = omap_vout_try_format(&f->fmt.pix);
+ f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;
+
+ /* try & set the new output format */
+ vout->bpp = bpp;
+ vout->pix = f->fmt.pix;
+ vout->vrfb_bpp = 1;
+
+ /* If YUYV then vrfb bpp is 2, for others its 1 */
+ if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
+ V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
+ vout->vrfb_bpp = 2;
+
+ /* set default crop and win */
+ omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ ret = 0;
+
+s_fmt_vid_out_exit:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ ret = omap_vout_try_window(&vout->fbuf, win);
+
+ if (!ret) {
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
+ win->global_alpha = 255;
+ else
+ win->global_alpha = f->fmt.win.global_alpha;
+ }
+
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
+ if (!ret) {
+ /* Video1 plane does not support global alpha on OMAP3 */
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
+ vout->win.global_alpha = 255;
+ else
+ vout->win.global_alpha = f->fmt.win.global_alpha;
+
+ vout->win.chromakey = f->fmt.win.chromakey;
+ }
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ u32 key_value = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ win->w = vout->win.w;
+ win->field = vout->win.field;
+ win->global_alpha = vout->win.global_alpha;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ key_value = info.trans_key;
+ }
+ win->chromakey = key_value;
+ return 0;
+}
+
+static int vidioc_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct omap_vout_device *vout = fh;
+ struct v4l2_pix_format *pix = &vout->pix;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = vout->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ omap_vout_default_crop(&vout->pix, &vout->fbuf, &sel->r);
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ /* Width and height are always even */
+ sel->r.width = pix->width & ~1;
+ sel->r.height = pix->height & ~1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ int ret = -EINVAL;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+ struct omap_dss_device *dssdev;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ dssdev = ovl->get_device(ovl);
+
+ if (!dssdev) {
+ ret = -EINVAL;
+ goto s_crop_err;
+ }
+
+ timing = &dssdev->panel.timings;
+
+ if (is_rotation_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+ &vout->fbuf, &sel->r);
+
+s_crop_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
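+/*
+ * Control handler: V4L2_CID_ROTATE selects the rotation angle,
+ * V4L2_CID_BG_COLOR sets the overlay manager's default colour and
+ * V4L2_CID_VFLIP enables mirroring. Rotation and mirroring require VRFB
+ * support and are refused for the RGB24 pixel format.
+ */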
+static int omap_vout_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct omap_vout_device *vout =
+ container_of(ctrl->handler, struct omap_vout_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE: {
+ struct omapvideo_info *ovid;
+ int rotation = ctrl->val;
+
+ ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+ if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
+
+ if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
+ vout->mirror)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay *ovl;
+ unsigned int color = ctrl->val;
+ struct omap_overlay_manager_info info;
+
+ ovl = vout->vid_info.overlays[0];
+
+ mutex_lock(&vout->lock);
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.default_color = color;
+ if (ovl->manager->set_manager_info(ovl->manager, &info)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ {
+ struct omapvideo_info *ovid;
+ unsigned int mirror = ctrl->val;
+
+ ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+ if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
+
+ if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+ vout->mirror = mirror;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops omap_vout_ctrl_ops = {
+ .s_ctrl = omap_vout_s_ctrl,
+};
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ int ret = 0;
+ unsigned int i, num_buffers = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ /* if memory is not mmap or userptr
+ return an error */
+ if ((V4L2_MEMORY_MMAP != req->memory) &&
+ (V4L2_MEMORY_USERPTR != req->memory))
+ return -EINVAL;
+
+ mutex_lock(&vout->lock);
+ /* Cannot be requested when streaming is on */
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+
+ /* If buffers are already allocated free them */
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+ if (vout->mmap_count) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+ videobuf_mmap_free(q);
+ } else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
+ if (vout->buffer_allocated) {
+ videobuf_mmap_free(q);
+ for (i = 0; i < vout->buffer_allocated; i++) {
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ vout->buffer_allocated = 0;
+ }
+ }
+
+ /* store the memory type in the data structure */
+ vout->memory = req->memory;
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+
+ /* call videobuf_reqbufs api */
+ ret = videobuf_reqbufs(q, req);
+ if (ret < 0)
+ goto reqbuf_err;
+
+ vout->buffer_allocated = req->count;
+
+reqbuf_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+
+ return videobuf_querybuf(&vout->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buffer)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
+ (buffer->index >= vout->buffer_allocated) ||
+ (q->bufs[buffer->index]->memory != buffer->memory)) {
+ return -EINVAL;
+ }
+ if (V4L2_MEMORY_USERPTR == buffer->memory) {
+ if ((buffer->length < vout->pix.sizeimage) ||
+ (0 == buffer->m.userptr)) {
+ return -EINVAL;
+ }
+ }
+
+ if ((is_rotation_enabled(vout)) &&
+ vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "DMA Channel not allocated for Rotation\n");
+ return -EINVAL;
+ }
+
+ return videobuf_qbuf(q, buffer);
+}
+
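+/*
+ * DQBUF: wait (or poll in non-blocking mode) for a displayed buffer and
+ * release the DMA mapping created when the buffer was prepared.
+ */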
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ int ret;
+ u32 addr;
+ unsigned long size;
+ struct videobuf_buffer *vb;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ ret = videobuf_dqbuf(q, b, !!(file->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
+
+ addr = (unsigned long) vout->buf_phy_addr[vb->i];
+ size = (unsigned long) vb->size;
+ dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
+ size, DMA_TO_DEVICE);
+ return 0;
+}
+
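+/*
+ * STREAMON: take the first queued buffer, program the overlay with its
+ * address, register the DISPC interrupt handler and enable the overlays.
+ * Subsequent buffers are switched in from omap_vout_isr().
+ */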
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ int ret = 0, j;
+ u32 addr = 0, mask = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto streamon_err;
+ }
+
+ ret = videobuf_streamon(q);
+ if (ret)
+ goto streamon_err;
+
+ if (list_empty(&vout->dma_queue)) {
+ ret = -EIO;
+ goto streamon_err1;
+ }
+
+ /* Get the next frame from the buffer queue */
+ vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&vout->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ vout->field_id = 0;
+
+ /* set flag here. Next QBUF will start DMA */
+ vout->streaming = true;
+
+ vout->first_int = 1;
+
+ if (omap_vout_calculate_offset(vout)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
+ addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ + vout->cropped_offset;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
+
+ /* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to set overlay info\n");
+ goto streamon_err1;
+ }
+
+ omap_dispc_register_isr(omap_vout_isr, vout, mask);
+
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+ struct omap_dss_device *dssdev = ovl->get_device(ovl);
+
+ if (dssdev) {
+ ret = ovl->enable(ovl);
+ if (ret)
+ goto streamon_err1;
+ }
+ }
+
+ ret = 0;
+
+streamon_err1:
+ if (ret)
+ ret = videobuf_streamoff(q);
+streamon_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
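+/*
+ * STREAMOFF: unregister the DISPC interrupt handler, disable the overlays
+ * and apply the changes before releasing the videobuf queue.
+ */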
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ u32 mask = 0;
+ int ret = 0, j;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ vout->streaming = false;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+ struct omap_dss_device *dssdev = ovl->get_device(ovl);
+
+ if (dssdev)
+ ovl->disable(ovl);
+ }
+
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to change mode in streamoff\n");
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+ ret = videobuf_streamoff(&vout->vbq);
+
+ return ret;
+}
+
+static int vidioc_s_fbuf(struct file *file, void *fh,
+ const struct v4l2_framebuffer *a)
+{
+ int enable = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* OMAP DSS doesn't support Source and Destination color
+ key together */
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
+ return -EINVAL;
+ /* OMAP DSS doesn't support the destination color key
+ and alpha blending together */
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
+ return -EINVAL;
+
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;
+
+ if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+ enable = 1;
+ else
+ enable = 0;
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.trans_enabled = enable;
+ info.trans_key_type = key_type;
+ info.trans_key = vout->win.chromakey;
+
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+ if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 1;
+ } else {
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 0;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ /* enable this only if there is no zorder cap */
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
+ info.partial_alpha_enabled = enable;
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* The video overlay must stay within the framebuffer and can't be
+ positioned independently. */
+ a->flags = V4L2_FBUF_FLAG_OVERLAY;
+ a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
+ | V4L2_FBUF_CAP_SRC_CHROMAKEY;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
+ a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
+ a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.partial_alpha_enabled)
+ a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vout_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static const struct v4l2_file_operations omap_vout_fops = {
+ .owner = THIS_MODULE,
+ .poll = omap_vout_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = omap_vout_mmap,
+ .open = omap_vout_open,
+ .release = omap_vout_release,
+};
+
+/* Init functions used during driver initialization */
+/* Initial setup of video_data */
+static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+ struct v4l2_pix_format *pix;
+ struct omap_overlay *ovl = vout->vid_info.overlays[0];
+ struct omap_dss_device *display = ovl->get_device(ovl);
+ struct v4l2_ctrl_handler *hdl;
+
+ /* set the default pix */
+ pix = &vout->pix;
+
+ /* Set the default picture size to QQVGA */
+ pix->width = QQVGA_WIDTH;
+ pix->height = QQVGA_HEIGHT;
+
+ /* Default pixel format is RGB 5-6-5 */
+ pix->pixelformat = V4L2_PIX_FMT_RGB565;
+ pix->field = V4L2_FIELD_ANY;
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ vout->bpp = RGB565_BPP;
+ vout->fbuf.fmt.width = display->panel.timings.x_res;
+ vout->fbuf.fmt.height = display->panel.timings.y_res;
+
+ /* Set the data structures for the overlay parameters*/
+ vout->win.global_alpha = 255;
+ vout->fbuf.flags = 0;
+ vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
+ vout->win.chromakey = 0;
+
+ omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ hdl = &vout->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 3);
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_BG_COLOR, 0, 0xffffff, 1, 0);
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (hdl->error)
+ return hdl->error;
+
+ vout->rotation = 0;
+ vout->mirror = false;
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ vout->vrfb_bpp = 2;
+
+ /* initialize the video_device struct */
+ vfd = vout->vfd = video_device_alloc();
+
+ if (!vfd) {
+ printk(KERN_ERR VOUT_NAME
+ ": could not allocate video device struct\n");
+ v4l2_ctrl_handler_free(hdl);
+ return -ENOMEM;
+ }
+ vfd->ctrl_handler = hdl;
+ vfd->release = video_device_release;
+ vfd->ioctl_ops = &vout_ioctl_ops;
+
+ strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+
+ vfd->fops = &omap_vout_fops;
+ vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_TX;
+ mutex_init(&vout->lock);
+
+ vfd->minor = -1;
+ return 0;
+
+}
+
+/* Setup video buffers */
+static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
+ int vid_num)
+{
+ u32 numbuffers;
+ int ret = 0, i;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ ovid = &vout->vid_info;
+
+ numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
+ vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
+ dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
+
+ for (i = 0; i < numbuffers; i++) {
+ vout->buf_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->buffer_size,
+ (u32 *) &vout->buf_phy_addr[i]);
+ if (!vout->buf_virt_addr[i]) {
+ numbuffers = i;
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ vout->cropped_offset = 0;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ bool static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+ ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
+ static_vrfb_allocation);
+ }
+
+ return ret;
+
+free_buffers:
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ return ret;
+
+}
+
+/* Create video out devices */
+static int __init omap_vout_create_video_devices(struct platform_device *pdev)
+{
+ int ret = 0, k;
+ struct omap_vout_device *vout;
+ struct video_device *vfd = NULL;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev,
+ struct omap2video_device, v4l2_dev);
+
+ for (k = 0; k < pdev->num_resources; k++) {
+
+ vout = kzalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
+ if (!vout) {
+ dev_err(&pdev->dev, ": could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vout->vid = k;
+ vid_dev->vouts[k] = vout;
+ vout->vid_dev = vid_dev;
+ /* Select video2 if only 1 overlay is controlled by V4L2 */
+ if (pdev->num_resources == 1)
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
+ else
+ /* Else select video1 and video2 one by one. */
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
+ vout->vid_info.num_overlays = 1;
+ vout->vid_info.id = k + 1;
+
+ /* Set VRFB as rotation_type for omap2 and omap3 */
+ if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
+ vout->vid_info.rotation_type = VOUT_ROT_VRFB;
+
+ /* Setup the default configuration for the video devices
+ */
+ if (omap_vout_setup_video_data(vout) != 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Allocate default number of buffers for the video streaming
+ * and reserve the VRFB space for rotation
+ */
+ if (omap_vout_setup_video_bufs(pdev, k) != 0) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ /* Register the Video device with V4L2
+ */
+ vfd = vout->vfd;
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
+ dev_err(&pdev->dev,
+ ": Could not register Video for Linux device\n");
+ vfd->minor = -1;
+ ret = -ENODEV;
+ goto error2;
+ }
+ video_set_drvdata(vfd, vout);
+
+ dev_info(&pdev->dev,
+ ": registered and initialized video device %d\n",
+ vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+
+ continue;
+error2:
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+error1:
+ video_device_release(vfd);
+error:
+ kfree(vout);
+ return ret;
+ }
+
+ return -ENODEV;
+}
+/* Driver functions */
+static void omap_vout_cleanup_device(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+ struct omapvideo_info *ovid;
+
+ if (!vout)
+ return;
+
+ vfd = vout->vfd;
+ ovid = &vout->vid_info;
+ if (vfd) {
+ if (!video_is_registered(vfd)) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(vfd);
+ } else {
+ /*
+ * The unregister function will release the video_device
+ * struct as well as unregistering it.
+ */
+ video_unregister_device(vfd);
+ }
+ }
+ v4l2_ctrl_handler_free(&vout->ctrl_handler);
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_release_vrfb(vout);
+ /* Free the VRFB buffer if allocated
+ * init time
+ */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
+ omap_vout_free_buffers(vout);
+
+ kfree(vout);
+}
+
+static int omap_vout_remove(struct platform_device *pdev)
+{
+ int k;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ for (k = 0; k < pdev->num_resources; k++)
+ omap_vout_cleanup_device(vid_dev->vouts[k]);
+
+ for (k = 0; k < vid_dev->num_displays; k++) {
+ if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
+ vid_dev->displays[k]->driver->disable(vid_dev->displays[k]);
+
+ omap_dss_put_device(vid_dev->displays[k]);
+ }
+ kfree(vid_dev);
+ return 0;
+}
+
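+/*
+ * Probe: gather the DSS displays, overlays and managers, enable the default
+ * display attached to each video overlay, then register the V4L2 device and
+ * create the video output devices.
+ */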
+static int __init omap_vout_probe(struct platform_device *pdev)
+{
+ int ret = 0, i;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *def_display;
+ struct omap2video_device *vid_dev = NULL;
+
+ if (omapdss_is_initialized() == false)
+ return -EPROBE_DEFER;
+
+ ret = omapdss_compat_init();
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init dss\n");
+ return ret;
+ }
+
+ if (pdev->num_resources == 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ ret = -ENODEV;
+ goto err_dss_init;
+ }
+
+ vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
+ if (vid_dev == NULL) {
+ ret = -ENOMEM;
+ goto err_dss_init;
+ }
+
+ vid_dev->num_displays = 0;
+ for_each_dss_dev(dssdev) {
+ omap_dss_get_device(dssdev);
+
+ if (!dssdev->driver) {
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
+ }
+
+ vid_dev->displays[vid_dev->num_displays++] = dssdev;
+ }
+
+ if (vid_dev->num_displays == 0) {
+ dev_err(&pdev->dev, "no displays\n");
+ ret = -EINVAL;
+ goto probe_err0;
+ }
+
+ vid_dev->num_overlays = omap_dss_get_num_overlays();
+ for (i = 0; i < vid_dev->num_overlays; i++)
+ vid_dev->overlays[i] = omap_dss_get_overlay(i);
+
+ vid_dev->num_managers = omap_dss_get_num_overlay_managers();
+ for (i = 0; i < vid_dev->num_managers; i++)
+ vid_dev->managers[i] = omap_dss_get_overlay_manager(i);
+
+ /* Get the video1 and video2 overlays.
+ * Set up the display attached to those overlays
+ */
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ ovl = omap_dss_get_overlay(i);
+ dssdev = ovl->get_device(ovl);
+
+ if (dssdev) {
+ def_display = dssdev;
+ } else {
+ dev_warn(&pdev->dev, "cannot find display\n");
+ def_display = NULL;
+ }
+ if (def_display) {
+ struct omap_dss_driver *dssdrv = def_display->driver;
+
+ ret = dssdrv->enable(def_display);
+ if (ret) {
+ /* This is not treated as an error
+ * because the display may already be
+ * enabled by the framebuffer driver
+ */
+ dev_warn(&pdev->dev,
+ "'%s' Display already enabled\n",
+ def_display->name);
+ }
+ }
+ }
+
+ if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
+ dev_err(&pdev->dev, "v4l2_device_register failed\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+
+ ret = omap_vout_create_video_devices(pdev);
+ if (ret)
+ goto probe_err2;
+
+ for (i = 0; i < vid_dev->num_displays; i++) {
+ struct omap_dss_device *display = vid_dev->displays[i];
+
+ if (display->driver->update)
+ display->driver->update(display, 0, 0,
+ display->panel.timings.x_res,
+ display->panel.timings.y_res);
+ }
+ return 0;
+
+probe_err2:
+ v4l2_device_unregister(&vid_dev->v4l2_dev);
+probe_err1:
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ def_display = NULL;
+ ovl = omap_dss_get_overlay(i);
+ dssdev = ovl->get_device(ovl);
+
+ if (dssdev)
+ def_display = dssdev;
+
+ if (def_display && def_display->driver)
+ def_display->driver->disable(def_display);
+ }
+probe_err0:
+ kfree(vid_dev);
+err_dss_init:
+ omapdss_compat_uninit();
+ return ret;
+}
+
+static struct platform_driver omap_vout_driver = {
+ .driver = {
+ .name = VOUT_NAME,
+ },
+ .remove = omap_vout_remove,
+};
+
+static int __init omap_vout_init(void)
+{
+ if (platform_driver_probe(&omap_vout_driver, omap_vout_probe) != 0) {
+ printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void omap_vout_cleanup(void)
+{
+ platform_driver_unregister(&omap_vout_driver);
+}
+
+late_initcall(omap_vout_init);
+module_exit(omap_vout_cleanup);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
new file mode 100644
index 000000000..11ec04892
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -0,0 +1,421 @@
+/*
+ * omap_vout_vrfb.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include <video/omapvrfb.h>
+
+#include "omap_voutdef.h"
+#include "omap_voutlib.h"
+#include "omap_vout_vrfb.h"
+
+#define OMAP_DMA_NO_DEVICE 0
+
+/*
+ * Function for allocating the VRFB buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *)(long)vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Free VRFB buffers
+ */
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < VRFB_NUM_BUFS; j++) {
+ if (vout->smsshado_virt_addr[j]) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ }
+}
+
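+/*
+ * Reserve the VRFB rotation contexts, size the shadow buffers for the
+ * worst-case rotated image and request an interleaved-capable DMA channel
+ * used to copy frames into VRFB space.
+ */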
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ bool static_vrfb_allocation)
+{
+ int ret = 0, i, j;
+ struct omap_vout_device *vout;
+ struct video_device *vfd;
+ dma_cap_mask_t mask;
+ int image_width, image_height;
+ int vrfb_num_bufs = VRFB_NUM_BUFS;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ dev_info(&pdev->dev, ": VRFB allocation failed\n");
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ /* Calculate VRFB memory size */
+ /* allocate for worst case size */
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
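+ /* the 2 * 2 factor covers the worst case of 4 bytes per VRFB pixel,
+ * e.g. YUYV/UYVY where both bpp and the VRFB bpp are 2
+ */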
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+ /*
+ * Request and Initialize DMA, for DMA based VRFB transfer
+ */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_INTERLEAVE, mask);
+ vout->vrfb_dma_tx.chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(vout->vrfb_dma_tx.chan)) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ } else {
+ size_t xt_size = sizeof(struct dma_interleaved_template) +
+ sizeof(struct data_chunk);
+
+ vout->vrfb_dma_tx.xt = kzalloc(xt_size, GFP_KERNEL);
+ if (!vout->vrfb_dma_tx.xt) {
+ dma_release_channel(vout->vrfb_dma_tx.chan);
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ }
+ }
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED)
+ dev_info(&pdev->dev,
+ ": failed to allocate DMA Channel for video%d\n",
+ vfd->minor);
+
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+ /* static allocation of the VRFB buffers is requested through
+ command line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ ret = -ENOMEM;
+ goto release_vrfb_ctx;
+ }
+ vout->vrfb_static_allocation = true;
+ }
+ return 0;
+
+release_vrfb_ctx:
+ for (j = 0; j < VRFB_NUM_BUFS; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+free_buffers:
+ omap_vout_free_buffers(vout);
+
+ return ret;
+}
+
+/*
+ * Release the VRFB context once the module exits
+ */
+void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+ int i;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ kfree(vout->vrfb_dma_tx.xt);
+ dmaengine_terminate_sync(vout->vrfb_dma_tx.chan);
+ dma_release_channel(vout->vrfb_dma_tx.chan);
+ }
+}
+
+/*
+ * Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
+ *count = *count > VRFB_NUM_BUFS ? VRFB_NUM_BUFS : *count;
+
+ /* Allocate the VRFB buffers only if the buffers are not
+ * allocated during init time.
+ */
+ if (!vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+ for (i = 0; i < *count; i++)
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i], vout->pix.width,
+ vout->pix.height, vout->bpp, yuv_mode);
+
+ return 0;
+}
+
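+/*
+ * Copy one V4L2 buffer into the matching VRFB context using an interleaved
+ * DMA transfer and wait (with a timeout) for its completion. The physical
+ * address of the rotated view is then recorded for the DSS.
+ */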
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb)
+{
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct dma_chan *chan = vout->vrfb_dma_tx.chan;
+ struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ enum dss_rotation rotation;
+ size_t dst_icg;
+ u32 pixsize;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ /* If rotation is enabled, copy the input buffer into VRFB
+ * memory space using DMA. The buffer is copied into the VRFB
+ * context of the desired angle, and the DSS then reads the
+ * image from VRFB memory as if it were unrotated (0 degrees)
+ */
+
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
+
+ xt->src_start = vout->buf_phy_addr[vb->i];
+ xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
+
+ xt->numf = vout->pix.height;
+ xt->frame_size = 1;
+ xt->sgl[0].size = vout->pix.width * vout->bpp;
+ xt->sgl[0].icg = dst_icg;
+
+ xt->dir = DMA_MEM_TO_MEM;
+ xt->src_sgl = false;
+ xt->src_inc = true;
+ xt->dst_sgl = true;
+ xt->dst_inc = true;
+
+ tx = dmaengine_prep_interleaved_dma(chan, xt, flags);
+ if (tx == NULL) {
+ pr_err("%s: DMA interleaved prep error\n", __func__);
+ return -EINVAL;
+ }
+
+ tx->callback = omap_vout_vrfb_dma_tx_callback;
+ tx->callback_param = &vout->vrfb_dma_tx;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ pr_err("%s: dmaengine_submit failed (%d)\n", __func__, cookie);
+ return -EINVAL;
+ }
+
+ vout->vrfb_dma_tx.tx_status = 0;
+ dma_async_issue_pending(chan);
+
+ wait_event_interruptible_timeout(vout->vrfb_dma_tx.wait,
+ vout->vrfb_dma_tx.tx_status == 1,
+ VRFB_TX_TIMEOUT);
+
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+
+ if (vout->vrfb_dma_tx.tx_status == 0) {
+ pr_err("%s: Timeout while waiting for DMA\n", __func__);
+ dmaengine_terminate_sync(chan);
+ return -EINVAL;
+ } else if (status != DMA_COMPLETE) {
+ pr_err("%s: DMA completion %s status\n", __func__,
+ status == DMA_ERROR ? "error" : "busy");
+ dmaengine_terminate_sync(chan);
+ return -EINVAL;
+ }
+
+ /* Store the buffer's physical address in an array. Addresses
+ * from this array will be used to configure the DSS */
+ rotation = calc_rotation(vout);
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
+ return 0;
+}
+
+/*
+ * Calculate the buffer offset from which streaming should
+ * start. This offset calculation is mainly required because of
+ * the VRFB 32-pixel alignment used with rotation.
+ */
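+/*
+ * Note: when rotation is enabled the DSS scans lines of MAX_PIXELS_PER_LINE
+ * (2048) pixels in VRFB space regardless of the image width, so the offsets
+ * below are computed against that stride rather than pix->width.
+ */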
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
+{
+ enum dss_rotation rotation;
+ bool mirroring = vout->mirror;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int vr_ps = 1, ps = 2, temp_ps = 2;
+ int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (is_rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+ * vr_ps - Virtually pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+ ps = 2; /* otherwise the pixel size is 2 byte */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+
+ if (is_rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (!mirroring) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (!mirroring) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (!mirroring) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (!mirroring) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+}
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.h b/drivers/media/platform/omap/omap_vout_vrfb.h
new file mode 100644
index 000000000..c97697502
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout_vrfb.h
@@ -0,0 +1,40 @@
+/*
+ * omap_vout_vrfb.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUT_VRFB_H
+#define OMAP_VOUT_VRFB_H
+
+#ifdef CONFIG_VIDEO_OMAP2_VOUT_VRFB
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout);
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ bool static_vrfb_allocation);
+void omap_vout_release_vrfb(struct omap_vout_device *vout);
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex);
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb);
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
+#else
+static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { };
+static inline int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ bool static_vrfb_allocation)
+ { return 0; };
+static inline void omap_vout_release_vrfb(struct omap_vout_device *vout) { };
+static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+ { return 0; };
+static inline int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb)
+ { return 0; };
+static inline void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { };
+#endif
+
+#endif
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
new file mode 100644
index 000000000..56b630b1c
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -0,0 +1,226 @@
+/*
+ * omap_voutdef.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef OMAP_VOUTDEF_H
+#define OMAP_VOUTDEF_H
+
+#include <media/v4l2-ctrls.h>
+#include <video/omapfb_dss.h>
+#include <video/omapvrfb.h>
+#include <linux/dmaengine.h>
+
+#define YUYV_BPP 2
+#define RGB565_BPP 2
+#define RGB24_BPP 3
+#define RGB32_BPP 4
+#define TILE_SIZE 32
+#define YUYV_VRFB_BPP 2
+#define RGB_VRFB_BPP 1
+#define MAX_CID 3
+#define MAC_VRFB_CTXS 4
+#define MAX_VOUT_DEV 2
+#define MAX_OVLS 3
+#define MAX_DISPLAYS 10
+#define MAX_MANAGERS 3
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH 1280 /* Largest width */
+#define VID_MAX_HEIGHT 720 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE 2048
+
+#define VRFB_TX_TIMEOUT 1000
+#define VRFB_NUM_BUFS 4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+enum dma_channel_state {
+ DMA_CHAN_NOT_ALLOTED,
+ DMA_CHAN_ALLOTED,
+};
+
+/* Enum for rotation
+ * DSS understands rotation as 0, 1, 2, 3,
+ * while the V4L2 driver expresses it as 0, 90, 180, 270 degrees
+ */
+enum dss_rotation {
+ dss_rotation_0_degree = 0,
+ dss_rotation_90_degree = 1,
+ dss_rotation_180_degree = 2,
+ dss_rotation_270_degree = 3,
+};
+
+/* Enum for choosing the rotation type for vout.
+ * DSS2 does not offer "no rotation" as an
+ * option, while the V4L2 driver does not support
+ * rotation when VRFB support is not built into
+ * the kernel
+ */
+enum vout_rotaion_type {
+ VOUT_ROT_NONE = 0,
+ VOUT_ROT_VRFB = 1,
+};
+
+/*
+ * This structure is used to store the DMA transfer parameters
+ * for VRFB hidden buffer
+ */
+struct vid_vrfb_dma {
+ struct dma_chan *chan;
+ struct dma_interleaved_template *xt;
+
+ int req_status;
+ int tx_status;
+ wait_queue_head_t wait;
+};
+
+struct omapvideo_info {
+ int id;
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ enum vout_rotaion_type rotation_type;
+};
+
+struct omap2video_device {
+ struct mutex mtx;
+
+ int state;
+
+ struct v4l2_device v4l2_dev;
+ struct omap_vout_device *vouts[MAX_VOUT_DEV];
+
+ int num_displays;
+ struct omap_dss_device *displays[MAX_DISPLAYS];
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ int num_managers;
+ struct omap_overlay_manager *managers[MAX_MANAGERS];
+};
+
+/* per-device data structure */
+struct omap_vout_device {
+
+ struct omapvideo_info vid_info;
+ struct video_device *vfd;
+ struct omap2video_device *vid_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ int vid;
+ int opened;
+
+ /* we don't allow changing the image fmt/size once buffers have
+ * been allocated
+ */
+ int buffer_allocated;
+ /* allow reuse of a previously allocated buffer which is big enough */
+ int buffer_size;
+ /* keep buffer info across opens */
+ unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
+ unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+ enum omap_color_mode dss_mode;
+
+ /* we don't allow requesting new buffers while old buffers are
+ * still mmapped
+ */
+ int mmap_count;
+
+ spinlock_t vbq_lock; /* spinlock for videobuf queues */
+ unsigned long field_count; /* field counter for videobuf_buffer */
+
+ /* true means streaming is in progress. */
+ bool streaming;
+
+ struct v4l2_pix_format pix;
+ struct v4l2_rect crop;
+ struct v4l2_window win;
+ struct v4l2_framebuffer fbuf;
+
+ /* Lock to protect the shared data structures in ioctl */
+ struct mutex lock;
+
+ enum dss_rotation rotation;
+ bool mirror;
+ int flicker_filter;
+
+ int bpp; /* bytes per pixel */
+ int vrfb_bpp; /* bytes per pixel with respect to VRFB */
+
+ struct vid_vrfb_dma vrfb_dma_tx;
+ unsigned int smsshado_phy_addr[MAC_VRFB_CTXS];
+ unsigned int smsshado_virt_addr[MAC_VRFB_CTXS];
+ struct vrfb vrfb_context[MAC_VRFB_CTXS];
+ bool vrfb_static_allocation;
+ unsigned int smsshado_size;
+ unsigned char pos;
+
+ int ps, vr_ps, line_length, first_int, field_id;
+ enum v4l2_memory memory;
+ struct videobuf_buffer *cur_frm, *next_frm;
+ struct list_head dma_queue;
+ u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ u32 cropped_offset;
+ s32 tv_field1_offset;
+ void *isr_handle;
+
+ /* Buffer queue variables */
+ struct omap_vout_device *vout;
+ enum v4l2_buf_type type;
+ struct videobuf_queue vbq;
+ int io_allowed;
+
+};
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int is_rotation_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int is_rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+ if (!vout->mirror)
+ return vout->rotation;
+
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+}
+
+void omap_vout_free_buffers(struct omap_vout_device *vout);
+#endif /* ifndef OMAP_VOUTDEF_H */
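As an illustration of the calc_rotation() helper above (a stand-alone sketch, not part of
the driver sources): mirroring composed with a rotation is equivalent to the complementary
rotation, so with mirroring enabled 0 and 180 degrees swap, as do 90 and 270. Using plain
ints in place of enum dss_rotation, the same mapping can be written as a lookup table:

    /* Hypothetical user-space sketch of the calc_rotation() mapping.
     * Codes 0..3 stand for 0/90/180/270 degrees, as in enum dss_rotation. */
    static int effective_rotation(int rotation, int mirror)
    {
            /* with mirroring: 0 <-> 180 and 90 <-> 270 swap */
            static const int mirrored[4] = { 2, 3, 0, 1 };

            return mirror ? mirrored[rotation & 3] : rotation;
    }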
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
new file mode 100644
index 000000000..58a25fdf0
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -0,0 +1,357 @@
+/*
+ * omap_voutlib.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Based on the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <linux/dma-mapping.h>
+
+#include <video/omapfb_dss.h>
+
+#include "omap_voutlib.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video library");
+MODULE_LICENSE("GPL");
+
+/* Return the default overlay cropping rectangle in crop given the image
+ * size in pix and the video display size in fbuf. The default
+ * cropping rectangle is the largest rectangle no larger than the capture size
+ * that will fit on the display. The default cropping rectangle is centered in
+ * the image. All dimensions and offsets are rounded down to even numbers.
+ */
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
+{
+ crop->width = (pix->width < fbuf->fmt.width) ?
+ pix->width : fbuf->fmt.width;
+ crop->height = (pix->height < fbuf->fmt.height) ?
+ pix->height : fbuf->fmt.height;
+ crop->width &= ~1;
+ crop->height &= ~1;
+ crop->left = ((pix->width - crop->width) >> 1) & ~1;
+ crop->top = ((pix->height - crop->height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_default_crop);
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The adjusted window parameters are
+ * returned in new_win.
+ * Returns zero if successful, or -EINVAL if the requested window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ struct v4l2_rect try_win;
+
+ /* make a working copy of the new_win rectangle */
+ try_win = new_win->w;
+
+ /* adjust the preview window so it fits on the display by clipping any
+ * offscreen areas
+ */
+ if (try_win.left < 0) {
+ try_win.width += try_win.left;
+ try_win.left = 0;
+ }
+ if (try_win.top < 0) {
+ try_win.height += try_win.top;
+ try_win.top = 0;
+ }
+ try_win.width = (try_win.width < fbuf->fmt.width) ?
+ try_win.width : fbuf->fmt.width;
+ try_win.height = (try_win.height < fbuf->fmt.height) ?
+ try_win.height : fbuf->fmt.height;
+ if (try_win.left + try_win.width > fbuf->fmt.width)
+ try_win.width = fbuf->fmt.width - try_win.left;
+ if (try_win.top + try_win.height > fbuf->fmt.height)
+ try_win.height = fbuf->fmt.height - try_win.top;
+ try_win.width &= ~1;
+ try_win.height &= ~1;
+
+ if (try_win.width <= 0 || try_win.height <= 0)
+ return -EINVAL;
+
+ /* We now have a valid preview window, so go with it */
+ new_win->w = try_win;
+ new_win->field = V4L2_FIELD_ANY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_try_window);
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The image cropping window in crop
+ * will also be adjusted if necessary. Preference is given to keeping the
+ * window as close to the requested configuration as possible. If
+ * successful, new_win, vout->win, and crop are updated.
+ * Returns zero if successful, or -EINVAL if the requested preview window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ int err;
+
+ err = omap_vout_try_window(fbuf, new_win);
+ if (err)
+ return err;
+
+ /* update our preview window */
+ win->w = new_win->w;
+ win->field = new_win->field;
+ win->chromakey = new_win->chromakey;
+
+ /* Adjust the cropping window to allow for resizing limitation */
+ if (omap_vout_dss_omap24xx()) {
+ /* For 24xx limit is 8x to 1/2x scaling. */
+ if ((crop->height/win->w.height) >= 2)
+ crop->height = win->w.height * 2;
+
+ if ((crop->width/win->w.width) >= 2)
+ crop->width = win->w.width * 2;
+
+ if (crop->width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is 768
+ * pixels wide. If the cropped image is wider than
+ * 768 pixels then it cannot be vertically resized.
+ */
+ if (crop->height != win->w.height)
+ crop->width = 768;
+ }
+ } else if (omap_vout_dss_omap34xx()) {
+ /* For 34xx limit is 8x to 1/4x scaling. */
+ if ((crop->height/win->w.height) >= 4)
+ crop->height = win->w.height * 4;
+
+ if ((crop->width/win->w.width) >= 4)
+ crop->width = win->w.width * 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_window);
+
+/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
+ * the nearest supported configuration. The image render window in win will
+ * also be adjusted if necessary. The preview window is adjusted such that the
+ * horizontal and vertical rescaling ratios stay constant. If the render
+ * window would fall outside the display boundaries, the cropping rectangle
+ * will also be adjusted to maintain the rescaling ratios. If successful, crop
+ * and win are updated.
+ * Returns zero if successful, or -EINVAL if the requested cropping rectangle is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
+{
+ struct v4l2_rect try_crop;
+ unsigned long vresize, hresize;
+
+ /* make a working copy of the new_crop rectangle */
+ try_crop = *new_crop;
+
+ /* adjust the cropping rectangle so it fits in the image */
+ if (try_crop.left < 0) {
+ try_crop.width += try_crop.left;
+ try_crop.left = 0;
+ }
+ if (try_crop.top < 0) {
+ try_crop.height += try_crop.top;
+ try_crop.top = 0;
+ }
+ try_crop.width = (try_crop.width < pix->width) ?
+ try_crop.width : pix->width;
+ try_crop.height = (try_crop.height < pix->height) ?
+ try_crop.height : pix->height;
+ if (try_crop.left + try_crop.width > pix->width)
+ try_crop.width = pix->width - try_crop.left;
+ if (try_crop.top + try_crop.height > pix->height)
+ try_crop.height = pix->height - try_crop.top;
+
+ try_crop.width &= ~1;
+ try_crop.height &= ~1;
+
+ if (try_crop.width <= 0 || try_crop.height <= 0)
+ return -EINVAL;
+
+ if (omap_vout_dss_omap24xx()) {
+ if (try_crop.height != win->w.height) {
+ /* If we're resizing vertically, we can't support a
+ * crop width wider than 768 pixels.
+ */
+ if (try_crop.width > 768)
+ try_crop.width = 768;
+ }
+ }
+ /* vertical resizing */
+ vresize = (1024 * try_crop.height) / win->w.height;
+ if (omap_vout_dss_omap24xx() && (vresize > 2048))
+ vresize = 2048;
+ else if (omap_vout_dss_omap34xx() && (vresize > 4096))
+ vresize = 4096;
+
+ win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
+ if (win->w.height == 0)
+ win->w.height = 2;
+ if (win->w.height + win->w.top > fbuf->fmt.height) {
+ /* We made the preview window extend below the bottom of the
+ * display, so clip it to the display boundary and resize the
+ * cropping height to maintain the vertical resizing ratio.
+ */
+ win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
+ if (try_crop.height == 0)
+ try_crop.height = 2;
+ }
+ /* horizontal resizing */
+ hresize = (1024 * try_crop.width) / win->w.width;
+ if (omap_vout_dss_omap24xx() && (hresize > 2048))
+ hresize = 2048;
+ else if (omap_vout_dss_omap34xx() && (hresize > 4096))
+ hresize = 4096;
+
+ win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
+ if (win->w.width == 0)
+ win->w.width = 2;
+ if (win->w.width + win->w.left > fbuf->fmt.width) {
+ /* We made the preview window extend past the right side of the
+ * display, so clip it to the display boundary and resize the
+ * cropping width to maintain the horizontal resizing ratio.
+ */
+ win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
+ if (try_crop.width == 0)
+ try_crop.width = 2;
+ }
+ if (omap_vout_dss_omap24xx()) {
+ if ((try_crop.height/win->w.height) >= 2)
+ try_crop.height = win->w.height * 2;
+
+ if ((try_crop.width/win->w.width) >= 2)
+ try_crop.width = win->w.width * 2;
+
+ if (try_crop.width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is
+ * 768 pixels wide. If the cropped image is wider
+ * than 768 pixels then it cannot be vertically resized.
+ */
+ if (try_crop.height != win->w.height)
+ try_crop.width = 768;
+ }
+ } else if (omap_vout_dss_omap34xx()) {
+ if ((try_crop.height/win->w.height) >= 4)
+ try_crop.height = win->w.height * 4;
+
+ if ((try_crop.width/win->w.width) >= 4)
+ try_crop.width = win->w.width * 4;
+ }
+ /* update our cropping rectangle and we're done */
+ *crop = try_crop;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_crop);
+
+/* Given a new format in pix and fbuf, crop and win
+ * structures are initialized to default values. crop
+ * is initialized to the largest window size that will fit on the display. The
+ * crop window is centered in the image. win is initialized to
+ * the same size as crop and is centered on the display.
+ * All sizes and offsets are constrained to be even numbers.
+ */
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win)
+{
+ /* crop defines the preview source window in the image capture
+ * buffer
+ */
+ omap_vout_default_crop(pix, fbuf, crop);
+
+ /* win defines the preview target window on the display */
+ win->w.width = crop->width;
+ win->w.height = crop->height;
+ win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
+ win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_format);
+
+/*
+ * Allocate buffers
+ */
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ u32 order, size;
+ unsigned long virt_addr, addr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+ virt_addr = __get_free_pages(GFP_KERNEL, order);
+ addr = virt_addr;
+
+ if (virt_addr) {
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
+
+/*
+ * Free buffers
+ */
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+ u32 order, size;
+ unsigned long addr = virtaddr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages((unsigned long) virtaddr, order);
+}
+
+bool omap_vout_dss_omap24xx(void)
+{
+ return omapdss_get_version() == OMAPDSS_VER_OMAP24xx;
+}
+
+bool omap_vout_dss_omap34xx(void)
+{
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return true;
+ default:
+ return false;
+ }
+}
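A worked example of the fixed-point scaling ratio used by omap_vout_new_crop() above
(illustrative numbers only, not taken from the driver): the ratio is expressed in
1/1024 units and clamped to 2048 on OMAP24xx (at most 1/2x downscaling) or 4096 on
OMAP34xx (at most 1/4x downscaling). Asking for a 480-line render window from a
1200-line crop on an OMAP24xx works out as:

    vresize      = 1024 * 1200 / 480  = 2560  -> clamped to 2048
    win.w.height = 1024 * 1200 / 2048 = 600   (already even, so "& ~1" leaves it unchanged)

so the render window is grown to 600 lines to keep the downscaling ratio within the
hardware limit.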
diff --git a/drivers/media/platform/omap/omap_voutlib.h b/drivers/media/platform/omap/omap_voutlib.h
new file mode 100644
index 000000000..f9d1c0779
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutlib.h
@@ -0,0 +1,39 @@
+/*
+ * omap_voutlib.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUTLIB_H
+#define OMAP_VOUTLIB_H
+
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
+
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf,
+ const struct v4l2_rect *new_crop);
+
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win);
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
+
+bool omap_vout_dss_omap24xx(void);
+bool omap_vout_dss_omap34xx(void);
+#endif /* #ifndef OMAP_VOUTLIB_H */
+
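A minimal usage sketch for the helpers declared above (hypothetical values; the
structures are the standard V4L2 ones): defaulting the crop rectangle for a 720x576
source shown on a 640x480 display.

    struct v4l2_pix_format pix = { .width = 720, .height = 576 };
    struct v4l2_framebuffer fbuf = { .fmt = { .width = 640, .height = 480 } };
    struct v4l2_rect crop;

    omap_vout_default_crop(&pix, &fbuf, &crop);
    /* crop is now { .left = 40, .top = 48, .width = 640, .height = 480 }:
     * the largest even-sized rectangle that fits the display, centred in the image. */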
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/omap3isp/Makefile
new file mode 100644
index 000000000..56e99b4f7
--- /dev/null
+++ b/drivers/media/platform/omap3isp/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for OMAP3 ISP driver
+
+ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
+
+omap3-isp-objs += \
+ isp.o ispvideo.o \
+ ispcsiphy.o ispccp2.o ispcsi2.o \
+ ispccdc.o isppreview.o ispresizer.o \
+ ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
+
+obj-$(CONFIG_VIDEO_OMAP3) += omap3-isp.o
diff --git a/drivers/media/platform/omap3isp/cfa_coef_table.h b/drivers/media/platform/omap3isp/cfa_coef_table.h
new file mode 100644
index 000000000..e75b0eb25
--- /dev/null
+++ b/drivers/media/platform/omap3isp/cfa_coef_table.h
@@ -0,0 +1,51 @@
+/*
+ * cfa_coef_table.h
+ *
+ * TI OMAP3 ISP - CFA coefficients table
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+{ 244, 0, 247, 0, 12, 27, 36, 247, 250, 0, 27, 0, 4, 250, 12, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 12, 250, 4, 0, 27, 0, 250,
+247, 36, 27, 12, 0, 247, 0, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 0, 247, 0, 12, 27, 36, 247, 250, 0, 27, 0, 4, 250, 12, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 12, 250, 4, 0, 27, 0, 250,
+247, 36, 27, 12, 0, 247, 0, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 0, 247, 0, 12, 27, 36, 247, 250, 0, 27, 0, 4, 250, 12, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 12, 250, 4, 0, 27, 0, 250,
+247, 36, 27, 12, 0, 247, 0, 244, 0, 0, 40, 0, 0, 0, 0, 248 },
+{ 0, 247, 0, 244, 247, 36, 27, 12, 0, 27, 0, 250, 244, 12, 250, 4,
+ 0, 0, 0, 248, 0, 0, 40, 0, 4, 250, 12, 244, 250, 0, 27, 0,
+ 12, 27, 36, 247, 244, 0, 247, 0, 0, 40, 0, 0, 248, 0, 0, 0,
+ 0, 247, 0, 244, 247, 36, 27, 12, 0, 27, 0, 250, 244, 12, 250, 4,
+ 0, 0, 0, 248, 0, 0, 40, 0, 4, 250, 12, 244, 250, 0, 27, 0,
+ 12, 27, 36, 247, 244, 0, 247, 0, 0, 40, 0, 0, 248, 0, 0, 0,
+ 0, 247, 0, 244, 247, 36, 27, 12, 0, 27, 0, 250, 244, 12, 250, 4,
+ 0, 0, 0, 248, 0, 0, 40, 0, 4, 250, 12, 244, 250, 0, 27, 0,
+ 12, 27, 36, 247, 244, 0, 247, 0, 0, 40, 0, 0, 248, 0, 0, 0 },
+{ 4, 250, 12, 244, 250, 0, 27, 0, 12, 27, 36, 247, 244, 0, 247, 0,
+ 0, 0, 0, 248, 0, 0, 40, 0, 0, 247, 0, 244, 247, 36, 27, 12,
+ 0, 27, 0, 250, 244, 12, 250, 4, 0, 40, 0, 0, 248, 0, 0, 0,
+ 4, 250, 12, 244, 250, 0, 27, 0, 12, 27, 36, 247, 244, 0, 247, 0,
+ 0, 0, 0, 248, 0, 0, 40, 0, 0, 247, 0, 244, 247, 36, 27, 12,
+ 0, 27, 0, 250, 244, 12, 250, 4, 0, 40, 0, 0, 248, 0, 0, 0,
+ 4, 250, 12, 244, 250, 0, 27, 0, 12, 27, 36, 247, 244, 0, 247, 0,
+ 0, 0, 0, 248, 0, 0, 40, 0, 0, 247, 0, 244, 247, 36, 27, 12,
+ 0, 27, 0, 250, 244, 12, 250, 4, 0, 40, 0, 0, 248, 0, 0, 0 },
+{ 244, 12, 250, 4, 0, 27, 0, 250, 247, 36, 27, 12, 0, 247, 0, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 0, 247, 0, 12, 27, 36, 247,
+250, 0, 27, 0, 4, 250, 12, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 12, 250, 4, 0, 27, 0, 250, 247, 36, 27, 12, 0, 247, 0, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 0, 247, 0, 12, 27, 36, 247,
+250, 0, 27, 0, 4, 250, 12, 244, 0, 0, 40, 0, 0, 0, 0, 248,
+244, 12, 250, 4, 0, 27, 0, 250, 247, 36, 27, 12, 0, 247, 0, 244,
+248, 0, 0, 0, 0, 40, 0, 0, 244, 0, 247, 0, 12, 27, 36, 247,
+250, 0, 27, 0, 4, 250, 12, 244, 0, 0, 40, 0, 0, 0, 0, 248 },
diff --git a/drivers/media/platform/omap3isp/gamma_table.h b/drivers/media/platform/omap3isp/gamma_table.h
new file mode 100644
index 000000000..3b5070780
--- /dev/null
+++ b/drivers/media/platform/omap3isp/gamma_table.h
@@ -0,0 +1,80 @@
+/*
+ * gamma_table.h
+ *
+ * TI OMAP3 ISP - Default gamma table for all components
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ 0, 0, 1, 2, 3, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 36, 37, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 63, 64, 65, 66, 66, 67, 68, 69, 69, 70,
+ 71, 72, 72, 73, 74, 75, 75, 76, 77, 78, 78, 79, 80, 81, 81, 82,
+ 83, 84, 84, 85, 86, 87, 88, 88, 89, 90, 91, 91, 92, 93, 94, 94,
+ 95, 96, 97, 97, 98, 98, 99, 99, 100, 100, 101, 101, 102, 103, 104, 104,
+105, 106, 107, 108, 108, 109, 110, 111, 111, 112, 113, 114, 114, 115, 116, 117,
+117, 118, 119, 119, 120, 120, 121, 121, 122, 122, 123, 123, 124, 124, 125, 125,
+126, 126, 127, 127, 128, 128, 129, 129, 130, 130, 131, 131, 132, 132, 133, 133,
+134, 134, 135, 135, 136, 136, 137, 137, 138, 138, 139, 139, 140, 140, 141, 141,
+142, 142, 143, 143, 144, 144, 145, 145, 146, 146, 147, 147, 148, 148, 149, 149,
+150, 150, 151, 151, 152, 152, 153, 153, 153, 153, 154, 154, 154, 154, 155, 155,
+156, 156, 157, 157, 158, 158, 158, 159, 159, 159, 160, 160, 160, 161, 161, 162,
+162, 163, 163, 164, 164, 164, 164, 165, 165, 165, 165, 166, 166, 167, 167, 168,
+168, 169, 169, 170, 170, 170, 170, 171, 171, 171, 171, 172, 172, 173, 173, 174,
+174, 175, 175, 176, 176, 176, 176, 177, 177, 177, 177, 178, 178, 178, 178, 179,
+179, 179, 179, 180, 180, 180, 180, 181, 181, 181, 181, 182, 182, 182, 182, 183,
+183, 183, 183, 184, 184, 184, 184, 185, 185, 185, 185, 186, 186, 186, 186, 187,
+187, 187, 187, 188, 188, 188, 188, 189, 189, 189, 189, 190, 190, 190, 190, 191,
+191, 191, 191, 192, 192, 192, 192, 193, 193, 193, 193, 194, 194, 194, 194, 195,
+195, 195, 195, 196, 196, 196, 196, 197, 197, 197, 197, 198, 198, 198, 198, 199,
+199, 199, 199, 200, 200, 200, 200, 201, 201, 201, 201, 202, 202, 202, 203, 203,
+203, 203, 204, 204, 204, 204, 205, 205, 205, 205, 206, 206, 206, 206, 207, 207,
+207, 207, 208, 208, 208, 208, 209, 209, 209, 209, 210, 210, 210, 210, 210, 210,
+210, 210, 210, 210, 210, 210, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211,
+211, 212, 212, 212, 212, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
+213, 214, 214, 214, 214, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215,
+216, 216, 216, 216, 217, 217, 217, 217, 218, 218, 218, 218, 219, 219, 219, 219,
+219, 219, 219, 219, 219, 219, 219, 219, 220, 220, 220, 220, 221, 221, 221, 221,
+221, 221, 221, 221, 221, 221, 221, 222, 222, 222, 222, 223, 223, 223, 223, 223,
+223, 223, 223, 223, 223, 223, 223, 224, 224, 224, 224, 225, 225, 225, 225, 225,
+225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 226, 226,
+226, 226, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 228, 228,
+228, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 230, 230, 230,
+230, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 232, 232, 232,
+232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232,
+233, 233, 233, 233, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 235,
+235, 235, 235, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236,
+236, 236, 236, 236, 236, 236, 237, 237, 237, 237, 238, 238, 238, 238, 238, 238,
+238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238,
+238, 238, 238, 238, 238, 239, 239, 239, 239, 240, 240, 240, 240, 240, 240, 240,
+240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240,
+240, 240, 240, 240, 241, 241, 241, 241, 242, 242, 242, 242, 242, 242, 242, 242,
+242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242,
+242, 242, 243, 243, 243, 243, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
+244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
+244, 245, 245, 245, 245, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
+246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
+246, 246, 246, 246, 246, 246, 246, 247, 247, 247, 247, 248, 248, 248, 248, 248,
+248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248,
+248, 248, 248, 248, 248, 248, 249, 249, 249, 249, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 251, 251, 251, 251, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
new file mode 100644
index 000000000..00e52f0b8
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -0,0 +1,2423 @@
+/*
+ * isp.c
+ *
+ * TI OMAP3 ISP - Core
+ *
+ * Copyright (C) 2006-2010 Nokia Corporation
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * Contributors:
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ * David Cohen <dacohen@gmail.com>
+ * Stanimir Varbanov <svarbanov@mm-sol.com>
+ * Vimarsh Zutshi <vimarsh.zutshi@gmail.com>
+ * Tuukka Toivonen <tuukkat76@gmail.com>
+ * Sergio Aguirre <saaguirre@ti.com>
+ * Antti Koskipaa <akoskipa@gmail.com>
+ * Ivan T. Ivanov <iivanov@mm-sol.com>
+ * RaniSuneela <r-m@ti.com>
+ * Atanas Filipov <afilipov@mm-sol.com>
+ * Gjorgji Rosikopulos <grosikopulos@mm-sol.com>
+ * Hiroshi DOYU <hiroshi.doyu@nokia.com>
+ * Nayden Kanchev <nkanchev@mm-sol.com>
+ * Phil Carmody <ext-phil.2.carmody@nokia.com>
+ * Artem Bityutskiy <artem.bityutskiy@nokia.com>
+ * Dominic Curran <dcurran@ti.com>
+ * Ilkka Myllyperkio <ilkka.myllyperkio@sofica.fi>
+ * Pallavi Kulkarni <p-kulkarni@ti.com>
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ * Mohit Jalori <mjalori@ti.com>
+ * Sameer Venkatraman <sameerv@ti.com>
+ * Senthilvadivu Guruswamy <svadivu@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ * Toni Leinonen <toni.leinonen@nokia.com>
+ * Troy Laramy <t-laramy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/omap-iommu.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mc.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccdc.h"
+#include "isppreview.h"
+#include "ispresizer.h"
+#include "ispcsi2.h"
+#include "ispccp2.h"
+#include "isph3a.h"
+#include "isphist.h"
+
+static unsigned int autoidle;
+module_param(autoidle, int, 0444);
+MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
+
+static void isp_save_ctx(struct isp_device *isp);
+
+static void isp_restore_ctx(struct isp_device *isp);
+
+static const struct isp_res_mapping isp_res_maps[] = {
+ {
+ .isp_rev = ISP_REVISION_2_0,
+ .offset = {
+ /* first MMIO area */
+ 0x0000, /* base, len 0x0070 */
+ 0x0400, /* ccp2, len 0x01f0 */
+ 0x0600, /* ccdc, len 0x00a8 */
+ 0x0a00, /* hist, len 0x0048 */
+ 0x0c00, /* h3a, len 0x0060 */
+ 0x0e00, /* preview, len 0x00a0 */
+ 0x1000, /* resizer, len 0x00ac */
+ 0x1200, /* sbl, len 0x00fc */
+ /* second MMIO area */
+ 0x0000, /* csi2a, len 0x0170 */
+ 0x0170, /* csiphy2, len 0x000c */
+ },
+ .phy_type = ISP_PHY_TYPE_3430,
+ },
+ {
+ .isp_rev = ISP_REVISION_15_0,
+ .offset = {
+ /* first MMIO area */
+ 0x0000, /* base, len 0x0070 */
+ 0x0400, /* ccp2, len 0x01f0 */
+ 0x0600, /* ccdc, len 0x00a8 */
+ 0x0a00, /* hist, len 0x0048 */
+ 0x0c00, /* h3a, len 0x0060 */
+ 0x0e00, /* preview, len 0x00a0 */
+ 0x1000, /* resizer, len 0x00ac */
+ 0x1200, /* sbl, len 0x00fc */
+ /* second MMIO area */
+ 0x0000, /* csi2a, len 0x0170 (1st area) */
+ 0x0170, /* csiphy2, len 0x000c */
+ 0x01c0, /* csi2a, len 0x0040 (2nd area) */
+ 0x0400, /* csi2c, len 0x0170 (1st area) */
+ 0x0570, /* csiphy1, len 0x000c */
+ 0x05c0, /* csi2c, len 0x0040 (2nd area) */
+ },
+ .phy_type = ISP_PHY_TYPE_3630,
+ },
+};
+
+/* Structure for saving/restoring ISP module registers */
+static struct isp_reg isp_reg_list[] = {
+ {OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
+ {OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
+ {0, ISP_TOK_TERM, 0}
+};
+
+/*
+ * omap3isp_flush - Post pending L3 bus writes by doing a register readback
+ * @isp: OMAP3 ISP device
+ *
+ * In order to force posting of pending writes, we need to write and
+ * readback the same register, in this case the revision register.
+ *
+ * See this link for reference:
+ * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ */
+void omap3isp_flush(struct isp_device *isp)
+{
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+}
+
+/* -----------------------------------------------------------------------------
+ * XCLK
+ */
+
+#define to_isp_xclk(_hw) container_of(_hw, struct isp_xclk, hw)
+
+static void isp_xclk_update(struct isp_xclk *xclk, u32 divider)
+{
+ switch (xclk->id) {
+ case ISP_XCLK_A:
+ isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
+ ISPTCTRL_CTRL_DIVA_MASK,
+ divider << ISPTCTRL_CTRL_DIVA_SHIFT);
+ break;
+ case ISP_XCLK_B:
+ isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
+ ISPTCTRL_CTRL_DIVB_MASK,
+ divider << ISPTCTRL_CTRL_DIVB_SHIFT);
+ break;
+ }
+}
+
+static int isp_xclk_prepare(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+
+ omap3isp_get(xclk->isp);
+
+ return 0;
+}
+
+static void isp_xclk_unprepare(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+
+ omap3isp_put(xclk->isp);
+}
+
+static int isp_xclk_enable(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xclk->lock, flags);
+ isp_xclk_update(xclk, xclk->divider);
+ xclk->enabled = true;
+ spin_unlock_irqrestore(&xclk->lock, flags);
+
+ return 0;
+}
+
+static void isp_xclk_disable(struct clk_hw *hw)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xclk->lock, flags);
+ isp_xclk_update(xclk, 0);
+ xclk->enabled = false;
+ spin_unlock_irqrestore(&xclk->lock, flags);
+}
+
+static unsigned long isp_xclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+
+ return parent_rate / xclk->divider;
+}
+
+static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
+{
+ u32 divider;
+
+ if (*rate >= parent_rate) {
+ *rate = parent_rate;
+ return ISPTCTRL_CTRL_DIV_BYPASS;
+ }
+
+ if (*rate == 0)
+ *rate = 1;
+
+ divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
+ if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
+ divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;
+
+ *rate = parent_rate / divider;
+ return divider;
+}
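+/*
+ * Worked example for isp_xclk_calc_divider() (illustrative only; the 96 MHz
+ * parent rate below is an assumption for the sake of the example, not a value
+ * taken from this file):
+ *
+ *   parent_rate = 96000000, requested *rate = 24000000
+ *     divider = DIV_ROUND_CLOSEST(96000000, 24000000) = 4
+ *     *rate   = 96000000 / 4 = 24000000  (exact divide, no bypass)
+ *
+ *   requested *rate >= parent_rate
+ *     *rate is clamped to parent_rate and ISPTCTRL_CTRL_DIV_BYPASS is
+ *     returned, i.e. the external clock follows the parent undivided.
+ */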
+
+static long isp_xclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ isp_xclk_calc_divider(&rate, *parent_rate);
+ return rate;
+}
+
+static int isp_xclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct isp_xclk *xclk = to_isp_xclk(hw);
+ unsigned long flags;
+ u32 divider;
+
+ divider = isp_xclk_calc_divider(&rate, parent_rate);
+
+ spin_lock_irqsave(&xclk->lock, flags);
+
+ xclk->divider = divider;
+ if (xclk->enabled)
+ isp_xclk_update(xclk, divider);
+
+ spin_unlock_irqrestore(&xclk->lock, flags);
+
+ dev_dbg(xclk->isp->dev, "%s: cam_xclk%c set to %lu Hz (div %u)\n",
+ __func__, xclk->id == ISP_XCLK_A ? 'a' : 'b', rate, divider);
+ return 0;
+}
+
+static const struct clk_ops isp_xclk_ops = {
+ .prepare = isp_xclk_prepare,
+ .unprepare = isp_xclk_unprepare,
+ .enable = isp_xclk_enable,
+ .disable = isp_xclk_disable,
+ .recalc_rate = isp_xclk_recalc_rate,
+ .round_rate = isp_xclk_round_rate,
+ .set_rate = isp_xclk_set_rate,
+};
+
+static const char *isp_xclk_parent_name = "cam_mclk";
+
+static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int idx = clkspec->args[0];
+ struct isp_device *isp = data;
+
+ if (idx >= ARRAY_SIZE(isp->xclks))
+ return ERR_PTR(-ENOENT);
+
+ return isp->xclks[idx].clk;
+}
+
+static int isp_xclk_init(struct isp_device *isp)
+{
+ struct device_node *np = isp->dev->of_node;
+ struct clk_init_data init = {};
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
+ isp->xclks[i].clk = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
+ struct isp_xclk *xclk = &isp->xclks[i];
+
+ xclk->isp = isp;
+ xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
+ xclk->divider = 1;
+ spin_lock_init(&xclk->lock);
+
+ init.name = i == 0 ? "cam_xclka" : "cam_xclkb";
+ init.ops = &isp_xclk_ops;
+ init.parent_names = &isp_xclk_parent_name;
+ init.num_parents = 1;
+
+ xclk->hw.init = &init;
+ /*
+ * The first argument is NULL in order to avoid circular
+		 * references, as this driver takes a reference on the
+		 * sensor subdevice modules and the sensors would take a
+		 * reference on this module through clk_get().
+ */
+ xclk->clk = clk_register(NULL, &xclk->hw);
+ if (IS_ERR(xclk->clk))
+ return PTR_ERR(xclk->clk);
+ }
+
+ if (np)
+ of_clk_add_provider(np, isp_xclk_src_get, isp);
+
+ return 0;
+}
+
+static void isp_xclk_cleanup(struct isp_device *isp)
+{
+ struct device_node *np = isp->dev->of_node;
+ unsigned int i;
+
+ if (np)
+ of_clk_del_provider(np);
+
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
+ struct isp_xclk *xclk = &isp->xclks[i];
+
+ if (!IS_ERR(xclk->clk))
+ clk_unregister(xclk->clk);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupts
+ */
+
+/*
+ * isp_enable_interrupts - Enable ISP interrupts.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_enable_interrupts(struct isp_device *isp)
+{
+ static const u32 irq = IRQ0ENABLE_CSIA_IRQ
+ | IRQ0ENABLE_CSIB_IRQ
+ | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
+ | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
+ | IRQ0ENABLE_CCDC_VD0_IRQ
+ | IRQ0ENABLE_CCDC_VD1_IRQ
+ | IRQ0ENABLE_HS_VS_IRQ
+ | IRQ0ENABLE_HIST_DONE_IRQ
+ | IRQ0ENABLE_H3A_AWB_DONE_IRQ
+ | IRQ0ENABLE_H3A_AF_DONE_IRQ
+ | IRQ0ENABLE_PRV_DONE_IRQ
+ | IRQ0ENABLE_RSZ_DONE_IRQ;
+
+ isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+ isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
+}
+
+/*
+ * isp_disable_interrupts - Disable ISP interrupts.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_disable_interrupts(struct isp_device *isp)
+{
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
+}
+
+/*
+ * isp_core_init - ISP core settings
+ * @isp: OMAP3 ISP device
+ * @idle: Consider idle state.
+ *
+ * Set the power settings for the ISP and SBL bus and configure the HS/VS
+ * interrupt source.
+ *
+ * We need to configure the HS/VS interrupt source before interrupts get
+ * enabled, as the sensor might be free-running and the ISP default setting
+ * (HS edge) would put an unnecessary burden on the CPU.
+ */
+static void isp_core_init(struct isp_device *isp, int idle)
+{
+ isp_reg_writel(isp,
+ ((idle ? ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY :
+ ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY) <<
+ ISP_SYSCONFIG_MIDLEMODE_SHIFT) |
+ ((isp->revision == ISP_REVISION_15_0) ?
+ ISP_SYSCONFIG_AUTOIDLE : 0),
+ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
+
+ isp_reg_writel(isp,
+ (isp->autoidle ? ISPCTRL_SBL_AUTOIDLE : 0) |
+ ISPCTRL_SYNC_DETECT_VSRISE,
+ OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+}
+
+/*
+ * Configure the bridge and lane shifter. Valid inputs are
+ *
+ * CCDC_INPUT_PARALLEL: Parallel interface
+ * CCDC_INPUT_CSI2A: CSI2a receiver
+ * CCDC_INPUT_CCP2B: CCP2b receiver
+ * CCDC_INPUT_CSI2C: CSI2c receiver
+ *
+ * The bridge and lane shifter are configured according to the selected input
+ * and the ISP platform data.
+ */
+void omap3isp_configure_bridge(struct isp_device *isp,
+ enum ccdc_input_entity input,
+ const struct isp_parallel_cfg *parcfg,
+ unsigned int shift, unsigned int bridge)
+{
+ u32 ispctrl_val;
+
+ ispctrl_val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+ ispctrl_val &= ~ISPCTRL_SHIFT_MASK;
+ ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
+ ispctrl_val &= ~ISPCTRL_PAR_SER_CLK_SEL_MASK;
+ ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
+ ispctrl_val |= bridge;
+
+ switch (input) {
+ case CCDC_INPUT_PARALLEL:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
+ ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
+ shift += parcfg->data_lane_shift;
+ break;
+
+ case CCDC_INPUT_CSI2A:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
+ break;
+
+ case CCDC_INPUT_CCP2B:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
+ break;
+
+ case CCDC_INPUT_CSI2C:
+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
+ break;
+
+ default:
+ return;
+ }
+
+ ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;
+
+ isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+}
+
+void omap3isp_hist_dma_done(struct isp_device *isp)
+{
+ if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
+ omap3isp_stat_pcr_busy(&isp->isp_hist)) {
+ /* Histogram cannot be enabled in this frame anymore */
+ atomic_set(&isp->isp_hist.buf_err, 1);
+ dev_dbg(isp->dev,
+ "hist: Out of synchronization with CCDC. Ignoring next buffer.\n");
+ }
+}
+
+static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
+{
+ static const char *name[] = {
+ "CSIA_IRQ",
+ "res1",
+ "res2",
+ "CSIB_LCM_IRQ",
+ "CSIB_IRQ",
+ "res5",
+ "res6",
+ "res7",
+ "CCDC_VD0_IRQ",
+ "CCDC_VD1_IRQ",
+ "CCDC_VD2_IRQ",
+ "CCDC_ERR_IRQ",
+ "H3A_AF_DONE_IRQ",
+ "H3A_AWB_DONE_IRQ",
+ "res14",
+ "res15",
+ "HIST_DONE_IRQ",
+ "CCDC_LSC_DONE",
+ "CCDC_LSC_PREFETCH_COMPLETED",
+ "CCDC_LSC_PREFETCH_ERROR",
+ "PRV_DONE_IRQ",
+ "CBUFF_IRQ",
+ "res22",
+ "res23",
+ "RSZ_DONE_IRQ",
+ "OVF_IRQ",
+ "res26",
+ "res27",
+ "MMU_ERR_IRQ",
+ "OCP_ERR_IRQ",
+ "SEC_ERR_IRQ",
+ "HS_VS_IRQ",
+ };
+ int i;
+
+ dev_dbg(isp->dev, "ISP IRQ: ");
+
+ for (i = 0; i < ARRAY_SIZE(name); i++) {
+ if ((1 << i) & irqstatus)
+ printk(KERN_CONT "%s ", name[i]);
+ }
+ printk(KERN_CONT "\n");
+}
+
+static void isp_isr_sbl(struct isp_device *isp)
+{
+ struct device *dev = isp->dev;
+ struct isp_pipeline *pipe;
+ u32 sbl_pcr;
+
+ /*
+ * Handle shared buffer logic overflows for video buffers.
+ * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
+ */
+ sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
+ isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
+ sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;
+
+ if (sbl_pcr)
+ dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
+
+ if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
+ | ISPSBL_PCR_RSZ2_WBL_OVF
+ | ISPSBL_PCR_RSZ3_WBL_OVF
+ | ISPSBL_PCR_RSZ4_WBL_OVF)) {
+ pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
+ omap3isp_stat_sbl_overflow(&isp->isp_af);
+
+ if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
+ omap3isp_stat_sbl_overflow(&isp->isp_aewb);
+}
+
+/*
+ * isp_isr - Interrupt Service Routine for Camera ISP module.
+ * @irq: Not used currently.
+ * @_isp: Pointer to the OMAP3 ISP device
+ *
+ * Handles the corresponding callback if plugged in.
+ */
+static irqreturn_t isp_isr(int irq, void *_isp)
+{
+ static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
+ IRQ0STATUS_CCDC_LSC_DONE_IRQ |
+ IRQ0STATUS_CCDC_VD0_IRQ |
+ IRQ0STATUS_CCDC_VD1_IRQ |
+ IRQ0STATUS_HS_VS_IRQ;
+ struct isp_device *isp = _isp;
+ u32 irqstatus;
+
+ irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+ isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+
+ isp_isr_sbl(isp);
+
+ if (irqstatus & IRQ0STATUS_CSIA_IRQ)
+ omap3isp_csi2_isr(&isp->isp_csi2a);
+
+ if (irqstatus & IRQ0STATUS_CSIB_IRQ)
+ omap3isp_ccp2_isr(&isp->isp_ccp2);
+
+ if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
+ if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
+ omap3isp_preview_isr_frame_sync(&isp->isp_prev);
+ if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
+ omap3isp_resizer_isr_frame_sync(&isp->isp_res);
+ omap3isp_stat_isr_frame_sync(&isp->isp_aewb);
+ omap3isp_stat_isr_frame_sync(&isp->isp_af);
+ omap3isp_stat_isr_frame_sync(&isp->isp_hist);
+ }
+
+ if (irqstatus & ccdc_events)
+ omap3isp_ccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);
+
+ if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
+ if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
+ omap3isp_resizer_isr_frame_sync(&isp->isp_res);
+ omap3isp_preview_isr(&isp->isp_prev);
+ }
+
+ if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
+ omap3isp_resizer_isr(&isp->isp_res);
+
+ if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
+ omap3isp_stat_isr(&isp->isp_aewb);
+
+ if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
+ omap3isp_stat_isr(&isp->isp_af);
+
+ if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
+ omap3isp_stat_isr(&isp->isp_hist);
+
+ omap3isp_flush(isp);
+
+#if defined(DEBUG) && defined(ISP_ISR_DEBUG)
+ isp_isr_dbg(isp, irqstatus);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+static const struct media_device_ops isp_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
+/* -----------------------------------------------------------------------------
+ * Pipeline stream management
+ */
+
+/*
+ * isp_pipeline_enable - Enable streaming on a pipeline
+ * @pipe: ISP pipeline
+ * @mode: Stream mode (single shot or continuous)
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int isp_pipeline_enable(struct isp_pipeline *pipe,
+ enum isp_pipeline_stream_state mode)
+{
+ struct isp_device *isp = pipe->output->isp;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ unsigned long flags;
+ int ret;
+
+ /* Refuse to start streaming if an entity included in the pipeline has
+ * crashed. This check must be performed before the loop below to avoid
+ * starting entities if the pipeline won't start anyway (those entities
+ * would then likely fail to stop, making the problem worse).
+ */
+ if (media_entity_enum_intersects(&pipe->ent_enum, &isp->crashed))
+ return -EIO;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ pipe->do_propagation = false;
+
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, mode);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (subdev == &isp->isp_ccdc.subdev) {
+ v4l2_subdev_call(&isp->isp_aewb.subdev, video,
+ s_stream, mode);
+ v4l2_subdev_call(&isp->isp_af.subdev, video,
+ s_stream, mode);
+ v4l2_subdev_call(&isp->isp_hist.subdev, video,
+ s_stream, mode);
+ pipe->do_propagation = true;
+ }
+
+ /* Stop at the first external sub-device. */
+ if (subdev->dev != isp->dev)
+ break;
+ }
+
+ return 0;
+}
+
+static int isp_pipeline_wait_resizer(struct isp_device *isp)
+{
+ return omap3isp_resizer_busy(&isp->isp_res);
+}
+
+static int isp_pipeline_wait_preview(struct isp_device *isp)
+{
+ return omap3isp_preview_busy(&isp->isp_prev);
+}
+
+static int isp_pipeline_wait_ccdc(struct isp_device *isp)
+{
+ return omap3isp_stat_busy(&isp->isp_af)
+ || omap3isp_stat_busy(&isp->isp_aewb)
+ || omap3isp_stat_busy(&isp->isp_hist)
+ || omap3isp_ccdc_busy(&isp->isp_ccdc);
+}
+
+#define ISP_STOP_TIMEOUT msecs_to_jiffies(1000)
+
+static int isp_pipeline_wait(struct isp_device *isp,
+ int(*busy)(struct isp_device *isp))
+{
+ unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;
+
+ while (!time_after(jiffies, timeout)) {
+ if (!busy(isp))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * isp_pipeline_disable - Disable streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain. Wait synchronously for the modules to be stopped if
+ * necessary.
+ *
+ * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
+ * can't be stopped (in which case a software reset of the ISP is probably
+ * necessary).
+ */
+static int isp_pipeline_disable(struct isp_pipeline *pipe)
+{
+ struct isp_device *isp = pipe->output->isp;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int failure = 0;
+ int ret;
+
+ /*
+ * We need to stop all the modules after CCDC first or they'll
+ * never stop since they may not get a full frame from CCDC.
+ */
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ if (subdev == &isp->isp_ccdc.subdev) {
+ v4l2_subdev_call(&isp->isp_aewb.subdev,
+ video, s_stream, 0);
+ v4l2_subdev_call(&isp->isp_af.subdev,
+ video, s_stream, 0);
+ v4l2_subdev_call(&isp->isp_hist.subdev,
+ video, s_stream, 0);
+ }
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+
+ if (subdev == &isp->isp_res.subdev)
+ ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
+ else if (subdev == &isp->isp_prev.subdev)
+ ret |= isp_pipeline_wait(isp, isp_pipeline_wait_preview);
+ else if (subdev == &isp->isp_ccdc.subdev)
+ ret |= isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
+
+ /* Handle stop failures. An entity that fails to stop can
+ * usually just be restarted. Flag the stop failure nonetheless
+ * to trigger an ISP reset the next time the device is released,
+ * just in case.
+ *
+ * The preview engine is a special case. A failure to stop can
+ * mean a hardware crash. When that happens the preview engine
+ * won't respond to read/write operations on the L4 bus anymore,
+ * resulting in a bus fault and a kernel oops next time it gets
+ * accessed. Mark it as crashed to prevent pipelines including
+ * it from being started.
+ */
+ if (ret) {
+ dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
+ isp->stop_failure = true;
+ if (subdev == &isp->isp_prev.subdev)
+ media_entity_enum_set(&isp->crashed,
+ &subdev->entity);
+ failure = -ETIMEDOUT;
+ }
+
+ /* Stop at the first external sub-device. */
+ if (subdev->dev != isp->dev)
+ break;
+ }
+
+ return failure;
+}
+
+/*
+ * omap3isp_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: ISP pipeline
+ * @state: Stream state (stopped, single shot or continuous)
+ *
+ * Set the pipeline to the given stream state. Pipelines can be started in
+ * single-shot or continuous mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. The pipeline state is not updated when the operation
+ * fails, except when stopping the pipeline.
+ */
+int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
+ enum isp_pipeline_stream_state state)
+{
+ int ret;
+
+ if (state == ISP_PIPELINE_STREAM_STOPPED)
+ ret = isp_pipeline_disable(pipe);
+ else
+ ret = isp_pipeline_enable(pipe, state);
+
+ if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED)
+ pipe->stream_state = state;
+
+ return ret;
+}
+
+/*
+ * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
+{
+ if (pipe->input)
+ omap3isp_video_cancel_stream(pipe->input);
+ if (pipe->output)
+ omap3isp_video_cancel_stream(pipe->output);
+}
+
+/*
+ * isp_pipeline_resume - Resume streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Resume video output and input and re-enable pipeline.
+ */
+static void isp_pipeline_resume(struct isp_pipeline *pipe)
+{
+ int singleshot = pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT;
+
+ omap3isp_video_resume(pipe->output, !singleshot);
+ if (singleshot)
+ omap3isp_video_resume(pipe->input, 0);
+ isp_pipeline_enable(pipe, pipe->stream_state);
+}
+
+/*
+ * isp_pipeline_suspend - Suspend streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Suspend pipeline.
+ */
+static void isp_pipeline_suspend(struct isp_pipeline *pipe)
+{
+ isp_pipeline_disable(pipe);
+}
+
+/*
+ * isp_pipeline_is_last - Verify if entity has an enabled link to the output
+ * video node
+ * @me: ISP module's media entity
+ *
+ * Returns 1 if the entity has an enabled link to the output video node or 0
+ * otherwise. This is valid only as long as a pipeline can have at most one
+ * output node.
+ */
+static int isp_pipeline_is_last(struct media_entity *me)
+{
+ struct isp_pipeline *pipe;
+ struct media_pad *pad;
+
+ if (!me->pipe)
+ return 0;
+ pipe = to_isp_pipeline(me);
+ if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+ pad = media_entity_remote_pad(&pipe->output->pad);
+ return pad->entity == me;
+}
+
+/*
+ * isp_suspend_module_pipeline - Suspend the pipeline to which the module belongs
+ * @me: ISP module's media entity
+ *
+ * Suspend the whole pipeline if the module's entity has an enabled link to the
+ * output video node. This works only as long as a pipeline can have at most
+ * one output node.
+ */
+static void isp_suspend_module_pipeline(struct media_entity *me)
+{
+ if (isp_pipeline_is_last(me))
+ isp_pipeline_suspend(to_isp_pipeline(me));
+}
+
+/*
+ * isp_resume_module_pipeline - Resume the pipeline to which the module belongs
+ * @me: ISP module's media entity
+ *
+ * Resume the whole pipeline if the module's entity has an enabled link to the
+ * output video node. This works only as long as a pipeline can have at most
+ * one output node.
+ */
+static void isp_resume_module_pipeline(struct media_entity *me)
+{
+ if (isp_pipeline_is_last(me))
+ isp_pipeline_resume(to_isp_pipeline(me));
+}
+
+/*
+ * isp_suspend_modules - Suspend ISP submodules.
+ * @isp: OMAP3 ISP device
+ *
+ * Returns 0 if all submodules were properly left in the idle state,
+ * or 1 if a general reset is required to suspend the submodules.
+ */
+static int __maybe_unused isp_suspend_modules(struct isp_device *isp)
+{
+ unsigned long timeout;
+
+ omap3isp_stat_suspend(&isp->isp_aewb);
+ omap3isp_stat_suspend(&isp->isp_af);
+ omap3isp_stat_suspend(&isp->isp_hist);
+ isp_suspend_module_pipeline(&isp->isp_res.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_prev.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_ccdc.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_csi2a.subdev.entity);
+ isp_suspend_module_pipeline(&isp->isp_ccp2.subdev.entity);
+
+ timeout = jiffies + ISP_STOP_TIMEOUT;
+ while (omap3isp_stat_busy(&isp->isp_af)
+ || omap3isp_stat_busy(&isp->isp_aewb)
+ || omap3isp_stat_busy(&isp->isp_hist)
+ || omap3isp_preview_busy(&isp->isp_prev)
+ || omap3isp_resizer_busy(&isp->isp_res)
+ || omap3isp_ccdc_busy(&isp->isp_ccdc)) {
+ if (time_after(jiffies, timeout)) {
+ dev_info(isp->dev, "can't stop modules.\n");
+ return 1;
+ }
+ msleep(1);
+ }
+
+ return 0;
+}
+
+/*
+ * isp_resume_modules - Resume ISP submodules.
+ * @isp: OMAP3 ISP device
+ */
+static void __maybe_unused isp_resume_modules(struct isp_device *isp)
+{
+ omap3isp_stat_resume(&isp->isp_aewb);
+ omap3isp_stat_resume(&isp->isp_af);
+ omap3isp_stat_resume(&isp->isp_hist);
+ isp_resume_module_pipeline(&isp->isp_res.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_prev.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_ccdc.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_csi2a.subdev.entity);
+ isp_resume_module_pipeline(&isp->isp_ccp2.subdev.entity);
+}
+
+/*
+ * isp_reset - Reset ISP with a timeout wait for idle.
+ * @isp: OMAP3 ISP device
+ */
+static int isp_reset(struct isp_device *isp)
+{
+ unsigned long timeout = 0;
+
+ isp_reg_writel(isp,
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG)
+ | ISP_SYSCONFIG_SOFTRESET,
+ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
+ while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN,
+ ISP_SYSSTATUS) & 0x1)) {
+ if (timeout++ > 10000) {
+ dev_alert(isp->dev, "cannot reset ISP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ isp->stop_failure = false;
+ media_entity_enum_zero(&isp->crashed);
+ return 0;
+}
+
+/*
+ * isp_save_context - Saves the values of the ISP module registers.
+ * @isp: OMAP3 ISP device
+ * @reg_list: Structure containing pairs of register address and value into
+ *	      which the current register contents are saved.
+ */
+static void
+isp_save_context(struct isp_device *isp, struct isp_reg *reg_list)
+{
+ struct isp_reg *next = reg_list;
+
+ for (; next->reg != ISP_TOK_TERM; next++)
+ next->val = isp_reg_readl(isp, next->mmio_range, next->reg);
+}
+
+/*
+ * isp_restore_context - Restores the values of the ISP module registers.
+ * @isp: OMAP3 ISP device
+ * @reg_list: Structure containing pairs of register address and value to
+ * modify on OMAP.
+ */
+static void
+isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
+{
+ struct isp_reg *next = reg_list;
+
+ for (; next->reg != ISP_TOK_TERM; next++)
+ isp_reg_writel(isp, next->val, next->mmio_range, next->reg);
+}
+
+/*
+ * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
+ * @isp: OMAP3 ISP device
+ *
+ * Routine for saving the context of each module in the ISP.
+ * CCDC, HIST, H3A, PREV, RESZ and MMU.
+ */
+static void isp_save_ctx(struct isp_device *isp)
+{
+ isp_save_context(isp, isp_reg_list);
+ omap_iommu_save_ctx(isp->dev);
+}
+
+/*
+ * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
+ * @isp: OMAP3 ISP device
+ *
+ * Routine for restoring the context of each module in the ISP.
+ * CCDC, HIST, H3A, PREV, RESZ and MMU.
+ */
+static void isp_restore_ctx(struct isp_device *isp)
+{
+ isp_restore_context(isp, isp_reg_list);
+ omap_iommu_restore_ctx(isp->dev);
+ omap3isp_ccdc_restore_context(isp);
+ omap3isp_preview_restore_context(isp);
+}
+
+/* -----------------------------------------------------------------------------
+ * SBL resources management
+ */
+#define OMAP3_ISP_SBL_READ (OMAP3_ISP_SBL_CSI1_READ | \
+ OMAP3_ISP_SBL_CCDC_LSC_READ | \
+ OMAP3_ISP_SBL_PREVIEW_READ | \
+ OMAP3_ISP_SBL_RESIZER_READ)
+#define OMAP3_ISP_SBL_WRITE (OMAP3_ISP_SBL_CSI1_WRITE | \
+ OMAP3_ISP_SBL_CSI2A_WRITE | \
+ OMAP3_ISP_SBL_CSI2C_WRITE | \
+ OMAP3_ISP_SBL_CCDC_WRITE | \
+ OMAP3_ISP_SBL_PREVIEW_WRITE)
+
+void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
+{
+ u32 sbl = 0;
+
+ isp->sbl_resources |= res;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
+ sbl |= ISPCTRL_SBL_SHARED_RPORTA;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
+ sbl |= ISPCTRL_SBL_SHARED_RPORTB;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
+ sbl |= ISPCTRL_SBL_SHARED_WPORTC;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
+ sbl |= ISPCTRL_SBL_WR0_RAM_EN;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
+ sbl |= ISPCTRL_SBL_WR1_RAM_EN;
+
+ if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
+ sbl |= ISPCTRL_SBL_RD_RAM_EN;
+
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
+}
+
+void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
+{
+ u32 sbl = 0;
+
+ isp->sbl_resources &= ~res;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
+ sbl |= ISPCTRL_SBL_SHARED_RPORTA;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
+ sbl |= ISPCTRL_SBL_SHARED_RPORTB;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
+ sbl |= ISPCTRL_SBL_SHARED_WPORTC;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
+ sbl |= ISPCTRL_SBL_WR0_RAM_EN;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
+ sbl |= ISPCTRL_SBL_WR1_RAM_EN;
+
+ if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
+ sbl |= ISPCTRL_SBL_RD_RAM_EN;
+
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
+}
+
+/*
+ * isp_module_sync_idle - Helper to sync module with its idle state
+ * @me: ISP submodule's media entity
+ * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag indicating that the module wants to stop
+ *
+ * This function checks if the ISP submodule needs to wait for the next
+ * interrupt. If so, it makes the caller sleep while waiting for that event.
+ */
+int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(me);
+
+ if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED ||
+ (pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT &&
+ !isp_pipeline_ready(pipe)))
+ return 0;
+
+ /*
+ * atomic_set() doesn't include a memory barrier on ARM in SMP
+ * configurations. Call smp_mb() explicitly here to avoid race conditions.
+ */
+ atomic_set(stopping, 1);
+ smp_mb();
+
+ /*
+ * If the module is the last one in the pipeline, it is writing to
+ * memory. In that case it is necessary to check whether the module is
+ * already paused due to a DMA queue underrun or whether it has to wait
+ * for the next interrupt to become idle.
+ * If it isn't the last one, the function won't sleep, but *stopping
+ * will still be set so that the next interrupt can notice that the
+ * module wants to be idle.
+ */
+ if (isp_pipeline_is_last(me)) {
+ struct isp_video *video = pipe->output;
+ unsigned long flags;
+ spin_lock_irqsave(&video->irqlock, flags);
+ if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ atomic_set(stopping, 0);
+ smp_mb();
+ return 0;
+ }
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ if (!wait_event_timeout(*wait, !atomic_read(stopping),
+ msecs_to_jiffies(1000))) {
+ atomic_set(stopping, 0);
+ smp_mb();
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * omap3isp_module_sync_is_stopping - Helper to verify if module was stopping
+ * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag indicating that the module wants to stop
+ *
+ * This function checks if the ISP submodule was stopping. If so, it notifies
+ * the caller by setting *stopping to 0 and waking up the wait queue.
+ * Returns 1 if it was stopping or 0 otherwise.
+ */
+int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ if (atomic_cmpxchg(stopping, 1, 0)) {
+ wake_up(wait);
+ return 1;
+ }
+
+ return 0;
+}
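+
+/*
+ * Illustrative sketch (not part of the driver): a submodule typically pairs
+ * the two helpers above. Its stream-off path waits for the hardware to
+ * become idle, while its interrupt handler reports that the idle state has
+ * been reached. With 'wait' and 'stopping' standing in for the submodule's
+ * own wait queue and stopping flag:
+ *
+ *	stream-off path:
+ *		ret = omap3isp_module_sync_idle(&sd->entity, &wait, &stopping);
+ *
+ *	interrupt handler, once the current frame completes:
+ *		if (omap3isp_module_sync_is_stopping(&wait, &stopping))
+ *			return;
+ */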
+
+/* --------------------------------------------------------------------------
+ * Clock management
+ */
+
+#define ISPCTRL_CLKS_MASK (ISPCTRL_H3A_CLK_EN | \
+ ISPCTRL_HIST_CLK_EN | \
+ ISPCTRL_RSZ_CLK_EN | \
+ (ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN) | \
+ (ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN))
+
+static void __isp_subclk_update(struct isp_device *isp)
+{
+ u32 clk = 0;
+
+ /* AEWB and AF share the same clock. */
+ if (isp->subclk_resources &
+ (OMAP3_ISP_SUBCLK_AEWB | OMAP3_ISP_SUBCLK_AF))
+ clk |= ISPCTRL_H3A_CLK_EN;
+
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_HIST)
+ clk |= ISPCTRL_HIST_CLK_EN;
+
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_RESIZER)
+ clk |= ISPCTRL_RSZ_CLK_EN;
+
+ /* NOTE: For the CCDC & Preview submodules, the internal RAM enable
+ * bits must be set and cleared along with the clock enable bits.
+ */
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_CCDC)
+ clk |= ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN;
+
+ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_PREVIEW)
+ clk |= ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN;
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
+ ISPCTRL_CLKS_MASK, clk);
+}
+
+void omap3isp_subclk_enable(struct isp_device *isp,
+ enum isp_subclk_resource res)
+{
+ isp->subclk_resources |= res;
+
+ __isp_subclk_update(isp);
+}
+
+void omap3isp_subclk_disable(struct isp_device *isp,
+ enum isp_subclk_resource res)
+{
+ isp->subclk_resources &= ~res;
+
+ __isp_subclk_update(isp);
+}
+
+/*
+ * isp_enable_clocks - Enable ISP clocks
+ * @isp: OMAP3 ISP device
+ *
+ * Return 0 if successful, or the clk_prepare_enable() return value if any of
+ * the clocks fails to enable.
+ */
+static int isp_enable_clocks(struct isp_device *isp)
+{
+ int r;
+ unsigned long rate;
+
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ if (r) {
+ dev_err(isp->dev, "failed to enable cam_ick clock\n");
+ goto out_clk_enable_ick;
+ }
+ r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
+ if (r) {
+ dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
+ goto out_clk_enable_mclk;
+ }
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
+ if (r) {
+ dev_err(isp->dev, "failed to enable cam_mclk clock\n");
+ goto out_clk_enable_mclk;
+ }
+ rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
+ if (rate != CM_CAM_MCLK_HZ)
+ dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
+ " expected : %d\n"
+ " actual : %ld\n", CM_CAM_MCLK_HZ, rate);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
+ if (r) {
+ dev_err(isp->dev, "failed to enable csi2_fck clock\n");
+ goto out_clk_enable_csi2_fclk;
+ }
+ return 0;
+
+out_clk_enable_csi2_fclk:
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
+out_clk_enable_mclk:
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
+out_clk_enable_ick:
+ return r;
+}
+
+/*
+ * isp_disable_clocks - Disable ISP clocks
+ * @isp: OMAP3 ISP device
+ */
+static void isp_disable_clocks(struct isp_device *isp)
+{
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
+}
+
+static const char *isp_clocks[] = {
+ "cam_ick",
+ "cam_mclk",
+ "csi2_96m_fck",
+ "l3_ick",
+};
+
+static int isp_get_clocks(struct isp_device *isp)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
+ clk = devm_clk_get(isp->dev, isp_clocks[i]);
+ if (IS_ERR(clk)) {
+ dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
+ return PTR_ERR(clk);
+ }
+
+ isp->clock[i] = clk;
+ }
+
+ return 0;
+}
+
+/*
+ * omap3isp_get - Acquire the ISP resource.
+ *
+ * Initializes the clocks for the first acquire.
+ *
+ * Increment the reference count on the ISP. If the first reference is taken,
+ * enable clocks and power-up all submodules.
+ *
+ * Return a pointer to the ISP device structure, or NULL if an error occurred.
+ */
+static struct isp_device *__omap3isp_get(struct isp_device *isp, bool irq)
+{
+ struct isp_device *__isp = isp;
+
+ if (isp == NULL)
+ return NULL;
+
+ mutex_lock(&isp->isp_mutex);
+ if (isp->ref_count > 0)
+ goto out;
+
+ if (isp_enable_clocks(isp) < 0) {
+ __isp = NULL;
+ goto out;
+ }
+
+ /* We don't want to restore context before saving it! */
+ if (isp->has_context)
+ isp_restore_ctx(isp);
+
+ if (irq)
+ isp_enable_interrupts(isp);
+
+out:
+ if (__isp != NULL)
+ isp->ref_count++;
+ mutex_unlock(&isp->isp_mutex);
+
+ return __isp;
+}
+
+struct isp_device *omap3isp_get(struct isp_device *isp)
+{
+ return __omap3isp_get(isp, true);
+}
+
+/*
+ * omap3isp_put - Release the ISP
+ *
+ * Decrement the reference count on the ISP. If the last reference is released,
+ * power-down all submodules, disable clocks and free temporary buffers.
+ */
+static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
+{
+ if (isp == NULL)
+ return;
+
+ mutex_lock(&isp->isp_mutex);
+ BUG_ON(isp->ref_count == 0);
+ if (--isp->ref_count == 0) {
+ isp_disable_interrupts(isp);
+ if (save_ctx) {
+ isp_save_ctx(isp);
+ isp->has_context = 1;
+ }
+ /* Reset the ISP if an entity has failed to stop. This is the
+ * only way to recover from such conditions.
+ */
+ if (!media_entity_enum_empty(&isp->crashed) ||
+ isp->stop_failure)
+ isp_reset(isp);
+ isp_disable_clocks(isp);
+ }
+ mutex_unlock(&isp->isp_mutex);
+}
+
+void omap3isp_put(struct isp_device *isp)
+{
+ __omap3isp_put(isp, true);
+}
+
+/* --------------------------------------------------------------------------
+ * Platform device driver
+ */
+
+/*
+ * omap3isp_print_status - Prints the values of the ISP Control Module registers
+ * @isp: OMAP3 ISP device
+ */
+#define ISP_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
+#define SBL_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
+
+void omap3isp_print_status(struct isp_device *isp)
+{
+ dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
+
+ ISP_PRINT_REGISTER(isp, SYSCONFIG);
+ ISP_PRINT_REGISTER(isp, SYSSTATUS);
+ ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
+ ISP_PRINT_REGISTER(isp, IRQ0STATUS);
+ ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
+ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
+ ISP_PRINT_REGISTER(isp, CTRL);
+ ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
+ ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
+ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
+ ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
+ ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
+ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
+ ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
+ ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
+
+ SBL_PRINT_REGISTER(isp, PCR);
+ SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * Power management support.
+ *
+ * As the ISP can't properly handle an input video stream interruption on a
+ * non-frame boundary, the ISP pipelines need to be stopped before sensors get
+ * suspended. However, as suspending the sensors can require a running clock,
+ * which can be provided by the ISP, the ISP can't be completely suspended
+ * before the sensor.
+ *
+ * To solve this problem, power management support is split into
+ * prepare/complete and suspend/resume operations. The pipelines are stopped in
+ * prepare() and the ISP clocks get disabled in suspend(). Similarly, the
+ * clocks are reenabled in resume(), and the pipelines are restarted in
+ * complete().
+ *
+ * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
+ * yet.
+ */
+static int isp_pm_prepare(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+ int reset;
+
+ WARN_ON(mutex_is_locked(&isp->isp_mutex));
+
+ if (isp->ref_count == 0)
+ return 0;
+
+ reset = isp_suspend_modules(isp);
+ isp_disable_interrupts(isp);
+ isp_save_ctx(isp);
+ if (reset)
+ isp_reset(isp);
+
+ return 0;
+}
+
+static int isp_pm_suspend(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+
+ WARN_ON(mutex_is_locked(&isp->isp_mutex));
+
+ if (isp->ref_count)
+ isp_disable_clocks(isp);
+
+ return 0;
+}
+
+static int isp_pm_resume(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+
+ if (isp->ref_count == 0)
+ return 0;
+
+ return isp_enable_clocks(isp);
+}
+
+static void isp_pm_complete(struct device *dev)
+{
+ struct isp_device *isp = dev_get_drvdata(dev);
+
+ if (isp->ref_count == 0)
+ return;
+
+ isp_restore_ctx(isp);
+ isp_enable_interrupts(isp);
+ isp_resume_modules(isp);
+}
+
+#else
+
+#define isp_pm_prepare NULL
+#define isp_pm_suspend NULL
+#define isp_pm_resume NULL
+#define isp_pm_complete NULL
+
+#endif /* CONFIG_PM */
+
+static void isp_unregister_entities(struct isp_device *isp)
+{
+ media_device_unregister(&isp->media_dev);
+
+ omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
+ omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
+ omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
+ omap3isp_preview_unregister_entities(&isp->isp_prev);
+ omap3isp_resizer_unregister_entities(&isp->isp_res);
+ omap3isp_stat_unregister_entities(&isp->isp_aewb);
+ omap3isp_stat_unregister_entities(&isp->isp_af);
+ omap3isp_stat_unregister_entities(&isp->isp_hist);
+
+ v4l2_device_unregister(&isp->v4l2_dev);
+ media_device_cleanup(&isp->media_dev);
+}
+
+static int isp_link_entity(
+ struct isp_device *isp, struct media_entity *entity,
+ enum isp_interface_type interface)
+{
+ struct media_entity *input;
+ unsigned int flags;
+ unsigned int pad;
+ unsigned int i;
+
+ /* Connect the sensor to the correct interface module.
+ * Parallel sensors are connected directly to the CCDC, while
+ * serial sensors are connected to the CSI2a, CCP2b or CSI2c
+ * receiver through CSIPHY1 or CSIPHY2.
+ */
+ switch (interface) {
+ case ISP_INTERFACE_PARALLEL:
+ input = &isp->isp_ccdc.subdev.entity;
+ pad = CCDC_PAD_SINK;
+ flags = 0;
+ break;
+
+ case ISP_INTERFACE_CSI2A_PHY2:
+ input = &isp->isp_csi2a.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ case ISP_INTERFACE_CCP2B_PHY1:
+ case ISP_INTERFACE_CCP2B_PHY2:
+ input = &isp->isp_ccp2.subdev.entity;
+ pad = CCP2_PAD_SINK;
+ flags = 0;
+ break;
+
+ case ISP_INTERFACE_CSI2C_PHY1:
+ input = &isp->isp_csi2c.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ default:
+ dev_err(isp->dev, "%s: invalid interface type %u\n", __func__,
+ interface);
+ return -EINVAL;
+ }
+
+ /*
+ * Not all interfaces are available on all revisions of the
+ * ISP. The sub-devices of those interfaces aren't initialised
+ * in such a case. Check this by ensuring the num_pads is
+ * non-zero.
+ */
+ if (!input->num_pads) {
+ dev_err(isp->dev, "%s: invalid input %u\n", entity->name,
+ interface);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < entity->num_pads; i++) {
+ if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == entity->num_pads) {
+ dev_err(isp->dev, "%s: no source pad in external entity %s\n",
+ __func__, entity->name);
+ return -EINVAL;
+ }
+
+ return media_create_pad_link(entity, i, input, pad, flags);
+}
+
+static int isp_register_entities(struct isp_device *isp)
+{
+ int ret;
+
+ isp->media_dev.dev = isp->dev;
+ strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
+ sizeof(isp->media_dev.model));
+ isp->media_dev.hw_revision = isp->revision;
+ isp->media_dev.ops = &isp_media_ops;
+ media_device_init(&isp->media_dev);
+
+ isp->v4l2_dev.mdev = &isp->media_dev;
+ ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto done;
+ }
+
+ /* Register internal entities */
+ ret = omap3isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_preview_register_entities(&isp->isp_prev,
+ &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_stat_register_entities(&isp->isp_aewb, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_stat_register_entities(&isp->isp_af, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap3isp_stat_register_entities(&isp->isp_hist, &isp->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+done:
+ if (ret < 0)
+ isp_unregister_entities(isp);
+
+ return ret;
+}
+
+/*
+ * isp_create_links() - Create links for internal and external ISP entities
+ * @isp: Pointer to ISP device
+ *
+ * This function creates all links between ISP internal and external entities.
+ *
+ * Return: A negative error code on failure or zero on success. Possible error
+ * codes are those returned by media_create_pad_link().
+ */
+static int isp_create_links(struct isp_device *isp)
+{
+ int ret;
+
+ /* Create links between entities and video nodes. */
+ ret = media_create_pad_link(
+ &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
+ &isp->isp_csi2a.video_out.video.entity, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccp2.video_in.video.entity, 0,
+ &isp->isp_ccp2.subdev.entity, CCP2_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
+ &isp->isp_ccdc.video_out.video.entity, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_prev.video_in.video.entity, 0,
+ &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
+ &isp->isp_prev.video_out.video.entity, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_res.video_in.video.entity, 0,
+ &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_res.subdev.entity, RESZ_PAD_SOURCE,
+ &isp->isp_res.video_out.video.entity, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ /* Create links between entities. */
+ ret = media_create_pad_link(
+ &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE,
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
+ &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
+ &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_aewb.subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_af.subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(
+ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+ &isp->isp_hist.subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void isp_cleanup_modules(struct isp_device *isp)
+{
+ omap3isp_h3a_aewb_cleanup(isp);
+ omap3isp_h3a_af_cleanup(isp);
+ omap3isp_hist_cleanup(isp);
+ omap3isp_resizer_cleanup(isp);
+ omap3isp_preview_cleanup(isp);
+ omap3isp_ccdc_cleanup(isp);
+ omap3isp_ccp2_cleanup(isp);
+ omap3isp_csi2_cleanup(isp);
+ omap3isp_csiphy_cleanup(isp);
+}
+
+static int isp_initialize_modules(struct isp_device *isp)
+{
+ int ret;
+
+ ret = omap3isp_csiphy_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "CSI PHY initialization failed\n");
+ return ret;
+ }
+
+ ret = omap3isp_csi2_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "CSI2 initialization failed\n");
+ goto error_csi2;
+ }
+
+ ret = omap3isp_ccp2_init(isp);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(isp->dev, "CCP2 initialization failed\n");
+ goto error_ccp2;
+ }
+
+ ret = omap3isp_ccdc_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "CCDC initialization failed\n");
+ goto error_ccdc;
+ }
+
+ ret = omap3isp_preview_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "Preview initialization failed\n");
+ goto error_preview;
+ }
+
+ ret = omap3isp_resizer_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "Resizer initialization failed\n");
+ goto error_resizer;
+ }
+
+ ret = omap3isp_hist_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "Histogram initialization failed\n");
+ goto error_hist;
+ }
+
+ ret = omap3isp_h3a_aewb_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "H3A AEWB initialization failed\n");
+ goto error_h3a_aewb;
+ }
+
+ ret = omap3isp_h3a_af_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "H3A AF initialization failed\n");
+ goto error_h3a_af;
+ }
+
+ return 0;
+
+error_h3a_af:
+ omap3isp_h3a_aewb_cleanup(isp);
+error_h3a_aewb:
+ omap3isp_hist_cleanup(isp);
+error_hist:
+ omap3isp_resizer_cleanup(isp);
+error_resizer:
+ omap3isp_preview_cleanup(isp);
+error_preview:
+ omap3isp_ccdc_cleanup(isp);
+error_ccdc:
+ omap3isp_ccp2_cleanup(isp);
+error_ccp2:
+ omap3isp_csi2_cleanup(isp);
+error_csi2:
+ omap3isp_csiphy_cleanup(isp);
+
+ return ret;
+}
+
+static void isp_detach_iommu(struct isp_device *isp)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ arm_iommu_detach_device(isp->dev);
+ arm_iommu_release_mapping(isp->mapping);
+ isp->mapping = NULL;
+#endif
+}
+
+static int isp_attach_iommu(struct isp_device *isp)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+ int ret;
+
+ /*
+ * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+ * VAs. This will allocate a corresponding IOMMU domain.
+ */
+ mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
+ if (IS_ERR(mapping)) {
+ dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
+ return PTR_ERR(mapping);
+ }
+
+ isp->mapping = mapping;
+
+ /* Attach the ARM VA mapping to the device. */
+ ret = arm_iommu_attach_device(isp->dev, mapping);
+ if (ret < 0) {
+ dev_err(isp->dev, "failed to attach device to VA mapping\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ arm_iommu_release_mapping(isp->mapping);
+ isp->mapping = NULL;
+ return ret;
+#else
+ return -ENODEV;
+#endif
+}
+
+/*
+ * isp_remove - Remove ISP platform device
+ * @pdev: Pointer to ISP platform device
+ *
+ * Always returns 0.
+ */
+static int isp_remove(struct platform_device *pdev)
+{
+ struct isp_device *isp = platform_get_drvdata(pdev);
+
+ v4l2_async_notifier_unregister(&isp->notifier);
+ isp_unregister_entities(isp);
+ isp_cleanup_modules(isp);
+ isp_xclk_cleanup(isp);
+
+ __omap3isp_get(isp, false);
+ isp_detach_iommu(isp);
+ __omap3isp_put(isp, false);
+
+ media_entity_enum_cleanup(&isp->crashed);
+ v4l2_async_notifier_cleanup(&isp->notifier);
+
+ return 0;
+}
+
+enum isp_of_phy {
+ ISP_OF_PHY_PARALLEL = 0,
+ ISP_OF_PHY_CSIPHY1,
+ ISP_OF_PHY_CSIPHY2,
+};
+
+static int isp_fwnode_parse(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
+{
+ struct isp_async_subdev *isd =
+ container_of(asd, struct isp_async_subdev, asd);
+ struct isp_bus_cfg *buscfg = &isd->bus;
+ bool csi1 = false;
+ unsigned int i;
+
+ dev_dbg(dev, "parsing endpoint %pOF, interface %u\n",
+ to_of_node(vep->base.local_fwnode), vep->base.port);
+
+ switch (vep->base.port) {
+ case ISP_OF_PHY_PARALLEL:
+ buscfg->interface = ISP_INTERFACE_PARALLEL;
+ buscfg->bus.parallel.data_lane_shift =
+ vep->bus.parallel.data_shift;
+ buscfg->bus.parallel.clk_pol =
+ !!(vep->bus.parallel.flags
+ & V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ buscfg->bus.parallel.hs_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ buscfg->bus.parallel.vs_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ buscfg->bus.parallel.fld_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
+ buscfg->bus.parallel.data_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
+ buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
+ break;
+
+ case ISP_OF_PHY_CSIPHY1:
+ case ISP_OF_PHY_CSIPHY2:
+ switch (vep->bus_type) {
+ case V4L2_MBUS_CCP2:
+ case V4L2_MBUS_CSI1:
+ dev_dbg(dev, "CSI-1/CCP-2 configuration\n");
+ csi1 = true;
+ break;
+ case V4L2_MBUS_CSI2:
+ dev_dbg(dev, "CSI-2 configuration\n");
+ csi1 = false;
+ break;
+ default:
+ dev_err(dev, "unsupported bus type %u\n",
+ vep->bus_type);
+ return -EINVAL;
+ }
+
+ switch (vep->base.port) {
+ case ISP_OF_PHY_CSIPHY1:
+ if (csi1)
+ buscfg->interface = ISP_INTERFACE_CCP2B_PHY1;
+ else
+ buscfg->interface = ISP_INTERFACE_CSI2C_PHY1;
+ break;
+ case ISP_OF_PHY_CSIPHY2:
+ if (csi1)
+ buscfg->interface = ISP_INTERFACE_CCP2B_PHY2;
+ else
+ buscfg->interface = ISP_INTERFACE_CSI2A_PHY2;
+ break;
+ }
+ if (csi1) {
+ buscfg->bus.ccp2.lanecfg.clk.pos =
+ vep->bus.mipi_csi1.clock_lane;
+ buscfg->bus.ccp2.lanecfg.clk.pol =
+ vep->bus.mipi_csi1.lane_polarity[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ buscfg->bus.ccp2.lanecfg.clk.pol,
+ buscfg->bus.ccp2.lanecfg.clk.pos);
+
+ buscfg->bus.ccp2.lanecfg.data[0].pos =
+ vep->bus.mipi_csi1.data_lane;
+ buscfg->bus.ccp2.lanecfg.data[0].pol =
+ vep->bus.mipi_csi1.lane_polarity[1];
+
+ dev_dbg(dev, "data lane polarity %u, pos %u\n",
+ buscfg->bus.ccp2.lanecfg.data[0].pol,
+ buscfg->bus.ccp2.lanecfg.data[0].pos);
+
+ buscfg->bus.ccp2.strobe_clk_pol =
+ vep->bus.mipi_csi1.clock_inv;
+ buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
+ buscfg->bus.ccp2.ccp2_mode =
+ vep->bus_type == V4L2_MBUS_CCP2;
+ buscfg->bus.ccp2.vp_clk_pol = 1;
+
+ buscfg->bus.ccp2.crc = 1;
+ } else {
+ buscfg->bus.csi2.lanecfg.clk.pos =
+ vep->bus.mipi_csi2.clock_lane;
+ buscfg->bus.csi2.lanecfg.clk.pol =
+ vep->bus.mipi_csi2.lane_polarities[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ buscfg->bus.csi2.lanecfg.clk.pol,
+ buscfg->bus.csi2.lanecfg.clk.pos);
+
+ buscfg->bus.csi2.num_data_lanes =
+ vep->bus.mipi_csi2.num_data_lanes;
+
+ for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
+ buscfg->bus.csi2.lanecfg.data[i].pos =
+ vep->bus.mipi_csi2.data_lanes[i];
+ buscfg->bus.csi2.lanecfg.data[i].pol =
+ vep->bus.mipi_csi2.lane_polarities[i + 1];
+ dev_dbg(dev,
+ "data lane %u polarity %u, pos %u\n", i,
+ buscfg->bus.csi2.lanecfg.data[i].pol,
+ buscfg->bus.csi2.lanecfg.data[i].pos);
+ }
+ /*
+ * FIXME: now we assume the CRC is always there.
+ * Implement a way to obtain this information from the
+ * sensor. Frame descriptors, perhaps?
+ */
+ buscfg->bus.csi2.crc = 1;
+ }
+ break;
+
+ default:
+ dev_warn(dev, "%pOF: invalid interface %u\n",
+ to_of_node(vep->base.local_fwnode), vep->base.port);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
+{
+ struct isp_device *isp = container_of(async, struct isp_device,
+ notifier);
+ struct v4l2_device *v4l2_dev = &isp->v4l2_dev;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
+ if (sd->notifier != &isp->notifier)
+ continue;
+
+ ret = isp_link_entity(isp, &sd->entity,
+ v4l2_subdev_to_bus_cfg(sd)->interface);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ return media_device_register(&isp->media_dev);
+}
+
+static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
+ .complete = isp_subdev_notifier_complete,
+};
+
+/*
+ * isp_probe - Probe ISP platform device
+ * @pdev: Pointer to ISP platform device
+ *
+ * Returns 0 if successful,
+ * -ENOMEM if no memory is available,
+ * -ENODEV if no platform device resources are found
+ * or there is no space for remapping registers,
+ * -EINVAL if the ISR can't be installed,
+ * or the clk_get() error value.
+ */
+static int isp_probe(struct platform_device *pdev)
+{
+ struct isp_device *isp;
+ struct resource *mem;
+ int ret;
+ int i, m;
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp) {
+ dev_err(&pdev->dev, "could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ ret = fwnode_property_read_u32(of_fwnode_handle(pdev->dev.of_node),
+ "ti,phy-type", &isp->phy_type);
+ if (ret)
+ return ret;
+
+ isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(isp->syscon))
+ return PTR_ERR(isp->syscon);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "syscon", 1, &isp->syscon_offset);
+ if (ret)
+ return ret;
+
+ isp->autoidle = autoidle;
+
+ mutex_init(&isp->isp_mutex);
+ spin_lock_init(&isp->stat_lock);
+
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(
+ &pdev->dev, &isp->notifier, sizeof(struct isp_async_subdev),
+ isp_fwnode_parse);
+ if (ret < 0)
+ goto error;
+
+ isp->dev = &pdev->dev;
+ isp->ref_count = 0;
+
+ ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto error;
+
+ platform_set_drvdata(pdev, isp);
+
+ /* Regulators */
+ isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
+ isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
+
+ /* Clocks
+ *
+ * The ISP clock tree is revision-dependent. We thus need to enable ICLK
+ * manually to read the revision before calling __omap3isp_get().
+ *
+ * Start by mapping the ISP MMIO area, which is in two pieces.
+ * The ISP IOMMU is in between. Map both now, and fill in the
+ * ISP revision specific portions a little later in the
+ * function.
+ */
+ for (i = 0; i < 2; i++) {
+ unsigned int map_idx = i ? OMAP3_ISP_IOMEM_CSI2A_REGS1 : 0;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ isp->mmio_base[map_idx] =
+ devm_ioremap_resource(isp->dev, mem);
+ if (IS_ERR(isp->mmio_base[map_idx])) {
+ ret = PTR_ERR(isp->mmio_base[map_idx]);
+ goto error;
+ }
+ }
+
+ ret = isp_get_clocks(isp);
+ if (ret < 0)
+ goto error;
+
+ ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ if (ret < 0)
+ goto error;
+
+ isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+ dev_info(isp->dev, "Revision %d.%d found\n",
+ (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
+
+ clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+
+ if (__omap3isp_get(isp, false) == NULL) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ret = isp_reset(isp);
+ if (ret < 0)
+ goto error_isp;
+
+ ret = isp_xclk_init(isp);
+ if (ret < 0)
+ goto error_isp;
+
+ /* Memory resources */
+ for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
+ if (isp->revision == isp_res_maps[m].isp_rev)
+ break;
+
+ if (m == ARRAY_SIZE(isp_res_maps)) {
+ dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
+ (isp->revision & 0xf0) >> 4, isp->revision & 0xf);
+ ret = -ENODEV;
+ goto error_isp;
+ }
+
+ for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
+ isp->mmio_base[i] =
+ isp->mmio_base[0] + isp_res_maps[m].offset[i];
+
+ for (i = OMAP3_ISP_IOMEM_CSIPHY2; i < OMAP3_ISP_IOMEM_LAST; i++)
+ isp->mmio_base[i] =
+ isp->mmio_base[OMAP3_ISP_IOMEM_CSI2A_REGS1]
+ + isp_res_maps[m].offset[i];
+
+ isp->mmio_hist_base_phys =
+ mem->start + isp_res_maps[m].offset[OMAP3_ISP_IOMEM_HIST];
+
+ /* IOMMU */
+ ret = isp_attach_iommu(isp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to attach to IOMMU\n");
+ goto error_isp;
+ }
+
+ /* Interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(isp->dev, "No IRQ resource\n");
+ ret = -ENODEV;
+ goto error_iommu;
+ }
+ isp->irq_num = ret;
+
+ if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
+ "OMAP3 ISP", isp)) {
+ dev_err(isp->dev, "Unable to request IRQ\n");
+ ret = -EINVAL;
+ goto error_iommu;
+ }
+
+ /* Entities */
+ ret = isp_initialize_modules(isp);
+ if (ret < 0)
+ goto error_iommu;
+
+ ret = isp_register_entities(isp);
+ if (ret < 0)
+ goto error_modules;
+
+ ret = isp_create_links(isp);
+ if (ret < 0)
+ goto error_register_entities;
+
+ isp->notifier.ops = &isp_subdev_notifier_ops;
+
+ ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
+ if (ret)
+ goto error_register_entities;
+
+ isp_core_init(isp, 1);
+ omap3isp_put(isp);
+
+ return 0;
+
+error_register_entities:
+ isp_unregister_entities(isp);
+error_modules:
+ isp_cleanup_modules(isp);
+error_iommu:
+ isp_detach_iommu(isp);
+error_isp:
+ isp_xclk_cleanup(isp);
+ __omap3isp_put(isp, false);
+error:
+ v4l2_async_notifier_cleanup(&isp->notifier);
+ mutex_destroy(&isp->isp_mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops omap3isp_pm_ops = {
+ .prepare = isp_pm_prepare,
+ .suspend = isp_pm_suspend,
+ .resume = isp_pm_resume,
+ .complete = isp_pm_complete,
+};
+
+static struct platform_device_id omap3isp_id_table[] = {
+ { "omap3isp", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
+
+static const struct of_device_id omap3isp_of_table[] = {
+ { .compatible = "ti,omap3-isp" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap3isp_of_table);
+
+static struct platform_driver omap3isp_driver = {
+ .probe = isp_probe,
+ .remove = isp_remove,
+ .id_table = omap3isp_id_table,
+ .driver = {
+ .name = "omap3isp",
+ .pm = &omap3isp_pm_ops,
+ .of_match_table = omap3isp_of_table,
+ },
+};
+
+module_platform_driver(omap3isp_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("TI OMAP3 ISP driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
new file mode 100644
index 000000000..8b9043db9
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -0,0 +1,363 @@
+/*
+ * isp.h
+ *
+ * TI OMAP3 ISP - Core
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_CORE_H
+#define OMAP3_ISP_CORE_H
+
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include "omap3isp.h"
+#include "ispstat.h"
+#include "ispccdc.h"
+#include "ispreg.h"
+#include "ispresizer.h"
+#include "isppreview.h"
+#include "ispcsiphy.h"
+#include "ispcsi2.h"
+#include "ispccp2.h"
+
+#define ISP_TOK_TERM 0xFFFFFFFF /*
+ * terminating token for ISP
+ * modules reg list
+ */
+#define to_isp_device(ptr_module) \
+ container_of(ptr_module, struct isp_device, isp_##ptr_module)
+#define to_device(ptr_module) \
+ (to_isp_device(ptr_module)->dev)
+
+enum isp_mem_resources {
+ OMAP3_ISP_IOMEM_MAIN,
+ OMAP3_ISP_IOMEM_CCP2,
+ OMAP3_ISP_IOMEM_CCDC,
+ OMAP3_ISP_IOMEM_HIST,
+ OMAP3_ISP_IOMEM_H3A,
+ OMAP3_ISP_IOMEM_PREV,
+ OMAP3_ISP_IOMEM_RESZ,
+ OMAP3_ISP_IOMEM_SBL,
+ OMAP3_ISP_IOMEM_CSI2A_REGS1,
+ OMAP3_ISP_IOMEM_CSIPHY2,
+ OMAP3_ISP_IOMEM_CSI2A_REGS2,
+ OMAP3_ISP_IOMEM_CSI2C_REGS1,
+ OMAP3_ISP_IOMEM_CSIPHY1,
+ OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ OMAP3_ISP_IOMEM_LAST
+};
+
+enum isp_sbl_resource {
+ OMAP3_ISP_SBL_CSI1_READ = 0x1,
+ OMAP3_ISP_SBL_CSI1_WRITE = 0x2,
+ OMAP3_ISP_SBL_CSI2A_WRITE = 0x4,
+ OMAP3_ISP_SBL_CSI2C_WRITE = 0x8,
+ OMAP3_ISP_SBL_CCDC_LSC_READ = 0x10,
+ OMAP3_ISP_SBL_CCDC_WRITE = 0x20,
+ OMAP3_ISP_SBL_PREVIEW_READ = 0x40,
+ OMAP3_ISP_SBL_PREVIEW_WRITE = 0x80,
+ OMAP3_ISP_SBL_RESIZER_READ = 0x100,
+ OMAP3_ISP_SBL_RESIZER_WRITE = 0x200,
+};
+
+enum isp_subclk_resource {
+ OMAP3_ISP_SUBCLK_CCDC = (1 << 0),
+ OMAP3_ISP_SUBCLK_AEWB = (1 << 1),
+ OMAP3_ISP_SUBCLK_AF = (1 << 2),
+ OMAP3_ISP_SUBCLK_HIST = (1 << 3),
+ OMAP3_ISP_SUBCLK_PREVIEW = (1 << 4),
+ OMAP3_ISP_SUBCLK_RESIZER = (1 << 5),
+};
+
+/* ISP: OMAP 34xx ES 1.0 */
+#define ISP_REVISION_1_0 0x10
+/* ISP2: OMAP 34xx ES 2.0, 2.1 and 3.0 */
+#define ISP_REVISION_2_0 0x20
+/* ISP2P: OMAP 36xx */
+#define ISP_REVISION_15_0 0xF0
+
+#define ISP_PHY_TYPE_3430 0
+#define ISP_PHY_TYPE_3630 1
+
+struct regmap;
+
+/*
+ * struct isp_res_mapping - Map ISP io resources to ISP revision.
+ * @isp_rev: ISP_REVISION_x_x
+ * @offset: register offsets of various ISP sub-blocks
+ * @phy_type: ISP_PHY_TYPE_{3430,3630}
+ */
+struct isp_res_mapping {
+ u32 isp_rev;
+ u32 offset[OMAP3_ISP_IOMEM_LAST];
+ u32 phy_type;
+};
+
+/*
+ * struct isp_reg - Structure for ISP register values.
+ * @mmio_range: Memory resource range the register belongs to.
+ * @reg: 32-bit Register address.
+ * @val: 32-bit Register value.
+ */
+struct isp_reg {
+ enum isp_mem_resources mmio_range;
+ u32 reg;
+ u32 val;
+};
+
+enum isp_xclk_id {
+ ISP_XCLK_A,
+ ISP_XCLK_B,
+};
+
+struct isp_xclk {
+ struct isp_device *isp;
+ struct clk_hw hw;
+ struct clk *clk;
+ enum isp_xclk_id id;
+
+ spinlock_t lock; /* Protects enabled and divider */
+ bool enabled;
+ unsigned int divider;
+};
+
+/*
+ * struct isp_device - ISP device structure.
+ * @dev: Device pointer specific to the OMAP3 ISP.
+ * @revision: Stores current ISP module revision.
+ * @irq_num: Currently used IRQ number.
+ * @mmio_base: Array with kernel base addresses for ioremapped ISP register
+ * regions.
+ * @mmio_hist_base_phys: Physical L4 bus address for ISP hist block register
+ * region.
+ * @syscon: Regmap for the syscon register space
+ * @syscon_offset: Offset of the CSIPHY control register in syscon
+ * @phy_type: ISP_PHY_TYPE_{3430,3630}
+ * @mapping: IOMMU mapping
+ * @stat_lock: Spinlock for handling statistics
+ * @isp_mutex: Mutex for serializing requests to ISP.
+ * @stop_failure: Indicates that an entity failed to stop.
+ * @crashed: Enumeration of entities that failed to stop (crashed)
+ * @has_context: Context has been saved at least once and can be restored.
+ * @ref_count: Reference count for handling multiple ISP requests.
+ * @cam_ick: Pointer to camera interface clock structure.
+ * @cam_mclk: Pointer to camera functional clock structure.
+ * @csi2_fck: Pointer to camera CSI2 complexIO clock structure.
+ * @l3_ick: Pointer to OMAP3 L3 bus interface clock.
+ * @xclks: External clocks provided by the ISP
+ * @irq: Currently attached ISP ISR callbacks information structure.
+ * @isp_af: Pointer to current settings for ISP AutoFocus SCM.
+ * @isp_hist: Pointer to current settings for ISP Histogram SCM.
+ * @isp_h3a: Pointer to current settings for ISP Auto Exposure and
+ * White Balance SCM.
+ * @isp_res: Pointer to current settings for ISP Resizer.
+ * @isp_prev: Pointer to current settings for ISP Preview.
+ * @isp_ccdc: Pointer to current settings for ISP CCDC.
+ * @platform_cb: ISP driver callback function pointers for platform code
+ *
+ * This structure is used to store the OMAP ISP Information.
+ */
+struct isp_device {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_async_notifier notifier;
+ struct media_device media_dev;
+ struct device *dev;
+ u32 revision;
+
+ /* platform HW resources */
+ unsigned int irq_num;
+
+ void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
+ unsigned long mmio_hist_base_phys;
+ struct regmap *syscon;
+ u32 syscon_offset;
+ u32 phy_type;
+
+ struct dma_iommu_mapping *mapping;
+
+ /* ISP Obj */
+ spinlock_t stat_lock; /* common lock for statistic drivers */
+ struct mutex isp_mutex; /* For handling ref_count field */
+ bool stop_failure;
+ struct media_entity_enum crashed;
+ int has_context;
+ int ref_count;
+ unsigned int autoidle;
+#define ISP_CLK_CAM_ICK 0
+#define ISP_CLK_CAM_MCLK 1
+#define ISP_CLK_CSI2_FCK 2
+#define ISP_CLK_L3_ICK 3
+ struct clk *clock[4];
+ struct isp_xclk xclks[2];
+
+ /* ISP modules */
+ struct ispstat isp_af;
+ struct ispstat isp_aewb;
+ struct ispstat isp_hist;
+ struct isp_res_device isp_res;
+ struct isp_prev_device isp_prev;
+ struct isp_ccdc_device isp_ccdc;
+ struct isp_csi2_device isp_csi2a;
+ struct isp_csi2_device isp_csi2c;
+ struct isp_ccp2_device isp_ccp2;
+ struct isp_csiphy isp_csiphy1;
+ struct isp_csiphy isp_csiphy2;
+
+ unsigned int sbl_resources;
+ unsigned int subclk_resources;
+};
+
+struct isp_async_subdev {
+ struct v4l2_async_subdev asd;
+ struct isp_bus_cfg bus;
+};
+
+#define v4l2_subdev_to_bus_cfg(sd) \
+ (&container_of((sd)->asd, struct isp_async_subdev, asd)->bus)
+
+#define v4l2_dev_to_isp_device(dev) \
+ container_of(dev, struct isp_device, v4l2_dev)
+
+void omap3isp_hist_dma_done(struct isp_device *isp);
+
+void omap3isp_flush(struct isp_device *isp);
+
+int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
+ enum isp_pipeline_stream_state state);
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe);
+void omap3isp_configure_bridge(struct isp_device *isp,
+ enum ccdc_input_entity input,
+ const struct isp_parallel_cfg *buscfg,
+ unsigned int shift, unsigned int bridge);
+
+struct isp_device *omap3isp_get(struct isp_device *isp);
+void omap3isp_put(struct isp_device *isp);
+
+void omap3isp_print_status(struct isp_device *isp);
+
+void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res);
+void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res);
+
+void omap3isp_subclk_enable(struct isp_device *isp,
+ enum isp_subclk_resource res);
+void omap3isp_subclk_disable(struct isp_device *isp,
+ enum isp_subclk_resource res);
+
+int omap3isp_register_entities(struct platform_device *pdev,
+ struct v4l2_device *v4l2_dev);
+void omap3isp_unregister_entities(struct platform_device *pdev);
+
+/*
+ * isp_reg_readl - Read value of an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @isp_mmio_range: Range to which the register offset refers.
+ * @reg_offset: Register offset to read from.
+ *
+ * Returns an unsigned 32 bit value with the required register contents.
+ */
+static inline
+u32 isp_reg_readl(struct isp_device *isp, enum isp_mem_resources isp_mmio_range,
+ u32 reg_offset)
+{
+ return __raw_readl(isp->mmio_base[isp_mmio_range] + reg_offset);
+}
+
+/*
+ * isp_reg_writel - Write value to an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @reg_value: 32 bit value to write to the register.
+ * @isp_mmio_range: Range to which the register offset refers.
+ * @reg_offset: Register offset to write into.
+ */
+static inline
+void isp_reg_writel(struct isp_device *isp, u32 reg_value,
+ enum isp_mem_resources isp_mmio_range, u32 reg_offset)
+{
+ __raw_writel(reg_value, isp->mmio_base[isp_mmio_range] + reg_offset);
+}
+
+/*
+ * isp_reg_clr - Clear individual bits in an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @clr_bits: 32 bit value which would be cleared in the register.
+ */
+static inline
+void isp_reg_clr(struct isp_device *isp, enum isp_mem_resources mmio_range,
+ u32 reg, u32 clr_bits)
+{
+ u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+ isp_reg_writel(isp, v & ~clr_bits, mmio_range, reg);
+}
+
+/*
+ * isp_reg_set - Set individual bits in an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @set_bits: 32 bit value which would be set in the register.
+ */
+static inline
+void isp_reg_set(struct isp_device *isp, enum isp_mem_resources mmio_range,
+ u32 reg, u32 set_bits)
+{
+ u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+ isp_reg_writel(isp, v | set_bits, mmio_range, reg);
+}
+
+/*
+ * isp_reg_clr_set - Clear and set individual bits in an OMAP3 ISP register
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @clr_bits: 32 bit value which would be cleared in the register.
+ * @set_bits: 32 bit value which would be set in the register.
+ *
+ * The clear operation is done first, and then the set operation.
+ */
+static inline
+void isp_reg_clr_set(struct isp_device *isp, enum isp_mem_resources mmio_range,
+ u32 reg, u32 clr_bits, u32 set_bits)
+{
+ u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+ isp_reg_writel(isp, (v & ~clr_bits) | set_bits, mmio_range, reg);
+}
+
+static inline enum v4l2_buf_type
+isp_pad_buffer_type(const struct v4l2_subdev *subdev, int pad)
+{
+ if (pad >= subdev->entity.num_pads)
+ return 0;
+
+ if (subdev->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)
+ return V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ else
+ return V4L2_BUF_TYPE_VIDEO_CAPTURE;
+}
+
+#endif /* OMAP3_ISP_CORE_H */
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
new file mode 100644
index 000000000..412438dce
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -0,0 +1,2741 @@
+/*
+ * ispccdc.c
+ *
+ * TI OMAP3 ISP - CCDC module
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/v4l2-event.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccdc.h"
+
+#define CCDC_MIN_WIDTH 32
+#define CCDC_MIN_HEIGHT 32
+
+static struct v4l2_mbus_framefmt *
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which);
+
+static const unsigned int ccdc_fmts[] = {
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+};
+
+/*
+ * ccdc_print_status - Print current CCDC Module register values.
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Also prints other debug information stored in the CCDC module.
+ */
+#define CCDC_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###CCDC " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_##name))
+
+static void ccdc_print_status(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ dev_dbg(isp->dev, "-------------CCDC Register dump-------------\n");
+
+ CCDC_PRINT_REGISTER(isp, PCR);
+ CCDC_PRINT_REGISTER(isp, SYN_MODE);
+ CCDC_PRINT_REGISTER(isp, HD_VD_WID);
+ CCDC_PRINT_REGISTER(isp, PIX_LINES);
+ CCDC_PRINT_REGISTER(isp, HORZ_INFO);
+ CCDC_PRINT_REGISTER(isp, VERT_START);
+ CCDC_PRINT_REGISTER(isp, VERT_LINES);
+ CCDC_PRINT_REGISTER(isp, CULLING);
+ CCDC_PRINT_REGISTER(isp, HSIZE_OFF);
+ CCDC_PRINT_REGISTER(isp, SDOFST);
+ CCDC_PRINT_REGISTER(isp, SDR_ADDR);
+ CCDC_PRINT_REGISTER(isp, CLAMP);
+ CCDC_PRINT_REGISTER(isp, DCSUB);
+ CCDC_PRINT_REGISTER(isp, COLPTN);
+ CCDC_PRINT_REGISTER(isp, BLKCMP);
+ CCDC_PRINT_REGISTER(isp, FPC);
+ CCDC_PRINT_REGISTER(isp, FPC_ADDR);
+ CCDC_PRINT_REGISTER(isp, VDINT);
+ CCDC_PRINT_REGISTER(isp, ALAW);
+ CCDC_PRINT_REGISTER(isp, REC656IF);
+ CCDC_PRINT_REGISTER(isp, CFG);
+ CCDC_PRINT_REGISTER(isp, FMTCFG);
+ CCDC_PRINT_REGISTER(isp, FMT_HORZ);
+ CCDC_PRINT_REGISTER(isp, FMT_VERT);
+ CCDC_PRINT_REGISTER(isp, PRGEVEN0);
+ CCDC_PRINT_REGISTER(isp, PRGEVEN1);
+ CCDC_PRINT_REGISTER(isp, PRGODD0);
+ CCDC_PRINT_REGISTER(isp, PRGODD1);
+ CCDC_PRINT_REGISTER(isp, VP_OUT);
+ CCDC_PRINT_REGISTER(isp, LSC_CONFIG);
+ CCDC_PRINT_REGISTER(isp, LSC_INITIAL);
+ CCDC_PRINT_REGISTER(isp, LSC_TABLE_BASE);
+ CCDC_PRINT_REGISTER(isp, LSC_TABLE_OFFSET);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * omap3isp_ccdc_busy - Get busy state of the CCDC.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+int omap3isp_ccdc_busy(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) &
+ ISPCCDC_PCR_BUSY;
+}
+
+/* -----------------------------------------------------------------------------
+ * Lens Shading Compensation
+ */
+
+/*
+ * ccdc_lsc_validate_config - Check that LSC configuration is valid.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @lsc_cfg: the LSC configuration to check.
+ *
+ * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid.
+ */
+static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_lsc_config *lsc_cfg)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct v4l2_mbus_framefmt *format;
+ unsigned int paxel_width, paxel_height;
+ unsigned int paxel_shift_x, paxel_shift_y;
+ unsigned int min_width, min_height, min_size;
+ unsigned int input_width, input_height;
+
+ paxel_shift_x = lsc_cfg->gain_mode_m;
+ paxel_shift_y = lsc_cfg->gain_mode_n;
+
+ if ((paxel_shift_x < 2) || (paxel_shift_x > 6) ||
+ (paxel_shift_y < 2) || (paxel_shift_y > 6)) {
+ dev_dbg(isp->dev, "CCDC: LSC: Invalid paxel size\n");
+ return -EINVAL;
+ }
+
+ if (lsc_cfg->offset & 3) {
+ dev_dbg(isp->dev,
+ "CCDC: LSC: Offset must be a multiple of 4\n");
+ return -EINVAL;
+ }
+
+ if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) {
+ dev_dbg(isp->dev, "CCDC: LSC: initial_x and y must be even\n");
+ return -EINVAL;
+ }
+
+ format = __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ input_width = format->width;
+ input_height = format->height;
+
+ /* Calculate minimum bytesize for validation */
+ paxel_width = 1 << paxel_shift_x;
+ min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1)
+ >> paxel_shift_x) + 1;
+
+ paxel_height = 1 << paxel_shift_y;
+ min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1)
+ >> paxel_shift_y) + 1;
+
+ min_size = 4 * min_width * min_height;
+ if (min_size > lsc_cfg->size) {
+ dev_dbg(isp->dev, "CCDC: LSC: too small table\n");
+ return -EINVAL;
+ }
+ if (lsc_cfg->offset < (min_width * 4)) {
+ dev_dbg(isp->dev, "CCDC: LSC: Offset is too small\n");
+ return -EINVAL;
+ }
+ if ((lsc_cfg->size / lsc_cfg->offset) < min_height) {
+ dev_dbg(isp->dev, "CCDC: LSC: Wrong size/offset combination\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * ccdc_lsc_program_table - Program Lens Shading Compensation table address.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc,
+ dma_addr_t addr)
+{
+ isp_reg_writel(to_isp_device(ccdc), addr,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
+}
+
+/*
+ * ccdc_lsc_setup_regs - Configures the lens shading compensation module
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_lsc_setup_regs(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_lsc_config *cfg)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ int reg;
+
+ isp_reg_writel(isp, cfg->offset, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_LSC_TABLE_OFFSET);
+
+ reg = 0;
+ reg |= cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT;
+ reg |= cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT;
+ reg |= cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT;
+ isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG);
+
+ reg = 0;
+ reg &= ~ISPCCDC_LSC_INITIAL_X_MASK;
+ reg |= cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT;
+ reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK;
+ reg |= cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT;
+ isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_LSC_INITIAL);
+}
+
+static int ccdc_lsc_wait_prefetch(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ unsigned int wait;
+
+ isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+
+ /* timeout 1 ms */
+ for (wait = 0; wait < 1000; wait++) {
+ if (isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS) &
+ IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ) {
+ isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+ return 0;
+ }
+
+ rmb();
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * __ccdc_lsc_enable - Enables/Disables the Lens Shading Compensation module.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @enable: 0 Disables LSC, 1 Enables LSC.
+ */
+static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct v4l2_mbus_framefmt *format =
+ __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+
+ if ((format->code != MEDIA_BUS_FMT_SGRBG10_1X10) &&
+ (format->code != MEDIA_BUS_FMT_SRGGB10_1X10) &&
+ (format->code != MEDIA_BUS_FMT_SBGGR10_1X10) &&
+ (format->code != MEDIA_BUS_FMT_SGBRG10_1X10))
+ return -EINVAL;
+
+ if (enable)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_LSC_READ);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
+ ISPCCDC_LSC_ENABLE, enable ? ISPCCDC_LSC_ENABLE : 0);
+
+ if (enable) {
+ if (ccdc_lsc_wait_prefetch(ccdc) < 0) {
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE);
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+ dev_warn(to_device(ccdc), "LSC prefetch timeout\n");
+ return -ETIMEDOUT;
+ }
+ ccdc->lsc.state = LSC_STATE_RUNNING;
+ } else {
+ ccdc->lsc.state = LSC_STATE_STOPPING;
+ }
+
+ return 0;
+}
+
+static int ccdc_lsc_busy(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG) &
+ ISPCCDC_LSC_BUSY;
+}
+
+/* __ccdc_lsc_configure - Apply a new configuration to the LSC engine
+ * @ccdc: Pointer to ISP CCDC device
+ * @req: New configuration request
+ *
+ * context: in_interrupt()
+ */
+static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
+ struct ispccdc_lsc_config_req *req)
+{
+ if (!req->enable)
+ return -EINVAL;
+
+ if (ccdc_lsc_validate_config(ccdc, &req->config) < 0) {
+ dev_dbg(to_device(ccdc), "Discard LSC configuration\n");
+ return -EINVAL;
+ }
+
+ if (ccdc_lsc_busy(ccdc))
+ return -EBUSY;
+
+ ccdc_lsc_setup_regs(ccdc, &req->config);
+ ccdc_lsc_program_table(ccdc, req->table.dma);
+ return 0;
+}
+
+/*
+ * ccdc_lsc_error_handler - Handle LSC prefetch error scenario.
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Disables LSC, and defers enablement to shadow registers update time.
+ */
+static void ccdc_lsc_error_handler(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ /*
+ * From OMAP3 TRM: When this event is pending, the module
+ * goes into transparent mode (output =input). Normal
+ * operation can be resumed at the start of the next frame
+ * after:
+ * 1) Clearing this event
+ * 2) Disabling the LSC module
+ * 3) Enabling it
+ */
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
+ ISPCCDC_LSC_ENABLE);
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+}
+
+static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
+ struct ispccdc_lsc_config_req *req)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ if (req == NULL)
+ return;
+
+ if (req->table.addr) {
+ sg_free_table(&req->table.sgt);
+ dma_free_coherent(isp->dev, req->config.size, req->table.addr,
+ req->table.dma);
+ }
+
+ kfree(req);
+}
+
+static void ccdc_lsc_free_queue(struct isp_ccdc_device *ccdc,
+ struct list_head *queue)
+{
+ struct ispccdc_lsc_config_req *req, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ list_for_each_entry_safe(req, n, queue, list) {
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+ ccdc_lsc_free_request(ccdc, req);
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ }
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+static void ccdc_lsc_free_table_work(struct work_struct *work)
+{
+ struct isp_ccdc_device *ccdc;
+ struct ispccdc_lsc *lsc;
+
+ lsc = container_of(work, struct ispccdc_lsc, table_work);
+ ccdc = container_of(lsc, struct isp_ccdc_device, lsc);
+
+ ccdc_lsc_free_queue(ccdc, &lsc->free_queue);
+}
+
+/*
+ * ccdc_lsc_config - Configure the LSC module from a userspace request
+ *
+ * Store the requested LSC configuration in the LSC engine request pointer. The
+ * configuration will be applied to the hardware when the CCDC is enabled, or
+ * at the next LSC interrupt if the CCDC is already running.
+ */
+static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_update_config *config)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct ispccdc_lsc_config_req *req;
+ unsigned long flags;
+ u16 update;
+ int ret;
+
+ update = config->update &
+ (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC);
+ if (!update)
+ return 0;
+
+ if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) {
+ dev_dbg(to_device(ccdc),
+ "%s: Both LSC configuration and table need to be supplied\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (req == NULL)
+ return -ENOMEM;
+
+ if (config->flag & OMAP3ISP_CCDC_CONFIG_LSC) {
+ if (copy_from_user(&req->config, config->lsc_cfg,
+ sizeof(req->config))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ req->enable = 1;
+
+ req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
+ &req->table.dma,
+ GFP_KERNEL);
+ if (req->table.addr == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = dma_get_sgtable(isp->dev, &req->table.sgt,
+ req->table.addr, req->table.dma,
+ req->config.size);
+ if (ret < 0)
+ goto done;
+
+ dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
+ req->table.sgt.nents, DMA_TO_DEVICE);
+
+ if (copy_from_user(req->table.addr, config->lsc,
+ req->config.size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
+ req->table.sgt.nents, DMA_TO_DEVICE);
+ }
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ if (ccdc->lsc.request) {
+ list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
+ schedule_work(&ccdc->lsc.table_work);
+ }
+ ccdc->lsc.request = req;
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+
+ ret = 0;
+
+done:
+ if (ret < 0)
+ ccdc_lsc_free_request(ccdc, req);
+
+ return ret;
+}
+
+static inline int ccdc_lsc_is_configured(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ ret = ccdc->lsc.active != NULL;
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+
+ return ret;
+}
+
+static int ccdc_lsc_enable(struct isp_ccdc_device *ccdc)
+{
+ struct ispccdc_lsc *lsc = &ccdc->lsc;
+
+ if (lsc->state != LSC_STATE_STOPPED)
+ return -EINVAL;
+
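+ /* Queue any previously active table for freeing by the table work. */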
+ if (lsc->active) {
+ list_add_tail(&lsc->active->list, &lsc->free_queue);
+ lsc->active = NULL;
+ }
+
+ if (__ccdc_lsc_configure(ccdc, lsc->request) < 0) {
+ omap3isp_sbl_disable(to_isp_device(ccdc),
+ OMAP3_ISP_SBL_CCDC_LSC_READ);
+ list_add_tail(&lsc->request->list, &lsc->free_queue);
+ lsc->request = NULL;
+ goto done;
+ }
+
+ lsc->active = lsc->request;
+ lsc->request = NULL;
+ __ccdc_lsc_enable(ccdc, 1);
+
+done:
+ if (!list_empty(&lsc->free_queue))
+ schedule_work(&lsc->table_work);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Parameters configuration
+ */
+
+/*
+ * ccdc_configure_clamp - Configure optical-black or digital clamping
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * The CCDC performs either optical-black or digital clamping. Configure and
+ * enable the selected clamping method.
+ */
+static void ccdc_configure_clamp(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ u32 clamp;
+
+ if (ccdc->obclamp) {
+ clamp = ccdc->clamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT;
+ clamp |= ccdc->clamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT;
+ clamp |= ccdc->clamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT;
+ clamp |= ccdc->clamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT;
+ isp_reg_writel(isp, clamp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP);
+ } else {
+ isp_reg_writel(isp, ccdc->clamp.dcsubval,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB);
+ }
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP,
+ ISPCCDC_CLAMP_CLAMPEN,
+ ccdc->obclamp ? ISPCCDC_CLAMP_CLAMPEN : 0);
+}
+
+/*
+ * ccdc_configure_fpc - Configure Faulty Pixel Correction
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, ISPCCDC_FPC_FPCEN);
+
+ if (!ccdc->fpc_en)
+ return;
+
+ isp_reg_writel(isp, ccdc->fpc.dma, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_FPC_ADDR);
+ /* The FPNUM field must be set before enabling FPC. */
+ isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
+ isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT) |
+ ISPCCDC_FPC_FPCEN, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
+}
+
+/*
+ * ccdc_configure_black_comp - Configure Black Level Compensation.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_black_comp(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ u32 blcomp;
+
+ blcomp = ccdc->blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT;
+ blcomp |= ccdc->blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT;
+ blcomp |= ccdc->blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT;
+ blcomp |= ccdc->blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT;
+
+ isp_reg_writel(isp, blcomp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_BLKCMP);
+}
+
+/*
+ * ccdc_configure_lpf - Configure Low-Pass Filter (LPF).
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_lpf(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE,
+ ISPCCDC_SYN_MODE_LPF,
+ ccdc->lpf ? ISPCCDC_SYN_MODE_LPF : 0);
+}
+
+/*
+ * ccdc_configure_alaw - Configure A-law compression.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_alaw(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct isp_format_info *info;
+ u32 alaw = 0;
+
+ info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);
+
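+ /* Select the A-law input bit range from the sink format width; 8-bit
+ * input needs no A-law configuration.
+ */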
+ switch (info->width) {
+ case 8:
+ return;
+
+ case 10:
+ alaw = ISPCCDC_ALAW_GWDI_9_0;
+ break;
+ case 11:
+ alaw = ISPCCDC_ALAW_GWDI_10_1;
+ break;
+ case 12:
+ alaw = ISPCCDC_ALAW_GWDI_11_2;
+ break;
+ case 13:
+ alaw = ISPCCDC_ALAW_GWDI_12_3;
+ break;
+ }
+
+ if (ccdc->alaw)
+ alaw |= ISPCCDC_ALAW_CCDTBL;
+
+ isp_reg_writel(isp, alaw, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW);
+}
+
+/*
+ * ccdc_config_imgattr - Configure sensor image specific attributes.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @colptn: Color pattern of the sensor.
+ */
+static void ccdc_config_imgattr(struct isp_ccdc_device *ccdc, u32 colptn)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_writel(isp, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN);
+}
+
+/*
+ * ccdc_config - Set CCDC configuration from userspace
+ * @ccdc: Pointer to ISP CCDC device.
+ * @ccdc_struct: Structure containing CCDC configuration sent from userspace.
+ *
+ * Returns 0 if successful, or a negative error code otherwise: -EFAULT if
+ * copying the configuration from user space fails, -EBUSY if the FPC table is
+ * updated while streaming, or the value returned by ccdc_lsc_config().
+ */
+static int ccdc_config(struct isp_ccdc_device *ccdc,
+ struct omap3isp_ccdc_update_config *ccdc_struct)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccdc->lock, flags);
+ ccdc->shadow_update = 1;
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ if (OMAP3ISP_CCDC_ALAW & ccdc_struct->update) {
+ ccdc->alaw = !!(OMAP3ISP_CCDC_ALAW & ccdc_struct->flag);
+ ccdc->update |= OMAP3ISP_CCDC_ALAW;
+ }
+
+ if (OMAP3ISP_CCDC_LPF & ccdc_struct->update) {
+ ccdc->lpf = !!(OMAP3ISP_CCDC_LPF & ccdc_struct->flag);
+ ccdc->update |= OMAP3ISP_CCDC_LPF;
+ }
+
+ if (OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->update) {
+ if (copy_from_user(&ccdc->clamp, ccdc_struct->bclamp,
+ sizeof(ccdc->clamp))) {
+ ccdc->shadow_update = 0;
+ return -EFAULT;
+ }
+
+ ccdc->obclamp = !!(OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->flag);
+ ccdc->update |= OMAP3ISP_CCDC_BLCLAMP;
+ }
+
+ if (OMAP3ISP_CCDC_BCOMP & ccdc_struct->update) {
+ if (copy_from_user(&ccdc->blcomp, ccdc_struct->blcomp,
+ sizeof(ccdc->blcomp))) {
+ ccdc->shadow_update = 0;
+ return -EFAULT;
+ }
+
+ ccdc->update |= OMAP3ISP_CCDC_BCOMP;
+ }
+
+ ccdc->shadow_update = 0;
+
+ if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
+ struct omap3isp_ccdc_fpc fpc;
+ struct ispccdc_fpc fpc_old = { .addr = NULL, };
+ struct ispccdc_fpc fpc_new;
+ u32 size;
+
+ if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);
+
+ if (ccdc->fpc_en) {
+ if (copy_from_user(&fpc, ccdc_struct->fpc, sizeof(fpc)))
+ return -EFAULT;
+
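+ /* Each faulty pixel table entry occupies four bytes. */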
+ size = fpc.fpnum * 4;
+
+ /*
+ * The table address must be 64-byte aligned, which is
+ * guaranteed by dma_alloc_coherent().
+ */
+ fpc_new.fpnum = fpc.fpnum;
+ fpc_new.addr = dma_alloc_coherent(isp->dev, size,
+ &fpc_new.dma,
+ GFP_KERNEL);
+ if (fpc_new.addr == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(fpc_new.addr,
+ (__force void __user *)(long)fpc.fpcaddr,
+ size)) {
+ dma_free_coherent(isp->dev, size, fpc_new.addr,
+ fpc_new.dma);
+ return -EFAULT;
+ }
+
+ fpc_old = ccdc->fpc;
+ ccdc->fpc = fpc_new;
+ }
+
+ ccdc_configure_fpc(ccdc);
+
+ if (fpc_old.addr != NULL)
+ dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
+ fpc_old.addr, fpc_old.dma);
+ }
+
+ return ccdc_lsc_config(ccdc, ccdc_struct);
+}
+
+static void ccdc_apply_controls(struct isp_ccdc_device *ccdc)
+{
+ if (ccdc->update & OMAP3ISP_CCDC_ALAW) {
+ ccdc_configure_alaw(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_ALAW;
+ }
+
+ if (ccdc->update & OMAP3ISP_CCDC_LPF) {
+ ccdc_configure_lpf(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_LPF;
+ }
+
+ if (ccdc->update & OMAP3ISP_CCDC_BLCLAMP) {
+ ccdc_configure_clamp(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_BLCLAMP;
+ }
+
+ if (ccdc->update & OMAP3ISP_CCDC_BCOMP) {
+ ccdc_configure_black_comp(ccdc);
+ ccdc->update &= ~OMAP3ISP_CCDC_BCOMP;
+ }
+}
+
+/*
+ * omap3isp_ccdc_restore_context - Restore values of the CCDC module registers
+ * @isp: Pointer to ISP device
+ */
+void omap3isp_ccdc_restore_context(struct isp_device *isp)
+{
+ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ISPCCDC_CFG_VDLC);
+
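+ /* Mark all controls as dirty so ccdc_apply_controls() reprograms them. */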
+ ccdc->update = OMAP3ISP_CCDC_ALAW | OMAP3ISP_CCDC_LPF
+ | OMAP3ISP_CCDC_BLCLAMP | OMAP3ISP_CCDC_BCOMP;
+ ccdc_apply_controls(ccdc);
+ ccdc_configure_fpc(ccdc);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * ccdc_config_vp - Configure the Video Port.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct isp_format_info *info;
+ struct v4l2_mbus_framefmt *format;
+ unsigned long l3_ick = pipe->l3_ick;
+ unsigned int max_div = isp->revision == ISP_REVISION_15_0 ? 64 : 8;
+ unsigned int div = 0;
+ u32 fmtcfg = ISPCCDC_FMTCFG_VPEN;
+
+ format = &ccdc->formats[CCDC_PAD_SOURCE_VP];
+
+ if (!format->code) {
+ /* Disable the video port when the input format isn't supported.
+ * This is indicated by a pixel code set to 0.
+ */
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
+ return;
+ }
+
+ isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
+ (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
+ isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
+ ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);
+
+ isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
+ (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);
+
+ info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);
+
+ switch (info->width) {
+ case 8:
+ case 10:
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_9_0;
+ break;
+ case 11:
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_10_1;
+ break;
+ case 12:
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_11_2;
+ break;
+ case 13:
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_12_3;
+ break;
+ }
+
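+ /* Derive the video port clock divider from the L3 clock and the
+ * pipeline data rate; the VPIF_FRQ field stores the divider minus two.
+ */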
+ if (pipe->input)
+ div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
+ else if (pipe->external_rate)
+ div = l3_ick / pipe->external_rate;
+
+ div = clamp(div, 2U, max_div);
+ fmtcfg |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
+
+ isp_reg_writel(isp, fmtcfg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
+}
+
+/*
+ * ccdc_config_outlineoffset - Configure memory saving output line offset
+ * @ccdc: Pointer to ISP CCDC device.
+ * @bpl: Number of bytes per line when stored in memory.
+ * @field: Field order when storing interlaced formats in memory.
+ *
+ * Configure the offsets for the line output control:
+ *
+ * - The horizontal line offset is defined as the number of bytes between the
+ * start of two consecutive lines in memory. Set it to the given bytes per
+ * line value.
+ *
+ * - The field offset value is defined as the number of lines to offset the
+ * start of the field identified by FID = 1. Set it to one.
+ *
+ * - The line offset values are defined as the number of lines (as defined by
+ * the horizontal line offset) between the start of two consecutive lines for
+ * all combinations of odd/even lines in odd/even fields. When interleaving
+ * fields set them all to two lines, and to one line otherwise.
+ */
+static void ccdc_config_outlineoffset(struct isp_ccdc_device *ccdc,
+ unsigned int bpl,
+ enum v4l2_field field)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ u32 sdofst = 0;
+
+ isp_reg_writel(isp, bpl & 0xffff, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_HSIZE_OFF);
+
+ switch (field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ /* When interleaving fields in memory, offset field 1 by one
+ * line and set the line offsets to two lines.
+ */
+ sdofst |= (1 << ISPCCDC_SDOFST_LOFST0_SHIFT)
+ | (1 << ISPCCDC_SDOFST_LOFST1_SHIFT)
+ | (1 << ISPCCDC_SDOFST_LOFST2_SHIFT)
+ | (1 << ISPCCDC_SDOFST_LOFST3_SHIFT);
+ break;
+
+ default:
+ /* In all other cases set the line offsets to one line. */
+ break;
+ }
+
+ isp_reg_writel(isp, sdofst, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST);
+}
+
+/*
+ * ccdc_set_outaddr - Set memory address to save output image
+ * @ccdc: Pointer to ISP CCDC device.
+ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void ccdc_set_outaddr(struct isp_ccdc_device *ccdc, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR);
+}
+
+/*
+ * omap3isp_ccdc_max_rate - Calculate maximum input data rate based on the input
+ * @ccdc: Pointer to ISP CCDC device.
+ * @max_rate: Maximum calculated data rate.
+ *
+ * Returns in *max_rate the lesser of the calculated rate and the value passed in.
+ */
+void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
+ unsigned int *max_rate)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ unsigned int rate;
+
+ if (pipe == NULL)
+ return;
+
+ /*
+ * TRM says that for parallel sensors the maximum data rate
+ * should be 90% of the L3/2 clock, otherwise just L3/2.
+ */
+ if (ccdc->input == CCDC_INPUT_PARALLEL)
+ rate = pipe->l3_ick / 2 * 9 / 10;
+ else
+ rate = pipe->l3_ick / 2;
+
+ *max_rate = min(*max_rate, rate);
+}
+
+/*
+ * ccdc_config_sync_if - Set CCDC sync interface configuration
+ * @ccdc: Pointer to ISP CCDC device.
+ * @parcfg: Parallel interface platform data (may be NULL)
+ * @data_size: Data width in bits
+ */
+static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
+ struct isp_parallel_cfg *parcfg,
+ unsigned int data_size)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ const struct v4l2_mbus_framefmt *format;
+ u32 syn_mode = ISPCCDC_SYN_MODE_VDHDEN;
+
+ format = &ccdc->formats[CCDC_PAD_SINK];
+
+ if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
+ /* According to the OMAP3 TRM the input mode only affects SYNC
+ * mode, enabling BT.656 mode should take precedence. However,
+ * in practice setting the input mode to YCbCr data on 8 bits
+ * seems to be required in BT.656 mode. In SYNC mode set it to
+ * YCbCr on 16 bits as the bridge is enabled in that case.
+ */
+ if (ccdc->bt656)
+ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR8;
+ else
+ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16;
+ }
+
+ switch (data_size) {
+ case 8:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8;
+ break;
+ case 10:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10;
+ break;
+ case 11:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11;
+ break;
+ case 12:
+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12;
+ break;
+ }
+
+ if (parcfg && parcfg->data_pol)
+ syn_mode |= ISPCCDC_SYN_MODE_DATAPOL;
+
+ if (parcfg && parcfg->hs_pol)
+ syn_mode |= ISPCCDC_SYN_MODE_HDPOL;
+
+ /* The polarity of the vertical sync signal output by the BT.656
+ * decoder is not documented and seems to be active low.
+ */
+ if ((parcfg && parcfg->vs_pol) || ccdc->bt656)
+ syn_mode |= ISPCCDC_SYN_MODE_VDPOL;
+
+ if (parcfg && parcfg->fld_pol)
+ syn_mode |= ISPCCDC_SYN_MODE_FLDPOL;
+
+ isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+ /* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
+ * hardware seems to ignore it in all other input modes.
+ */
+ if (format->code == MEDIA_BUS_FMT_UYVY8_2X8)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_Y8POS);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_Y8POS);
+
+ /* Enable or disable BT.656 mode, including error correction for the
+ * synchronization codes.
+ */
+ if (ccdc->bt656)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
+ ISPCCDC_REC656IF_R656ON | ISPCCDC_REC656IF_ECCFVH);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
+ ISPCCDC_REC656IF_R656ON | ISPCCDC_REC656IF_ECCFVH);
+
+}
+
+/* CCDC format descriptions */
+static const u32 ccdc_sgrbg_pattern =
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_srggb_pattern =
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_sbggr_pattern =
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_sgbrg_pattern =
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static void ccdc_configure(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct isp_parallel_cfg *parcfg = NULL;
+ struct v4l2_subdev *sensor;
+ struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ const struct isp_format_info *fmt_info;
+ struct v4l2_subdev_format fmt_src;
+ unsigned int depth_out;
+ unsigned int depth_in = 0;
+ struct media_pad *pad;
+ unsigned long flags;
+ unsigned int bridge;
+ unsigned int shift;
+ unsigned int nph;
+ unsigned int sph;
+ u32 syn_mode;
+ u32 ccdc_pattern;
+
+ ccdc->bt656 = false;
+ ccdc->fields = 0;
+
+ pad = media_entity_remote_pad(&ccdc->pads[CCDC_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ if (ccdc->input == CCDC_INPUT_PARALLEL) {
+ struct v4l2_subdev *sd =
+ to_isp_pipeline(&ccdc->subdev.entity)->external;
+
+ parcfg = &v4l2_subdev_to_bus_cfg(sd)->bus.parallel;
+ ccdc->bt656 = parcfg->bt656;
+ }
+
+ /* CCDC_PAD_SINK */
+ format = &ccdc->formats[CCDC_PAD_SINK];
+
+ /* Compute the lane shifter shift value and enable the bridge when the
+ * input format is a non-BT.656 YUV variant.
+ */
+ fmt_src.pad = pad->index;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) {
+ fmt_info = omap3isp_video_format_info(fmt_src.format.code);
+ depth_in = fmt_info->width;
+ }
+
+ fmt_info = omap3isp_video_format_info(format->code);
+ depth_out = fmt_info->width;
+ shift = depth_in - depth_out;
+
+ if (ccdc->bt656)
+ bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
+ else if (fmt_info->code == MEDIA_BUS_FMT_YUYV8_2X8)
+ bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
+ else if (fmt_info->code == MEDIA_BUS_FMT_UYVY8_2X8)
+ bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
+ else
+ bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
+
+ omap3isp_configure_bridge(isp, ccdc->input, parcfg, shift, bridge);
+
+ /* Configure the sync interface. */
+ ccdc_config_sync_if(ccdc, parcfg, depth_out);
+
+ syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+ /* Use the raw, unprocessed data when writing to memory. The H3A and
+ * histogram modules are still fed with lens shading corrected data.
+ */
+ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR;
+
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ syn_mode |= ISPCCDC_SYN_MODE_WEN;
+ else
+ syn_mode &= ~ISPCCDC_SYN_MODE_WEN;
+
+ if (ccdc->output & CCDC_OUTPUT_RESIZER)
+ syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ;
+ else
+ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ;
+
+ /* Mosaic filter */
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ ccdc_pattern = ccdc_srggb_pattern;
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ ccdc_pattern = ccdc_sbggr_pattern;
+ break;
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ ccdc_pattern = ccdc_sgbrg_pattern;
+ break;
+ default:
+ /* Use GRBG */
+ ccdc_pattern = ccdc_sgrbg_pattern;
+ break;
+ }
+ ccdc_config_imgattr(ccdc, ccdc_pattern);
+
+ /* Generate VD0 on the last line of the image and VD1 on the
+ * 2/3 height line.
+ */
+ isp_reg_writel(isp, ((format->height - 2) << ISPCCDC_VDINT_0_SHIFT) |
+ ((format->height * 2 / 3) << ISPCCDC_VDINT_1_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);
+
+ /* CCDC_PAD_SOURCE_OF */
+ format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
+ crop = &ccdc->crop;
+
+ /* The horizontal coordinates are expressed in pixel clock cycles. We
+ * need two cycles per pixel in BT.656 mode, and one cycle per pixel in
+ * SYNC mode regardless of the format as the bridge is enabled for YUV
+ * formats in that case.
+ */
+ if (ccdc->bt656) {
+ sph = crop->left * 2;
+ nph = crop->width * 2 - 1;
+ } else {
+ sph = crop->left;
+ nph = crop->width - 1;
+ }
+
+ isp_reg_writel(isp, (sph << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
+ (nph << ISPCCDC_HORZ_INFO_NPH_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
+ isp_reg_writel(isp, (crop->top << ISPCCDC_VERT_START_SLV0_SHIFT) |
+ (crop->top << ISPCCDC_VERT_START_SLV1_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
+ isp_reg_writel(isp, (crop->height - 1)
+ << ISPCCDC_VERT_LINES_NLV_SHIFT,
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);
+
+ ccdc_config_outlineoffset(ccdc, ccdc->video_out.bpl_value,
+ format->field);
+
+ /* When interleaving fields enable processing of the field input signal.
+ * This will cause the line output control module to apply the field
+ * offset to field 1.
+ */
+ if (ccdc->formats[CCDC_PAD_SINK].field == V4L2_FIELD_ALTERNATE &&
+ (format->field == V4L2_FIELD_INTERLACED_TB ||
+ format->field == V4L2_FIELD_INTERLACED_BT))
+ syn_mode |= ISPCCDC_SYN_MODE_FLDMODE;
+
+ /* The CCDC outputs data in UYVY order by default. Swap bytes to get
+ * YUYV.
+ */
+ if (format->code == MEDIA_BUS_FMT_YUYV8_1X16)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_BSWD);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_BSWD);
+
+ /* Use PACK8 mode for 1-byte-per-pixel formats. Check for BT.656 mode
+ * explicitly as the driver reports 1X16 instead of 2X8 at the OF pad
+ * for simplicity.
+ */
+ if (omap3isp_video_format_info(format->code)->width <= 8 || ccdc->bt656)
+ syn_mode |= ISPCCDC_SYN_MODE_PACK8;
+ else
+ syn_mode &= ~ISPCCDC_SYN_MODE_PACK8;
+
+ isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+ /* CCDC_PAD_SOURCE_VP */
+ ccdc_config_vp(ccdc);
+
+ /* Lens shading correction. */
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+ if (ccdc->lsc.request == NULL)
+ goto unlock;
+
+ WARN_ON(ccdc->lsc.active);
+
+ /* Get last good LSC configuration. If it is not supported for
+ * the current active resolution discard it.
+ */
+ if (ccdc->lsc.active == NULL &&
+ __ccdc_lsc_configure(ccdc, ccdc->lsc.request) == 0) {
+ ccdc->lsc.active = ccdc->lsc.request;
+ } else {
+ list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
+ schedule_work(&ccdc->lsc.table_work);
+ }
+
+ ccdc->lsc.request = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+
+ ccdc_apply_controls(ccdc);
+}
+
+static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
+ ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0);
+
+ ccdc->running = enable;
+}
+
+static int ccdc_disable(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ccdc->lock, flags);
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS)
+ ccdc->stopping = CCDC_STOP_REQUEST;
+ if (!ccdc->running)
+ ccdc->stopping = CCDC_STOP_FINISHED;
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ ret = wait_event_timeout(ccdc->wait,
+ ccdc->stopping == CCDC_STOP_FINISHED,
+ msecs_to_jiffies(2000));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ dev_warn(to_device(ccdc), "CCDC stop timeout!\n");
+ }
+
+ omap3isp_sbl_disable(to_isp_device(ccdc), OMAP3_ISP_SBL_CCDC_LSC_READ);
+
+ mutex_lock(&ccdc->ioctl_lock);
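+ /* Keep the active LSC table as the pending request so it is
+ * reprogrammed at the next stream start, and free everything else.
+ */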
+ ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
+ ccdc->lsc.request = ccdc->lsc.active;
+ ccdc->lsc.active = NULL;
+ cancel_work_sync(&ccdc->lsc.table_work);
+ ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
+ mutex_unlock(&ccdc->ioctl_lock);
+
+ ccdc->stopping = CCDC_STOP_NOT_REQUESTED;
+
+ return ret > 0 ? 0 : ret;
+}
+
+static void ccdc_enable(struct isp_ccdc_device *ccdc)
+{
+ if (ccdc_lsc_is_configured(ccdc))
+ __ccdc_lsc_enable(ccdc, 1);
+ __ccdc_enable(ccdc, 1);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * ccdc_sbl_busy - Poll idle state of CCDC and related SBL memory write bits
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Returns zero if the CCDC is idle and the image has also been written to
+ * memory, non-zero otherwise.
+ */
+static int ccdc_sbl_busy(struct isp_ccdc_device *ccdc)
+{
+ struct isp_device *isp = to_isp_device(ccdc);
+
+ return omap3isp_ccdc_busy(ccdc)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) &
+ ISPSBL_CCDC_WR_0_DATA_READY)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) &
+ ISPSBL_CCDC_WR_0_DATA_READY)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) &
+ ISPSBL_CCDC_WR_0_DATA_READY)
+ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) &
+ ISPSBL_CCDC_WR_0_DATA_READY);
+}
+
+/*
+ * ccdc_sbl_wait_idle - Wait until the CCDC and related SBL are idle
+ * @ccdc: Pointer to ISP CCDC device.
+ * @max_wait: Maximum wait time in microseconds (0 selects a 10 ms default).
+ */
+static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
+ unsigned int max_wait)
+{
+ unsigned int wait = 0;
+
+ if (max_wait == 0)
+ max_wait = 10000; /* 10 ms */
+
+ for (wait = 0; wait <= max_wait; wait++) {
+ if (!ccdc_sbl_busy(ccdc))
+ return 0;
+
+ rmb();
+ udelay(1);
+ }
+
+ return -EBUSY;
+}
+
+/* ccdc_handle_stopping - Handle CCDC and/or LSC stopping sequence
+ * @ccdc: Pointer to ISP CCDC device.
+ * @event: Event that triggered the handler
+ *
+ * Return 1 when the event and stopping request combination is satisfied,
+ * zero otherwise.
+ */
+static int ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
+{
+ int rval = 0;
+
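+ /* Combine the two low bits of the stopping state with the event code
+ * to select the stop state machine transition.
+ */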
+ switch ((ccdc->stopping & 3) | event) {
+ case CCDC_STOP_REQUEST | CCDC_EVENT_VD1:
+ if (ccdc->lsc.state != LSC_STATE_STOPPED)
+ __ccdc_lsc_enable(ccdc, 0);
+ __ccdc_enable(ccdc, 0);
+ ccdc->stopping = CCDC_STOP_EXECUTED;
+ return 1;
+
+ case CCDC_STOP_EXECUTED | CCDC_EVENT_VD0:
+ ccdc->stopping |= CCDC_STOP_CCDC_FINISHED;
+ if (ccdc->lsc.state == LSC_STATE_STOPPED)
+ ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
+ rval = 1;
+ break;
+
+ case CCDC_STOP_EXECUTED | CCDC_EVENT_LSC_DONE:
+ ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
+ rval = 1;
+ break;
+
+ case CCDC_STOP_EXECUTED | CCDC_EVENT_VD1:
+ return 1;
+ }
+
+ if (ccdc->stopping == CCDC_STOP_FINISHED) {
+ wake_up(&ccdc->wait);
+ rval = 1;
+ }
+
+ return rval;
+}
+
+static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct video_device *vdev = ccdc->subdev.devnode;
+ struct v4l2_event event;
+
+ /* Frame number propagation */
+ atomic_inc(&pipe->frame_number);
+
+ memset(&event, 0, sizeof(event));
+ event.type = V4L2_EVENT_FRAME_SYNC;
+ event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);
+
+ v4l2_event_queue(vdev, &event);
+}
+
+/*
+ * ccdc_lsc_isr - Handle LSC events
+ * @ccdc: Pointer to ISP CCDC device.
+ * @events: LSC events
+ */
+static void ccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
+{
+ unsigned long flags;
+
+ if (events & IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ) {
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&ccdc->subdev.entity);
+
+ ccdc_lsc_error_handler(ccdc);
+ pipe->error = true;
+ dev_dbg(to_device(ccdc), "lsc prefetch error\n");
+ }
+
+ if (!(events & IRQ0STATUS_CCDC_LSC_DONE_IRQ))
+ return;
+
+ /* The LSC_DONE interrupt has occurred. There are two cases:
+ * 1. stopping for reconfiguration
+ * 2. stopping because of a STREAM OFF command
+ */
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+
+ if (ccdc->lsc.state == LSC_STATE_STOPPING)
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+
+ if (ccdc_handle_stopping(ccdc, CCDC_EVENT_LSC_DONE))
+ goto done;
+
+ if (ccdc->lsc.state != LSC_STATE_RECONFIG)
+ goto done;
+
+ /* The LSC was stopped for reconfiguration, record the new state */
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+
+ /* This is an exception: the start of frame and the LSC_DONE
+ * interrupt have been received at the same time. Skip this event
+ * and wait for the next opportunity.
+ */
+ if (events & IRQ0STATUS_HS_VS_IRQ)
+ goto done;
+
+ /* The LSC engine is stopped at this point. Enable it if there's a
+ * pending request.
+ */
+ if (ccdc->lsc.request == NULL)
+ goto done;
+
+ ccdc_lsc_enable(ccdc);
+
+done:
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+/*
+ * Check whether the CCDC has captured all fields necessary to complete the
+ * buffer.
+ */
+static bool ccdc_has_all_fields(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccdc);
+ enum v4l2_field of_field = ccdc->formats[CCDC_PAD_SOURCE_OF].field;
+ enum v4l2_field field;
+
+ /* When the input is progressive fields don't matter. */
+ if (of_field == V4L2_FIELD_NONE)
+ return true;
+
+ /* Read the current field identifier. */
+ field = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE)
+ & ISPCCDC_SYN_MODE_FLDSTAT
+ ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+
+ /* When capturing fields in alternate order just store the current field
+ * identifier in the pipeline.
+ */
+ if (of_field == V4L2_FIELD_ALTERNATE) {
+ pipe->field = field;
+ return true;
+ }
+
+ /* The format is interlaced. Make sure we've captured both fields. */
+ ccdc->fields |= field == V4L2_FIELD_BOTTOM
+ ? CCDC_FIELD_BOTTOM : CCDC_FIELD_TOP;
+
+ if (ccdc->fields != CCDC_FIELD_BOTH)
+ return false;
+
+ /* Verify that the field just captured corresponds to the last field
+ * needed based on the desired field order.
+ */
+ if ((of_field == V4L2_FIELD_INTERLACED_TB && field == V4L2_FIELD_TOP) ||
+ (of_field == V4L2_FIELD_INTERLACED_BT && field == V4L2_FIELD_BOTTOM))
+ return false;
+
+ /* The buffer can be completed, reset the fields for the next buffer. */
+ ccdc->fields = 0;
+
+ return true;
+}
+
+static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccdc);
+ struct isp_buffer *buffer;
+
+ /* The CCDC generates VD0 interrupts even when disabled (the datasheet
+ * doesn't explicitly state if that's supposed to happen or not, so it
+ * can be considered as a hardware bug or as a feature, but we have to
+ * deal with it anyway). Disabling the CCDC when no buffer is available
+ * would thus not be enough, we need to handle the situation explicitly.
+ */
+ if (list_empty(&ccdc->video_out.dmaqueue))
+ return 0;
+
+ /* We're in continuous mode, and memory writes were disabled due to a
+ * buffer underrun. Reenable them now that we have a buffer. The buffer
+ * address has been set in ccdc_video_queue.
+ */
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
+ ccdc->underrun = 0;
+ return 1;
+ }
+
+ /* Wait for the CCDC to become idle. */
+ if (ccdc_sbl_wait_idle(ccdc, 1000)) {
+ dev_info(isp->dev, "CCDC won't become idle!\n");
+ media_entity_enum_set(&isp->crashed, &ccdc->subdev.entity);
+ omap3isp_pipeline_cancel_stream(pipe);
+ return 0;
+ }
+
+ if (!ccdc_has_all_fields(ccdc))
+ return 1;
+
+ buffer = omap3isp_video_buffer_next(&ccdc->video_out);
+ if (buffer != NULL)
+ ccdc_set_outaddr(ccdc, buffer->dma);
+
+ pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
+
+ if (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT &&
+ isp_pipeline_ready(pipe))
+ omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_SINGLESHOT);
+
+ return buffer != NULL;
+}
+
+/*
+ * ccdc_vd0_isr - Handle VD0 event
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Executes LSC deferred enablement before next frame starts.
+ */
+static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+ int restart = 0;
+
+ /* In BT.656 mode the CCDC doesn't generate an HS/VS interrupt. We thus
+ * need to increment the frame counter here.
+ */
+ if (ccdc->bt656) {
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&ccdc->subdev.entity);
+
+ atomic_inc(&pipe->frame_number);
+ }
+
+ /* Emulate a VD1 interrupt for BT.656 mode, as we can't stop the CCDC in
+ * the VD1 interrupt handler in that mode without risking a CCDC stall
+ * if a short frame is received.
+ */
+ if (ccdc->bt656) {
+ spin_lock_irqsave(&ccdc->lock, flags);
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+ ccdc->output & CCDC_OUTPUT_MEMORY) {
+ if (ccdc->lsc.state != LSC_STATE_STOPPED)
+ __ccdc_lsc_enable(ccdc, 0);
+ __ccdc_enable(ccdc, 0);
+ }
+ ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1);
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+ }
+
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ restart = ccdc_isr_buffer(ccdc);
+
+ spin_lock_irqsave(&ccdc->lock, flags);
+
+ if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+ return;
+ }
+
+ if (!ccdc->shadow_update)
+ ccdc_apply_controls(ccdc);
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ if (restart)
+ ccdc_enable(ccdc);
+}
+
+/*
+ * ccdc_vd1_isr - Handle VD1 event
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
+{
+ unsigned long flags;
+
+ /* In BT.656 mode the synchronization signals are generated by the CCDC
+ * from the embedded sync codes. The VD0 and VD1 interrupts are thus
+ * only triggered when the CCDC is enabled, unlike external sync mode
+ * where the line counter runs even when the CCDC is stopped. We can't
+ * disable the CCDC at VD1 time, as no VD0 interrupt would be generated
+ * for a short frame, which would result in the CCDC being stopped and
+ * no VD interrupt generated anymore. The CCDC is stopped from the VD0
+ * interrupt handler instead for BT.656.
+ */
+ if (ccdc->bt656)
+ return;
+
+ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+
+ /*
+ * Depending on the CCDC pipeline state, CCDC stopping should be
+ * handled differently. In SINGLESHOT we emulate an internal CCDC
+ * stopping because the CCDC hardware works only in continuous mode.
+ * When the CONTINUOUS pipeline state is used and the CCDC writes its
+ * data to memory, the CCDC and LSC are stopped immediately but
+ * without changing the CCDC stopping state machine. The CCDC
+ * stopping state machine should only be used when a userspace request
+ * for stopping is received (SINGLESHOT is an exception).
+ */
+ switch (ccdc->state) {
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ ccdc->stopping = CCDC_STOP_REQUEST;
+ break;
+
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (ccdc->output & CCDC_OUTPUT_MEMORY) {
+ if (ccdc->lsc.state != LSC_STATE_STOPPED)
+ __ccdc_lsc_enable(ccdc, 0);
+ __ccdc_enable(ccdc, 0);
+ }
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ break;
+ }
+
+ if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1))
+ goto done;
+
+ if (ccdc->lsc.request == NULL)
+ goto done;
+
+ /*
+ * The LSC needs to be reconfigured. Stop it here; the register
+ * changes will be applied at the next LSC_DONE IRQ.
+ */
+ if (ccdc->lsc.state == LSC_STATE_RUNNING) {
+ __ccdc_lsc_enable(ccdc, 0);
+ ccdc->lsc.state = LSC_STATE_RECONFIG;
+ goto done;
+ }
+
+ /* The LSC is in the STOPPED state, enable it */
+ if (ccdc->lsc.state == LSC_STATE_STOPPED)
+ ccdc_lsc_enable(ccdc);
+
+done:
+ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+/*
+ * omap3isp_ccdc_isr - Configure CCDC during interframe time.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @events: CCDC events
+ */
+int omap3isp_ccdc_isr(struct isp_ccdc_device *ccdc, u32 events)
+{
+ if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ if (events & IRQ0STATUS_CCDC_VD1_IRQ)
+ ccdc_vd1_isr(ccdc);
+
+ ccdc_lsc_isr(ccdc, events);
+
+ if (events & IRQ0STATUS_CCDC_VD0_IRQ)
+ ccdc_vd0_isr(ccdc);
+
+ if (events & IRQ0STATUS_HS_VS_IRQ)
+ ccdc_hs_vs_isr(ccdc);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+ struct isp_ccdc_device *ccdc = &video->isp->isp_ccdc;
+ unsigned long flags;
+ bool restart = false;
+
+ if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
+ return -ENODEV;
+
+ ccdc_set_outaddr(ccdc, buffer->dma);
+
+ /* We now have a buffer queued on the output, restart the pipeline
+ * on the next CCDC interrupt if running in continuous mode (or when
+ * starting the stream) in external sync mode, or immediately in BT.656
+ * sync mode as no CCDC interrupt is generated when the CCDC is stopped
+ * in that case.
+ */
+ spin_lock_irqsave(&ccdc->lock, flags);
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && !ccdc->running &&
+ ccdc->bt656)
+ restart = true;
+ else
+ ccdc->underrun = 1;
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ if (restart)
+ ccdc_enable(ccdc);
+
+ return 0;
+}
+
+static const struct isp_video_operations ccdc_video_ops = {
+ .queue = ccdc_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * ccdc_ioctl - CCDC module private ioctl's
+ * @sd: ISP CCDC V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long ccdc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ int ret;
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_CCDC_CFG:
+ mutex_lock(&ccdc->ioctl_lock);
+ ret = ccdc_config(ccdc, arg);
+ mutex_unlock(&ccdc->ioctl_lock);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return ret;
+}
+
+static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* line number is zero at frame start */
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
+}
+
+static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+/*
+ * ccdc_set_stream - Enable/Disable streaming on the CCDC module
+ * @sd: ISP CCDC V4L2 subdevice
+ * @enable: Enable/disable stream
+ *
+ * When writing to memory, the CCDC hardware can't be enabled without a memory
+ * buffer to write to. As the s_stream operation is called in response to a
+ * STREAMON call without any buffer queued yet, just update the enabled field
+ * and return immediately. The CCDC will be enabled in ccdc_isr_buffer().
+ *
+ * When not writing to memory enable the CCDC immediately.
+ */
+static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = to_isp_device(ccdc);
+ int ret = 0;
+
+ if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_CCDC);
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+ ISPCCDC_CFG_VDLC);
+
+ ccdc_configure(ccdc);
+
+ ccdc_print_status(ccdc);
+ }
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+
+ if (ccdc->underrun || !(ccdc->output & CCDC_OUTPUT_MEMORY))
+ ccdc_enable(ccdc);
+
+ ccdc->underrun = 0;
+ break;
+
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ if (ccdc->output & CCDC_OUTPUT_MEMORY &&
+ ccdc->state != ISP_PIPELINE_STREAM_SINGLESHOT)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+
+ ccdc_enable(ccdc);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ ret = ccdc_disable(ccdc);
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+ omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_CCDC);
+ ccdc->underrun = 0;
+ break;
+ }
+
+ ccdc->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&ccdc->subdev, cfg, pad);
+ else
+ return &ccdc->formats[pad];
+}
+
+static struct v4l2_rect *
+__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&ccdc->subdev, cfg, CCDC_PAD_SOURCE_OF);
+ else
+ return &ccdc->crop;
+}
+
+/*
+ * ccdc_try_format - Try video format on a pad
+ * @ccdc: ISP CCDC device
+ * @cfg : V4L2 subdev pad configuration
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ const struct isp_format_info *info;
+ u32 pixelcode;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ struct v4l2_rect *crop;
+ enum v4l2_field field;
+ unsigned int i;
+
+ switch (pad) {
+ case CCDC_PAD_SINK:
+ for (i = 0; i < ARRAY_SIZE(ccdc_fmts); i++) {
+ if (fmt->code == ccdc_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(ccdc_fmts))
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 32, 4096);
+ fmt->height = clamp_t(u32, height, 32, 4096);
+
+ /* Default to progressive field order. */
+ if (fmt->field == V4L2_FIELD_ANY)
+ fmt->field = V4L2_FIELD_NONE;
+
+ break;
+
+ case CCDC_PAD_SOURCE_OF:
+ pixelcode = fmt->code;
+ field = fmt->field;
+ *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
+
+ /* In SYNC mode the bridge converts YUV formats from 2X8 to
+ * 1X16. In BT.656 no such conversion occurs. As we don't know
+ * at this point whether the source will use SYNC or BT.656 mode
+ * let's pretend the conversion always occurs. The CCDC will be
+ * configured to pack bytes in BT.656, hiding the inaccuracy.
+ * In all cases bytes can be swapped.
+ */
+ if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) {
+ /* Use the user requested format if YUV. */
+ if (pixelcode == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ pixelcode == MEDIA_BUS_FMT_UYVY8_2X8 ||
+ pixelcode == MEDIA_BUS_FMT_YUYV8_1X16 ||
+ pixelcode == MEDIA_BUS_FMT_UYVY8_1X16)
+ fmt->code = pixelcode;
+
+ if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8)
+ fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ else if (fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
+ }
+
+ /* Hardcode the output size to the crop rectangle size. */
+ crop = __ccdc_get_crop(ccdc, cfg, which);
+ fmt->width = crop->width;
+ fmt->height = crop->height;
+
+ /* When input format is interlaced with alternating fields the
+ * CCDC can interleave the fields.
+ */
+ if (fmt->field == V4L2_FIELD_ALTERNATE &&
+ (field == V4L2_FIELD_INTERLACED_TB ||
+ field == V4L2_FIELD_INTERLACED_BT)) {
+ fmt->field = field;
+ fmt->height *= 2;
+ }
+
+ break;
+
+ case CCDC_PAD_SOURCE_VP:
+ *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
+
+ /* The video port interface truncates the data to 10 bits. */
+ info = omap3isp_video_format_info(fmt->code);
+ fmt->code = info->truncated;
+
+ /* YUV formats are not supported by the video port. */
+ if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
+ fmt->code = 0;
+
+ /* The number of lines that can be clocked out from the video
+ * port output must be at least one line less than the number
+ * of input lines.
+ */
+ fmt->width = clamp_t(u32, width, 32, fmt->width);
+ fmt->height = clamp_t(u32, height, 32, fmt->height - 1);
+ break;
+ }
+
+ /* Data is written to memory unpacked, each 10-bit or 12-bit pixel is
+ * stored in 2 bytes.
+ */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/*
+ * ccdc_try_crop - Validate a crop rectangle
+ * @ccdc: ISP CCDC device
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ */
+static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
+ const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ const struct isp_format_info *info;
+ unsigned int max_width;
+
+ /* For Bayer formats, restrict left/top and width/height to even values
+ * to keep the Bayer pattern.
+ */
+ info = omap3isp_video_format_info(sink->code);
+ if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
+ crop->left &= ~1;
+ crop->top &= ~1;
+ }
+
+ crop->left = clamp_t(u32, crop->left, 0, sink->width - CCDC_MIN_WIDTH);
+ crop->top = clamp_t(u32, crop->top, 0, sink->height - CCDC_MIN_HEIGHT);
+
+ /* The data formatter truncates the number of horizontal output pixels
+ * to a multiple of 16. To avoid clipping data, allow callers to request
+ * an output size bigger than the input size up to the nearest multiple
+ * of 16.
+ */
+ max_width = (sink->width - crop->left + 15) & ~15;
+ crop->width = clamp_t(u32, crop->width, CCDC_MIN_WIDTH, max_width)
+ & ~15;
+ crop->height = clamp_t(u32, crop->height, CCDC_MIN_HEIGHT,
+ sink->height - crop->top);
+
+ /* Odd width/height values don't make sense for Bayer formats. */
+ if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
+ crop->width &= ~1;
+ crop->height &= ~1;
+ }
+}
+
+/*
+ * ccdc_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg : V4L2 subdev pad configuration
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ switch (code->pad) {
+ case CCDC_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ccdc_fmts))
+ return -EINVAL;
+
+ code->code = ccdc_fmts[code->index];
+ break;
+
+ case CCDC_PAD_SOURCE_OF:
+ format = __ccdc_get_format(ccdc, cfg, code->pad,
+ code->which);
+
+ if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
+ /* In YUV mode the CCDC can swap bytes. */
+ if (code->index == 0)
+ code->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ else if (code->index == 1)
+ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
+ else
+ return -EINVAL;
+ } else {
+ /* In raw mode, no configurable format conversion is
+ * available.
+ */
+ if (code->index == 0)
+ code->code = format->code;
+ else
+ return -EINVAL;
+ }
+ break;
+
+ case CCDC_PAD_SOURCE_VP:
+ /* The CCDC supports no configurable format conversion
+ * compatible with the video port. Enumerate a single output
+ * format code.
+ */
+ if (code->index != 0)
+ return -EINVAL;
+
+ format = __ccdc_get_format(ccdc, cfg, code->pad,
+ code->which);
+
+ /* A pixel code equal to 0 means that the video port doesn't
+ * support the input format. Don't enumerate any pixel code.
+ */
+ if (format->code == 0)
+ return -EINVAL;
+
+ code->code = format->code;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
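+ /* Let ccdc_try_format() clamp extreme sizes to discover the minimum
+ * (1x1) and maximum (-1, i.e. UINT_MAX) supported frame sizes.
+ */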
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ccdc_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the output formatter
+ * source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ccdc_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the output
+ * formatter source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->target != V4L2_SEL_TGT_CROP ||
+ sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ /* The crop rectangle can't be changed while streaming. */
+ if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ /* Modifying the crop rectangle always changes the format on the source
+ * pad. If the KEEP_CONFIG flag is set, just return the current crop
+ * rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
+ return 0;
+ }
+
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ *__ccdc_get_crop(ccdc, cfg, sel->which) = sel->r;
+
+ /* Update the source format. */
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, sel->which);
+ ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format, sel->which);
+
+ return 0;
+}
+
+/*
+ * ccdc_get_format - Retrieve the video format on a pad
+ * @sd : ISP CCDC V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ccdc_set_format - Set the video format on a pad
+ * @sd : ISP CCDC V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ccdc_try_format(ccdc, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CCDC_PAD_SINK) {
+ /* Reset the crop rectangle. */
+ crop = __ccdc_get_crop(ccdc, cfg, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
+ ccdc_try_crop(ccdc, &fmt->format, crop);
+
+ /* Update the source formats. */
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF,
+ fmt->which);
+ *format = fmt->format;
+ ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format,
+ fmt->which);
+
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_VP,
+ fmt->which);
+ *format = fmt->format;
+ ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_VP, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * Decide whether desired output pixel code can be obtained with
+ * the lane shifter by shifting the input pixel code.
+ * @in: input pixelcode to shifter
+ * @out: output pixelcode from shifter
+ * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
+ *
+ * return true if the combination is possible
+ * return false otherwise
+ */
+static bool ccdc_is_shiftable(u32 in, u32 out, unsigned int additional_shift)
+{
+ const struct isp_format_info *in_info, *out_info;
+
+ if (in == out)
+ return true;
+
+ in_info = omap3isp_video_format_info(in);
+ out_info = omap3isp_video_format_info(out);
+
+ if ((in_info->flavor == 0) || (out_info->flavor == 0))
+ return false;
+
+ if (in_info->flavor != out_info->flavor)
+ return false;
+
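+ /* The lane shifter supports a total shift of at most 6 bits. */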
+ return in_info->width - out_info->width + additional_shift <= 6;
+}
+
+static int ccdc_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ unsigned long parallel_shift;
+
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ /* We've got a parallel sensor here. */
+ if (ccdc->input == CCDC_INPUT_PARALLEL) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct isp_bus_cfg *bus_cfg = v4l2_subdev_to_bus_cfg(sd);
+
+ parallel_shift = bus_cfg->bus.parallel.data_lane_shift;
+ } else {
+ parallel_shift = 0;
+ }
+
+ /* Lane shifter may be used to drop bits on CCDC sink pad */
+ if (!ccdc_is_shiftable(source_fmt->format.code,
+ sink_fmt->format.code, parallel_shift))
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * ccdc_init_formats - Initialize formats on all pads
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CCDC_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ccdc_set_format(sd, fh ? fh->pad : NULL, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = {
+ .ioctl = ccdc_ioctl,
+ .subscribe_event = ccdc_subscribe_event,
+ .unsubscribe_event = ccdc_unsubscribe_event,
+};
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ccdc_v4l2_video_ops = {
+ .s_stream = ccdc_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = {
+ .enum_mbus_code = ccdc_enum_mbus_code,
+ .enum_frame_size = ccdc_enum_frame_size,
+ .get_fmt = ccdc_get_format,
+ .set_fmt = ccdc_set_format,
+ .get_selection = ccdc_get_selection,
+ .set_selection = ccdc_set_selection,
+ .link_validate = ccdc_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ccdc_v4l2_ops = {
+ .core = &ccdc_v4l2_core_ops,
+ .video = &ccdc_v4l2_video_ops,
+ .pad = &ccdc_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ccdc_v4l2_internal_ops = {
+ .open = ccdc_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ccdc_link_setup - Setup CCDC connections
+ * @entity: CCDC media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ccdc_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = to_isp_device(ccdc);
+ unsigned int index = local->index;
+
+ /* FIXME: this is actually a hack! */
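+	/* Tag the pad index with 2 << 16 when the remote entity is another
+	 * subdev, so the switch below can tell subdev links apart from links
+	 * to video device nodes using the same pad number.
+	 */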
+ if (is_media_entity_v4l2_subdev(remote->entity))
+ index |= 2 << 16;
+
+ switch (index) {
+ case CCDC_PAD_SINK | 2 << 16:
+ /* Read from the sensor (parallel interface), CCP2, CSI2a or
+ * CSI2c.
+ */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ccdc->input = CCDC_INPUT_NONE;
+ break;
+ }
+
+ if (ccdc->input != CCDC_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &isp->isp_ccp2.subdev.entity)
+ ccdc->input = CCDC_INPUT_CCP2B;
+ else if (remote->entity == &isp->isp_csi2a.subdev.entity)
+ ccdc->input = CCDC_INPUT_CSI2A;
+ else if (remote->entity == &isp->isp_csi2c.subdev.entity)
+ ccdc->input = CCDC_INPUT_CSI2C;
+ else
+ ccdc->input = CCDC_INPUT_PARALLEL;
+
+ break;
+
+ /*
+ * The ISP core doesn't support pipelines with multiple video outputs.
+	 * Revisit this once it is implemented, and return -EBUSY for now.
+ */
+
+ case CCDC_PAD_SOURCE_VP | 2 << 16:
+ /* Write to preview engine, histogram and H3A. When none of
+ * those links are active, the video port can be disabled.
+ */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccdc->output & ~CCDC_OUTPUT_PREVIEW)
+ return -EBUSY;
+ ccdc->output |= CCDC_OUTPUT_PREVIEW;
+ } else {
+ ccdc->output &= ~CCDC_OUTPUT_PREVIEW;
+ }
+ break;
+
+ case CCDC_PAD_SOURCE_OF:
+ /* Write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccdc->output & ~CCDC_OUTPUT_MEMORY)
+ return -EBUSY;
+ ccdc->output |= CCDC_OUTPUT_MEMORY;
+ } else {
+ ccdc->output &= ~CCDC_OUTPUT_MEMORY;
+ }
+ break;
+
+ case CCDC_PAD_SOURCE_OF | 2 << 16:
+ /* Write to resizer */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccdc->output & ~CCDC_OUTPUT_RESIZER)
+ return -EBUSY;
+ ccdc->output |= CCDC_OUTPUT_RESIZER;
+ } else {
+ ccdc->output &= ~CCDC_OUTPUT_RESIZER;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ccdc_media_ops = {
+ .link_setup = ccdc_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
+{
+ v4l2_device_unregister_subdev(&ccdc->subdev);
+ omap3isp_video_unregister(&ccdc->video_out);
+}
+
+int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ccdc->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&ccdc->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_ccdc_unregister_entities(ccdc);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CCDC initialisation and cleanup
+ */
+
+/*
+ * ccdc_init_entities - Initialize V4L2 subdev and media entity
+ * @ccdc: ISP CCDC module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
+{
+ struct v4l2_subdev *sd = &ccdc->subdev;
+ struct media_pad *pads = ccdc->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ccdc->input = CCDC_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &ccdc_v4l2_ops);
+ sd->internal_ops = &ccdc_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP CCDC", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, ccdc);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ccdc_media_ops;
+ ret = media_entity_pads_init(me, CCDC_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ ccdc_init_formats(sd, NULL);
+
+ ccdc->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ccdc->video_out.ops = &ccdc_video_ops;
+ ccdc->video_out.isp = to_isp_device(ccdc);
+ ccdc->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+ ccdc->video_out.bpl_alignment = 32;
+
+ ret = omap3isp_video_init(&ccdc->video_out, "CCDC");
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ media_entity_cleanup(me);
+ return ret;
+}
+
+/*
+ * omap3isp_ccdc_init - CCDC module initialization.
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap3isp_ccdc_init(struct isp_device *isp)
+{
+ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+ int ret;
+
+ spin_lock_init(&ccdc->lock);
+ init_waitqueue_head(&ccdc->wait);
+ mutex_init(&ccdc->ioctl_lock);
+
+ ccdc->stopping = CCDC_STOP_NOT_REQUESTED;
+
+ INIT_WORK(&ccdc->lsc.table_work, ccdc_lsc_free_table_work);
+ ccdc->lsc.state = LSC_STATE_STOPPED;
+ INIT_LIST_HEAD(&ccdc->lsc.free_queue);
+ spin_lock_init(&ccdc->lsc.req_lock);
+
+ ccdc->clamp.oblen = 0;
+ ccdc->clamp.dcsubval = 0;
+
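+	/* Apply the default black clamp configuration. */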
+ ccdc->update = OMAP3ISP_CCDC_BLCLAMP;
+ ccdc_apply_controls(ccdc);
+
+ ret = ccdc_init_entities(ccdc);
+ if (ret < 0) {
+ mutex_destroy(&ccdc->ioctl_lock);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * omap3isp_ccdc_cleanup - CCDC module cleanup.
+ * @isp: Device pointer specific to the OMAP3 ISP.
+ */
+void omap3isp_ccdc_cleanup(struct isp_device *isp)
+{
+ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+
+ omap3isp_video_cleanup(&ccdc->video_out);
+ media_entity_cleanup(&ccdc->subdev.entity);
+
+ /* Free LSC requests. As the CCDC is stopped there's no active request,
+ * so only the pending request and the free queue need to be handled.
+ */
+ ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
+ cancel_work_sync(&ccdc->lsc.table_work);
+ ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
+
+ if (ccdc->fpc.addr != NULL)
+ dma_free_coherent(isp->dev, ccdc->fpc.fpnum * 4, ccdc->fpc.addr,
+ ccdc->fpc.dma);
+
+ mutex_destroy(&ccdc->ioctl_lock);
+}
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
new file mode 100644
index 000000000..3440a7097
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -0,0 +1,177 @@
+/*
+ * ispccdc.h
+ *
+ * TI OMAP3 ISP - CCDC module
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_CCDC_H
+#define OMAP3_ISP_CCDC_H
+
+#include <linux/omap3isp.h>
+#include <linux/workqueue.h>
+
+#include "ispvideo.h"
+
+enum ccdc_input_entity {
+ CCDC_INPUT_NONE,
+ CCDC_INPUT_PARALLEL,
+ CCDC_INPUT_CSI2A,
+ CCDC_INPUT_CCP2B,
+ CCDC_INPUT_CSI2C
+};
+
+#define CCDC_OUTPUT_MEMORY (1 << 0)
+#define CCDC_OUTPUT_PREVIEW (1 << 1)
+#define CCDC_OUTPUT_RESIZER (1 << 2)
+
+#define OMAP3ISP_CCDC_NEVENTS 16
+
+struct ispccdc_fpc {
+ void *addr;
+ dma_addr_t dma;
+ unsigned int fpnum;
+};
+
+enum ispccdc_lsc_state {
+ LSC_STATE_STOPPED = 0,
+ LSC_STATE_STOPPING = 1,
+ LSC_STATE_RUNNING = 2,
+ LSC_STATE_RECONFIG = 3,
+};
+
+struct ispccdc_lsc_config_req {
+ struct list_head list;
+ struct omap3isp_ccdc_lsc_config config;
+ unsigned char enable;
+
+ struct {
+ void *addr;
+ dma_addr_t dma;
+ struct sg_table sgt;
+ } table;
+};
+
+/*
+ * ispccdc_lsc - CCDC LSC parameters
+ */
+struct ispccdc_lsc {
+ enum ispccdc_lsc_state state;
+ struct work_struct table_work;
+
+ /* LSC queue of configurations */
+ spinlock_t req_lock;
+ struct ispccdc_lsc_config_req *request; /* requested configuration */
+ struct ispccdc_lsc_config_req *active; /* active configuration */
+ struct list_head free_queue; /* configurations for freeing */
+};
+
+#define CCDC_STOP_NOT_REQUESTED 0x00
+#define CCDC_STOP_REQUEST 0x01
+#define CCDC_STOP_EXECUTED (0x02 | CCDC_STOP_REQUEST)
+#define CCDC_STOP_CCDC_FINISHED 0x04
+#define CCDC_STOP_LSC_FINISHED 0x08
+#define CCDC_STOP_FINISHED \
+ (CCDC_STOP_EXECUTED | CCDC_STOP_CCDC_FINISHED | CCDC_STOP_LSC_FINISHED)
+
+#define CCDC_EVENT_VD1 0x10
+#define CCDC_EVENT_VD0 0x20
+#define CCDC_EVENT_LSC_DONE 0x40
+
+/* Sink and source CCDC pads */
+#define CCDC_PAD_SINK 0
+#define CCDC_PAD_SOURCE_OF 1
+#define CCDC_PAD_SOURCE_VP 2
+#define CCDC_PADS_NUM 3
+
+#define CCDC_FIELD_TOP 1
+#define CCDC_FIELD_BOTTOM 2
+#define CCDC_FIELD_BOTH 3
+
+/*
+ * struct isp_ccdc_device - Structure for the CCDC module to store its own
+ * information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @crop: Active crop rectangle on the OF source pad
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @alaw: A-law compression enabled (1) or disabled (0)
+ * @lpf: Low pass filter enabled (1) or disabled (0)
+ * @obclamp: Optical-black clamp enabled (1) or disabled (0)
+ * @fpc_en: Faulty pixels correction enabled (1) or disabled (0)
+ * @blcomp: Black level compensation configuration
+ * @clamp: Optical-black or digital clamp configuration
+ * @fpc: Faulty pixels correction configuration
+ * @lsc: Lens shading compensation configuration
+ * @update: Bitmask of controls to update during the next interrupt
+ * @shadow_update: Controls update in progress by userspace
+ * @bt656: Whether the input interface uses BT.656 synchronization
+ * @fields: The fields (CCDC_FIELD_*) stored in the current buffer
+ * @underrun: A buffer underrun occurred and a new buffer has been queued
+ * @state: Streaming state
+ * @lock: Serializes shadow_update with interrupt handler
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ * @running: Is the CCDC hardware running
+ * @ioctl_lock: Serializes ioctl calls and LSC requests freeing
+ */
+struct isp_ccdc_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CCDC_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CCDC_PADS_NUM];
+ struct v4l2_rect crop;
+
+ enum ccdc_input_entity input;
+ unsigned int output;
+ struct isp_video video_out;
+
+ unsigned int alaw:1,
+ lpf:1,
+ obclamp:1,
+ fpc_en:1;
+ struct omap3isp_ccdc_blcomp blcomp;
+ struct omap3isp_ccdc_bclamp clamp;
+ struct ispccdc_fpc fpc;
+ struct ispccdc_lsc lsc;
+ unsigned int update;
+ unsigned int shadow_update;
+
+ bool bt656;
+ unsigned int fields;
+
+ unsigned int underrun:1;
+ enum isp_pipeline_stream_state state;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ unsigned int stopping;
+ bool running;
+ struct mutex ioctl_lock;
+};
+
+struct isp_device;
+
+int omap3isp_ccdc_init(struct isp_device *isp);
+void omap3isp_ccdc_cleanup(struct isp_device *isp);
+int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+ struct v4l2_device *vdev);
+void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc);
+
+int omap3isp_ccdc_busy(struct isp_ccdc_device *isp_ccdc);
+int omap3isp_ccdc_isr(struct isp_ccdc_device *isp_ccdc, u32 events);
+void omap3isp_ccdc_restore_context(struct isp_device *isp);
+void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
+ unsigned int *max_rate);
+
+#endif /* OMAP3_ISP_CCDC_H */
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
new file mode 100644
index 000000000..47b0d3fe8
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -0,0 +1,1177 @@
+/*
+ * ispccp2.c
+ *
+ * TI OMAP3 ISP - CCP2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccp2.h"
+
+/* Number of LCX channels */
+#define CCP2_LCx_CHANS_NUM 3
+/* Max/Min size for CCP2 video port */
+#define ISPCCP2_DAT_START_MIN 0
+#define ISPCCP2_DAT_START_MAX 4095
+#define ISPCCP2_DAT_SIZE_MIN 0
+#define ISPCCP2_DAT_SIZE_MAX 4095
+#define ISPCCP2_VPCLK_FRACDIV 65536
+#define ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP 0x12
+#define ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP 0x16
+/* Max/Min size for CCP2 memory channel */
+#define ISPCCP2_LCM_HSIZE_COUNT_MIN 16
+#define ISPCCP2_LCM_HSIZE_COUNT_MAX 8191
+#define ISPCCP2_LCM_HSIZE_SKIP_MIN 0
+#define ISPCCP2_LCM_HSIZE_SKIP_MAX 8191
+#define ISPCCP2_LCM_VSIZE_MIN 1
+#define ISPCCP2_LCM_VSIZE_MAX 8191
+#define ISPCCP2_LCM_HWORDS_MIN 1
+#define ISPCCP2_LCM_HWORDS_MAX 4095
+#define ISPCCP2_LCM_CTRL_BURST_SIZE_32X 5
+#define ISPCCP2_LCM_CTRL_READ_THROTTLE_FULL 0
+#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 2
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 2
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 3
+#define ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 3
+#define ISPCCP2_LCM_CTRL_DST_PORT_VP 0
+#define ISPCCP2_LCM_CTRL_DST_PORT_MEM 1
+
+/* Set only the required bits */
+#define BIT_SET(var, shift, mask, val) \
+ do { \
+ var = ((var) & ~((mask) << (shift))) \
+ | ((val) << (shift)); \
+ } while (0)
+
+/*
+ * ccp2_print_status - Print current CCP2 module register values.
+ */
+#define CCP2_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###CCP2 " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_##name))
+
+static void ccp2_print_status(struct isp_ccp2_device *ccp2)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
+ dev_dbg(isp->dev, "-------------CCP2 Register dump-------------\n");
+
+ CCP2_PRINT_REGISTER(isp, SYSCONFIG);
+ CCP2_PRINT_REGISTER(isp, SYSSTATUS);
+ CCP2_PRINT_REGISTER(isp, LC01_IRQENABLE);
+ CCP2_PRINT_REGISTER(isp, LC01_IRQSTATUS);
+ CCP2_PRINT_REGISTER(isp, LC23_IRQENABLE);
+ CCP2_PRINT_REGISTER(isp, LC23_IRQSTATUS);
+ CCP2_PRINT_REGISTER(isp, LCM_IRQENABLE);
+ CCP2_PRINT_REGISTER(isp, LCM_IRQSTATUS);
+ CCP2_PRINT_REGISTER(isp, CTRL);
+ CCP2_PRINT_REGISTER(isp, LCx_CTRL(0));
+ CCP2_PRINT_REGISTER(isp, LCx_CODE(0));
+ CCP2_PRINT_REGISTER(isp, LCx_STAT_START(0));
+ CCP2_PRINT_REGISTER(isp, LCx_STAT_SIZE(0));
+ CCP2_PRINT_REGISTER(isp, LCx_SOF_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_EOF_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_START(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_SIZE(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_PING_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_PONG_ADDR(0));
+ CCP2_PRINT_REGISTER(isp, LCx_DAT_OFST(0));
+ CCP2_PRINT_REGISTER(isp, LCM_CTRL);
+ CCP2_PRINT_REGISTER(isp, LCM_VSIZE);
+ CCP2_PRINT_REGISTER(isp, LCM_HSIZE);
+ CCP2_PRINT_REGISTER(isp, LCM_PREFETCH);
+ CCP2_PRINT_REGISTER(isp, LCM_SRC_ADDR);
+ CCP2_PRINT_REGISTER(isp, LCM_SRC_OFST);
+ CCP2_PRINT_REGISTER(isp, LCM_DST_ADDR);
+ CCP2_PRINT_REGISTER(isp, LCM_DST_OFST);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * ccp2_reset - Reset the CCP2
+ * @ccp2: pointer to ISP CCP2 device
+ */
+static void ccp2_reset(struct isp_ccp2_device *ccp2)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ int i = 0;
+
+ /* Reset the CSI1/CCP2B and wait for reset to complete */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG,
+ ISPCCP2_SYSCONFIG_SOFT_RESET);
+ while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSSTATUS) &
+ ISPCCP2_SYSSTATUS_RESET_DONE)) {
+ udelay(10);
+ if (i++ > 10) { /* try read 10 times */
+ dev_warn(isp->dev,
+ "omap3_isp: timeout waiting for ccp2 reset\n");
+ break;
+ }
+ }
+}
+
+/*
+ * ccp2_pwr_cfg - Configure the power mode settings
+ * @ccp2: pointer to ISP CCP2 device
+ */
+static void ccp2_pwr_cfg(struct isp_ccp2_device *ccp2)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
+ isp_reg_writel(isp, ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SMART |
+ ((isp->revision == ISP_REVISION_15_0 && isp->autoidle) ?
+ ISPCCP2_SYSCONFIG_AUTO_IDLE : 0),
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG);
+}
+
+/*
+ * ccp2_if_enable - Enable CCP2 interface.
+ * @ccp2: pointer to ISP CCP2 device
+ * @enable: enable/disable flag
+ */
+static int ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ int ret;
+ int i;
+
+ if (enable && ccp2->vdds_csib) {
+ ret = regulator_enable(ccp2->vdds_csib);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable/Disable all the LCx channels */
+ for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
+ ISPCCP2_LCx_CTRL_CHAN_EN,
+ enable ? ISPCCP2_LCx_CTRL_CHAN_EN : 0);
+
+ /* Enable/Disable ccp2 interface in ccp2 mode */
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+ ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN,
+ enable ? (ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN) : 0);
+
+ if (!enable && ccp2->vdds_csib)
+ regulator_disable(ccp2->vdds_csib);
+
+ return 0;
+}
+
+/*
+ * ccp2_mem_enable - Enable CCP2 memory interface.
+ * @ccp2: pointer to ISP CCP2 device
+ * @enable: enable/disable flag
+ */
+static void ccp2_mem_enable(struct isp_ccp2_device *ccp2, u8 enable)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
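+	/* The serial interface is disabled while reading frames from memory. */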
+ if (enable)
+ ccp2_if_enable(ccp2, 0);
+
+ /* Enable/Disable ccp2 interface in ccp2 mode */
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+ ISPCCP2_CTRL_MODE, enable ? ISPCCP2_CTRL_MODE : 0);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL,
+ ISPCCP2_LCM_CTRL_CHAN_EN,
+ enable ? ISPCCP2_LCM_CTRL_CHAN_EN : 0);
+}
+
+/*
+ * ccp2_phyif_config - Initialize CCP2 phy interface config
+ * @ccp2: Pointer to ISP CCP2 device
+ * @buscfg: CCP2 platform data
+ *
+ * Configure the CCP2 physical interface module from platform data.
+ *
+ * Returns -EIO if strobe is chosen in CSI1 mode, or 0 on success.
+ */
+static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
+ const struct isp_ccp2_cfg *buscfg)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 val;
+
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL) |
+ ISPCCP2_CTRL_MODE;
+ /* Data/strobe physical layer */
+ BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK,
+ buscfg->phy_layer);
+ BIT_SET(val, ISPCCP2_CTRL_IO_OUT_SEL_SHIFT,
+ ISPCCP2_CTRL_IO_OUT_SEL_MASK, buscfg->ccp2_mode);
+ BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK,
+ buscfg->strobe_clk_pol);
+ BIT_SET(val, ISPCCP2_CTRL_VP_CLK_POL_SHIFT,
+ ISPCCP2_CTRL_VP_CLK_POL_MASK, buscfg->vp_clk_pol);
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+
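+	/* Read the register back: if the CCP2 mode bit didn't stick the CCP2
+	 * bus isn't available, and data/strobe mode can't be used.
+	 */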
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+ if (!(val & ISPCCP2_CTRL_MODE)) {
+ if (buscfg->ccp2_mode == ISP_CCP2_MODE_CCP2)
+ dev_warn(isp->dev, "OMAP3 CCP2 bus not available\n");
+ if (buscfg->phy_layer == ISP_CCP2_PHY_DATA_STROBE)
+ /* Strobe mode requires CCP2 */
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * ccp2_vp_config - Initialize CCP2 video port interface.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @vpclk_div: Video port divisor
+ *
+ * Configure the CCP2 video port with the given clock divisor. The valid divisor
+ * values depend on the ISP revision:
+ *
+ * - revision 1.0 and 2.0 1 to 4
+ * - revision 15.0 1 to 65536
+ *
+ * The exact divisor value used might differ from the requested value, as ISP
+ * revision 15.0 represents the divisor as 65536 divided by an integer.
+ */
+static void ccp2_vp_config(struct isp_ccp2_device *ccp2,
+ unsigned int vpclk_div)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 val;
+
+ /* ISPCCP2_CTRL Video port */
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+ val |= ISPCCP2_CTRL_VP_ONLY_EN; /* Disable the memory write port */
+
+ if (isp->revision == ISP_REVISION_15_0) {
+ vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 65536);
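+		/* The divisor field stores 65536 divided by the divisor,
+		 * capped at the field maximum.
+		 */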
+ vpclk_div = min(ISPCCP2_VPCLK_FRACDIV / vpclk_div, 65535U);
+ BIT_SET(val, ISPCCP2_CTRL_VPCLK_DIV_SHIFT,
+ ISPCCP2_CTRL_VPCLK_DIV_MASK, vpclk_div);
+ } else {
+ vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 4);
+ BIT_SET(val, ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT,
+ ISPCCP2_CTRL_VP_OUT_CTRL_MASK, vpclk_div - 1);
+ }
+
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+}
+
+/*
+ * ccp2_lcx_config - Initialize CCP2 logical channel interface.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @config: Pointer to ISP LCx config structure.
+ *
+ * This analyzes the parameters passed through the interface configuration
+ * and programs the CSI1/CCP2 logical channel accordingly.
+ */
+static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
+ struct isp_interface_lcx_config *config)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 val, format;
+
+ switch (config->format) {
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP;
+ break;
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ default:
+ format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP; /* RAW10+VP */
+ break;
+ }
+ /* ISPCCP2_LCx_CTRL logical channel #0 */
+ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0))
+ | (ISPCCP2_LCx_CTRL_REGION_EN); /* Region */
+
+ if (isp->revision == ISP_REVISION_15_0) {
+ /* CRC */
+ BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0,
+ ISPCCP2_LCx_CTRL_CRC_MASK,
+ config->crc);
+ /* Format = RAW10+VP or RAW8+DPCM10+VP*/
+ BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0,
+ ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0, format);
+ } else {
+ BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT,
+ ISPCCP2_LCx_CTRL_CRC_MASK,
+ config->crc);
+
+ BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT,
+ ISPCCP2_LCx_CTRL_FORMAT_MASK, format);
+ }
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0));
+
+ /* ISPCCP2_DAT_START for logical channel #0 */
+ isp_reg_writel(isp, config->data_start << ISPCCP2_LCx_DAT_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_START(0));
+
+ /* ISPCCP2_DAT_SIZE for logical channel #0 */
+ isp_reg_writel(isp, config->data_size << ISPCCP2_LCx_DAT_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_SIZE(0));
+
+ /* Enable error IRQs for logical channel #0 */
+ val = ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
+
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQSTATUS);
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQENABLE, val);
+}
+
+/*
+ * ccp2_if_configure - Configure ccp2 with data from sensor
+ * @ccp2: Pointer to ISP CCP2 device
+ *
+ * Return 0 on success or a negative error code
+ */
+static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+ const struct isp_bus_cfg *buscfg;
+ struct v4l2_mbus_framefmt *format;
+ struct media_pad *pad;
+ struct v4l2_subdev *sensor;
+ u32 lines = 0;
+ int ret;
+
+ ccp2_pwr_cfg(ccp2);
+
+ pad = media_entity_remote_pad(&ccp2->pads[CCP2_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+
+ ret = ccp2_phyif_config(ccp2, &buscfg->bus.ccp2);
+ if (ret < 0)
+ return ret;
+
+ ccp2_vp_config(ccp2, buscfg->bus.ccp2.vpclk_div + 1);
+
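+	/* Ask the sensor how many lines must be skipped at the top of the
+	 * frame; they are used as the data window start offset below.
+	 */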
+ v4l2_subdev_call(sensor, sensor, g_skip_top_lines, &lines);
+
+ format = &ccp2->formats[CCP2_PAD_SINK];
+
+ ccp2->if_cfg.data_start = lines;
+ ccp2->if_cfg.crc = buscfg->bus.ccp2.crc;
+ ccp2->if_cfg.format = format->code;
+ ccp2->if_cfg.data_size = format->height;
+
+ ccp2_lcx_config(ccp2, &ccp2->if_cfg);
+
+ return 0;
+}
+
+static int ccp2_adjust_bandwidth(struct isp_ccp2_device *ccp2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccp2);
+ const struct v4l2_mbus_framefmt *ofmt = &ccp2->formats[CCP2_PAD_SOURCE];
+ unsigned long l3_ick = pipe->l3_ick;
+ struct v4l2_fract *timeperframe;
+ unsigned int vpclk_div = 2;
+ unsigned int value;
+ u64 bound;
+ u64 area;
+
+ /* Compute the minimum clock divisor, based on the pipeline maximum
+ * data rate. This is an absolute lower bound if we don't want SBL
+ * overflows, so round the value up.
+ */
+ vpclk_div = max_t(unsigned int, DIV_ROUND_UP(l3_ick, pipe->max_rate),
+ vpclk_div);
+
+ /* Compute the maximum clock divisor, based on the requested frame rate.
+ * This is a soft lower bound to achieve a frame rate equal or higher
+ * than the requested value, so round the value down.
+ */
+ timeperframe = &pipe->max_timeperframe;
+
+ if (timeperframe->numerator) {
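+		/* Pixel rate required by the frame rate limit, clamped to the
+		 * L3 clock rate and then turned into a clock divisor.
+		 */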
+ area = ofmt->width * ofmt->height;
+ bound = div_u64(area * timeperframe->denominator,
+ timeperframe->numerator);
+ value = min_t(u64, bound, l3_ick);
+ vpclk_div = max_t(unsigned int, l3_ick / value, vpclk_div);
+ }
+
+ dev_dbg(isp->dev, "%s: minimum clock divisor = %u\n", __func__,
+ vpclk_div);
+
+ return vpclk_div;
+}
+
+/*
+ * ccp2_mem_configure - Initialize CCP2 memory input/output interface
+ * @ccp2: Pointer to ISP CCP2 device
+ * @config: Pointer to ISP mem interface config structure
+ *
+ * This will analyze the parameters passed by the interface config
+ * structure, and configure the respective registers for proper
+ * CSI1/CCP2 memory input.
+ */
+static void ccp2_mem_configure(struct isp_ccp2_device *ccp2,
+ struct isp_interface_mem_config *config)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+ u32 sink_pixcode = ccp2->formats[CCP2_PAD_SINK].code;
+ u32 source_pixcode = ccp2->formats[CCP2_PAD_SOURCE].code;
+ unsigned int dpcm_decompress = 0;
+ u32 val, hwords;
+
+ if (sink_pixcode != source_pixcode &&
+ sink_pixcode == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
+ dpcm_decompress = 1;
+
+ ccp2_pwr_cfg(ccp2);
+
+ /* Hsize, Skip */
+ isp_reg_writel(isp, ISPCCP2_LCM_HSIZE_SKIP_MIN |
+ (config->hsize_count << ISPCCP2_LCM_HSIZE_SHIFT),
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_HSIZE);
+
+ /* Vsize, no. of lines */
+ isp_reg_writel(isp, config->vsize_count << ISPCCP2_LCM_VSIZE_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_VSIZE);
+
+ if (ccp2->video_in.bpl_padding == 0)
+ config->src_ofst = 0;
+ else
+ config->src_ofst = ccp2->video_in.bpl_value;
+
+ isp_reg_writel(isp, config->src_ofst, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LCM_SRC_OFST);
+
+ /* Source and Destination formats */
+ val = ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 <<
+ ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT;
+
+ if (dpcm_decompress) {
+ /* source format is RAW8 */
+ val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 <<
+ ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
+
+ /* RAW8 + DPCM10 - simple predictor */
+ val |= ISPCCP2_LCM_CTRL_SRC_DPCM_PRED;
+
+ /* enable source DPCM decompression */
+ val |= ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 <<
+ ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT;
+ } else {
+ /* source format is RAW10 */
+ val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 <<
+ ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
+ }
+
+ /* Burst size to 32x64 */
+ val |= ISPCCP2_LCM_CTRL_BURST_SIZE_32X <<
+ ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT;
+
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL);
+
+ /* Prefetch setup */
+ if (dpcm_decompress)
+ hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
+ config->hsize_count) >> 3;
+ else
+ hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
+ config->hsize_count) >> 2;
+
+ isp_reg_writel(isp, hwords << ISPCCP2_LCM_PREFETCH_SHIFT,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_PREFETCH);
+
+ /* Video port */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+ ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE);
+ ccp2_vp_config(ccp2, ccp2_adjust_bandwidth(ccp2));
+
+ /* Clear LCM interrupts */
+ isp_reg_writel(isp, ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ |
+ ISPCCP2_LCM_IRQSTATUS_EOF_IRQ,
+ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQSTATUS);
+
+ /* Enable LCM interrupts */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQENABLE,
+ ISPCCP2_LCM_IRQSTATUS_EOF_IRQ |
+ ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ);
+}
+
+/*
+ * ccp2_set_inaddr - Set the memory address of the input frame
+ * @ccp2: Pointer to ISP CCP2 device
+ * @addr: 32-bit memory address aligned on a 32-byte boundary
+ *
+ * Configures the memory address from which the input frame is to be read.
+ */
+static void ccp2_set_inaddr(struct isp_ccp2_device *ccp2, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(ccp2);
+
+ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_SRC_ADDR);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+ struct isp_buffer *buffer;
+
+ buffer = omap3isp_video_buffer_next(&ccp2->video_in);
+ if (buffer != NULL)
+ ccp2_set_inaddr(ccp2, buffer->dma);
+
+ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+
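+	/* In single-shot mode, restart the pipeline as soon as all the modules
+	 * in it are ready to process the next frame.
+	 */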
+ if (ccp2->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
+ if (isp_pipeline_ready(pipe))
+ omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_SINGLESHOT);
+ }
+}
+
+/*
+ * omap3isp_ccp2_isr - Handle ISP CCP2 interrupts
+ * @ccp2: Pointer to ISP CCP2 device
+ *
+ * This will handle the CCP2 interrupts
+ */
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccp2);
+ static const u32 ISPCCP2_LC01_ERROR =
+ ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
+ ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
+ u32 lcx_irqstatus, lcm_irqstatus;
+
+ /* First clear the interrupts */
+ lcx_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LC01_IRQSTATUS);
+ isp_reg_writel(isp, lcx_irqstatus, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LC01_IRQSTATUS);
+
+ lcm_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LCM_IRQSTATUS);
+ isp_reg_writel(isp, lcm_irqstatus, OMAP3_ISP_IOMEM_CCP2,
+ ISPCCP2_LCM_IRQSTATUS);
+ /* Errors */
+ if (lcx_irqstatus & ISPCCP2_LC01_ERROR) {
+ pipe->error = true;
+ dev_dbg(isp->dev, "CCP2 err:%x\n", lcx_irqstatus);
+ return;
+ }
+
+ if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ) {
+ pipe->error = true;
+ dev_dbg(isp->dev, "CCP2 OCP err:%x\n", lcm_irqstatus);
+ }
+
+ if (omap3isp_module_sync_is_stopping(&ccp2->wait, &ccp2->stopping))
+ return;
+
+ /* Handle queued buffers on frame end interrupts */
+ if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
+ ccp2_isr_buffer(ccp2);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static const unsigned int ccp2_fmts[] = {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+};
+
+/*
+ * __ccp2_get_format - helper function for getting ccp2 format
+ * @ccp2 : Pointer to ISP CCP2 device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad : pad number
+ * @which : wanted subdev format
+ * return format structure or NULL on error
+ */
+static struct v4l2_mbus_framefmt *
+__ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&ccp2->subdev, cfg, pad);
+ else
+ return &ccp2->formats[pad];
+}
+
+/*
+ * ccp2_try_format - Handle try format by pad subdev method
+ * @ccp2 : Pointer to ISP CCP2 device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad : pad num
+ * @fmt : pointer to v4l2 mbus format structure
+ * @which : wanted subdev format
+ */
+static void ccp2_try_format(struct isp_ccp2_device *ccp2,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ switch (pad) {
+ case CCP2_PAD_SINK:
+ if (fmt->code != MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ if (ccp2->input == CCP2_INPUT_SENSOR) {
+ fmt->width = clamp_t(u32, fmt->width,
+ ISPCCP2_DAT_START_MIN,
+ ISPCCP2_DAT_START_MAX);
+ fmt->height = clamp_t(u32, fmt->height,
+ ISPCCP2_DAT_SIZE_MIN,
+ ISPCCP2_DAT_SIZE_MAX);
+ } else if (ccp2->input == CCP2_INPUT_MEMORY) {
+ fmt->width = clamp_t(u32, fmt->width,
+ ISPCCP2_LCM_HSIZE_COUNT_MIN,
+ ISPCCP2_LCM_HSIZE_COUNT_MAX);
+ fmt->height = clamp_t(u32, fmt->height,
+ ISPCCP2_LCM_VSIZE_MIN,
+ ISPCCP2_LCM_VSIZE_MAX);
+ }
+ break;
+
+ case CCP2_PAD_SOURCE:
+		/* Source format - copy the sink format and change the pixel
+		 * code to SGRBG10_1X10, as CCP2 write to memory isn't
+		 * supported. This should be revisited once the CCP2 write to
+		 * memory feature is added.
+		 */
+ format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/*
+ * ccp2_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == CCP2_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(ccp2_fmts))
+ return -EINVAL;
+
+ code->code = ccp2_fmts[code->index];
+ } else {
+ if (code->index != 0)
+ return -EINVAL;
+
+ format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK,
+ code->which);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ccp2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ccp2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt : pointer to v4l2 subdev format structure
+ * returns zero
+ */
+static int ccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ccp2_try_format(ccp2, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CCP2_PAD_SINK) {
+ format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ ccp2_try_format(ccp2, cfg, CCP2_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * ccp2_init_formats - Initialize formats on all pads
+ * @sd: ISP CCP2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CCP2_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ccp2_set_format(sd, fh ? fh->pad : NULL, &format);
+
+ return 0;
+}
+
+/*
+ * ccp2_s_stream - Enable/Disable streaming on ccp2 subdev
+ * @sd : pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ * return zero
+ */
+static int ccp2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = to_isp_device(ccp2);
+ struct device *dev = to_device(ccp2);
+ int ret;
+
+ if (ccp2->state == ISP_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+ atomic_set(&ccp2->stopping, 0);
+ }
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (ccp2->phy) {
+ ret = omap3isp_csiphy_acquire(ccp2->phy, &sd->entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ ccp2_if_configure(ccp2);
+ ccp2_print_status(ccp2);
+
+ /* Enable CSI1/CCP2 interface */
+ ret = ccp2_if_enable(ccp2, 1);
+ if (ret < 0) {
+ if (ccp2->phy)
+ omap3isp_csiphy_release(ccp2->phy);
+ return ret;
+ }
+ break;
+
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ if (ccp2->state != ISP_PIPELINE_STREAM_SINGLESHOT) {
+ struct v4l2_mbus_framefmt *format;
+
+ format = &ccp2->formats[CCP2_PAD_SINK];
+
+ ccp2->mem_cfg.hsize_count = format->width;
+ ccp2->mem_cfg.vsize_count = format->height;
+ ccp2->mem_cfg.src_ofst = 0;
+
+ ccp2_mem_configure(ccp2, &ccp2->mem_cfg);
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI1_READ);
+ ccp2_print_status(ccp2);
+ }
+ ccp2_mem_enable(ccp2, 1);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ if (omap3isp_module_sync_idle(&sd->entity, &ccp2->wait,
+ &ccp2->stopping))
+ dev_dbg(dev, "%s: module stop timeout.\n", sd->name);
+ if (ccp2->input == CCP2_INPUT_MEMORY) {
+ ccp2_mem_enable(ccp2, 0);
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI1_READ);
+ } else if (ccp2->input == CCP2_INPUT_SENSOR) {
+ /* Disable CSI1/CCP2 interface */
+ ccp2_if_enable(ccp2, 0);
+ if (ccp2->phy)
+ omap3isp_csiphy_release(ccp2->phy);
+ }
+ break;
+ }
+
+ ccp2->state = enable;
+ return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops ccp2_sd_video_ops = {
+ .s_stream = ccp2_s_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops ccp2_sd_pad_ops = {
+ .enum_mbus_code = ccp2_enum_mbus_code,
+ .enum_frame_size = ccp2_enum_frame_size,
+ .get_fmt = ccp2_get_format,
+ .set_fmt = ccp2_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops ccp2_sd_ops = {
+ .video = &ccp2_sd_video_ops,
+ .pad = &ccp2_sd_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops ccp2_sd_internal_ops = {
+ .open = ccp2_init_formats,
+};
+
+/* --------------------------------------------------------------------------
+ * ISP ccp2 video device node
+ */
+
+/*
+ * ccp2_video_queue - Queue video buffer.
+ * @video : Pointer to isp video structure
+ * @buffer: Pointer to isp_buffer structure
+ * return -EIO or zero on success
+ */
+static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+ struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
+
+ ccp2_set_inaddr(ccp2, buffer->dma);
+ return 0;
+}
+
+static const struct isp_video_operations ccp2_video_ops = {
+ .queue = ccp2_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ccp2_link_setup - Setup ccp2 connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pad at the local end of the link
+ * @remote : Pad at the remote end of the link
+ * @flags : Link flags
+ * return -EINVAL on error or zero on success
+ */
+static int ccp2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+ unsigned int index = local->index;
+
+ /* FIXME: this is actually a hack! */
+ if (is_media_entity_v4l2_subdev(remote->entity))
+ index |= 2 << 16;
+
+ switch (index) {
+ case CCP2_PAD_SINK:
+ /* read from memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccp2->input == CCP2_INPUT_SENSOR)
+ return -EBUSY;
+ ccp2->input = CCP2_INPUT_MEMORY;
+ } else {
+ if (ccp2->input == CCP2_INPUT_MEMORY)
+ ccp2->input = CCP2_INPUT_NONE;
+ }
+ break;
+
+ case CCP2_PAD_SINK | 2 << 16:
+ /* read from sensor/phy */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ccp2->input == CCP2_INPUT_MEMORY)
+ return -EBUSY;
+ ccp2->input = CCP2_INPUT_SENSOR;
+ } else {
+ if (ccp2->input == CCP2_INPUT_SENSOR)
+ ccp2->input = CCP2_INPUT_NONE;
+		}
+		break;
+
+ case CCP2_PAD_SOURCE | 2 << 16:
+ /* write to video port/ccdc */
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ ccp2->output = CCP2_OUTPUT_CCDC;
+ else
+ ccp2->output = CCP2_OUTPUT_NONE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ccp2_media_ops = {
+ .link_setup = ccp2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * omap3isp_ccp2_unregister_entities - Unregister media entities: subdev
+ * @ccp2: Pointer to ISP CCP2 device
+ */
+void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2)
+{
+ v4l2_device_unregister_subdev(&ccp2->subdev);
+ omap3isp_video_unregister(&ccp2->video_in);
+}
+
+/*
+ * omap3isp_ccp2_register_entities - Register the subdev and video node
+ * @ccp2: Pointer to ISP CCP2 device
+ * @vdev: Pointer to v4l device
+ * return negative error code or zero on success
+ */
+
+int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ccp2->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&ccp2->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_ccp2_unregister_entities(ccp2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP ccp2 initialisation and cleanup
+ */
+
+/*
+ * ccp2_init_entities - Initialize ccp2 subdev and media entity.
+ * @ccp2: Pointer to ISP CCP2 device
+ * return negative error code or zero on success
+ */
+static int ccp2_init_entities(struct isp_ccp2_device *ccp2)
+{
+ struct v4l2_subdev *sd = &ccp2->subdev;
+ struct media_pad *pads = ccp2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ccp2->input = CCP2_INPUT_NONE;
+ ccp2->output = CCP2_OUTPUT_NONE;
+
+ v4l2_subdev_init(sd, &ccp2_sd_ops);
+ sd->internal_ops = &ccp2_sd_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP CCP2", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, ccp2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ pads[CCP2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ccp2_media_ops;
+ ret = media_entity_pads_init(me, CCP2_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ ccp2_init_formats(sd, NULL);
+
+ /*
+ * The CCP2 has weird line alignment requirements, possibly caused by
+ * DPCM8 decompression. Line length for data read from memory must be a
+ * multiple of 128 bits (16 bytes) in continuous mode (when no padding
+ * is present at end of lines). Additionally, if padding is used, the
+ * padded line length must be a multiple of 32 bytes. To simplify the
+	 * implementation we use a fixed 32-byte alignment regardless of the
+	 * input format and width. If strict 128-bit alignment support is
+	 * required, ispvideo will need to be made aware of this special dual
+	 * alignment requirement.
+ */
+ ccp2->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ccp2->video_in.bpl_alignment = 32;
+ ccp2->video_in.bpl_max = 0xffffffe0;
+ ccp2->video_in.isp = to_isp_device(ccp2);
+ ccp2->video_in.ops = &ccp2_video_ops;
+ ccp2->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+ ret = omap3isp_video_init(&ccp2->video_in, "CCP2");
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ media_entity_cleanup(&ccp2->subdev.entity);
+ return ret;
+}
+
+/*
+ * omap3isp_ccp2_init - CCP2 initialization.
+ * @isp : Pointer to ISP device
+ * return negative error code or zero on success
+ */
+int omap3isp_ccp2_init(struct isp_device *isp)
+{
+ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+ int ret;
+
+ init_waitqueue_head(&ccp2->wait);
+
+ /*
+ * On the OMAP34xx the CSI1 receiver is operated in the CSIb IO
+ * complex, which is powered by vdds_csib power rail. Hence the
+ * request for the regulator.
+ *
+ * On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
+ * the CSI2c or CSI2a receivers. The PHY then needs to be explicitly
+ * configured.
+ *
+ * TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
+ */
+ if (isp->revision == ISP_REVISION_2_0) {
+ ccp2->vdds_csib = devm_regulator_get(isp->dev, "vdds_csib");
+ if (IS_ERR(ccp2->vdds_csib)) {
+ if (PTR_ERR(ccp2->vdds_csib) == -EPROBE_DEFER) {
+ dev_dbg(isp->dev,
+ "Can't get regulator vdds_csib, deferring probing\n");
+ return -EPROBE_DEFER;
+ }
+ dev_dbg(isp->dev,
+ "Could not get regulator vdds_csib\n");
+ ccp2->vdds_csib = NULL;
+ }
+ ccp2->phy = &isp->isp_csiphy2;
+ } else if (isp->revision == ISP_REVISION_15_0) {
+ ccp2->phy = &isp->isp_csiphy1;
+ }
+
+ ret = ccp2_init_entities(ccp2);
+ if (ret < 0)
+ return ret;
+
+ ccp2_reset(ccp2);
+ return 0;
+}
+
+/*
+ * omap3isp_ccp2_cleanup - CCP2 un-initialization
+ * @isp : Pointer to ISP device
+ */
+void omap3isp_ccp2_cleanup(struct isp_device *isp)
+{
+ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+
+ omap3isp_video_cleanup(&ccp2->video_in);
+ media_entity_cleanup(&ccp2->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/ispccp2.h b/drivers/media/platform/omap3isp/ispccp2.h
new file mode 100644
index 000000000..4662bffa7
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccp2.h
@@ -0,0 +1,88 @@
+/*
+ * ispccp2.h
+ *
+ * TI OMAP3 ISP - CCP2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_CCP2_H
+#define OMAP3_ISP_CCP2_H
+
+#include <linux/videodev2.h>
+
+struct isp_device;
+struct isp_csiphy;
+
+/* Sink and source ccp2 pads */
+#define CCP2_PAD_SINK 0
+#define CCP2_PAD_SOURCE 1
+#define CCP2_PADS_NUM 2
+
+/* CCP2 input media entity */
+enum ccp2_input_entity {
+ CCP2_INPUT_NONE,
+ CCP2_INPUT_SENSOR,
+ CCP2_INPUT_MEMORY,
+};
+
+/* CCP2 output media entity */
+enum ccp2_output_entity {
+ CCP2_OUTPUT_NONE,
+ CCP2_OUTPUT_CCDC,
+ CCP2_OUTPUT_MEMORY,
+};
+
+
+/* Logical channel configuration */
+struct isp_interface_lcx_config {
+ int crc;
+ u32 data_start;
+ u32 data_size;
+ u32 format;
+};
+
+/* Memory channel configuration */
+struct isp_interface_mem_config {
+ u32 dst_port;
+ u32 vsize_count;
+ u32 hsize_count;
+ u32 src_ofst;
+ u32 dst_ofst;
+};
+
+/* CCP2 device */
+struct isp_ccp2_device {
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt formats[CCP2_PADS_NUM];
+ struct media_pad pads[CCP2_PADS_NUM];
+
+ enum ccp2_input_entity input;
+ enum ccp2_output_entity output;
+ struct isp_interface_lcx_config if_cfg;
+ struct isp_interface_mem_config mem_cfg;
+ struct isp_video video_in;
+ struct isp_csiphy *phy;
+ struct regulator *vdds_csib;
+ enum isp_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+/* Function declarations */
+int omap3isp_ccp2_init(struct isp_device *isp);
+void omap3isp_ccp2_cleanup(struct isp_device *isp);
+int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+ struct v4l2_device *vdev);
+void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2);
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2);
+
+#endif /* OMAP3_ISP_CCP2_H */
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
new file mode 100644
index 000000000..e45292a1b
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -0,0 +1,1318 @@
+/*
+ * ispcsi2.c
+ *
+ * TI OMAP3 ISP - CSI2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/mm.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispcsi2.h"
+
+/*
+ * csi2_if_enable - Enable/disable the CSI2 receiver interface
+ * @isp: Pointer to ISP device
+ * @csi2: Pointer to ISP CSI2 device
+ * @enable: enable flag
+ */
+static void csi2_if_enable(struct isp_device *isp,
+ struct isp_csi2_device *csi2, u8 enable)
+{
+ struct isp_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
+
+ isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_CTRL, ISPCSI2_CTRL_IF_EN,
+ enable ? ISPCSI2_CTRL_IF_EN : 0);
+
+ currctrl->if_enable = enable;
+}
+
+/*
+ * csi2_recv_config - CSI2 receiver module configuration
+ * @isp: Pointer to ISP device
+ * @csi2: Pointer to ISP CSI2 device
+ * @currctrl: Pointer to the isp_csi2_ctrl_cfg structure
+ */
+static void csi2_recv_config(struct isp_device *isp,
+ struct isp_csi2_device *csi2,
+ struct isp_csi2_ctrl_cfg *currctrl)
+{
+ u32 reg;
+
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTRL);
+
+ if (currctrl->frame_mode)
+ reg |= ISPCSI2_CTRL_FRAME;
+ else
+ reg &= ~ISPCSI2_CTRL_FRAME;
+
+ if (currctrl->vp_clk_enable)
+ reg |= ISPCSI2_CTRL_VP_CLK_EN;
+ else
+ reg &= ~ISPCSI2_CTRL_VP_CLK_EN;
+
+ if (currctrl->vp_only_enable)
+ reg |= ISPCSI2_CTRL_VP_ONLY_EN;
+ else
+ reg &= ~ISPCSI2_CTRL_VP_ONLY_EN;
+
+ reg &= ~ISPCSI2_CTRL_VP_OUT_CTRL_MASK;
+ reg |= currctrl->vp_out_ctrl << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT;
+
+ if (currctrl->ecc_enable)
+ reg |= ISPCSI2_CTRL_ECC_EN;
+ else
+ reg &= ~ISPCSI2_CTRL_ECC_EN;
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTRL);
+}
+
+static const unsigned int csi2_input_fmts[] = {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+};
+
+/* Setting the format on the CSI2 requires a mapping function that takes
+ * the following inputs:
+ * - 3 different formats (at this time)
+ * - 2 destinations (mem, vp+mem) (vp only handled separately)
+ * - 2 decompression options (on, off)
+ * - 2 ISP revisions (certain formats must be handled differently on OMAP3630)
+ * Output should be CSI2 frame format code
+ * Array indices as follows: [format][dest][decompr][is_3630]
+ * Not all combinations are valid. 0 means invalid.
+ */
+static const u16 __csi2_fmt_map[3][2][2][2] = {
+ /* RAW10 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW10_EXP16, CSI2_PIX_FMT_RAW10_EXP16 },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW10_EXP16_VP,
+ CSI2_PIX_FMT_RAW10_EXP16_VP },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ },
+ /* RAW10 DPCM8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW8, CSI2_USERDEF_8BIT_DATA1 },
+ /* DPCM decompression */
+ { CSI2_PIX_FMT_RAW8_DPCM10_EXP16,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10 },
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_RAW8_VP,
+ CSI2_PIX_FMT_RAW8_VP },
+ /* DPCM decompression */
+ { CSI2_PIX_FMT_RAW8_DPCM10_VP,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP },
+ },
+ },
+ /* YUYV8 2X8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_YUV422_8BIT,
+ CSI2_PIX_FMT_YUV422_8BIT },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ { CSI2_PIX_FMT_YUV422_8BIT_VP,
+ CSI2_PIX_FMT_YUV422_8BIT_VP },
+ /* DPCM decompression */
+ { 0, 0 },
+ },
+ },
+};
+
+/*
+ * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
+ * @csi2: ISP CSI2 device
+ *
+ * Returns CSI2 physical format id
+ */
+static u16 csi2_ctx_map_format(struct isp_csi2_device *csi2)
+{
+ const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
+ int fmtidx, destidx, is_3630;
+
+ switch (fmt->code) {
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ fmtidx = 0;
+ break;
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ fmtidx = 1;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ fmtidx = 2;
+ break;
+ default:
+ WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
+ fmt->code);
+ return 0;
+ }
+
+ if (!(csi2->output & CSI2_OUTPUT_CCDC) &&
+ !(csi2->output & CSI2_OUTPUT_MEMORY)) {
+ /* Neither output enabled is a valid combination */
+ return CSI2_PIX_FMT_OTHERS;
+ }
+
+ /* If we need to skip frames at the beginning of the stream disable the
+ * video port to avoid sending the skipped frames to the CCDC.
+ */
+ destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_CCDC);
+ is_3630 = csi2->isp->revision == ISP_REVISION_15_0;
+
+ return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress][is_3630];
+}
+
+/*
+ * csi2_set_outaddr - Set memory address to save output image
+ * @csi2: Pointer to ISP CSI2a device.
+ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void csi2_set_outaddr(struct isp_csi2_device *csi2, u32 addr)
+{
+ struct isp_device *isp = csi2->isp;
+ struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[0];
+
+ ctx->ping_addr = addr;
+ ctx->pong_addr = addr;
+ isp_reg_writel(isp, ctx->ping_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
+ isp_reg_writel(isp, ctx->pong_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
+}
+
+/*
+ * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
+ * be enabled by CSI2.
+ * @format_id: mapped format id
+ *
+ */
+static inline int is_usr_def_mapping(u32 format_id)
+{
+ return (format_id & 0x40) ? 1 : 0;
+}
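+
+/*
+ * Example (added for illustration): bit 6 (0x40) is set in the
+ * CSI2_USERDEF_8BIT_DATA1* format IDs defined in ispcsi2.h (0x40, 0x2c0,
+ * 0x340), so is_usr_def_mapping() returns 1 for those, while it returns 0
+ * for e.g. CSI2_PIX_FMT_RAW10_EXP16 (0xab).
+ */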
+
+/*
+ * csi2_ctx_enable - Enable or disable the specified CSI2 context
+ * @ctxnum: Context number, valid range is 0 to 7.
+ * @enable: Non-zero to enable the context, zero to disable it.
+ */
+static void csi2_ctx_enable(struct isp_device *isp,
+ struct isp_csi2_device *csi2, u8 ctxnum, u8 enable)
+{
+ struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
+ unsigned int skip = 0;
+ u32 reg;
+
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
+
+ if (enable) {
+ if (csi2->frame_skip)
+ skip = csi2->frame_skip;
+ else if (csi2->output & CSI2_OUTPUT_MEMORY)
+ skip = 1;
+
+ reg &= ~ISPCSI2_CTX_CTRL1_COUNT_MASK;
+ reg |= ISPCSI2_CTX_CTRL1_COUNT_UNLOCK
+ | (skip << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
+ | ISPCSI2_CTX_CTRL1_CTX_EN;
+ } else {
+ reg &= ~ISPCSI2_CTX_CTRL1_CTX_EN;
+ }
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
+ ctx->enabled = enable;
+}
+
+/*
+ * csi2_ctx_config - CSI2 context configuration.
+ * @ctx: context configuration
+ *
+ */
+static void csi2_ctx_config(struct isp_device *isp,
+ struct isp_csi2_device *csi2,
+ struct isp_csi2_ctx_cfg *ctx)
+{
+ u32 reg;
+
+ /* Set up CSI2_CTx_CTRL1 */
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
+
+ if (ctx->eof_enabled)
+ reg |= ISPCSI2_CTX_CTRL1_EOF_EN;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL1_EOF_EN;
+
+ if (ctx->eol_enabled)
+ reg |= ISPCSI2_CTX_CTRL1_EOL_EN;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL1_EOL_EN;
+
+ if (ctx->checksum_enabled)
+ reg |= ISPCSI2_CTX_CTRL1_CS_EN;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL1_CS_EN;
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
+
+ /* Set up CSI2_CTx_CTRL2 */
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
+
+ reg &= ~(ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK);
+ reg |= ctx->virtual_id << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
+
+ reg &= ~(ISPCSI2_CTX_CTRL2_FORMAT_MASK);
+ reg |= ctx->format_id << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT;
+
+ if (ctx->dpcm_decompress) {
+ if (ctx->dpcm_predictor)
+ reg |= ISPCSI2_CTX_CTRL2_DPCM_PRED;
+ else
+ reg &= ~ISPCSI2_CTX_CTRL2_DPCM_PRED;
+ }
+
+ if (is_usr_def_mapping(ctx->format_id)) {
+ reg &= ~ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK;
+ reg |= 2 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
+ }
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
+
+ /* Set up CSI2_CTx_CTRL3 */
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
+ reg &= ~(ISPCSI2_CTX_CTRL3_ALPHA_MASK);
+ reg |= (ctx->alpha << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT);
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
+
+ /* Set up CSI2_CTx_DAT_OFST */
+ reg = isp_reg_readl(isp, csi2->regs1,
+ ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
+ reg &= ~ISPCSI2_CTX_DAT_OFST_OFST_MASK;
+ reg |= ctx->data_offset << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT;
+ isp_reg_writel(isp, reg, csi2->regs1,
+ ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
+
+ isp_reg_writel(isp, ctx->ping_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
+
+ isp_reg_writel(isp, ctx->pong_addr,
+ csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
+}
+
+/*
+ * csi2_timing_config - CSI2 timing configuration.
+ * @timing: csi2_timing_cfg structure
+ */
+static void csi2_timing_config(struct isp_device *isp,
+ struct isp_csi2_device *csi2,
+ struct isp_csi2_timing_cfg *timing)
+{
+ u32 reg;
+
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_TIMING);
+
+ if (timing->force_rx_mode)
+ reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
+ else
+ reg &= ~ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
+
+ if (timing->stop_state_16x)
+ reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
+ else
+ reg &= ~ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
+
+ if (timing->stop_state_4x)
+ reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
+ else
+ reg &= ~ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
+
+ reg &= ~ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(timing->ionum);
+ reg |= timing->stop_state_counter <<
+ ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(timing->ionum);
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_TIMING);
+}
+
+/*
+ * csi2_irq_ctx_set - Enable or disable CSI2 context interrupts.
+ * @enable: Enable/disable CSI2 context interrupts
+ */
+static void csi2_irq_ctx_set(struct isp_device *isp,
+ struct isp_csi2_device *csi2, int enable)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ isp_reg_writel(isp, ISPCSI2_CTX_IRQSTATUS_FE_IRQ, csi2->regs1,
+ ISPCSI2_CTX_IRQSTATUS(i));
+ if (enable)
+ isp_reg_set(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
+ else
+ isp_reg_clr(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
+ }
+}
+
+/*
+ * csi2_irq_complexio1_set - Enable or disable CSI2 ComplexIO #1 interrupts.
+ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
+ */
+static void csi2_irq_complexio1_set(struct isp_device *isp,
+ struct isp_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT |
+ ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM5 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC5 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM4 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC4 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM3 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC3 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM2 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC2 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 |
+ ISPCSI2_PHY_IRQENABLE_STATEULPM1 |
+ ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 |
+ ISPCSI2_PHY_IRQENABLE_ERRESC1 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 |
+ ISPCSI2_PHY_IRQENABLE_ERRSOTHS1;
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
+ if (enable)
+ reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
+ else
+ reg = 0;
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
+}
+
+/*
+ * csi2_irq_status_set - Enable or disable CSI2 status interrupts.
+ * @enable: Enable/disable CSI2 Status interrupts
+ */
+static void csi2_irq_status_set(struct isp_device *isp,
+ struct isp_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
+ ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ |
+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ |
+ ISPCSI2_IRQSTATUS_CONTEXT(0);
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQSTATUS);
+ if (enable)
+ reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQENABLE);
+ else
+ reg = 0;
+
+ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQENABLE);
+}
+
+/*
+ * omap3isp_csi2_reset - Resets the CSI2 module.
+ *
+ * Must be called with the phy lock held.
+ *
+ * Returns 0 if successful, or -EBUSY if power command didn't respond.
+ */
+int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
+{
+ struct isp_device *isp = csi2->isp;
+ u8 soft_reset_retries = 0;
+ u32 reg;
+ int i;
+
+ if (!csi2->available)
+ return -ENODEV;
+
+ if (csi2->phy->entity)
+ return -EBUSY;
+
+ isp_reg_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+ ISPCSI2_SYSCONFIG_SOFT_RESET);
+
+ do {
+ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSSTATUS) &
+ ISPCSI2_SYSSTATUS_RESET_DONE;
+ if (reg == ISPCSI2_SYSSTATUS_RESET_DONE)
+ break;
+ soft_reset_retries++;
+ if (soft_reset_retries < 5)
+ udelay(100);
+ } while (soft_reset_retries < 5);
+
+ if (soft_reset_retries == 5) {
+ dev_err(isp->dev, "CSI2: Soft reset try count exceeded!\n");
+ return -EBUSY;
+ }
+
+ if (isp->revision == ISP_REVISION_15_0)
+ isp_reg_set(isp, csi2->regs1, ISPCSI2_PHY_CFG,
+ ISPCSI2_PHY_CFG_RESET_CTRL);
+
+ i = 100;
+ do {
+ reg = isp_reg_readl(isp, csi2->phy->phy_regs, ISPCSIPHY_REG1)
+ & ISPCSIPHY_REG1_RESET_DONE_CTRLCLK;
+ if (reg == ISPCSIPHY_REG1_RESET_DONE_CTRLCLK)
+ break;
+ udelay(100);
+ } while (--i > 0);
+
+ if (i == 0) {
+ dev_err(isp->dev,
+ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+ return -EBUSY;
+ }
+
+ if (isp->autoidle)
+ isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+ ISPCSI2_SYSCONFIG_AUTO_IDLE,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART |
+ ((isp->revision == ISP_REVISION_15_0) ?
+ ISPCSI2_SYSCONFIG_AUTO_IDLE : 0));
+ else
+ isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+ ISPCSI2_SYSCONFIG_AUTO_IDLE,
+ ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO);
+
+ return 0;
+}
+
+static int csi2_configure(struct isp_csi2_device *csi2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ const struct isp_bus_cfg *buscfg;
+ struct isp_device *isp = csi2->isp;
+ struct isp_csi2_timing_cfg *timing = &csi2->timing[0];
+ struct v4l2_subdev *sensor;
+ struct media_pad *pad;
+
+ /*
+	 * CSI2 fields that can be updated while the context or the
+	 * interface is enabled are currently not updated dynamically,
+	 * so we do not allow reconfiguration if either has been
+	 * enabled.
+ */
+ if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
+ return -EBUSY;
+
+ pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+
+ csi2->frame_skip = 0;
+ v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
+
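+	/*
+	 * Illustrative example (values assumed, not taken from the original
+	 * code): with l3_ick = 200 MHz and external_rate = 100 MHz, the
+	 * expression below evaluates to 200 / 100 - 1 = 1, and with
+	 * external_rate = 40 MHz it would be clamped to the maximum of 3.
+	 */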
+ csi2->ctrl.vp_out_ctrl =
+ clamp_t(unsigned int, pipe->l3_ick / pipe->external_rate - 1,
+ 1, 3);
+ dev_dbg(isp->dev, "%s: l3_ick %lu, external_rate %u, vp_out_ctrl %u\n",
+ __func__, pipe->l3_ick, pipe->external_rate,
+ csi2->ctrl.vp_out_ctrl);
+ csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE;
+ csi2->ctrl.ecc_enable = buscfg->bus.csi2.crc;
+
+ timing->ionum = 1;
+ timing->force_rx_mode = 1;
+ timing->stop_state_16x = 1;
+ timing->stop_state_4x = 1;
+ timing->stop_state_counter = 0x1FF;
+
+ /*
+ * The CSI2 receiver can't do any format conversion except DPCM
+ * decompression, so every set_format call configures both pads
+ * and enables DPCM decompression as a special case:
+ */
+ if (csi2->formats[CSI2_PAD_SINK].code !=
+ csi2->formats[CSI2_PAD_SOURCE].code)
+ csi2->dpcm_decompress = true;
+ else
+ csi2->dpcm_decompress = false;
+
+ csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);
+
+ if (csi2->video_out.bpl_padding == 0)
+ csi2->contexts[0].data_offset = 0;
+ else
+ csi2->contexts[0].data_offset = csi2->video_out.bpl_value;
+
+ /*
+ * Enable end of frame and end of line signals generation for
+ * context 0. These signals are generated from CSI2 receiver to
+ * qualify the last pixel of a frame and the last pixel of a line.
+ * Without enabling the signals CSI2 receiver writes data to memory
+ * beyond buffer size and/or data line offset is not handled correctly.
+ */
+ csi2->contexts[0].eof_enabled = 1;
+ csi2->contexts[0].eol_enabled = 1;
+
+ csi2_irq_complexio1_set(isp, csi2, 1);
+ csi2_irq_ctx_set(isp, csi2, 1);
+ csi2_irq_status_set(isp, csi2, 1);
+
+ /* Set configuration (timings, format and links) */
+ csi2_timing_config(isp, csi2, timing);
+ csi2_recv_config(isp, csi2, &csi2->ctrl);
+ csi2_ctx_config(isp, csi2, &csi2->contexts[0]);
+
+ return 0;
+}
+
+/*
+ * csi2_print_status - Prints CSI2 debug information.
+ */
+#define CSI2_PRINT_REGISTER(isp, regs, name)\
+ dev_dbg(isp->dev, "###CSI2 " #name "=0x%08x\n", \
+ isp_reg_readl(isp, regs, ISPCSI2_##name))
+
+static void csi2_print_status(struct isp_csi2_device *csi2)
+{
+ struct isp_device *isp = csi2->isp;
+
+ if (!csi2->available)
+ return;
+
+ dev_dbg(isp->dev, "-------------CSI2 Register dump-------------\n");
+
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSCONFIG);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSSTATUS);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQENABLE);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQSTATUS);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTRL);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_H);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, GNQ);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_CFG);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQSTATUS);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, SHORT_PACKET);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQENABLE);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_P);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, TIMING);
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL1(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL2(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_OFST(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PING_ADDR(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PONG_ADDR(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQENABLE(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQSTATUS(0));
+ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL3(0));
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * csi2_isr_buffer - Handle buffer completion at end-of-frame
+ * when writing to memory.
+ */
+static void csi2_isr_buffer(struct isp_csi2_device *csi2)
+{
+ struct isp_device *isp = csi2->isp;
+ struct isp_buffer *buffer;
+
+ csi2_ctx_enable(isp, csi2, 0, 0);
+
+ buffer = omap3isp_video_buffer_next(&csi2->video_out);
+
+ /*
+ * Let video queue operation restart engine if there is an underrun
+ * condition.
+ */
+ if (buffer == NULL)
+ return;
+
+ csi2_set_outaddr(csi2, buffer->dma);
+ csi2_ctx_enable(isp, csi2, 0, 1);
+}
+
+static void csi2_isr_ctx(struct isp_csi2_device *csi2,
+ struct isp_csi2_ctx_cfg *ctx)
+{
+ struct isp_device *isp = csi2->isp;
+ unsigned int n = ctx->ctxnum;
+ u32 status;
+
+ status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
+ isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
+
+ if (!(status & ISPCSI2_CTX_IRQSTATUS_FE_IRQ))
+ return;
+
+ /* Skip interrupts until we reach the frame skip count. The CSI2 will be
+ * automatically disabled, as the frame skip count has been programmed
+ * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
+ *
+ * It would have been nice to rely on the FRAME_NUMBER interrupt instead
+ * but it turned out that the interrupt is only generated when the CSI2
+ * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
+ * correctly and reaches 0 when data is forwarded to the video port only
+ * but no interrupt arrives). Maybe a CSI2 hardware bug.
+ */
+ if (csi2->frame_skip) {
+ csi2->frame_skip--;
+ if (csi2->frame_skip == 0) {
+ ctx->format_id = csi2_ctx_map_format(csi2);
+ csi2_ctx_config(isp, csi2, ctx);
+ csi2_ctx_enable(isp, csi2, n, 1);
+ }
+ return;
+ }
+
+ if (csi2->output & CSI2_OUTPUT_MEMORY)
+ csi2_isr_buffer(csi2);
+}
+
+/*
+ * omap3isp_csi2_isr - CSI2 interrupt handling.
+ */
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ u32 csi2_irqstatus, cpxio1_irqstatus;
+ struct isp_device *isp = csi2->isp;
+
+ if (!csi2->available)
+ return;
+
+ csi2_irqstatus = isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQSTATUS);
+ isp_reg_writel(isp, csi2_irqstatus, csi2->regs1, ISPCSI2_IRQSTATUS);
+
+ /* Failure Cases */
+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ) {
+ cpxio1_irqstatus = isp_reg_readl(isp, csi2->regs1,
+ ISPCSI2_PHY_IRQSTATUS);
+ isp_reg_writel(isp, cpxio1_irqstatus,
+ csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
+ dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ %x\n",
+ cpxio1_irqstatus);
+ pipe->error = true;
+ }
+
+ if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
+ dev_dbg(isp->dev,
+ "CSI2 Err: OCP:%d, Short_pack:%d, ECC:%d, CPXIO2:%d, FIFO_OVF:%d,\n",
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0,
+ (csi2_irqstatus &
+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ? 1 : 0);
+ pipe->error = true;
+ }
+
+ if (omap3isp_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
+ return;
+
+ /* Successful cases */
+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0))
+ csi2_isr_ctx(csi2, &csi2->contexts[0]);
+
+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ)
+ dev_dbg(isp->dev, "CSI2: ECC correction done\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+/*
+ * csi2_queue - Queues the first buffer when using memory output
+ * @video: The video node
+ * @buffer: buffer to queue
+ */
+static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+ struct isp_device *isp = video->isp;
+ struct isp_csi2_device *csi2 = &isp->isp_csi2a;
+
+ csi2_set_outaddr(csi2, buffer->dma);
+
+ /*
+	 * If streaming was enabled before there was a buffer queued,
+	 * or an underrun happened in the ISR, the hardware was not enabled
+ * and DMA queue flag ISP_VIDEO_DMAQUEUE_UNDERRUN is still set.
+ * Enable it now.
+ */
+ if (csi2->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
+ /* Enable / disable context 0 and IRQs */
+ csi2_if_enable(isp, csi2, 1);
+ csi2_ctx_enable(isp, csi2, 0, 1);
+ isp_video_dmaqueue_flags_clr(&csi2->video_out);
+ }
+
+ return 0;
+}
+
+static const struct isp_video_operations csi2_ispvideo_ops = {
+ .queue = csi2_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ else
+ return &csi2->formats[pad];
+}
+
+static void
+csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ u32 pixelcode;
+ struct v4l2_mbus_framefmt *format;
+ const struct isp_format_info *info;
+ unsigned int i;
+
+ switch (pad) {
+ case CSI2_PAD_SINK:
+ /* Clamp the width and height to valid range (1-8191). */
+ for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
+ if (fmt->code == csi2_input_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(csi2_input_fmts))
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+ break;
+
+ case CSI2_PAD_SOURCE:
+		/* The source format is the same as the sink format, except
+		 * for DPCM decompression.
+ */
+ pixelcode = fmt->code;
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ /*
+		 * Only allow DPCM decompression, and check that the
+		 * pattern is preserved.
+ */
+ info = omap3isp_video_format_info(fmt->code);
+ if (info->uncompressed == pixelcode)
+ fmt->code = pixelcode;
+ break;
+ }
+
+ /* RGB, non-interlaced */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ const struct isp_format_info *info;
+
+ if (code->pad == CSI2_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(csi2_input_fmts))
+ return -EINVAL;
+
+ code->code = csi2_input_fmts[code->index];
+ } else {
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
+ code->which);
+ switch (code->index) {
+ case 0:
+ /* Passthrough sink pad code */
+ code->code = format->code;
+ break;
+ case 1:
+ /* Uncompressed code */
+ info = omap3isp_video_format_info(format->code);
+ if (info->uncompressed == format->code)
+ return -EINVAL;
+
+ code->code = info->uncompressed;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int csi2_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CSI2_PAD_SINK) {
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * csi2_init_formats - Initialize formats on all pads
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CSI2_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ csi2_set_format(sd, fh ? fh->pad : NULL, &format);
+
+ return 0;
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @enable: ISP pipeline stream state
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = csi2->isp;
+ struct isp_video *video_out = &csi2->video_out;
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (omap3isp_csiphy_acquire(csi2->phy, &sd->entity) < 0)
+ return -ENODEV;
+ if (csi2->output & CSI2_OUTPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
+ csi2_configure(csi2);
+ csi2_print_status(csi2);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISP_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (csi2->output & CSI2_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED))
+ break;
+ /* Enable context 0 and IRQs */
+ atomic_set(&csi2->stopping, 0);
+ csi2_ctx_enable(isp, csi2, 0, 1);
+ csi2_if_enable(isp, csi2, 1);
+ isp_video_dmaqueue_flags_clr(video_out);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ if (csi2->state == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap3isp_module_sync_idle(&sd->entity, &csi2->wait,
+ &csi2->stopping))
+ dev_dbg(isp->dev, "%s: module stop timeout.\n",
+ sd->name);
+ csi2_ctx_enable(isp, csi2, 0, 0);
+ csi2_if_enable(isp, csi2, 0);
+ csi2_irq_ctx_set(isp, csi2, 0);
+ omap3isp_csiphy_release(csi2->phy);
+ isp_video_dmaqueue_flags_clr(video_out);
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
+ break;
+ }
+
+ csi2->state = enable;
+ return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .enum_mbus_code = csi2_enum_mbus_code,
+ .enum_frame_size = csi2_enum_frame_size,
+ .get_fmt = csi2_get_format,
+ .set_fmt = csi2_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .open = csi2_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags : Link flags
+ * return -EINVAL or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct isp_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
+ unsigned int index = local->index;
+
+ /*
+ * The ISP core doesn't support pipelines with multiple video outputs.
+	 * Revisit this when it is implemented, and return -EBUSY for now.
+ */
+
+ /* FIXME: this is actually a hack! */
+ if (is_media_entity_v4l2_subdev(remote->entity))
+ index |= 2 << 16;
+
+ switch (index) {
+ case CSI2_PAD_SOURCE:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_MEMORY)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_MEMORY;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_MEMORY;
+ }
+ break;
+
+ case CSI2_PAD_SOURCE | 2 << 16:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_CCDC)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_CCDC;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_CCDC;
+ }
+ break;
+
+ default:
+ /* Link from camera to CSI2 is fixed... */
+ return -EINVAL;
+ }
+
+ ctrl->vp_only_enable =
+ (csi2->output & CSI2_OUTPUT_MEMORY) ? false : true;
+ ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_CCDC);
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
+{
+ v4l2_device_unregister_subdev(&csi2->subdev);
+ omap3isp_video_unregister(&csi2->video_out);
+}
+
+int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ csi2->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&csi2->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CSI2 initialisation and cleanup
+ */
+
+/*
+ * csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to csi2 structure.
+ * return -ENOMEM or zero on success
+ */
+static int csi2_init_entities(struct isp_csi2_device *csi2)
+{
+ struct v4l2_subdev *sd = &csi2->subdev;
+ struct media_pad *pads = csi2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ v4l2_subdev_init(sd, &csi2_ops);
+ sd->internal_ops = &csi2_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP CSI2a", sizeof(sd->name));
+
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, csi2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+
+ me->ops = &csi2_media_ops;
+ ret = media_entity_pads_init(me, CSI2_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ csi2_init_formats(sd, NULL);
+
+ /* Video device node */
+ csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ csi2->video_out.ops = &csi2_ispvideo_ops;
+ csi2->video_out.bpl_alignment = 32;
+ csi2->video_out.bpl_zero_padding = 1;
+ csi2->video_out.bpl_max = 0x1ffe0;
+ csi2->video_out.isp = csi2->isp;
+ csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+ ret = omap3isp_video_init(&csi2->video_out, "CSI2a");
+ if (ret < 0)
+ goto error_video;
+
+ return 0;
+
+error_video:
+ media_entity_cleanup(&csi2->subdev.entity);
+ return ret;
+}
+
+/*
+ * omap3isp_csi2_init - Routine for module driver init
+ */
+int omap3isp_csi2_init(struct isp_device *isp)
+{
+ struct isp_csi2_device *csi2a = &isp->isp_csi2a;
+ struct isp_csi2_device *csi2c = &isp->isp_csi2c;
+ int ret;
+
+ csi2a->isp = isp;
+ csi2a->available = 1;
+ csi2a->regs1 = OMAP3_ISP_IOMEM_CSI2A_REGS1;
+ csi2a->regs2 = OMAP3_ISP_IOMEM_CSI2A_REGS2;
+ csi2a->phy = &isp->isp_csiphy2;
+ csi2a->state = ISP_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2a->wait);
+
+ ret = csi2_init_entities(csi2a);
+ if (ret < 0)
+ return ret;
+
+ if (isp->revision == ISP_REVISION_15_0) {
+ csi2c->isp = isp;
+ csi2c->available = 1;
+ csi2c->regs1 = OMAP3_ISP_IOMEM_CSI2C_REGS1;
+ csi2c->regs2 = OMAP3_ISP_IOMEM_CSI2C_REGS2;
+ csi2c->phy = &isp->isp_csiphy1;
+ csi2c->state = ISP_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2c->wait);
+ }
+
+ return 0;
+}
+
+/*
+ * omap3isp_csi2_cleanup - Routine for module driver cleanup
+ */
+void omap3isp_csi2_cleanup(struct isp_device *isp)
+{
+ struct isp_csi2_device *csi2a = &isp->isp_csi2a;
+
+ omap3isp_video_cleanup(&csi2a->video_out);
+ media_entity_cleanup(&csi2a->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.h b/drivers/media/platform/omap3isp/ispcsi2.h
new file mode 100644
index 000000000..453ed62fe
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsi2.h
@@ -0,0 +1,155 @@
+/*
+ * ispcsi2.h
+ *
+ * TI OMAP3 ISP - CSI2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_CSI2_H
+#define OMAP3_ISP_CSI2_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+struct isp_csiphy;
+
+/* This is not an exhaustive list */
+enum isp_csi2_pix_formats {
+ CSI2_PIX_FMT_OTHERS = 0,
+ CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
+ CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
+ CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
+ CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
+ CSI2_PIX_FMT_RAW8 = 0x2a,
+ CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
+ CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
+ CSI2_PIX_FMT_RAW8_VP = 0x12a,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
+ CSI2_USERDEF_8BIT_DATA1 = 0x40,
+};
+
+enum isp_csi2_irqevents {
+ OCP_ERR_IRQ = 0x4000,
+ SHORT_PACKET_IRQ = 0x2000,
+ ECC_CORRECTION_IRQ = 0x1000,
+ ECC_NO_CORRECTION_IRQ = 0x800,
+ COMPLEXIO2_ERR_IRQ = 0x400,
+ COMPLEXIO1_ERR_IRQ = 0x200,
+ FIFO_OVF_IRQ = 0x100,
+ CONTEXT7 = 0x80,
+ CONTEXT6 = 0x40,
+ CONTEXT5 = 0x20,
+ CONTEXT4 = 0x10,
+ CONTEXT3 = 0x8,
+ CONTEXT2 = 0x4,
+ CONTEXT1 = 0x2,
+ CONTEXT0 = 0x1,
+};
+
+enum isp_csi2_ctx_irqevents {
+ CTX_ECC_CORRECTION = 0x100,
+ CTX_LINE_NUMBER = 0x80,
+ CTX_FRAME_NUMBER = 0x40,
+ CTX_CS = 0x20,
+ CTX_LE = 0x8,
+ CTX_LS = 0x4,
+ CTX_FE = 0x2,
+ CTX_FS = 0x1,
+};
+
+enum isp_csi2_frame_mode {
+ ISP_CSI2_FRAME_IMMEDIATE,
+ ISP_CSI2_FRAME_AFTERFEC,
+};
+
+#define ISP_CSI2_MAX_CTX_NUM 7
+
+struct isp_csi2_ctx_cfg {
+ u8 ctxnum; /* context number 0 - 7 */
+ u8 dpcm_decompress;
+
+ /* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
+ u8 virtual_id;
+ u16 format_id; /* as in CSI2_CTx_CTRL2[9:0] */
+ u8 dpcm_predictor; /* 1: simple, 0: advanced */
+
+ /* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
+ u16 alpha;
+ u16 data_offset;
+ u32 ping_addr;
+ u32 pong_addr;
+ u8 eof_enabled;
+ u8 eol_enabled;
+ u8 checksum_enabled;
+ u8 enabled;
+};
+
+struct isp_csi2_timing_cfg {
+ u8 ionum; /* IO1 or IO2 as in CSI2_TIMING */
+ unsigned force_rx_mode:1;
+ unsigned stop_state_16x:1;
+ unsigned stop_state_4x:1;
+ u16 stop_state_counter;
+};
+
+struct isp_csi2_ctrl_cfg {
+ bool vp_clk_enable;
+ bool vp_only_enable;
+ u8 vp_out_ctrl;
+ enum isp_csi2_frame_mode frame_mode;
+ bool ecc_enable;
+ bool if_enable;
+};
+
+#define CSI2_PAD_SINK 0
+#define CSI2_PAD_SOURCE 1
+#define CSI2_PADS_NUM 2
+
+#define CSI2_OUTPUT_CCDC (1 << 0)
+#define CSI2_OUTPUT_MEMORY (1 << 1)
+
+struct isp_csi2_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+ struct isp_video video_out;
+ struct isp_device *isp;
+
+ u8 available; /* Is the IP present on the silicon? */
+
+ /* mem resources - enums as defined in enum isp_mem_resources */
+ u8 regs1;
+ u8 regs2;
+
+ u32 output; /* output to CCDC, memory or both? */
+ bool dpcm_decompress;
+ unsigned int frame_skip;
+
+ struct isp_csiphy *phy;
+ struct isp_csi2_ctx_cfg contexts[ISP_CSI2_MAX_CTX_NUM + 1];
+ struct isp_csi2_timing_cfg timing[2];
+ struct isp_csi2_ctrl_cfg ctrl;
+ enum isp_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2);
+int omap3isp_csi2_reset(struct isp_csi2_device *csi2);
+int omap3isp_csi2_init(struct isp_device *isp);
+void omap3isp_csi2_cleanup(struct isp_device *isp);
+void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2);
+int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+ struct v4l2_device *vdev);
+#endif /* OMAP3_ISP_CSI2_H */
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
new file mode 100644
index 000000000..a28fb79ab
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -0,0 +1,358 @@
+/*
+ * ispcsiphy.c
+ *
+ * TI OMAP3 ISP - CSI PHY module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispcsiphy.h"
+
+static void csiphy_routing_cfg_3630(struct isp_csiphy *phy,
+ enum isp_interface_type iface,
+ bool ccp2_strobe)
+{
+ u32 reg;
+ u32 shift, mode;
+
+ regmap_read(phy->isp->syscon, phy->isp->syscon_offset, &reg);
+
+ switch (iface) {
+ default:
+		/* Should not happen in practice, but let's keep the
+		 * compiler happy. */
+ case ISP_INTERFACE_CCP2B_PHY1:
+ reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2C_PHY1:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ case ISP_INTERFACE_CCP2B_PHY2:
+ reg |= OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2A_PHY2:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ }
+
+ /* Select data/clock or data/strobe mode for CCP2 */
+ if (iface == ISP_INTERFACE_CCP2B_PHY1 ||
+ iface == ISP_INTERFACE_CCP2B_PHY2) {
+ if (ccp2_strobe)
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
+ else
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK;
+ }
+
+ reg &= ~(OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK << shift);
+ reg |= mode << shift;
+
+ regmap_write(phy->isp->syscon, phy->isp->syscon_offset, reg);
+}
+
+static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ u32 csirxfe = OMAP343X_CONTROL_CSIRXFE_PWRDNZ
+ | OMAP343X_CONTROL_CSIRXFE_RESET;
+
+ /* Only the CCP2B on PHY1 is configurable. */
+ if (iface != ISP_INTERFACE_CCP2B_PHY1)
+ return;
+
+ if (!on) {
+ regmap_write(phy->isp->syscon, phy->isp->syscon_offset, 0);
+ return;
+ }
+
+ if (ccp2_strobe)
+ csirxfe |= OMAP343X_CONTROL_CSIRXFE_SELFORM;
+
+ regmap_write(phy->isp->syscon, phy->isp->syscon_offset, csirxfe);
+}
+
+/*
+ * csiphy_routing_cfg - Configure OMAP 3 CSI PHY routing.
+ * @phy: relevant phy device
+ * @iface: ISP_INTERFACE_*
+ * @on: power on or off
+ * @ccp2_strobe: false: data/clock, true: data/strobe
+ *
+ * Note that the underlying routing configuration registers are part of the
+ * control (SCM) register space and part of the CORE power domain on both 3430
+ * and 3630, so they will not hold their contents in off-mode. This isn't an
+ * issue since the MPU power domain is forced on whilst the ISP is in use.
+ */
+static void csiphy_routing_cfg(struct isp_csiphy *phy,
+ enum isp_interface_type iface, bool on,
+ bool ccp2_strobe)
+{
+ if (phy->isp->phy_type == ISP_PHY_TYPE_3630 && on)
+ return csiphy_routing_cfg_3630(phy, iface, ccp2_strobe);
+ if (phy->isp->phy_type == ISP_PHY_TYPE_3430)
+ return csiphy_routing_cfg_3430(phy, iface, on, ccp2_strobe);
+}
+
+/*
+ * csiphy_power_autoswitch_enable
+ * @enable: Sets or clears the autoswitch function enable flag.
+ */
+static void csiphy_power_autoswitch_enable(struct isp_csiphy *phy, bool enable)
+{
+ isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
+ ISPCSI2_PHY_CFG_PWR_AUTO,
+ enable ? ISPCSI2_PHY_CFG_PWR_AUTO : 0);
+}
+
+/*
+ * csiphy_set_power
+ * @power: Power state to be set.
+ *
+ * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
+ */
+static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
+{
+ u32 reg;
+ u8 retry_count;
+
+ isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
+ ISPCSI2_PHY_CFG_PWR_CMD_MASK, power);
+
+ retry_count = 0;
+ do {
+ udelay(50);
+ reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG) &
+ ISPCSI2_PHY_CFG_PWR_STATUS_MASK;
+
+ if (reg != power >> 2)
+ retry_count++;
+
+ } while ((reg != power >> 2) && (retry_count < 100));
+
+ if (retry_count == 100) {
+ dev_err(phy->isp->dev, "CSI2 CIO set power failed!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
+
+static int omap3isp_csiphy_config(struct isp_csiphy *phy)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
+ struct isp_bus_cfg *buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+ struct isp_csiphy_lanes_cfg *lanes;
+ int csi2_ddrclk_khz;
+ unsigned int num_data_lanes, used_lanes = 0;
+ unsigned int i;
+ u32 reg;
+
+ if (buscfg->interface == ISP_INTERFACE_CCP2B_PHY1
+ || buscfg->interface == ISP_INTERFACE_CCP2B_PHY2) {
+ lanes = &buscfg->bus.ccp2.lanecfg;
+ num_data_lanes = 1;
+ } else {
+ lanes = &buscfg->bus.csi2.lanecfg;
+ num_data_lanes = buscfg->bus.csi2.num_data_lanes;
+ }
+
+ if (num_data_lanes > phy->num_data_lanes)
+ return -EINVAL;
+
+ /* Clock and data lanes verification */
+ for (i = 0; i < num_data_lanes; i++) {
+ if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3)
+ return -EINVAL;
+
+ if (used_lanes & (1 << lanes->data[i].pos))
+ return -EINVAL;
+
+ used_lanes |= 1 << lanes->data[i].pos;
+ }
+
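+	/*
+	 * Example (illustrative, lane positions assumed): with two data
+	 * lanes at positions 1 and 2, used_lanes ends up as 0x6, so the
+	 * clock lane below may only use position 3 (position 0 is always
+	 * rejected and positions 1 and 2 are already taken).
+	 */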
+ if (lanes->clk.pol > 1 || lanes->clk.pos > 3)
+ return -EINVAL;
+
+ if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
+ return -EINVAL;
+
+ /*
+ * The PHY configuration is lost in off mode, that's not an
+ * issue since the MPU power domain is forced on whilst the
+ * ISP is in use.
+ */
+ csiphy_routing_cfg(phy, buscfg->interface, true,
+ buscfg->bus.ccp2.phy_layer);
+
+ /* DPHY timing configuration */
+ /* CSI-2 is DDR and we only count used lanes. */
+ csi2_ddrclk_khz = pipe->external_rate / 1000
+ / (2 * hweight32(used_lanes)) * pipe->external_width;
+
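+	/*
+	 * Worked example (illustrative only, numbers assumed): with a
+	 * 100 MHz external pixel rate, 10 bits per pixel and two data
+	 * lanes, csi2_ddrclk_khz = 100000 / (2 * 2) * 10 = 250000, i.e. a
+	 * 4 ns DDR clock period, which gives THS_TERM = ceil(12.5 / 4) - 1
+	 * = 3 and THS_SETTLE = ceil(90 / 4) + 3 = 26 with the formulas
+	 * below.
+	 */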
+ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
+ ISPCSIPHY_REG0_THS_SETTLE_MASK);
+ /* THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1. */
+ reg |= (DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1)
+ << ISPCSIPHY_REG0_THS_TERM_SHIFT;
+ /* THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3. */
+ reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3)
+ << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
+
+ isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);
+
+ reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
+ ISPCSIPHY_REG1_TCLK_MISS_MASK |
+ ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
+ reg |= TCLK_TERM << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
+ reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
+ reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
+
+ isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
+
+ /* DPHY lane configuration */
+ reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+ for (i = 0; i < num_data_lanes; i++) {
+ reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
+ ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
+ reg |= (lanes->data[i].pol <<
+ ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
+ reg |= (lanes->data[i].pos <<
+ ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ }
+
+ reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
+ ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
+ reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
+ reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+
+ isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+ return 0;
+}
+
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy, struct media_entity *entity)
+{
+ int rval;
+
+ if (phy->vdd == NULL) {
+ dev_err(phy->isp->dev,
+ "Power regulator for CSI PHY not available\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&phy->mutex);
+
+ rval = regulator_enable(phy->vdd);
+ if (rval < 0)
+ goto done;
+
+ rval = omap3isp_csi2_reset(phy->csi2);
+ if (rval < 0)
+ goto done;
+
+ phy->entity = entity;
+
+ rval = omap3isp_csiphy_config(phy);
+ if (rval < 0)
+ goto done;
+
+ if (phy->isp->revision == ISP_REVISION_15_0) {
+ rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
+ if (rval) {
+ regulator_disable(phy->vdd);
+ goto done;
+ }
+
+ csiphy_power_autoswitch_enable(phy, true);
+ }
+done:
+ if (rval < 0)
+ phy->entity = NULL;
+
+ mutex_unlock(&phy->mutex);
+ return rval;
+}
+
+void omap3isp_csiphy_release(struct isp_csiphy *phy)
+{
+ mutex_lock(&phy->mutex);
+ if (phy->entity) {
+ struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
+ struct isp_bus_cfg *buscfg =
+ v4l2_subdev_to_bus_cfg(pipe->external);
+
+ csiphy_routing_cfg(phy, buscfg->interface, false,
+ buscfg->bus.ccp2.phy_layer);
+ if (phy->isp->revision == ISP_REVISION_15_0) {
+ csiphy_power_autoswitch_enable(phy, false);
+ csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
+ }
+ regulator_disable(phy->vdd);
+ phy->entity = NULL;
+ }
+ mutex_unlock(&phy->mutex);
+}
+
+/*
+ * omap3isp_csiphy_init - Initialize the CSI PHY frontends
+ */
+int omap3isp_csiphy_init(struct isp_device *isp)
+{
+ struct isp_csiphy *phy1 = &isp->isp_csiphy1;
+ struct isp_csiphy *phy2 = &isp->isp_csiphy2;
+
+ phy2->isp = isp;
+ phy2->csi2 = &isp->isp_csi2a;
+ phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
+ phy2->cfg_regs = OMAP3_ISP_IOMEM_CSI2A_REGS1;
+ phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2;
+ mutex_init(&phy2->mutex);
+
+ phy1->isp = isp;
+ mutex_init(&phy1->mutex);
+
+ if (isp->revision == ISP_REVISION_15_0) {
+ phy1->csi2 = &isp->isp_csi2c;
+ phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES;
+ phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1;
+ phy1->phy_regs = OMAP3_ISP_IOMEM_CSIPHY1;
+ }
+
+ return 0;
+}
+
+void omap3isp_csiphy_cleanup(struct isp_device *isp)
+{
+ mutex_destroy(&isp->isp_csiphy1.mutex);
+ mutex_destroy(&isp->isp_csiphy2.mutex);
+}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
new file mode 100644
index 000000000..91543a09b
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -0,0 +1,46 @@
+/*
+ * ispcsiphy.h
+ *
+ * TI OMAP3 ISP - CSI PHY module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_CSI_PHY_H
+#define OMAP3_ISP_CSI_PHY_H
+
+#include "omap3isp.h"
+
+struct isp_csi2_device;
+struct regulator;
+
+struct isp_csiphy {
+ struct isp_device *isp;
+ struct mutex mutex; /* serialize csiphy configuration */
+ struct isp_csi2_device *csi2;
+ struct regulator *vdd;
+ /* the entity that acquired the phy */
+ struct media_entity *entity;
+
+ /* mem resources - enums as defined in enum isp_mem_resources */
+ unsigned int cfg_regs;
+ unsigned int phy_regs;
+
+ u8 num_data_lanes; /* number of CSI2 Data Lanes supported */
+};
+
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy,
+ struct media_entity *entity);
+void omap3isp_csiphy_release(struct isp_csiphy *phy);
+int omap3isp_csiphy_init(struct isp_device *isp);
+void omap3isp_csiphy_cleanup(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_CSI_PHY_H */
diff --git a/drivers/media/platform/omap3isp/isph3a.h b/drivers/media/platform/omap3isp/isph3a.h
new file mode 100644
index 000000000..e5b28d0f3
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a.h
@@ -0,0 +1,107 @@
+/*
+ * isph3a.h
+ *
+ * TI OMAP3 ISP - H3A AF module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_H3A_H
+#define OMAP3_ISP_H3A_H
+
+#include <linux/omap3isp.h>
+
+/*
+ * ----------
+ * -H3A AEWB-
+ * ----------
+ */
+
+#define AEWB_PACKET_SIZE 16
+#define AEWB_SATURATION_LIMIT 0x3ff
+
+/* Flags for changed registers */
+#define PCR_CHNG (1 << 0)
+#define AEWWIN1_CHNG (1 << 1)
+#define AEWINSTART_CHNG (1 << 2)
+#define AEWINBLK_CHNG (1 << 3)
+#define AEWSUBWIN_CHNG (1 << 4)
+#define PRV_WBDGAIN_CHNG (1 << 5)
+#define PRV_WBGAIN_CHNG (1 << 6)
+
+/* ISPH3A REGISTERS bits */
+#define ISPH3A_PCR_AF_EN (1 << 0)
+#define ISPH3A_PCR_AF_ALAW_EN (1 << 1)
+#define ISPH3A_PCR_AF_MED_EN (1 << 2)
+#define ISPH3A_PCR_AF_BUSY (1 << 15)
+#define ISPH3A_PCR_AEW_EN (1 << 16)
+#define ISPH3A_PCR_AEW_ALAW_EN (1 << 17)
+#define ISPH3A_PCR_AEW_BUSY (1 << 18)
+#define ISPH3A_PCR_AEW_MASK (ISPH3A_PCR_AEW_ALAW_EN | \
+ ISPH3A_PCR_AEW_AVE2LMT_MASK)
+
+/*
+ * --------
+ * -H3A AF-
+ * --------
+ */
+
+/* Peripheral Revision */
+#define AFPID 0x0
+
+#define AFCOEF_OFFSET 0x00000004 /* COEF base address */
+
+/* PCR fields */
+#define AF_BUSYAF (1 << 15)
+#define AF_FVMODE (1 << 14)
+#define AF_RGBPOS (0x7 << 11)
+#define AF_MED_TH (0xFF << 3)
+#define AF_MED_EN (1 << 2)
+#define AF_ALAW_EN (1 << 1)
+#define AF_EN (1 << 0)
+#define AF_PCR_MASK (AF_FVMODE | AF_RGBPOS | AF_MED_TH | \
+ AF_MED_EN | AF_ALAW_EN)
+
+/* AFPAX1 fields */
+#define AF_PAXW (0x7F << 16)
+#define AF_PAXH 0x7F
+
+/* AFPAX2 fields */
+#define AF_AFINCV (0xF << 13)
+#define AF_PAXVC (0x7F << 6)
+#define AF_PAXHC 0x3F
+
+/* AFPAXSTART fields */
+#define AF_PAXSH (0xFFF<<16)
+#define AF_PAXSV 0xFFF
+
+/* COEFFICIENT MASK */
+#define AF_COEF_MASK0 0xFFF
+#define AF_COEF_MASK1 (0xFFF<<16)
+
+/* BIT SHIFTS */
+#define AF_RGBPOS_SHIFT 11
+#define AF_MED_TH_SHIFT 3
+#define AF_PAXW_SHIFT 16
+#define AF_LINE_INCR_SHIFT 13
+#define AF_VT_COUNT_SHIFT 6
+#define AF_HZ_START_SHIFT 16
+#define AF_COEF_SHIFT 16
+
+/* Init and cleanup functions */
+int omap3isp_h3a_aewb_init(struct isp_device *isp);
+int omap3isp_h3a_af_init(struct isp_device *isp);
+
+void omap3isp_h3a_aewb_cleanup(struct isp_device *isp);
+void omap3isp_h3a_af_cleanup(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_H3A_H */
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
new file mode 100644
index 000000000..3c82dea4d
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -0,0 +1,343 @@
+/*
+ * isph3a_aewb.c
+ *
+ * TI OMAP3 ISP - H3A module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+#include "isph3a.h"
+#include "ispstat.h"
+
+/*
+ * h3a_aewb_setup_regs - Helper function to update the H3A AEWB registers.
+ */
+static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
+{
+ struct omap3isp_h3a_aewb_config *conf = priv;
+ u32 pcr;
+ u32 win1;
+ u32 start;
+ u32 blk;
+ u32 subwin;
+
+ if (aewb->state == ISPSTAT_DISABLED)
+ return;
+
+ isp_reg_writel(aewb->isp, aewb->active_buf->dma_addr,
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
+
+ if (!aewb->update)
+ return;
+
+ /* Converting config metadata into reg values */
+ pcr = conf->saturation_limit << ISPH3A_PCR_AEW_AVE2LMT_SHIFT;
+ pcr |= !!conf->alaw_enable << ISPH3A_PCR_AEW_ALAW_EN_SHIFT;
+
+ win1 = ((conf->win_height >> 1) - 1) << ISPH3A_AEWWIN1_WINH_SHIFT;
+ win1 |= ((conf->win_width >> 1) - 1) << ISPH3A_AEWWIN1_WINW_SHIFT;
+ win1 |= (conf->ver_win_count - 1) << ISPH3A_AEWWIN1_WINVC_SHIFT;
+ win1 |= (conf->hor_win_count - 1) << ISPH3A_AEWWIN1_WINHC_SHIFT;
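+	/*
+	 * Example (illustrative, configuration values assumed): a window of
+	 * 64x32 pixels with 3 horizontal and 2 vertical windows is encoded
+	 * above as WINW = 31, WINH = 15, WINHC = 2 and WINVC = 1.
+	 */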
+
+ start = conf->hor_win_start << ISPH3A_AEWINSTART_WINSH_SHIFT;
+ start |= conf->ver_win_start << ISPH3A_AEWINSTART_WINSV_SHIFT;
+
+ blk = conf->blk_ver_win_start << ISPH3A_AEWINBLK_WINSV_SHIFT;
+ blk |= ((conf->blk_win_height >> 1) - 1) << ISPH3A_AEWINBLK_WINH_SHIFT;
+
+ subwin = ((conf->subsample_ver_inc >> 1) - 1) <<
+ ISPH3A_AEWSUBWIN_AEWINCV_SHIFT;
+ subwin |= ((conf->subsample_hor_inc >> 1) - 1) <<
+ ISPH3A_AEWSUBWIN_AEWINCH_SHIFT;
+
+ isp_reg_writel(aewb->isp, win1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWWIN1);
+ isp_reg_writel(aewb->isp, start, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AEWINSTART);
+ isp_reg_writel(aewb->isp, blk, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINBLK);
+ isp_reg_writel(aewb->isp, subwin, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AEWSUBWIN);
+ isp_reg_clr_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AEW_MASK, pcr);
+
+ aewb->update = 0;
+ aewb->config_counter += aewb->inc_config;
+ aewb->inc_config = 0;
+ aewb->buf_size = conf->buf_size;
+}
+
+static void h3a_aewb_enable(struct ispstat *aewb, int enable)
+{
+ if (enable) {
+ isp_reg_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AEW_EN);
+ omap3isp_subclk_enable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
+ } else {
+ isp_reg_clr(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AEW_EN);
+ omap3isp_subclk_disable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
+ }
+}
+
+static int h3a_aewb_busy(struct ispstat *aewb)
+{
+ return isp_reg_readl(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
+ & ISPH3A_PCR_BUSYAEAWB;
+}
+
+static u32 h3a_aewb_get_buf_size(struct omap3isp_h3a_aewb_config *conf)
+{
+ /* Number of configured windows + extra row for black data */
+ u32 win_count = (conf->ver_win_count + 1) * conf->hor_win_count;
+
+ /*
+ * Unsaturated block counts for each 8 windows.
+ * 1 extra for the last (win_count % 8) windows if win_count is not
+ * divisible by 8.
+ */
+ win_count += (win_count + 7) / 8;
+
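+	/*
+	 * Example (illustrative, window counts assumed): with
+	 * hor_win_count = 3 and ver_win_count = 2, win_count is
+	 * (2 + 1) * 3 = 9 plus (9 + 7) / 8 = 2 extra packets, giving a
+	 * buffer size of 11 * AEWB_PACKET_SIZE = 176 bytes.
+	 */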
+ return win_count * AEWB_PACKET_SIZE;
+}
+
+static int h3a_aewb_validate_params(struct ispstat *aewb, void *new_conf)
+{
+ struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
+ u32 buf_size;
+
+ if (unlikely(user_cfg->saturation_limit >
+ OMAP3ISP_AEWB_MAX_SATURATION_LIM))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
+ user_cfg->win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
+ user_cfg->win_height & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->win_width < OMAP3ISP_AEWB_MIN_WIN_W ||
+ user_cfg->win_width > OMAP3ISP_AEWB_MAX_WIN_W ||
+ user_cfg->win_width & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->ver_win_count < OMAP3ISP_AEWB_MIN_WINVC ||
+ user_cfg->ver_win_count > OMAP3ISP_AEWB_MAX_WINVC))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->hor_win_count < OMAP3ISP_AEWB_MIN_WINHC ||
+ user_cfg->hor_win_count > OMAP3ISP_AEWB_MAX_WINHC))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->hor_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->blk_ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->blk_win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
+ user_cfg->blk_win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
+ user_cfg->blk_win_height & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->subsample_ver_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
+ user_cfg->subsample_ver_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
+ user_cfg->subsample_ver_inc & 0x01))
+ return -EINVAL;
+
+ if (unlikely(user_cfg->subsample_hor_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
+ user_cfg->subsample_hor_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
+ user_cfg->subsample_hor_inc & 0x01))
+ return -EINVAL;
+
+ buf_size = h3a_aewb_get_buf_size(user_cfg);
+ if (buf_size > user_cfg->buf_size)
+ user_cfg->buf_size = buf_size;
+ else if (user_cfg->buf_size > OMAP3ISP_AEWB_MAX_BUF_SIZE)
+ user_cfg->buf_size = OMAP3ISP_AEWB_MAX_BUF_SIZE;
+
+ return 0;
+}
+
+/*
+ * h3a_aewb_set_params - Helper function to check and store user-given params.
+ * @new_conf: Pointer to AE and AWB parameters struct.
+ *
+ * As most of them are busy-lock registers, we need to wait until AEW_BUSY = 0
+ * before programming them; this is done from the ISR.
+ */
+static void h3a_aewb_set_params(struct ispstat *aewb, void *new_conf)
+{
+ struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
+ struct omap3isp_h3a_aewb_config *cur_cfg = aewb->priv;
+ int update = 0;
+
+ if (cur_cfg->saturation_limit != user_cfg->saturation_limit) {
+ cur_cfg->saturation_limit = user_cfg->saturation_limit;
+ update = 1;
+ }
+ if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
+ cur_cfg->alaw_enable = user_cfg->alaw_enable;
+ update = 1;
+ }
+ if (cur_cfg->win_height != user_cfg->win_height) {
+ cur_cfg->win_height = user_cfg->win_height;
+ update = 1;
+ }
+ if (cur_cfg->win_width != user_cfg->win_width) {
+ cur_cfg->win_width = user_cfg->win_width;
+ update = 1;
+ }
+ if (cur_cfg->ver_win_count != user_cfg->ver_win_count) {
+ cur_cfg->ver_win_count = user_cfg->ver_win_count;
+ update = 1;
+ }
+ if (cur_cfg->hor_win_count != user_cfg->hor_win_count) {
+ cur_cfg->hor_win_count = user_cfg->hor_win_count;
+ update = 1;
+ }
+ if (cur_cfg->ver_win_start != user_cfg->ver_win_start) {
+ cur_cfg->ver_win_start = user_cfg->ver_win_start;
+ update = 1;
+ }
+ if (cur_cfg->hor_win_start != user_cfg->hor_win_start) {
+ cur_cfg->hor_win_start = user_cfg->hor_win_start;
+ update = 1;
+ }
+ if (cur_cfg->blk_ver_win_start != user_cfg->blk_ver_win_start) {
+ cur_cfg->blk_ver_win_start = user_cfg->blk_ver_win_start;
+ update = 1;
+ }
+ if (cur_cfg->blk_win_height != user_cfg->blk_win_height) {
+ cur_cfg->blk_win_height = user_cfg->blk_win_height;
+ update = 1;
+ }
+ if (cur_cfg->subsample_ver_inc != user_cfg->subsample_ver_inc) {
+ cur_cfg->subsample_ver_inc = user_cfg->subsample_ver_inc;
+ update = 1;
+ }
+ if (cur_cfg->subsample_hor_inc != user_cfg->subsample_hor_inc) {
+ cur_cfg->subsample_hor_inc = user_cfg->subsample_hor_inc;
+ update = 1;
+ }
+
+ if (update || !aewb->configured) {
+ aewb->inc_config++;
+ aewb->update = 1;
+ cur_cfg->buf_size = h3a_aewb_get_buf_size(cur_cfg);
+ }
+}
+
+static long h3a_aewb_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_AEWB_CFG:
+ return omap3isp_stat_config(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ:
+ return omap3isp_stat_request_statistics(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ_TIME32:
+ return omap3isp_stat_request_statistics_time32(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_EN: {
+ unsigned long *en = arg;
+ return omap3isp_stat_enable(stat, !!*en);
+ }
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static const struct ispstat_ops h3a_aewb_ops = {
+ .validate_params = h3a_aewb_validate_params,
+ .set_params = h3a_aewb_set_params,
+ .setup_regs = h3a_aewb_setup_regs,
+ .enable = h3a_aewb_enable,
+ .busy = h3a_aewb_busy,
+};
+
+static const struct v4l2_subdev_core_ops h3a_aewb_subdev_core_ops = {
+ .ioctl = h3a_aewb_ioctl,
+ .subscribe_event = omap3isp_stat_subscribe_event,
+ .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops h3a_aewb_subdev_video_ops = {
+ .s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops h3a_aewb_subdev_ops = {
+ .core = &h3a_aewb_subdev_core_ops,
+ .video = &h3a_aewb_subdev_video_ops,
+};
+
+/*
+ * omap3isp_h3a_aewb_init - Module Initialisation.
+ */
+int omap3isp_h3a_aewb_init(struct isp_device *isp)
+{
+ struct ispstat *aewb = &isp->isp_aewb;
+ struct omap3isp_h3a_aewb_config *aewb_cfg;
+ struct omap3isp_h3a_aewb_config *aewb_recover_cfg;
+
+ aewb_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_cfg), GFP_KERNEL);
+ if (!aewb_cfg)
+ return -ENOMEM;
+
+ aewb->ops = &h3a_aewb_ops;
+ aewb->priv = aewb_cfg;
+ aewb->event_type = V4L2_EVENT_OMAP3ISP_AEWB;
+ aewb->isp = isp;
+
+ /* Set recover state configuration */
+ aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
+ GFP_KERNEL);
+ if (!aewb_recover_cfg) {
+ dev_err(aewb->isp->dev,
+ "AEWB: cannot allocate memory for recover configuration.\n");
+ return -ENOMEM;
+ }
+
+ aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
+ aewb_recover_cfg->win_height = OMAP3ISP_AEWB_MIN_WIN_H;
+ aewb_recover_cfg->win_width = OMAP3ISP_AEWB_MIN_WIN_W;
+ aewb_recover_cfg->ver_win_count = OMAP3ISP_AEWB_MIN_WINVC;
+ aewb_recover_cfg->hor_win_count = OMAP3ISP_AEWB_MIN_WINHC;
+ aewb_recover_cfg->blk_ver_win_start = aewb_recover_cfg->ver_win_start +
+ aewb_recover_cfg->win_height * aewb_recover_cfg->ver_win_count;
+ aewb_recover_cfg->blk_win_height = OMAP3ISP_AEWB_MIN_WIN_H;
+ aewb_recover_cfg->subsample_ver_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
+ aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
+
+ if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
+ dev_err(aewb->isp->dev,
+ "AEWB: recover configuration is invalid.\n");
+ return -EINVAL;
+ }
+
+ aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
+ aewb->recover_priv = aewb_recover_cfg;
+
+ return omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
+}
+
+/*
+ * omap3isp_h3a_aewb_cleanup - Module exit.
+ */
+void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
+{
+ omap3isp_stat_cleanup(&isp->isp_aewb);
+}
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
new file mode 100644
index 000000000..4da25c84f
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -0,0 +1,398 @@
+/*
+ * isph3a_af.c
+ *
+ * TI OMAP3 ISP - H3A AF module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Linux specific include files */
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "isp.h"
+#include "isph3a.h"
+#include "ispstat.h"
+
+#define IS_OUT_OF_BOUNDS(value, min, max) \
+ (((value) < (min)) || ((value) > (max)))
+
+static void h3a_af_setup_regs(struct ispstat *af, void *priv)
+{
+ struct omap3isp_h3a_af_config *conf = priv;
+ u32 pcr;
+ u32 pax1;
+ u32 pax2;
+ u32 paxstart;
+ u32 coef;
+ u32 base_coef_set0;
+ u32 base_coef_set1;
+ int index;
+
+ if (af->state == ISPSTAT_DISABLED)
+ return;
+
+ isp_reg_writel(af->isp, af->active_buf->dma_addr, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AFBUFST);
+
+ if (!af->update)
+ return;
+
+ /* Configure Hardware Registers */
+ pax1 = ((conf->paxel.width >> 1) - 1) << AF_PAXW_SHIFT;
+ /* Set height in AFPAX1 */
+ pax1 |= (conf->paxel.height >> 1) - 1;
+ isp_reg_writel(af->isp, pax1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1);
+
+ /* Configure AFPAX2 Register */
+ /* Set Line Increment in AFPAX2 Register */
+ pax2 = ((conf->paxel.line_inc >> 1) - 1) << AF_LINE_INCR_SHIFT;
+ /* Set Vertical Count */
+ pax2 |= (conf->paxel.v_cnt - 1) << AF_VT_COUNT_SHIFT;
+ /* Set Horizontal Count */
+ pax2 |= (conf->paxel.h_cnt - 1);
+ isp_reg_writel(af->isp, pax2, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2);
+
+ /* Configure PAXSTART Register */
+	/* Configure Horizontal Start */
+ paxstart = conf->paxel.h_start << AF_HZ_START_SHIFT;
+ /* Configure Vertical Start */
+ paxstart |= conf->paxel.v_start;
+ isp_reg_writel(af->isp, paxstart, OMAP3_ISP_IOMEM_H3A,
+ ISPH3A_AFPAXSTART);
+
+	/* Set IIRSH Register */
+ isp_reg_writel(af->isp, conf->iir.h_start,
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH);
+
+ base_coef_set0 = ISPH3A_AFCOEF010;
+ base_coef_set1 = ISPH3A_AFCOEF110;
+ for (index = 0; index <= 8; index += 2) {
+		/* Set IIR Filter0 Coefficients */
+ coef = 0;
+ coef |= conf->iir.coeff_set0[index];
+ coef |= conf->iir.coeff_set0[index + 1] <<
+ AF_COEF_SHIFT;
+ isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
+ base_coef_set0);
+ base_coef_set0 += AFCOEF_OFFSET;
+
+		/* Set IIR Filter1 Coefficients */
+ coef = 0;
+ coef |= conf->iir.coeff_set1[index];
+ coef |= conf->iir.coeff_set1[index + 1] <<
+ AF_COEF_SHIFT;
+ isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
+ base_coef_set1);
+ base_coef_set1 += AFCOEF_OFFSET;
+ }
+ /* set AFCOEF0010 Register */
+ isp_reg_writel(af->isp, conf->iir.coeff_set0[10],
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010);
+ /* set AFCOEF1010 Register */
+ isp_reg_writel(af->isp, conf->iir.coeff_set1[10],
+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010);
+
+ /* PCR Register */
+ /* Set RGB Position */
+ pcr = conf->rgb_pos << AF_RGBPOS_SHIFT;
+ /* Set Accumulator Mode */
+ if (conf->fvmode == OMAP3ISP_AF_MODE_PEAK)
+ pcr |= AF_FVMODE;
+ /* Set A-law */
+ if (conf->alaw_enable)
+ pcr |= AF_ALAW_EN;
+ /* HMF Configurations */
+ if (conf->hmf.enable) {
+ /* Enable HMF */
+ pcr |= AF_MED_EN;
+ /* Set Median Threshold */
+ pcr |= conf->hmf.threshold << AF_MED_TH_SHIFT;
+ }
+ /* Set PCR Register */
+ isp_reg_clr_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ AF_PCR_MASK, pcr);
+
+ af->update = 0;
+ af->config_counter += af->inc_config;
+ af->inc_config = 0;
+ af->buf_size = conf->buf_size;
+}
+
+static void h3a_af_enable(struct ispstat *af, int enable)
+{
+ if (enable) {
+ isp_reg_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AF_EN);
+ omap3isp_subclk_enable(af->isp, OMAP3_ISP_SUBCLK_AF);
+ } else {
+ isp_reg_clr(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+ ISPH3A_PCR_AF_EN);
+ omap3isp_subclk_disable(af->isp, OMAP3_ISP_SUBCLK_AF);
+ }
+}
+
+static int h3a_af_busy(struct ispstat *af)
+{
+ return isp_reg_readl(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
+ & ISPH3A_PCR_BUSYAF;
+}
+
+static u32 h3a_af_get_buf_size(struct omap3isp_h3a_af_config *conf)
+{
+ return conf->paxel.h_cnt * conf->paxel.v_cnt * OMAP3ISP_AF_PAXEL_SIZE;
+}
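+
+/*
+ * Illustrative example (assumption, not from the original sources): a
+ * 4 x 3 paxel grid produces 4 * 3 * OMAP3ISP_AF_PAXEL_SIZE bytes of
+ * focus statistics.
+ */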
+
+/* Function to check paxel parameters */
+static int h3a_af_validate_params(struct ispstat *af, void *new_conf)
+{
+ struct omap3isp_h3a_af_config *user_cfg = new_conf;
+ struct omap3isp_h3a_af_paxel *paxel_cfg = &user_cfg->paxel;
+ struct omap3isp_h3a_af_iir *iir_cfg = &user_cfg->iir;
+ int index;
+ u32 buf_size;
+
+ /* Check horizontal Count */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->h_cnt,
+ OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN,
+ OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MAX))
+ return -EINVAL;
+
+ /* Check Vertical Count */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->v_cnt,
+ OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN,
+ OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MAX))
+ return -EINVAL;
+
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->height, OMAP3ISP_AF_PAXEL_HEIGHT_MIN,
+ OMAP3ISP_AF_PAXEL_HEIGHT_MAX) ||
+ paxel_cfg->height % 2)
+ return -EINVAL;
+
+ /* Check width */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->width, OMAP3ISP_AF_PAXEL_WIDTH_MIN,
+ OMAP3ISP_AF_PAXEL_WIDTH_MAX) ||
+ paxel_cfg->width % 2)
+ return -EINVAL;
+
+ /* Check Line Increment */
+ if (IS_OUT_OF_BOUNDS(paxel_cfg->line_inc,
+ OMAP3ISP_AF_PAXEL_INCREMENT_MIN,
+ OMAP3ISP_AF_PAXEL_INCREMENT_MAX) ||
+ paxel_cfg->line_inc % 2)
+ return -EINVAL;
+
+ /* Check Horizontal Start */
+ if ((paxel_cfg->h_start < iir_cfg->h_start) ||
+ IS_OUT_OF_BOUNDS(paxel_cfg->h_start,
+ OMAP3ISP_AF_PAXEL_HZSTART_MIN,
+ OMAP3ISP_AF_PAXEL_HZSTART_MAX))
+ return -EINVAL;
+
+ /* Check IIR */
+ for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
+ if ((iir_cfg->coeff_set0[index]) > OMAP3ISP_AF_COEF_MAX)
+ return -EINVAL;
+
+ if ((iir_cfg->coeff_set1[index]) > OMAP3ISP_AF_COEF_MAX)
+ return -EINVAL;
+ }
+
+ if (IS_OUT_OF_BOUNDS(iir_cfg->h_start, OMAP3ISP_AF_IIRSH_MIN,
+ OMAP3ISP_AF_IIRSH_MAX))
+ return -EINVAL;
+
+ /* Hack: If paxel size is 12, the 10th AF window may be corrupted */
+ if ((paxel_cfg->h_cnt * paxel_cfg->v_cnt > 9) &&
+ (paxel_cfg->width * paxel_cfg->height == 12))
+ return -EINVAL;
+
+ buf_size = h3a_af_get_buf_size(user_cfg);
+ if (buf_size > user_cfg->buf_size)
+ /* User buf_size request wasn't enough */
+ user_cfg->buf_size = buf_size;
+ else if (user_cfg->buf_size > OMAP3ISP_AF_MAX_BUF_SIZE)
+ user_cfg->buf_size = OMAP3ISP_AF_MAX_BUF_SIZE;
+
+ return 0;
+}
+
+/* Update local parameters */
+static void h3a_af_set_params(struct ispstat *af, void *new_conf)
+{
+ struct omap3isp_h3a_af_config *user_cfg = new_conf;
+ struct omap3isp_h3a_af_config *cur_cfg = af->priv;
+ int update = 0;
+ int index;
+
+ /* alaw */
+ if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
+ update = 1;
+ goto out;
+ }
+
+ /* hmf */
+ if (cur_cfg->hmf.enable != user_cfg->hmf.enable) {
+ update = 1;
+ goto out;
+ }
+ if (cur_cfg->hmf.threshold != user_cfg->hmf.threshold) {
+ update = 1;
+ goto out;
+ }
+
+ /* rgbpos */
+ if (cur_cfg->rgb_pos != user_cfg->rgb_pos) {
+ update = 1;
+ goto out;
+ }
+
+ /* iir */
+ if (cur_cfg->iir.h_start != user_cfg->iir.h_start) {
+ update = 1;
+ goto out;
+ }
+ for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
+ if (cur_cfg->iir.coeff_set0[index] !=
+ user_cfg->iir.coeff_set0[index]) {
+ update = 1;
+ goto out;
+ }
+ if (cur_cfg->iir.coeff_set1[index] !=
+ user_cfg->iir.coeff_set1[index]) {
+ update = 1;
+ goto out;
+ }
+ }
+
+ /* paxel */
+ if ((cur_cfg->paxel.width != user_cfg->paxel.width) ||
+ (cur_cfg->paxel.height != user_cfg->paxel.height) ||
+ (cur_cfg->paxel.h_start != user_cfg->paxel.h_start) ||
+ (cur_cfg->paxel.v_start != user_cfg->paxel.v_start) ||
+ (cur_cfg->paxel.h_cnt != user_cfg->paxel.h_cnt) ||
+ (cur_cfg->paxel.v_cnt != user_cfg->paxel.v_cnt) ||
+ (cur_cfg->paxel.line_inc != user_cfg->paxel.line_inc)) {
+ update = 1;
+ goto out;
+ }
+
+ /* af_mode */
+ if (cur_cfg->fvmode != user_cfg->fvmode)
+ update = 1;
+
+out:
+ if (update || !af->configured) {
+ memcpy(cur_cfg, user_cfg, sizeof(*cur_cfg));
+ af->inc_config++;
+ af->update = 1;
+ /*
+		 * The user might have requested a bigger buffer than necessary
+		 * for this configuration. In order to return the right amount
+		 * of data during a buffer request, calculate the size here
+		 * instead of sticking with user_cfg->buf_size.
+ */
+ cur_cfg->buf_size = h3a_af_get_buf_size(cur_cfg);
+ }
+}
+
+static long h3a_af_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_AF_CFG:
+ return omap3isp_stat_config(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ:
+ return omap3isp_stat_request_statistics(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ_TIME32:
+ return omap3isp_stat_request_statistics_time32(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_EN: {
+ int *en = arg;
+ return omap3isp_stat_enable(stat, !!*en);
+ }
+ }
+
+ return -ENOIOCTLCMD;
+
+}
+
+static const struct ispstat_ops h3a_af_ops = {
+ .validate_params = h3a_af_validate_params,
+ .set_params = h3a_af_set_params,
+ .setup_regs = h3a_af_setup_regs,
+ .enable = h3a_af_enable,
+ .busy = h3a_af_busy,
+};
+
+static const struct v4l2_subdev_core_ops h3a_af_subdev_core_ops = {
+ .ioctl = h3a_af_ioctl,
+ .subscribe_event = omap3isp_stat_subscribe_event,
+ .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops h3a_af_subdev_video_ops = {
+ .s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops h3a_af_subdev_ops = {
+ .core = &h3a_af_subdev_core_ops,
+ .video = &h3a_af_subdev_video_ops,
+};
+
+/* omap3isp_h3a_af_init - Module Initialisation. */
+int omap3isp_h3a_af_init(struct isp_device *isp)
+{
+ struct ispstat *af = &isp->isp_af;
+ struct omap3isp_h3a_af_config *af_cfg;
+ struct omap3isp_h3a_af_config *af_recover_cfg;
+
+ af_cfg = devm_kzalloc(isp->dev, sizeof(*af_cfg), GFP_KERNEL);
+ if (af_cfg == NULL)
+ return -ENOMEM;
+
+ af->ops = &h3a_af_ops;
+ af->priv = af_cfg;
+ af->event_type = V4L2_EVENT_OMAP3ISP_AF;
+ af->isp = isp;
+
+ /* Set recover state configuration */
+ af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
+ GFP_KERNEL);
+ if (!af_recover_cfg) {
+ dev_err(af->isp->dev,
+ "AF: cannot allocate memory for recover configuration.\n");
+ return -ENOMEM;
+ }
+
+ af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
+ af_recover_cfg->paxel.width = OMAP3ISP_AF_PAXEL_WIDTH_MIN;
+ af_recover_cfg->paxel.height = OMAP3ISP_AF_PAXEL_HEIGHT_MIN;
+ af_recover_cfg->paxel.h_cnt = OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN;
+ af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
+ af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;
+ if (h3a_af_validate_params(af, af_recover_cfg)) {
+ dev_err(af->isp->dev,
+ "AF: recover configuration is invalid.\n");
+ return -EINVAL;
+ }
+
+ af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
+ af->recover_priv = af_recover_cfg;
+
+ return omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
+}
+
+void omap3isp_h3a_af_cleanup(struct isp_device *isp)
+{
+ omap3isp_stat_cleanup(&isp->isp_af);
+}
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
new file mode 100644
index 000000000..d4be3d0e0
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -0,0 +1,540 @@
+/*
+ * isphist.c
+ *
+ * TI OMAP3 ISP - Histogram module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "isphist.h"
+
+#define HIST_CONFIG_DMA 1
+
+/*
+ * hist_reset_mem - Clear the histogram memory before starting the stats engine.
+ */
+static void hist_reset_mem(struct ispstat *hist)
+{
+ struct isp_device *isp = hist->isp;
+ struct omap3isp_hist_config *conf = hist->priv;
+ unsigned int i;
+
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
+
+ /*
+	 * While this bit is set, the histogram internal buffer is cleared at
+	 * the same time it is read. The bit must be cleared afterwards.
+ */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
+
+ /*
+ * We'll clear 4 words at each iteration for optimization. It avoids
+ * 3/4 of the jumps. We also know HIST_MEM_SIZE is divisible by 4.
+ */
+ for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) {
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ }
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
+
+ hist->wait_acc_frames = conf->num_acc_frames;
+}
+
+/*
+ * hist_setup_regs - Helper function to update Histogram registers.
+ */
+static void hist_setup_regs(struct ispstat *hist, void *priv)
+{
+ struct isp_device *isp = hist->isp;
+ struct omap3isp_hist_config *conf = priv;
+ int c;
+ u32 cnt;
+ u32 wb_gain;
+ u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS];
+ u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS];
+
+ if (!hist->update || hist->state == ISPSTAT_DISABLED ||
+ hist->state == ISPSTAT_DISABLING)
+ return;
+
+ cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT;
+
+ wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT;
+ wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT;
+ wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT;
+ if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER)
+ wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT;
+
+ /* Regions size and position */
+ for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
+ if (c < conf->num_regions) {
+ reg_hor[c] = (conf->region[c].h_start <<
+ ISPHIST_REG_START_SHIFT)
+ | (conf->region[c].h_end <<
+ ISPHIST_REG_END_SHIFT);
+ reg_ver[c] = (conf->region[c].v_start <<
+ ISPHIST_REG_START_SHIFT)
+ | (conf->region[c].v_end <<
+ ISPHIST_REG_END_SHIFT);
+ } else {
+ reg_hor[c] = 0;
+ reg_ver[c] = 0;
+ }
+ }
+
+ cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT;
+ switch (conf->hist_bins) {
+ case OMAP3ISP_HIST_BINS_256:
+ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) <<
+ ISPHIST_CNT_SHIFT_SHIFT;
+ break;
+ case OMAP3ISP_HIST_BINS_128:
+ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) <<
+ ISPHIST_CNT_SHIFT_SHIFT;
+ break;
+ case OMAP3ISP_HIST_BINS_64:
+ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) <<
+ ISPHIST_CNT_SHIFT_SHIFT;
+ break;
+ default: /* OMAP3ISP_HIST_BINS_32 */
+ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) <<
+ ISPHIST_CNT_SHIFT_SHIFT;
+ break;
+ }
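+	/*
+	 * Illustration (hedged sketch based on ISPHIST_IN_BIT_WIDTH_CCDC
+	 * being 10, as defined in isphist.h): for 256 bins the input data is
+	 * shifted right by 10 - 8 = 2 bits, so the 8 MSBs of each sample
+	 * select the histogram bin.
+	 */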
+
+ hist_reset_mem(hist);
+
+ isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT);
+ isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN);
+ isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ);
+ isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT);
+ isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ);
+ isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT);
+ isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ);
+ isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT);
+ isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ);
+ isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT);
+
+ hist->update = 0;
+ hist->config_counter += hist->inc_config;
+ hist->inc_config = 0;
+ hist->buf_size = conf->buf_size;
+}
+
+static void hist_enable(struct ispstat *hist, int enable)
+{
+ if (enable) {
+ isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
+ ISPHIST_PCR_ENABLE);
+ omap3isp_subclk_enable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
+ } else {
+ isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
+ ISPHIST_PCR_ENABLE);
+ omap3isp_subclk_disable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
+ }
+}
+
+static int hist_busy(struct ispstat *hist)
+{
+ return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR)
+ & ISPHIST_PCR_BUSY;
+}
+
+static void hist_dma_cb(void *data)
+{
+ struct ispstat *hist = data;
+
+ /* FIXME: The DMA engine API can't report transfer errors :-/ */
+
+ isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
+ ISPHIST_CNT_CLEAR);
+
+ omap3isp_stat_dma_isr(hist);
+ if (hist->state != ISPSTAT_DISABLED)
+ omap3isp_hist_dma_done(hist->isp);
+}
+
+static int hist_buf_dma(struct ispstat *hist)
+{
+ dma_addr_t dma_addr = hist->active_buf->dma_addr;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config cfg;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (unlikely(!dma_addr)) {
+ dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
+ goto error;
+ }
+
+ isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
+ isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
+ ISPHIST_CNT_CLEAR);
+ omap3isp_flush(hist->isp);
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = hist->buf_size / 4;
+
+ ret = dmaengine_slave_config(hist->dma_ch, &cfg);
+ if (ret < 0) {
+ dev_dbg(hist->isp->dev,
+ "hist: DMA slave configuration failed\n");
+ goto error;
+ }
+
+ tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
+ hist->buf_size, DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK);
+ if (tx == NULL) {
+ dev_dbg(hist->isp->dev,
+ "hist: DMA slave preparation failed\n");
+ goto error;
+ }
+
+ tx->callback = hist_dma_cb;
+ tx->callback_param = hist;
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
+ goto error;
+ }
+
+ dma_async_issue_pending(hist->dma_ch);
+
+ return STAT_BUF_WAITING_DMA;
+
+error:
+ hist_reset_mem(hist);
+ return STAT_NO_BUF;
+}
+
+static int hist_buf_pio(struct ispstat *hist)
+{
+ struct isp_device *isp = hist->isp;
+ u32 *buf = hist->active_buf->virt_addr;
+ unsigned int i;
+
+ if (!buf) {
+ dev_dbg(isp->dev, "hist: invalid PIO buffer address\n");
+ hist_reset_mem(hist);
+ return STAT_NO_BUF;
+ }
+
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
+
+ /*
+	 * While this bit is set, the histogram internal buffer is cleared at
+	 * the same time it is read. The bit must be cleared just after all
+	 * data has been acquired.
+ */
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
+
+ /*
+	 * Read four 4-byte words at each iteration as an optimization; it
+	 * avoids 3/4 of the loop jumps. We also know buf_size is divisible
+	 * by 16.
+ */
+ for (i = hist->buf_size / 16; i > 0; i--) {
+ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+ }
+ isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
+ ISPHIST_CNT_CLEAR);
+
+ return STAT_BUF_DONE;
+}
+
+/*
+ * hist_buf_process - Callback from ISP driver for HIST interrupt.
+ */
+static int hist_buf_process(struct ispstat *hist)
+{
+ struct omap3isp_hist_config *user_cfg = hist->priv;
+ int ret;
+
+ if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) {
+ hist_reset_mem(hist);
+ return STAT_NO_BUF;
+ }
+
+ if (--(hist->wait_acc_frames))
+ return STAT_NO_BUF;
+
+ if (hist->dma_ch)
+ ret = hist_buf_dma(hist);
+ else
+ ret = hist_buf_pio(hist);
+
+ hist->wait_acc_frames = user_cfg->num_acc_frames;
+
+ return ret;
+}
+
+static u32 hist_get_buf_size(struct omap3isp_hist_config *conf)
+{
+ return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions;
+}
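+
+/*
+ * Illustration (hedged sketch, not from the original sources): the buffer
+ * grows linearly with the number of regions, so two regions configured for
+ * the same bin count need exactly twice the single-region memory size
+ * returned by OMAP3ISP_HIST_MEM_SIZE_BINS().
+ */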
+
+/*
+ * hist_validate_params - Helper function to check user-given params.
+ * @new_conf: Pointer to user configuration structure.
+ *
+ * Returns 0 on successful validation.
+ */
+static int hist_validate_params(struct ispstat *hist, void *new_conf)
+{
+ struct omap3isp_hist_config *user_cfg = new_conf;
+ int c;
+ u32 buf_size;
+
+ if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3)
+ return -EINVAL;
+
+ /* Regions size and position */
+
+ if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) ||
+ (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS))
+ return -EINVAL;
+
+ /* Regions */
+ for (c = 0; c < user_cfg->num_regions; c++) {
+ if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK)
+ return -EINVAL;
+ if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK)
+ return -EINVAL;
+ if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK)
+ return -EINVAL;
+ if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK)
+ return -EINVAL;
+ if (user_cfg->region[c].h_start > user_cfg->region[c].h_end)
+ return -EINVAL;
+ if (user_cfg->region[c].v_start > user_cfg->region[c].v_end)
+ return -EINVAL;
+ }
+
+ switch (user_cfg->num_regions) {
+ case 1:
+ if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256)
+ return -EINVAL;
+ break;
+ case 2:
+ if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128)
+ return -EINVAL;
+ break;
+ default: /* 3 or 4 */
+ if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64)
+ return -EINVAL;
+ break;
+ }
+
+ buf_size = hist_get_buf_size(user_cfg);
+ if (buf_size > user_cfg->buf_size)
+ /* User's buf_size request wasn't enough */
+ user_cfg->buf_size = buf_size;
+ else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE)
+ user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;
+
+ return 0;
+}
+
+static int hist_comp_params(struct ispstat *hist,
+ struct omap3isp_hist_config *user_cfg)
+{
+ struct omap3isp_hist_config *cur_cfg = hist->priv;
+ int c;
+
+ if (cur_cfg->cfa != user_cfg->cfa)
+ return 1;
+
+ if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
+ return 1;
+
+ if (cur_cfg->hist_bins != user_cfg->hist_bins)
+ return 1;
+
+ for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) {
+ if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3)
+ break;
+ else if (cur_cfg->wg[c] != user_cfg->wg[c])
+ return 1;
+ }
+
+ if (cur_cfg->num_regions != user_cfg->num_regions)
+ return 1;
+
+ /* Regions */
+ for (c = 0; c < user_cfg->num_regions; c++) {
+ if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
+ return 1;
+ if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
+ return 1;
+ if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
+ return 1;
+ if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * hist_set_params - Helper function to check and store user-given params.
+ * @new_conf: Pointer to user configuration structure.
+ */
+static void hist_set_params(struct ispstat *hist, void *new_conf)
+{
+ struct omap3isp_hist_config *user_cfg = new_conf;
+ struct omap3isp_hist_config *cur_cfg = hist->priv;
+
+ if (!hist->configured || hist_comp_params(hist, user_cfg)) {
+ memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
+ if (user_cfg->num_acc_frames == 0)
+ user_cfg->num_acc_frames = 1;
+ hist->inc_config++;
+ hist->update = 1;
+ /*
+		 * The user might have requested a bigger buffer than necessary
+		 * for this configuration. In order to return the right amount
+		 * of data during a buffer request, calculate the size here
+		 * instead of sticking with user_cfg->buf_size.
+ */
+ cur_cfg->buf_size = hist_get_buf_size(cur_cfg);
+
+ }
+}
+
+static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_HIST_CFG:
+ return omap3isp_stat_config(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ:
+ return omap3isp_stat_request_statistics(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_REQ_TIME32:
+ return omap3isp_stat_request_statistics_time32(stat, arg);
+ case VIDIOC_OMAP3ISP_STAT_EN: {
+ int *en = arg;
+ return omap3isp_stat_enable(stat, !!*en);
+ }
+ }
+
+ return -ENOIOCTLCMD;
+
+}
+
+static const struct ispstat_ops hist_ops = {
+ .validate_params = hist_validate_params,
+ .set_params = hist_set_params,
+ .setup_regs = hist_setup_regs,
+ .enable = hist_enable,
+ .busy = hist_busy,
+ .buf_process = hist_buf_process,
+};
+
+static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
+ .ioctl = hist_ioctl,
+ .subscribe_event = omap3isp_stat_subscribe_event,
+ .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
+ .s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops hist_subdev_ops = {
+ .core = &hist_subdev_core_ops,
+ .video = &hist_subdev_video_ops,
+};
+
+/*
+ * omap3isp_hist_init - Module Initialization.
+ */
+int omap3isp_hist_init(struct isp_device *isp)
+{
+ struct ispstat *hist = &isp->isp_hist;
+ struct omap3isp_hist_config *hist_cfg;
+ int ret = -1;
+
+ hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
+ if (hist_cfg == NULL)
+ return -ENOMEM;
+
+ hist->isp = isp;
+
+ if (HIST_CONFIG_DMA) {
+ dma_cap_mask_t mask;
+
+ /*
+		 * We need a slave-capable channel without a DMA request line
+		 * for reading out the data.
+		 * For this we can use dma_request_chan_by_mask() as we are
+		 * happy with any channel as long as it is capable of slave
+		 * configuration.
+ */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ hist->dma_ch = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(hist->dma_ch)) {
+ ret = PTR_ERR(hist->dma_ch);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ hist->dma_ch = NULL;
+ dev_warn(isp->dev,
+ "hist: DMA channel request failed, using PIO\n");
+ } else {
+ dev_dbg(isp->dev, "hist: using DMA channel %s\n",
+ dma_chan_name(hist->dma_ch));
+ }
+ }
+
+ hist->ops = &hist_ops;
+ hist->priv = hist_cfg;
+ hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
+
+ ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
+ if (ret) {
+ if (hist->dma_ch)
+ dma_release_channel(hist->dma_ch);
+ }
+
+ return ret;
+}
+
+/*
+ * omap3isp_hist_cleanup - Module cleanup.
+ */
+void omap3isp_hist_cleanup(struct isp_device *isp)
+{
+ struct ispstat *hist = &isp->isp_hist;
+
+ if (hist->dma_ch)
+ dma_release_channel(hist->dma_ch);
+
+ omap3isp_stat_cleanup(hist);
+}
diff --git a/drivers/media/platform/omap3isp/isphist.h b/drivers/media/platform/omap3isp/isphist.h
new file mode 100644
index 000000000..3b5415517
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isphist.h
@@ -0,0 +1,30 @@
+/*
+ * isphist.h
+ *
+ * TI OMAP3 ISP - Histogram module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_HIST_H
+#define OMAP3_ISP_HIST_H
+
+#include <linux/omap3isp.h>
+
+#define ISPHIST_IN_BIT_WIDTH_CCDC 10
+
+struct isp_device;
+
+int omap3isp_hist_init(struct isp_device *isp);
+void omap3isp_hist_cleanup(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_HIST_H */
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
new file mode 100644
index 000000000..20857ae42
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -0,0 +1,2355 @@
+/*
+ * isppreview.c
+ *
+ * TI OMAP3 ISP driver - Preview module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "isppreview.h"
+
+/* Default values in Office Fluorescent Light for RGBtoRGB Blending */
+static const struct omap3isp_prev_rgbtorgb flr_rgb2rgb = {
+ { /* RGB-RGB Matrix */
+ {0x01E2, 0x0F30, 0x0FEE},
+ {0x0F9B, 0x01AC, 0x0FB9},
+ {0x0FE0, 0x0EC0, 0x0260}
+ }, /* RGB Offset */
+ {0x0000, 0x0000, 0x0000}
+};
+
+/* Default values in Office Fluorescent Light for RGB to YUV Conversion */
+static const struct omap3isp_prev_csc flr_prev_csc = {
+ { /* CSC Coef Matrix */
+ {66, 129, 25},
+ {-38, -75, 112},
+ {112, -94 , -18}
+ }, /* CSC Offset */
+ {0x0, 0x0, 0x0}
+};
+
+/* Default values in Office Fluorescent Light for CFA Gradient */
+#define FLR_CFA_GRADTHRS_HORZ 0x28
+#define FLR_CFA_GRADTHRS_VERT 0x28
+
+/* Default values in Office Fluorescent Light for Chroma Suppression */
+#define FLR_CSUP_GAIN 0x0D
+#define FLR_CSUP_THRES 0xEB
+
+/* Default values in Office Fluorescent Light for Noise Filter */
+#define FLR_NF_STRGTH 0x03
+
+/* Default values for White Balance */
+#define FLR_WBAL_DGAIN 0x100
+#define FLR_WBAL_COEF 0x20
+
+/* Default values in Office Fluorescent Light for Black Adjustment */
+#define FLR_BLKADJ_BLUE 0x0
+#define FLR_BLKADJ_GREEN 0x0
+#define FLR_BLKADJ_RED 0x0
+
+#define DEF_DETECT_CORRECT_VAL 0xe
+
+/*
+ * Margins and image size limits.
+ *
+ * The preview engine crops several rows and columns internally depending on
+ * which filters are enabled. To avoid format changes when the filters are
+ * enabled or disabled (which would prevent them from being turned on or off
+ * during streaming), the driver assumes all filters that can be configured
+ * during streaming are enabled when computing sink crop and source format
+ * limits.
+ *
+ * If a filter is disabled, additional cropping is automatically added at the
+ * preview engine input by the driver to avoid overflow at line and frame end.
+ * This is completely transparent for applications.
+ *
+ * Median filter 4 pixels
+ * Noise filter,
+ * Faulty pixels correction 4 pixels, 4 lines
+ * Color suppression 2 pixels
+ * or luma enhancement
+ * -------------------------------------------------------------
+ * Maximum total 10 pixels, 4 lines
+ *
+ * The color suppression and luma enhancement filters are applied after Bayer to
+ * YUV conversion. They thus can crop one pixel on the left and one pixel on the
+ * right side of the image without changing the color pattern. When both those
+ * filters are disabled, the driver must crop the two pixels on the same side of
+ * the image to avoid changing the Bayer pattern. The left margin is thus set to
+ * 6 pixels and the right margin to 4 pixels.
+ */
+
+#define PREV_MARGIN_LEFT 6
+#define PREV_MARGIN_RIGHT 4
+#define PREV_MARGIN_TOP 2
+#define PREV_MARGIN_BOTTOM 2
+
+#define PREV_MIN_IN_WIDTH 64
+#define PREV_MIN_IN_HEIGHT 8
+#define PREV_MAX_IN_HEIGHT 16384
+
+#define PREV_MIN_OUT_WIDTH 0
+#define PREV_MIN_OUT_HEIGHT 0
+#define PREV_MAX_OUT_WIDTH_REV_1 1280
+#define PREV_MAX_OUT_WIDTH_REV_2 3300
+#define PREV_MAX_OUT_WIDTH_REV_15 4096
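+
+/*
+ * Worked example of the margins above (illustrative sketch only): with all
+ * optional filters assumed enabled, a 640x480 input at the preview engine
+ * sink can produce at most
+ * (640 - PREV_MARGIN_LEFT - PREV_MARGIN_RIGHT) x
+ * (480 - PREV_MARGIN_TOP - PREV_MARGIN_BOTTOM) = 630x476 pixels.
+ */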
+
+/*
+ * Coefficient Tables for the submodules in Preview.
+ * Array is initialised with the values from the tables text file.
+ */
+
+/*
+ * CFA Filter Coefficient Table
+ *
+ */
+static u32 cfa_coef_table[4][OMAP3ISP_PREV_CFA_BLK_SIZE] = {
+#include "cfa_coef_table.h"
+};
+
+/*
+ * Default Gamma Correction Table - All components
+ */
+static u32 gamma_table[] = {
+#include "gamma_table.h"
+};
+
+/*
+ * Noise Filter Threshold table
+ */
+static u32 noise_filter_table[] = {
+#include "noise_filter_table.h"
+};
+
+/*
+ * Luminance Enhancement Table
+ */
+static u32 luma_enhance_table[] = {
+#include "luma_enhance_table.h"
+};
+
+/*
+ * preview_config_luma_enhancement - Configure the Luminance Enhancement table
+ */
+static void
+preview_config_luma_enhancement(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_luma *yt = &params->luma;
+ unsigned int i;
+
+ isp_reg_writel(isp, ISPPRV_YENH_TABLE_ADDR,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+ for (i = 0; i < OMAP3ISP_PREV_YENH_TBL_SIZE; i++) {
+ isp_reg_writel(isp, yt->table[i],
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA);
+ }
+}
+
+/*
+ * preview_enable_luma_enhancement - Enable/disable Luminance Enhancement
+ */
+static void
+preview_enable_luma_enhancement(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_YNENHEN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_YNENHEN);
+}
+
+/*
+ * preview_enable_invalaw - Enable/disable Inverse A-Law decompression
+ */
+static void preview_enable_invalaw(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_INVALAW);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_INVALAW);
+}
+
+/*
+ * preview_config_hmed - Configure the Horizontal Median Filter
+ */
+static void preview_config_hmed(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_hmed *hmed = &params->hmed;
+
+ isp_reg_writel(isp, (hmed->odddist == 1 ? 0 : ISPPRV_HMED_ODDDIST) |
+ (hmed->evendist == 1 ? 0 : ISPPRV_HMED_EVENDIST) |
+ (hmed->thres << ISPPRV_HMED_THRESHOLD_SHIFT),
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_HMED);
+}
+
+/*
+ * preview_enable_hmed - Enable/disable the Horizontal Median Filter
+ */
+static void preview_enable_hmed(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_HMEDEN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_HMEDEN);
+}
+
+/*
+ * preview_config_cfa - Configure CFA Interpolation for Bayer formats
+ *
+ * The CFA table is organised in four blocks, one per Bayer component. The
+ * hardware expects blocks to follow the Bayer order of the input data, while
+ * the driver stores the table in GRBG order in memory. The blocks need to be
+ * reordered to support non-GRBG Bayer patterns.
+ */
+static void preview_config_cfa(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ static const unsigned int cfa_coef_order[4][4] = {
+ { 0, 1, 2, 3 }, /* GRBG */
+ { 1, 0, 3, 2 }, /* RGGB */
+ { 2, 3, 0, 1 }, /* BGGR */
+ { 3, 2, 1, 0 }, /* GBRG */
+ };
+ const unsigned int *order = cfa_coef_order[prev->params.cfa_order];
+ const struct omap3isp_prev_cfa *cfa = &params->cfa;
+ struct isp_device *isp = to_isp_device(prev);
+ unsigned int i;
+ unsigned int j;
+
+ isp_reg_writel(isp,
+ (cfa->gradthrs_vert << ISPPRV_CFA_GRADTH_VER_SHIFT) |
+ (cfa->gradthrs_horz << ISPPRV_CFA_GRADTH_HOR_SHIFT),
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CFA);
+
+ isp_reg_writel(isp, ISPPRV_CFA_TABLE_ADDR,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+
+ for (i = 0; i < 4; ++i) {
+ const __u32 *block = cfa->table[order[i]];
+
+ for (j = 0; j < OMAP3ISP_PREV_CFA_BLK_SIZE; ++j)
+ isp_reg_writel(isp, block[j], OMAP3_ISP_IOMEM_PREV,
+ ISPPRV_SET_TBL_DATA);
+ }
+}
+
+/*
+ * preview_config_chroma_suppression - Configure Chroma Suppression
+ */
+static void
+preview_config_chroma_suppression(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_csup *cs = &params->csup;
+
+ isp_reg_writel(isp,
+ cs->gain | (cs->thres << ISPPRV_CSUP_THRES_SHIFT) |
+ (cs->hypf_en << ISPPRV_CSUP_HPYF_SHIFT),
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CSUP);
+}
+
+/*
+ * preview_enable_chroma_suppression - Enable/disable Chrominance Suppression
+ */
+static void
+preview_enable_chroma_suppression(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_SUPEN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_SUPEN);
+}
+
+/*
+ * preview_config_whitebalance - Configure White Balance parameters
+ *
+ * Coefficient matrix always with default values.
+ */
+static void
+preview_config_whitebalance(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_wbal *wbal = &params->wbal;
+ u32 val;
+
+ isp_reg_writel(isp, wbal->dgain, OMAP3_ISP_IOMEM_PREV, ISPPRV_WB_DGAIN);
+
+ val = wbal->coef0 << ISPPRV_WBGAIN_COEF0_SHIFT;
+ val |= wbal->coef1 << ISPPRV_WBGAIN_COEF1_SHIFT;
+ val |= wbal->coef2 << ISPPRV_WBGAIN_COEF2_SHIFT;
+ val |= wbal->coef3 << ISPPRV_WBGAIN_COEF3_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_WBGAIN);
+
+ isp_reg_writel(isp,
+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_0_SHIFT |
+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_1_SHIFT |
+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_2_SHIFT |
+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_3_SHIFT |
+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_0_SHIFT |
+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_1_SHIFT |
+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_2_SHIFT |
+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_3_SHIFT |
+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_0_SHIFT |
+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_1_SHIFT |
+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_2_SHIFT |
+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_3_SHIFT |
+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_0_SHIFT |
+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_1_SHIFT |
+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_2_SHIFT |
+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_3_SHIFT,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL);
+}
+
+/*
+ * preview_config_blkadj - Configure Black Adjustment
+ */
+static void
+preview_config_blkadj(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_blkadj *blkadj = &params->blkadj;
+
+ isp_reg_writel(isp, (blkadj->blue << ISPPRV_BLKADJOFF_B_SHIFT) |
+ (blkadj->green << ISPPRV_BLKADJOFF_G_SHIFT) |
+ (blkadj->red << ISPPRV_BLKADJOFF_R_SHIFT),
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_BLKADJOFF);
+}
+
+/*
+ * preview_config_rgb_blending - Configure RGB-RGB Blending
+ */
+static void
+preview_config_rgb_blending(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_rgbtorgb *rgbrgb = &params->rgb2rgb;
+ u32 val;
+
+ val = (rgbrgb->matrix[0][0] & 0xfff) << ISPPRV_RGB_MAT1_MTX_RR_SHIFT;
+ val |= (rgbrgb->matrix[0][1] & 0xfff) << ISPPRV_RGB_MAT1_MTX_GR_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT1);
+
+ val = (rgbrgb->matrix[0][2] & 0xfff) << ISPPRV_RGB_MAT2_MTX_BR_SHIFT;
+ val |= (rgbrgb->matrix[1][0] & 0xfff) << ISPPRV_RGB_MAT2_MTX_RG_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT2);
+
+ val = (rgbrgb->matrix[1][1] & 0xfff) << ISPPRV_RGB_MAT3_MTX_GG_SHIFT;
+ val |= (rgbrgb->matrix[1][2] & 0xfff) << ISPPRV_RGB_MAT3_MTX_BG_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT3);
+
+ val = (rgbrgb->matrix[2][0] & 0xfff) << ISPPRV_RGB_MAT4_MTX_RB_SHIFT;
+ val |= (rgbrgb->matrix[2][1] & 0xfff) << ISPPRV_RGB_MAT4_MTX_GB_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT4);
+
+ val = (rgbrgb->matrix[2][2] & 0xfff) << ISPPRV_RGB_MAT5_MTX_BB_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT5);
+
+ val = (rgbrgb->offset[0] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT;
+ val |= (rgbrgb->offset[1] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF1);
+
+ val = (rgbrgb->offset[2] & 0x3ff) << ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF2);
+}
+
+/*
+ * preview_config_csc - Configure Color Space Conversion (RGB to YCbYCr)
+ */
+static void
+preview_config_csc(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_csc *csc = &params->csc;
+ u32 val;
+
+ val = (csc->matrix[0][0] & 0x3ff) << ISPPRV_CSC0_RY_SHIFT;
+ val |= (csc->matrix[0][1] & 0x3ff) << ISPPRV_CSC0_GY_SHIFT;
+ val |= (csc->matrix[0][2] & 0x3ff) << ISPPRV_CSC0_BY_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC0);
+
+ val = (csc->matrix[1][0] & 0x3ff) << ISPPRV_CSC1_RCB_SHIFT;
+ val |= (csc->matrix[1][1] & 0x3ff) << ISPPRV_CSC1_GCB_SHIFT;
+ val |= (csc->matrix[1][2] & 0x3ff) << ISPPRV_CSC1_BCB_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC1);
+
+ val = (csc->matrix[2][0] & 0x3ff) << ISPPRV_CSC2_RCR_SHIFT;
+ val |= (csc->matrix[2][1] & 0x3ff) << ISPPRV_CSC2_GCR_SHIFT;
+ val |= (csc->matrix[2][2] & 0x3ff) << ISPPRV_CSC2_BCR_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC2);
+
+ val = (csc->offset[0] & 0xff) << ISPPRV_CSC_OFFSET_Y_SHIFT;
+ val |= (csc->offset[1] & 0xff) << ISPPRV_CSC_OFFSET_CB_SHIFT;
+ val |= (csc->offset[2] & 0xff) << ISPPRV_CSC_OFFSET_CR_SHIFT;
+ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC_OFFSET);
+}
+
+/*
+ * preview_config_yc_range - Configure the max and min Y and C values
+ */
+static void
+preview_config_yc_range(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_yclimit *yc = &params->yclimit;
+
+ isp_reg_writel(isp,
+ yc->maxC << ISPPRV_SETUP_YC_MAXC_SHIFT |
+ yc->maxY << ISPPRV_SETUP_YC_MAXY_SHIFT |
+ yc->minC << ISPPRV_SETUP_YC_MINC_SHIFT |
+ yc->minY << ISPPRV_SETUP_YC_MINY_SHIFT,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC);
+}
+
+/*
+ * preview_config_dcor - Configure Couplet Defect Correction
+ */
+static void
+preview_config_dcor(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_dcor *dcor = &params->dcor;
+
+ isp_reg_writel(isp, dcor->detect_correct[0],
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR0);
+ isp_reg_writel(isp, dcor->detect_correct[1],
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR1);
+ isp_reg_writel(isp, dcor->detect_correct[2],
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR2);
+ isp_reg_writel(isp, dcor->detect_correct[3],
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR3);
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_DCCOUP,
+ dcor->couplet_mode_en ? ISPPRV_PCR_DCCOUP : 0);
+}
+
+/*
+ * preview_enable_dcor - Enable/disable Couplet Defect Correction
+ */
+static void preview_enable_dcor(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_DCOREN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_DCOREN);
+}
+
+/*
+ * preview_enable_drkframe_capture - Enable/disable Dark Frame Capture
+ */
+static void
+preview_enable_drkframe_capture(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_DRKFCAP);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_DRKFCAP);
+}
+
+/*
+ * preview_enable_drkframe - Enable/disable Dark Frame Subtraction
+ */
+static void preview_enable_drkframe(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_DRKFEN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_DRKFEN);
+}
+
+/*
+ * preview_config_noisefilter - Configure the Noise Filter
+ */
+static void
+preview_config_noisefilter(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_nf *nf = &params->nf;
+ unsigned int i;
+
+ isp_reg_writel(isp, nf->spread, OMAP3_ISP_IOMEM_PREV, ISPPRV_NF);
+ isp_reg_writel(isp, ISPPRV_NF_TABLE_ADDR,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+ for (i = 0; i < OMAP3ISP_PREV_NF_TBL_SIZE; i++) {
+ isp_reg_writel(isp, nf->table[i],
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA);
+ }
+}
+
+/*
+ * preview_enable_noisefilter - Enable/disable the Noise Filter
+ */
+static void
+preview_enable_noisefilter(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_NFEN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_NFEN);
+}
+
+/*
+ * preview_config_gammacorrn - Configure the Gamma Correction tables
+ */
+static void
+preview_config_gammacorrn(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct omap3isp_prev_gtables *gt = &params->gamma;
+ unsigned int i;
+
+ isp_reg_writel(isp, ISPPRV_REDGAMMA_TABLE_ADDR,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+ for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++)
+ isp_reg_writel(isp, gt->red[i], OMAP3_ISP_IOMEM_PREV,
+ ISPPRV_SET_TBL_DATA);
+
+ isp_reg_writel(isp, ISPPRV_GREENGAMMA_TABLE_ADDR,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+ for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++)
+ isp_reg_writel(isp, gt->green[i], OMAP3_ISP_IOMEM_PREV,
+ ISPPRV_SET_TBL_DATA);
+
+ isp_reg_writel(isp, ISPPRV_BLUEGAMMA_TABLE_ADDR,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+ for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++)
+ isp_reg_writel(isp, gt->blue[i], OMAP3_ISP_IOMEM_PREV,
+ ISPPRV_SET_TBL_DATA);
+}
+
+/*
+ * preview_enable_gammacorrn - Enable/disable Gamma Correction
+ *
+ * When gamma correction is disabled, the module is bypassed and its output is
+ * the 8 MSBs of the 10-bit input.
+ */
+static void
+preview_enable_gammacorrn(struct isp_prev_device *prev, bool enable)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (enable)
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_GAMMA_BYPASS);
+ else
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_GAMMA_BYPASS);
+}
+
+/*
+ * preview_config_contrast - Configure the Contrast
+ *
+ * Value should be programmed before enabling the module.
+ */
+static void
+preview_config_contrast(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT,
+ 0xff << ISPPRV_CNT_BRT_CNT_SHIFT,
+ params->contrast << ISPPRV_CNT_BRT_CNT_SHIFT);
+}
+
+/*
+ * preview_config_brightness - Configure the Brightness
+ */
+static void
+preview_config_brightness(struct isp_prev_device *prev,
+ const struct prev_params *params)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT,
+ 0xff << ISPPRV_CNT_BRT_BRT_SHIFT,
+ params->brightness << ISPPRV_CNT_BRT_BRT_SHIFT);
+}
+
+/*
+ * preview_update_contrast - Updates the contrast.
+ * @contrast: Contrast value to be programmed.
+ *
+ * Value should be programmed before enabling the module.
+ */
+static void
+preview_update_contrast(struct isp_prev_device *prev, u8 contrast)
+{
+ struct prev_params *params;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ params = (prev->params.active & OMAP3ISP_PREV_CONTRAST)
+ ? &prev->params.params[0] : &prev->params.params[1];
+
+ if (params->contrast != (contrast * ISPPRV_CONTRAST_UNITS)) {
+ params->contrast = contrast * ISPPRV_CONTRAST_UNITS;
+ params->update |= OMAP3ISP_PREV_CONTRAST;
+ }
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
+/*
+ * preview_update_brightness - Updates the brightness in preview module.
+ * @brightness: Brightness value to be programmed.
+ *
+ */
+static void
+preview_update_brightness(struct isp_prev_device *prev, u8 brightness)
+{
+ struct prev_params *params;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ params = (prev->params.active & OMAP3ISP_PREV_BRIGHTNESS)
+ ? &prev->params.params[0] : &prev->params.params[1];
+
+ if (params->brightness != (brightness * ISPPRV_BRIGHT_UNITS)) {
+ params->brightness = brightness * ISPPRV_BRIGHT_UNITS;
+ params->update |= OMAP3ISP_PREV_BRIGHTNESS;
+ }
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
+static u32
+preview_params_lock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+ u32 active = prev->params.active;
+
+ if (shadow) {
+ /* Mark all shadow parameters we are going to touch as busy. */
+ prev->params.params[0].busy |= ~active & update;
+ prev->params.params[1].busy |= active & update;
+ } else {
+ /* Mark all active parameters we are going to touch as busy. */
+ update = (prev->params.params[0].update & active)
+ | (prev->params.params[1].update & ~active);
+
+ prev->params.params[0].busy |= active & update;
+ prev->params.params[1].busy |= ~active & update;
+ }
+
+ return update;
+}
+
+static void
+preview_params_unlock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+ u32 active = prev->params.active;
+
+ if (shadow) {
+ /* Set the update flag for shadow parameters that have been
+ * updated and clear the busy flag for all shadow parameters.
+ */
+ prev->params.params[0].update |= (~active & update);
+ prev->params.params[1].update |= (active & update);
+ prev->params.params[0].busy &= active;
+ prev->params.params[1].busy &= ~active;
+ } else {
+ /* Clear the update flag for active parameters that have been
+ * applied and the busy flag for all active parameters.
+ */
+ prev->params.params[0].update &= ~(active & update);
+ prev->params.params[1].update &= ~(~active & update);
+ prev->params.params[0].busy &= ~active;
+ prev->params.params[1].busy &= active;
+ }
+}
+
+static void preview_params_switch(struct isp_prev_device *prev)
+{
+ u32 to_switch;
+
+ /* Switch active parameters with updated shadow parameters when the
+	 * shadow parameter has been updated and neither the active nor the
+ * shadow parameter is busy.
+ */
+ to_switch = (prev->params.params[0].update & ~prev->params.active)
+ | (prev->params.params[1].update & prev->params.active);
+ to_switch &= ~(prev->params.params[0].busy |
+ prev->params.params[1].busy);
+ if (to_switch == 0)
+ return;
+
+ prev->params.active ^= to_switch;
+
+ /* Remove the update flag for the shadow copy of parameters we have
+ * switched.
+ */
+ prev->params.params[0].update &= ~(~prev->params.active & to_switch);
+ prev->params.params[1].update &= ~(prev->params.active & to_switch);
+}
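+
+/*
+ * Illustration of the double-buffering scheme above (hedged sketch, not from
+ * the original sources): each bit in prev->params.active records which of the
+ * two prev_params copies currently holds the parameter that is programmed to
+ * the hardware; preview_params_switch() simply toggles such a bit, promoting
+ * the updated shadow copy to active without copying any data.
+ */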
+
+/* preview parameters update structure */
+struct preview_update {
+ void (*config)(struct isp_prev_device *, const struct prev_params *);
+ void (*enable)(struct isp_prev_device *, bool);
+ unsigned int param_offset;
+ unsigned int param_size;
+ unsigned int config_offset;
+ bool skip;
+};
+
+/* Keep the array indexed by the OMAP3ISP_PREV_* bit number. */
+static const struct preview_update update_attrs[] = {
+ /* OMAP3ISP_PREV_LUMAENH */ {
+ preview_config_luma_enhancement,
+ preview_enable_luma_enhancement,
+ offsetof(struct prev_params, luma),
+ FIELD_SIZEOF(struct prev_params, luma),
+ offsetof(struct omap3isp_prev_update_config, luma),
+ }, /* OMAP3ISP_PREV_INVALAW */ {
+ NULL,
+ preview_enable_invalaw,
+ }, /* OMAP3ISP_PREV_HRZ_MED */ {
+ preview_config_hmed,
+ preview_enable_hmed,
+ offsetof(struct prev_params, hmed),
+ FIELD_SIZEOF(struct prev_params, hmed),
+ offsetof(struct omap3isp_prev_update_config, hmed),
+ }, /* OMAP3ISP_PREV_CFA */ {
+ preview_config_cfa,
+ NULL,
+ offsetof(struct prev_params, cfa),
+ FIELD_SIZEOF(struct prev_params, cfa),
+ offsetof(struct omap3isp_prev_update_config, cfa),
+ }, /* OMAP3ISP_PREV_CHROMA_SUPP */ {
+ preview_config_chroma_suppression,
+ preview_enable_chroma_suppression,
+ offsetof(struct prev_params, csup),
+ FIELD_SIZEOF(struct prev_params, csup),
+ offsetof(struct omap3isp_prev_update_config, csup),
+ }, /* OMAP3ISP_PREV_WB */ {
+ preview_config_whitebalance,
+ NULL,
+ offsetof(struct prev_params, wbal),
+ FIELD_SIZEOF(struct prev_params, wbal),
+ offsetof(struct omap3isp_prev_update_config, wbal),
+ }, /* OMAP3ISP_PREV_BLKADJ */ {
+ preview_config_blkadj,
+ NULL,
+ offsetof(struct prev_params, blkadj),
+ FIELD_SIZEOF(struct prev_params, blkadj),
+ offsetof(struct omap3isp_prev_update_config, blkadj),
+ }, /* OMAP3ISP_PREV_RGB2RGB */ {
+ preview_config_rgb_blending,
+ NULL,
+ offsetof(struct prev_params, rgb2rgb),
+ FIELD_SIZEOF(struct prev_params, rgb2rgb),
+ offsetof(struct omap3isp_prev_update_config, rgb2rgb),
+ }, /* OMAP3ISP_PREV_COLOR_CONV */ {
+ preview_config_csc,
+ NULL,
+ offsetof(struct prev_params, csc),
+ FIELD_SIZEOF(struct prev_params, csc),
+ offsetof(struct omap3isp_prev_update_config, csc),
+ }, /* OMAP3ISP_PREV_YC_LIMIT */ {
+ preview_config_yc_range,
+ NULL,
+ offsetof(struct prev_params, yclimit),
+ FIELD_SIZEOF(struct prev_params, yclimit),
+ offsetof(struct omap3isp_prev_update_config, yclimit),
+ }, /* OMAP3ISP_PREV_DEFECT_COR */ {
+ preview_config_dcor,
+ preview_enable_dcor,
+ offsetof(struct prev_params, dcor),
+ FIELD_SIZEOF(struct prev_params, dcor),
+ offsetof(struct omap3isp_prev_update_config, dcor),
+ }, /* Previously OMAP3ISP_PREV_GAMMABYPASS, not used anymore */ {
+ NULL,
+ NULL,
+ }, /* OMAP3ISP_PREV_DRK_FRM_CAPTURE */ {
+ NULL,
+ preview_enable_drkframe_capture,
+ }, /* OMAP3ISP_PREV_DRK_FRM_SUBTRACT */ {
+ NULL,
+ preview_enable_drkframe,
+ }, /* OMAP3ISP_PREV_LENS_SHADING */ {
+ NULL,
+ preview_enable_drkframe,
+ }, /* OMAP3ISP_PREV_NF */ {
+ preview_config_noisefilter,
+ preview_enable_noisefilter,
+ offsetof(struct prev_params, nf),
+ FIELD_SIZEOF(struct prev_params, nf),
+ offsetof(struct omap3isp_prev_update_config, nf),
+ }, /* OMAP3ISP_PREV_GAMMA */ {
+ preview_config_gammacorrn,
+ preview_enable_gammacorrn,
+ offsetof(struct prev_params, gamma),
+ FIELD_SIZEOF(struct prev_params, gamma),
+ offsetof(struct omap3isp_prev_update_config, gamma),
+ }, /* OMAP3ISP_PREV_CONTRAST */ {
+ preview_config_contrast,
+ NULL,
+ 0, 0, 0, true,
+ }, /* OMAP3ISP_PREV_BRIGHTNESS */ {
+ preview_config_brightness,
+ NULL,
+ 0, 0, 0, true,
+ },
+};
+
+/*
+ * preview_config - Copy and update local structure with userspace preview
+ * configuration.
+ * @prev: ISP preview engine
+ * @cfg: Configuration
+ *
+ * Return 0 on success or -EFAULT if the configuration can't be copied from
+ * userspace.
+ */
+static int preview_config(struct isp_prev_device *prev,
+ struct omap3isp_prev_update_config *cfg)
+{
+ unsigned long flags;
+ unsigned int i;
+ int rval = 0;
+ u32 update;
+ u32 active;
+
+ if (cfg->update == 0)
+ return 0;
+
+ /* Mark the shadow parameters we're going to update as busy. */
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_lock(prev, cfg->update, true);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
+ update = 0;
+
+ for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
+ const struct preview_update *attr = &update_attrs[i];
+ struct prev_params *params;
+ unsigned int bit = 1 << i;
+
+ if (attr->skip || !(cfg->update & bit))
+ continue;
+
+ params = &prev->params.params[!!(active & bit)];
+
+ if (cfg->flag & bit) {
+ void __user *from = *(void __user **)
+ ((void *)cfg + attr->config_offset);
+ void *to = (void *)params + attr->param_offset;
+ size_t size = attr->param_size;
+
+ if (to && from && size) {
+ if (copy_from_user(to, from, size)) {
+ rval = -EFAULT;
+ break;
+ }
+ }
+ params->features |= bit;
+ } else {
+ params->features &= ~bit;
+ }
+
+ update |= bit;
+ }
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, true);
+ preview_params_switch(prev);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
+ return rval;
+}
+
+/*
+ * preview_setup_hw - Setup preview registers and/or internal memory
+ * @prev: pointer to preview private structure
+ * @update: Bitmask of parameters to setup
+ * @active: Bitmask of parameters active in set 0
+ * Note: can be called from interrupt context
+ * Return none
+ */
+static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
+ u32 active)
+{
+ unsigned int i;
+
+ if (update == 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
+ const struct preview_update *attr = &update_attrs[i];
+ struct prev_params *params;
+ unsigned int bit = 1 << i;
+
+ if (!(update & bit))
+ continue;
+
+ params = &prev->params.params[!(active & bit)];
+
+ if (params->features & bit) {
+ if (attr->config)
+ attr->config(prev, params);
+ if (attr->enable)
+ attr->enable(prev, true);
+ } else {
+ if (attr->enable)
+ attr->enable(prev, false);
+ }
+ }
+}
+
+/*
+ * preview_config_ycpos - Configure byte layout of YUV image.
+ * @prev: pointer to previewer private structure
+ * @pixelcode: pixel code
+ */
+static void preview_config_ycpos(struct isp_prev_device *prev, u32 pixelcode)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ enum preview_ycpos_mode mode;
+
+ switch (pixelcode) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ mode = YCPOS_CrYCbY;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ mode = YCPOS_YCrYCb;
+ break;
+ default:
+ return;
+ }
+
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_YCPOS_CrYCbY,
+ mode << ISPPRV_PCR_YCPOS_SHIFT);
+}
+
+/*
+ * preview_config_averager - Enable / disable / configure averager
+ * @prev: pointer to previewer private structure
+ * @average: Average value to be configured.
+ */
+static void preview_config_averager(struct isp_prev_device *prev, u8 average)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ isp_reg_writel(isp, ISPPRV_AVE_EVENDIST_2 << ISPPRV_AVE_EVENDIST_SHIFT |
+ ISPPRV_AVE_ODDDIST_2 << ISPPRV_AVE_ODDDIST_SHIFT |
+ average, OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE);
+}
+
+
+/*
+ * preview_config_input_format - Configure the input format
+ * @prev: The preview engine
+ * @info: Sink pad format information
+ *
+ * Enable and configure CFA interpolation for Bayer formats and disable it for
+ * greyscale formats.
+ *
+ * The CFA table is organised in four blocks, one per Bayer component. The
+ * hardware expects blocks to follow the Bayer order of the input data, while
+ * the driver stores the table in GRBG order in memory. The blocks need to be
+ * reordered to support non-GRBG Bayer patterns.
+ */
+static void preview_config_input_format(struct isp_prev_device *prev,
+ const struct isp_format_info *info)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ struct prev_params *params;
+
+ if (info->width == 8)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+
+ switch (info->flavor) {
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ prev->params.cfa_order = 0;
+ break;
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ prev->params.cfa_order = 1;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ prev->params.cfa_order = 2;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ prev->params.cfa_order = 3;
+ break;
+ default:
+ /* Disable CFA for non-Bayer formats. */
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_CFAEN);
+ return;
+ }
+
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_CFAEN);
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_CFAFMT_MASK, ISPPRV_PCR_CFAFMT_BAYER);
+
+ params = (prev->params.active & OMAP3ISP_PREV_CFA)
+ ? &prev->params.params[0] : &prev->params.params[1];
+
+ preview_config_cfa(prev, params);
+}
+
+/*
+ * preview_config_input_size - Configure the input frame size
+ *
+ * The preview engine crops several rows and columns internally depending on
+ * which processing blocks are enabled. The driver assumes all those blocks are
+ * enabled when reporting source pad formats to userspace. If this assumption is
+ * not true, rows and columns must be manually cropped at the preview engine
+ * input to avoid overflows at the end of lines and frames.
+ *
+ * See the explanation at the PREV_MARGIN_* definitions for more details.
+ */
+static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
+{
+ const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
+ struct isp_device *isp = to_isp_device(prev);
+ unsigned int sph = prev->crop.left;
+ unsigned int eph = prev->crop.left + prev->crop.width - 1;
+ unsigned int slv = prev->crop.top;
+ unsigned int elv = prev->crop.top + prev->crop.height - 1;
+ u32 features;
+
+ if (format->code != MEDIA_BUS_FMT_Y8_1X8 &&
+ format->code != MEDIA_BUS_FMT_Y10_1X10) {
+ sph -= 2;
+ eph += 2;
+ slv -= 2;
+ elv += 2;
+ }
+
+ features = (prev->params.params[0].features & active)
+ | (prev->params.params[1].features & ~active);
+
+ if (features & (OMAP3ISP_PREV_DEFECT_COR | OMAP3ISP_PREV_NF)) {
+ sph -= 2;
+ eph += 2;
+ slv -= 2;
+ elv += 2;
+ }
+ if (features & OMAP3ISP_PREV_HRZ_MED) {
+ sph -= 2;
+ eph += 2;
+ }
+ if (features & (OMAP3ISP_PREV_CHROMA_SUPP | OMAP3ISP_PREV_LUMAENH))
+ sph -= 2;
+
+ isp_reg_writel(isp, (sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | eph,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO);
+ isp_reg_writel(isp, (slv << ISPPRV_VERT_INFO_SLV_SHIFT) | elv,
+ OMAP3_ISP_IOMEM_PREV, ISPPRV_VERT_INFO);
+}
+
+/*
+ * preview_config_inlineoffset - Configures the Read address line offset.
+ * @prev: Preview module
+ * @offset: Line offset
+ *
+ * According to the TRM, the line offset must be aligned on a 32-byte boundary.
+ * However, a hardware bug requires the memory start address to be aligned on a
+ * 64-byte boundary, so the offset should probably be aligned on 64 bytes as
+ * well.
+ */
+static void
+preview_config_inlineoffset(struct isp_prev_device *prev, u32 offset)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
+ ISPPRV_RADR_OFFSET);
+}
+
+/*
+ * preview_set_inaddr - Sets memory address of input frame.
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ *
+ * Configures the memory address from which the input frame is to be read.
+ */
+static void preview_set_inaddr(struct isp_prev_device *prev, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_RSDR_ADDR);
+}
+
+/*
+ * preview_config_outlineoffset - Configures the Write address line offset.
+ * @offset: Line Offset for the preview output.
+ *
+ * The offset must be a multiple of 32 bytes.
+ */
+static void preview_config_outlineoffset(struct isp_prev_device *prev,
+ u32 offset)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
+ ISPPRV_WADD_OFFSET);
+}
+
+/*
+ * preview_set_outaddr - Sets the memory address to store output frame
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ *
+ * Configures the memory address to which the output frame is written.
+ */
+static void preview_set_outaddr(struct isp_prev_device *prev, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_WSDR_ADDR);
+}
+
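+/*
+ * preview_adjust_bandwidth - Limit the preview engine memory read bandwidth
+ * @prev: ISP preview engine
+ *
+ * When the input frame is read from memory, program the SBL read request
+ * expansion so that requests are spaced out: a lower bound is computed from
+ * the pipeline maximum data rate (to avoid SBL overflows) and an upper bound
+ * from the requested frame rate, and the larger of the two values is used.
+ * The expansion is disabled when the input doesn't come from memory.
+ */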
+static void preview_adjust_bandwidth(struct isp_prev_device *prev)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
+ struct isp_device *isp = to_isp_device(prev);
+ const struct v4l2_mbus_framefmt *ifmt = &prev->formats[PREV_PAD_SINK];
+ unsigned long l3_ick = pipe->l3_ick;
+ struct v4l2_fract *timeperframe;
+ unsigned int cycles_per_frame;
+ unsigned int requests_per_frame;
+ unsigned int cycles_per_request;
+ unsigned int minimum;
+ unsigned int maximum;
+ unsigned int value;
+
+ if (prev->input != PREVIEW_INPUT_MEMORY) {
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+ ISPSBL_SDR_REQ_PRV_EXP_MASK);
+ return;
+ }
+
+ /* Compute the minimum number of cycles per request, based on the
+ * pipeline maximum data rate. This is an absolute lower bound if we
+ * don't want SBL overflows, so round the value up.
+ */
+ cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1,
+ pipe->max_rate);
+ minimum = DIV_ROUND_UP(cycles_per_request, 32);
+
+ /* Compute the maximum number of cycles per request, based on the
+ * requested frame rate. This is a soft upper bound to achieve a frame
+ * rate equal or higher than the requested value, so round the value
+ * down.
+ */
+ timeperframe = &pipe->max_timeperframe;
+
+ requests_per_frame = DIV_ROUND_UP(ifmt->width * 2, 256) * ifmt->height;
+ cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator,
+ timeperframe->denominator);
+ cycles_per_request = cycles_per_frame / requests_per_frame;
+
+ maximum = cycles_per_request / 32;
+
+ value = max(minimum, maximum);
+
+ dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value);
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+ ISPSBL_SDR_REQ_PRV_EXP_MASK,
+ value << ISPSBL_SDR_REQ_PRV_EXP_SHIFT);
+}
+
+/*
+ * omap3isp_preview_busy - Gets busy state of preview module.
+ */
+int omap3isp_preview_busy(struct isp_prev_device *prev)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR)
+ & ISPPRV_PCR_BUSY;
+}
+
+/*
+ * omap3isp_preview_restore_context - Restores the values of preview registers
+ */
+void omap3isp_preview_restore_context(struct isp_device *isp)
+{
+ struct isp_prev_device *prev = &isp->isp_prev;
+ const u32 update = OMAP3ISP_PREV_FEATURES_END - 1;
+
+ prev->params.params[0].update = prev->params.active & update;
+ prev->params.params[1].update = ~prev->params.active & update;
+
+ preview_setup_hw(prev, update, prev->params.active);
+
+ prev->params.params[0].update = 0;
+ prev->params.params[1].update = 0;
+}
+
+/*
+ * preview_print_status - Dump preview module registers to the kernel log
+ */
+#define PREV_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###PRV " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_##name))
+
+static void preview_print_status(struct isp_prev_device *prev)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ dev_dbg(isp->dev, "-------------Preview Register dump----------\n");
+
+ PREV_PRINT_REGISTER(isp, PCR);
+ PREV_PRINT_REGISTER(isp, HORZ_INFO);
+ PREV_PRINT_REGISTER(isp, VERT_INFO);
+ PREV_PRINT_REGISTER(isp, RSDR_ADDR);
+ PREV_PRINT_REGISTER(isp, RADR_OFFSET);
+ PREV_PRINT_REGISTER(isp, DSDR_ADDR);
+ PREV_PRINT_REGISTER(isp, DRKF_OFFSET);
+ PREV_PRINT_REGISTER(isp, WSDR_ADDR);
+ PREV_PRINT_REGISTER(isp, WADD_OFFSET);
+ PREV_PRINT_REGISTER(isp, AVE);
+ PREV_PRINT_REGISTER(isp, HMED);
+ PREV_PRINT_REGISTER(isp, NF);
+ PREV_PRINT_REGISTER(isp, WB_DGAIN);
+ PREV_PRINT_REGISTER(isp, WBGAIN);
+ PREV_PRINT_REGISTER(isp, WBSEL);
+ PREV_PRINT_REGISTER(isp, CFA);
+ PREV_PRINT_REGISTER(isp, BLKADJOFF);
+ PREV_PRINT_REGISTER(isp, RGB_MAT1);
+ PREV_PRINT_REGISTER(isp, RGB_MAT2);
+ PREV_PRINT_REGISTER(isp, RGB_MAT3);
+ PREV_PRINT_REGISTER(isp, RGB_MAT4);
+ PREV_PRINT_REGISTER(isp, RGB_MAT5);
+ PREV_PRINT_REGISTER(isp, RGB_OFF1);
+ PREV_PRINT_REGISTER(isp, RGB_OFF2);
+ PREV_PRINT_REGISTER(isp, CSC0);
+ PREV_PRINT_REGISTER(isp, CSC1);
+ PREV_PRINT_REGISTER(isp, CSC2);
+ PREV_PRINT_REGISTER(isp, CSC_OFFSET);
+ PREV_PRINT_REGISTER(isp, CNT_BRT);
+ PREV_PRINT_REGISTER(isp, CSUP);
+ PREV_PRINT_REGISTER(isp, SETUP_YC);
+ PREV_PRINT_REGISTER(isp, SET_TBL_ADDR);
+ PREV_PRINT_REGISTER(isp, CDC_THR0);
+ PREV_PRINT_REGISTER(isp, CDC_THR1);
+ PREV_PRINT_REGISTER(isp, CDC_THR2);
+ PREV_PRINT_REGISTER(isp, CDC_THR3);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * preview_init_params - init image processing parameters.
+ * @prev: pointer to previewer private structure
+ */
+static void preview_init_params(struct isp_prev_device *prev)
+{
+ struct prev_params *params;
+ unsigned int i;
+
+ spin_lock_init(&prev->params.lock);
+
+ prev->params.active = ~0;
+ prev->params.params[0].busy = 0;
+ prev->params.params[0].update = OMAP3ISP_PREV_FEATURES_END - 1;
+ prev->params.params[1].busy = 0;
+ prev->params.params[1].update = 0;
+
+ params = &prev->params.params[0];
+
+ /* Init values */
+ params->contrast = ISPPRV_CONTRAST_DEF * ISPPRV_CONTRAST_UNITS;
+ params->brightness = ISPPRV_BRIGHT_DEF * ISPPRV_BRIGHT_UNITS;
+ params->cfa.format = OMAP3ISP_CFAFMT_BAYER;
+ memcpy(params->cfa.table, cfa_coef_table,
+ sizeof(params->cfa.table));
+ params->cfa.gradthrs_horz = FLR_CFA_GRADTHRS_HORZ;
+ params->cfa.gradthrs_vert = FLR_CFA_GRADTHRS_VERT;
+ params->csup.gain = FLR_CSUP_GAIN;
+ params->csup.thres = FLR_CSUP_THRES;
+ params->csup.hypf_en = 0;
+ memcpy(params->luma.table, luma_enhance_table,
+ sizeof(params->luma.table));
+ params->nf.spread = FLR_NF_STRGTH;
+ memcpy(params->nf.table, noise_filter_table, sizeof(params->nf.table));
+ params->dcor.couplet_mode_en = 1;
+ for (i = 0; i < OMAP3ISP_PREV_DETECT_CORRECT_CHANNELS; i++)
+ params->dcor.detect_correct[i] = DEF_DETECT_CORRECT_VAL;
+ memcpy(params->gamma.blue, gamma_table, sizeof(params->gamma.blue));
+ memcpy(params->gamma.green, gamma_table, sizeof(params->gamma.green));
+ memcpy(params->gamma.red, gamma_table, sizeof(params->gamma.red));
+ params->wbal.dgain = FLR_WBAL_DGAIN;
+ params->wbal.coef0 = FLR_WBAL_COEF;
+ params->wbal.coef1 = FLR_WBAL_COEF;
+ params->wbal.coef2 = FLR_WBAL_COEF;
+ params->wbal.coef3 = FLR_WBAL_COEF;
+ params->blkadj.red = FLR_BLKADJ_RED;
+ params->blkadj.green = FLR_BLKADJ_GREEN;
+ params->blkadj.blue = FLR_BLKADJ_BLUE;
+ params->rgb2rgb = flr_rgb2rgb;
+ params->csc = flr_prev_csc;
+ params->yclimit.minC = ISPPRV_YC_MIN;
+ params->yclimit.maxC = ISPPRV_YC_MAX;
+ params->yclimit.minY = ISPPRV_YC_MIN;
+ params->yclimit.maxY = ISPPRV_YC_MAX;
+
+ params->features = OMAP3ISP_PREV_CFA | OMAP3ISP_PREV_DEFECT_COR
+ | OMAP3ISP_PREV_NF | OMAP3ISP_PREV_GAMMA
+ | OMAP3ISP_PREV_BLKADJ | OMAP3ISP_PREV_YC_LIMIT
+ | OMAP3ISP_PREV_RGB2RGB | OMAP3ISP_PREV_COLOR_CONV
+ | OMAP3ISP_PREV_WB | OMAP3ISP_PREV_BRIGHTNESS
+ | OMAP3ISP_PREV_CONTRAST;
+}
+
+/*
+ * preview_max_out_width - Handle previewer hardware output limitations
+ * @prev: pointer to previewer private structure
+ * returns the maximum output width for the current ISP revision
+ */
+static unsigned int preview_max_out_width(struct isp_prev_device *prev)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ switch (isp->revision) {
+ case ISP_REVISION_1_0:
+ return PREV_MAX_OUT_WIDTH_REV_1;
+
+ case ISP_REVISION_2_0:
+ default:
+ return PREV_MAX_OUT_WIDTH_REV_2;
+
+ case ISP_REVISION_15_0:
+ return PREV_MAX_OUT_WIDTH_REV_15;
+ }
+}
+
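+/*
+ * preview_configure - Apply the complete configuration to the hardware
+ * @prev: ISP preview engine
+ *
+ * Program the input and output formats, the line offsets, the output ports
+ * and all active image processing parameters. Called from
+ * preview_set_stream() when streaming is started.
+ */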
+static void preview_configure(struct isp_prev_device *prev)
+{
+ struct isp_device *isp = to_isp_device(prev);
+ const struct isp_format_info *info;
+ struct v4l2_mbus_framefmt *format;
+ unsigned long flags;
+ u32 update;
+ u32 active;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ /* Mark all active parameters we are going to touch as busy. */
+ update = preview_params_lock(prev, 0, false);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
+ /* PREV_PAD_SINK */
+ format = &prev->formats[PREV_PAD_SINK];
+ info = omap3isp_video_format_info(format->code);
+
+ preview_adjust_bandwidth(prev);
+
+ preview_config_input_format(prev, info);
+ preview_config_input_size(prev, active);
+
+ if (prev->input == PREVIEW_INPUT_CCDC)
+ preview_config_inlineoffset(prev, 0);
+ else
+ preview_config_inlineoffset(prev, ALIGN(format->width, 0x20) *
+ info->bpp);
+
+ preview_setup_hw(prev, update, active);
+
+ /* PREV_PAD_SOURCE */
+ format = &prev->formats[PREV_PAD_SOURCE];
+
+ if (prev->output & PREVIEW_OUTPUT_MEMORY)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_SDRPORT);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_SDRPORT);
+
+ if (prev->output & PREVIEW_OUTPUT_RESIZER)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_RSZPORT);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_RSZPORT);
+
+ if (prev->output & PREVIEW_OUTPUT_MEMORY)
+ preview_config_outlineoffset(prev,
+ ALIGN(format->width, 0x10) * 2);
+
+ preview_config_averager(prev, 0);
+ preview_config_ycpos(prev, format->code);
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, false);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
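+/*
+ * preview_enable_oneshot - Start processing of a single frame
+ * @prev: ISP preview engine
+ *
+ * The preview engine is always operated in single-shot mode; continuous
+ * streaming is implemented by re-enabling it from the interrupt handlers
+ * after every frame.
+ */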
+static void preview_enable_oneshot(struct isp_prev_device *prev)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ /* The PCR.SOURCE bit is automatically reset to 0 when the PCR.ENABLE
+ * bit is set. As the preview engine is used in single-shot mode, we
+ * need to set PCR.SOURCE before enabling the preview engine.
+ */
+ if (prev->input == PREVIEW_INPUT_MEMORY)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_SOURCE);
+
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_EN | ISPPRV_PCR_ONESHOT);
+}
+
+void omap3isp_preview_isr_frame_sync(struct isp_prev_device *prev)
+{
+ /*
+ * If ISP_VIDEO_DMAQUEUE_QUEUED is set, DMA queue had an underrun
+ * condition, the module was paused and now we have a buffer queued
+ * on the output again. Restart the pipeline if running in continuous
+ * mode.
+ */
+ if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+ prev->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
+ preview_enable_oneshot(prev);
+ isp_video_dmaqueue_flags_clr(&prev->video_out);
+ }
+}
+
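+/*
+ * preview_isr_buffer - Complete the current buffers and queue the next ones
+ * @prev: ISP preview engine
+ *
+ * Called from the preview interrupt handler when the input and/or output is
+ * memory. Complete the current buffers, program the addresses of the next
+ * ones and restart the engine in single-shot or continuous mode as needed.
+ */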
+static void preview_isr_buffer(struct isp_prev_device *prev)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
+ struct isp_buffer *buffer;
+ int restart = 0;
+
+ if (prev->output & PREVIEW_OUTPUT_MEMORY) {
+ buffer = omap3isp_video_buffer_next(&prev->video_out);
+ if (buffer != NULL) {
+ preview_set_outaddr(prev, buffer->dma);
+ restart = 1;
+ }
+ pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
+ }
+
+ if (prev->input == PREVIEW_INPUT_MEMORY) {
+ buffer = omap3isp_video_buffer_next(&prev->video_in);
+ if (buffer != NULL)
+ preview_set_inaddr(prev, buffer->dma);
+ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+ }
+
+ switch (prev->state) {
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ if (isp_pipeline_ready(pipe))
+ omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_SINGLESHOT);
+ break;
+
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ /* If an underrun occurs, the video queue operation handler will
+ * restart the preview engine. Otherwise restart it immediately.
+ */
+ if (restart)
+ preview_enable_oneshot(prev);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ default:
+ return;
+ }
+}
+
+/*
+ * omap3isp_preview_isr - ISP preview engine interrupt handler
+ *
+ * Manage the preview engine video buffers and configure shadowed registers.
+ */
+void omap3isp_preview_isr(struct isp_prev_device *prev)
+{
+ unsigned long flags;
+ u32 update;
+ u32 active;
+
+ if (omap3isp_module_sync_is_stopping(&prev->wait, &prev->stopping))
+ return;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_switch(prev);
+ update = preview_params_lock(prev, 0, false);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
+ preview_setup_hw(prev, update, active);
+ preview_config_input_size(prev, active);
+
+ if (prev->input == PREVIEW_INPUT_MEMORY ||
+ prev->output & PREVIEW_OUTPUT_MEMORY)
+ preview_isr_buffer(prev);
+ else if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS)
+ preview_enable_oneshot(prev);
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, false);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
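+/*
+ * preview_video_queue - Program the DMA address of a newly queued buffer
+ * @video: ISP video node the buffer has been queued on
+ * @buffer: Buffer to be processed
+ *
+ * Buffers queued on the output (memory-to-preview) node become the input
+ * frame address, buffers queued on the capture node become the output frame
+ * address.
+ */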
+static int preview_video_queue(struct isp_video *video,
+ struct isp_buffer *buffer)
+{
+ struct isp_prev_device *prev = &video->isp->isp_prev;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ preview_set_inaddr(prev, buffer->dma);
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ preview_set_outaddr(prev, buffer->dma);
+
+ return 0;
+}
+
+static const struct isp_video_operations preview_video_ops = {
+ .queue = preview_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * preview_s_ctrl - Handle set control subdev method
+ * @ctrl: pointer to v4l2 control structure
+ */
+static int preview_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isp_prev_device *prev =
+ container_of(ctrl->handler, struct isp_prev_device, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ preview_update_brightness(prev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ preview_update_contrast(prev, ctrl->val);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops preview_ctrl_ops = {
+ .s_ctrl = preview_s_ctrl,
+};
+
+/*
+ * preview_ioctl - Handle preview module private ioctls
+ * @sd: pointer to v4l2 subdev structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ * return zero on success or a negative error code otherwise
+ */
+static long preview_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_OMAP3ISP_PRV_CFG:
+ return preview_config(prev, arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/*
+ * preview_set_stream - Enable/Disable streaming on preview subdev
+ * @sd : pointer to v4l2 subdev structure
+ * @enable: requested ISP pipeline stream state (ISP_PIPELINE_STREAM_*)
+ * return zero on success
+ */
+static int preview_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct isp_video *video_out = &prev->video_out;
+ struct isp_device *isp = to_isp_device(prev);
+ struct device *dev = to_device(prev);
+
+ if (prev->state == ISP_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
+ preview_configure(prev);
+ atomic_set(&prev->stopping, 0);
+ preview_print_status(prev);
+ }
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ if (prev->output & PREVIEW_OUTPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
+
+ if (video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED ||
+ !(prev->output & PREVIEW_OUTPUT_MEMORY))
+ preview_enable_oneshot(prev);
+
+ isp_video_dmaqueue_flags_clr(video_out);
+ break;
+
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ if (prev->input == PREVIEW_INPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
+ if (prev->output & PREVIEW_OUTPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
+
+ preview_enable_oneshot(prev);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ if (omap3isp_module_sync_idle(&sd->entity, &prev->wait,
+ &prev->stopping))
+ dev_dbg(dev, "%s: stop timeout.\n", sd->name);
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
+ omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
+ isp_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ prev->state = enable;
+ return 0;
+}
+
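+/*
+ * __preview_get_format - Return the try or active format on a pad
+ * @prev: ISP preview engine
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad number
+ * @which: try/active format selector
+ */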
+static struct v4l2_mbus_framefmt *
+__preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&prev->subdev, cfg, pad);
+ else
+ return &prev->formats[pad];
+}
+
+static struct v4l2_rect *
+__preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&prev->subdev, cfg, PREV_PAD_SINK);
+ else
+ return &prev->crop;
+}
+
+/* previewer format descriptions */
+static const unsigned int preview_input_fmts[] = {
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+};
+
+static const unsigned int preview_output_fmts[] = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * preview_try_format - Validate a format
+ * @prev: ISP preview engine
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad number
+ * @fmt: format to be validated
+ * @which: try/active format selector
+ *
+ * Validate and adjust the given format for the given pad based on the preview
+ * engine limits and the format and crop rectangles on other pads.
+ */
+static void preview_try_format(struct isp_prev_device *prev,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ u32 pixelcode;
+ struct v4l2_rect *crop;
+ unsigned int i;
+
+ switch (pad) {
+ case PREV_PAD_SINK:
+ /* When reading data from the CCDC, the input size has already
+ * been mangled by the CCDC output pad so it can be accepted
+ * as-is.
+ *
+ * When reading data from memory, clamp the requested width and
+ * height. The TRM doesn't specify a minimum input height, make
+ * sure we got enough lines to enable the noise filter and color
+ * filter array interpolation.
+ */
+ if (prev->input == PREVIEW_INPUT_MEMORY) {
+ fmt->width = clamp_t(u32, fmt->width, PREV_MIN_IN_WIDTH,
+ preview_max_out_width(prev));
+ fmt->height = clamp_t(u32, fmt->height,
+ PREV_MIN_IN_HEIGHT,
+ PREV_MAX_IN_HEIGHT);
+ }
+
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ for (i = 0; i < ARRAY_SIZE(preview_input_fmts); i++) {
+ if (fmt->code == preview_input_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(preview_input_fmts))
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ break;
+
+ case PREV_PAD_SOURCE:
+ pixelcode = fmt->code;
+ *fmt = *__preview_get_format(prev, cfg, PREV_PAD_SINK, which);
+
+ switch (pixelcode) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ fmt->code = pixelcode;
+ break;
+
+ default:
+ fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ break;
+ }
+
+ /* The preview module output size is configurable through the
+ * averager (horizontal scaling by 1/1, 1/2, 1/4 or 1/8). This
+ * is not supported yet, hardcode the output size to the crop
+ * rectangle size.
+ */
+ crop = __preview_get_crop(prev, cfg, which);
+ fmt->width = crop->width;
+ fmt->height = crop->height;
+
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * preview_try_crop - Validate a crop rectangle
+ * @prev: ISP preview engine
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ *
+ * The preview engine crops lines and columns for its internal operation,
+ * depending on which filters are enabled. Enforce minimum crop margins to
+ * handle that transparently for userspace.
+ *
+ * See the explanation at the PREV_MARGIN_* definitions for more details.
+ */
+static void preview_try_crop(struct isp_prev_device *prev,
+ const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ unsigned int left = PREV_MARGIN_LEFT;
+ unsigned int right = sink->width - PREV_MARGIN_RIGHT;
+ unsigned int top = PREV_MARGIN_TOP;
+ unsigned int bottom = sink->height - PREV_MARGIN_BOTTOM;
+
+ /* When processing data on-the-fly from the CCDC, at least 2 pixels must
+ * be cropped from the left and right sides of the image. As we don't
+ * know which filters will be enabled, increase the left and right
+ * margins by two.
+ */
+ if (prev->input == PREVIEW_INPUT_CCDC) {
+ left += 2;
+ right -= 2;
+ }
+
+ /* The CFA filter crops 4 lines and 4 columns in Bayer mode, and 2 lines
+ * and no columns in other modes. Increase the margins based on the sink
+ * format.
+ */
+ if (sink->code != MEDIA_BUS_FMT_Y8_1X8 &&
+ sink->code != MEDIA_BUS_FMT_Y10_1X10) {
+ left += 2;
+ right -= 2;
+ top += 2;
+ bottom -= 2;
+ }
+
+ /* Restrict left/top to even values to keep the Bayer pattern. */
+ crop->left &= ~1;
+ crop->top &= ~1;
+
+ crop->left = clamp_t(u32, crop->left, left, right - PREV_MIN_OUT_WIDTH);
+ crop->top = clamp_t(u32, crop->top, top, bottom - PREV_MIN_OUT_HEIGHT);
+ crop->width = clamp_t(u32, crop->width, PREV_MIN_OUT_WIDTH,
+ right - crop->left);
+ crop->height = clamp_t(u32, crop->height, PREV_MIN_OUT_HEIGHT,
+ bottom - crop->top);
+}
+
+/*
+ * preview_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int preview_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case PREV_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(preview_input_fmts))
+ return -EINVAL;
+
+ code->code = preview_input_fmts[code->index];
+ break;
+ case PREV_PAD_SOURCE:
+ if (code->index >= ARRAY_SIZE(preview_output_fmts))
+ return -EINVAL;
+
+ code->code = preview_output_fmts[code->index];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
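+/*
+ * preview_enum_frame_size - Enumerate the supported frame sizes
+ * @sd: pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure
+ *
+ * The minimum and maximum sizes are obtained by passing 1x1 and the largest
+ * possible size through preview_try_format() and reporting the clamped
+ * results.
+ * return -EINVAL or zero on success
+ */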
+static int preview_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ preview_try_format(prev, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ preview_try_format(prev, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * preview_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP preview V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int preview_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != PREV_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ format = __preview_get_format(prev, cfg, PREV_PAD_SINK,
+ sel->which);
+ preview_try_crop(prev, format, &sel->r);
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__preview_get_crop(prev, cfg, sel->which);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * preview_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP preview V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int preview_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->target != V4L2_SEL_TGT_CROP ||
+ sel->pad != PREV_PAD_SINK)
+ return -EINVAL;
+
+ /* The crop rectangle can't be changed while streaming. */
+ if (prev->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ /* Modifying the crop rectangle always changes the format on the source
+ * pad. If the KEEP_CONFIG flag is set, just return the current crop
+ * rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = *__preview_get_crop(prev, cfg, sel->which);
+ return 0;
+ }
+
+ format = __preview_get_format(prev, cfg, PREV_PAD_SINK, sel->which);
+ preview_try_crop(prev, format, &sel->r);
+ *__preview_get_crop(prev, cfg, sel->which) = sel->r;
+
+ /* Update the source format. */
+ format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE, sel->which);
+ preview_try_format(prev, cfg, PREV_PAD_SOURCE, format, sel->which);
+
+ return 0;
+}
+
+/*
+ * preview_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * preview_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ preview_try_format(prev, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == PREV_PAD_SINK) {
+ /* Reset the crop rectangle. */
+ crop = __preview_get_crop(prev, cfg, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
+ preview_try_crop(prev, &fmt->format, crop);
+
+ /* Update the source format. */
+ format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE,
+ fmt->which);
+ preview_try_format(prev, cfg, PREV_PAD_SOURCE, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * preview_init_formats - Initialize formats on all pads
+ * @sd: ISP preview V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int preview_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = PREV_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ preview_set_format(sd, fh ? fh->pad : NULL, &format);
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops preview_v4l2_core_ops = {
+ .ioctl = preview_ioctl,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops preview_v4l2_video_ops = {
+ .s_stream = preview_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops preview_v4l2_pad_ops = {
+ .enum_mbus_code = preview_enum_mbus_code,
+ .enum_frame_size = preview_enum_frame_size,
+ .get_fmt = preview_get_format,
+ .set_fmt = preview_set_format,
+ .get_selection = preview_get_selection,
+ .set_selection = preview_set_selection,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops preview_v4l2_ops = {
+ .core = &preview_v4l2_core_ops,
+ .video = &preview_v4l2_video_ops,
+ .pad = &preview_v4l2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops preview_v4l2_internal_ops = {
+ .open = preview_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * preview_link_setup - Setup previewer connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to local pad
+ * @remote : Pointer to remote pad
+ * @flags : Link flags
+ * return -EINVAL or zero on success
+ */
+static int preview_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ unsigned int index = local->index;
+
+ /* FIXME: this is actually a hack! */
+ if (is_media_entity_v4l2_subdev(remote->entity))
+ index |= 2 << 16;
+
+ switch (index) {
+ case PREV_PAD_SINK:
+ /* read from memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (prev->input == PREVIEW_INPUT_CCDC)
+ return -EBUSY;
+ prev->input = PREVIEW_INPUT_MEMORY;
+ } else {
+ if (prev->input == PREVIEW_INPUT_MEMORY)
+ prev->input = PREVIEW_INPUT_NONE;
+ }
+ break;
+
+ case PREV_PAD_SINK | 2 << 16:
+ /* read from ccdc */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (prev->input == PREVIEW_INPUT_MEMORY)
+ return -EBUSY;
+ prev->input = PREVIEW_INPUT_CCDC;
+ } else {
+ if (prev->input == PREVIEW_INPUT_CCDC)
+ prev->input = PREVIEW_INPUT_NONE;
+ }
+ break;
+
+ /*
+ * The ISP core doesn't support pipelines with multiple video outputs.
+ * Revisit this when it is implemented, and return -EBUSY for now.
+ */
+
+ case PREV_PAD_SOURCE:
+ /* write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (prev->output & ~PREVIEW_OUTPUT_MEMORY)
+ return -EBUSY;
+ prev->output |= PREVIEW_OUTPUT_MEMORY;
+ } else {
+ prev->output &= ~PREVIEW_OUTPUT_MEMORY;
+ }
+ break;
+
+ case PREV_PAD_SOURCE | 2 << 16:
+ /* write to resizer */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (prev->output & ~PREVIEW_OUTPUT_RESIZER)
+ return -EBUSY;
+ prev->output |= PREVIEW_OUTPUT_RESIZER;
+ } else {
+ prev->output &= ~PREVIEW_OUTPUT_RESIZER;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations preview_media_ops = {
+ .link_setup = preview_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_preview_unregister_entities(struct isp_prev_device *prev)
+{
+ v4l2_device_unregister_subdev(&prev->subdev);
+ omap3isp_video_unregister(&prev->video_in);
+ omap3isp_video_unregister(&prev->video_out);
+}
+
+int omap3isp_preview_register_entities(struct isp_prev_device *prev,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ prev->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &prev->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&prev->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&prev->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_preview_unregister_entities(prev);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP previewer initialisation and cleanup
+ */
+
+/*
+ * preview_init_entities - Initialize subdev and media entity.
+ * @prev : Pointer to preview structure
+ * return -ENOMEM or zero on success
+ */
+static int preview_init_entities(struct isp_prev_device *prev)
+{
+ struct v4l2_subdev *sd = &prev->subdev;
+ struct media_pad *pads = prev->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ prev->input = PREVIEW_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &preview_v4l2_ops);
+ sd->internal_ops = &preview_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP preview", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, prev);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ v4l2_ctrl_handler_init(&prev->ctrls, 2);
+ v4l2_ctrl_new_std(&prev->ctrls, &preview_ctrl_ops, V4L2_CID_BRIGHTNESS,
+ ISPPRV_BRIGHT_LOW, ISPPRV_BRIGHT_HIGH,
+ ISPPRV_BRIGHT_STEP, ISPPRV_BRIGHT_DEF);
+ v4l2_ctrl_new_std(&prev->ctrls, &preview_ctrl_ops, V4L2_CID_CONTRAST,
+ ISPPRV_CONTRAST_LOW, ISPPRV_CONTRAST_HIGH,
+ ISPPRV_CONTRAST_STEP, ISPPRV_CONTRAST_DEF);
+ v4l2_ctrl_handler_setup(&prev->ctrls);
+ sd->ctrl_handler = &prev->ctrls;
+
+ pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ pads[PREV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &preview_media_ops;
+ ret = media_entity_pads_init(me, PREV_PADS_NUM, pads);
+ if (ret < 0)
+ goto error_handler_free;
+
+ preview_init_formats(sd, NULL);
+
+ /* According to the OMAP34xx TRM, video buffers need to be aligned on a
+ * 32-byte boundary. However, an undocumented hardware bug requires a
+ * 64-byte boundary at the preview engine input.
+ */
+ prev->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ prev->video_in.ops = &preview_video_ops;
+ prev->video_in.isp = to_isp_device(prev);
+ prev->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+ prev->video_in.bpl_alignment = 64;
+ prev->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ prev->video_out.ops = &preview_video_ops;
+ prev->video_out.isp = to_isp_device(prev);
+ prev->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+ prev->video_out.bpl_alignment = 32;
+
+ ret = omap3isp_video_init(&prev->video_in, "preview");
+ if (ret < 0)
+ goto error_video_in;
+
+ ret = omap3isp_video_init(&prev->video_out, "preview");
+ if (ret < 0)
+ goto error_video_out;
+
+ return 0;
+
+error_video_out:
+ omap3isp_video_cleanup(&prev->video_in);
+error_video_in:
+ media_entity_cleanup(&prev->subdev.entity);
+error_handler_free:
+ v4l2_ctrl_handler_free(&prev->ctrls);
+ return ret;
+}
+
+/*
+ * omap3isp_preview_init - Previewer initialization.
+ * @isp : Pointer to ISP device
+ * return -ENOMEM or zero on success
+ */
+int omap3isp_preview_init(struct isp_device *isp)
+{
+ struct isp_prev_device *prev = &isp->isp_prev;
+
+ init_waitqueue_head(&prev->wait);
+
+ preview_init_params(prev);
+
+ return preview_init_entities(prev);
+}
+
+void omap3isp_preview_cleanup(struct isp_device *isp)
+{
+ struct isp_prev_device *prev = &isp->isp_prev;
+
+ v4l2_ctrl_handler_free(&prev->ctrls);
+ omap3isp_video_cleanup(&prev->video_in);
+ omap3isp_video_cleanup(&prev->video_out);
+ media_entity_cleanup(&prev->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/isppreview.h b/drivers/media/platform/omap3isp/isppreview.h
new file mode 100644
index 000000000..16fdc03a3
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isppreview.h
@@ -0,0 +1,164 @@
+/*
+ * isppreview.h
+ *
+ * TI OMAP3 ISP - Preview module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_PREVIEW_H
+#define OMAP3_ISP_PREVIEW_H
+
+#include <linux/omap3isp.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+
+#include "ispvideo.h"
+
+#define ISPPRV_BRIGHT_STEP 0x1
+#define ISPPRV_BRIGHT_DEF 0x0
+#define ISPPRV_BRIGHT_LOW 0x0
+#define ISPPRV_BRIGHT_HIGH 0xFF
+#define ISPPRV_BRIGHT_UNITS 0x1
+
+#define ISPPRV_CONTRAST_STEP 0x1
+#define ISPPRV_CONTRAST_DEF 0x10
+#define ISPPRV_CONTRAST_LOW 0x0
+#define ISPPRV_CONTRAST_HIGH 0xFF
+#define ISPPRV_CONTRAST_UNITS 0x1
+
+/* Additional features not listed in linux/omap3isp.h */
+#define OMAP3ISP_PREV_CONTRAST (1 << 17)
+#define OMAP3ISP_PREV_BRIGHTNESS (1 << 18)
+#define OMAP3ISP_PREV_FEATURES_END (1 << 19)
+
+enum preview_input_entity {
+ PREVIEW_INPUT_NONE,
+ PREVIEW_INPUT_CCDC,
+ PREVIEW_INPUT_MEMORY,
+};
+
+#define PREVIEW_OUTPUT_RESIZER (1 << 1)
+#define PREVIEW_OUTPUT_MEMORY (1 << 2)
+
+/* Configure byte layout of YUV image */
+enum preview_ycpos_mode {
+ YCPOS_YCrYCb = 0,
+ YCPOS_YCbYCr = 1,
+ YCPOS_CbYCrY = 2,
+ YCPOS_CrYCbY = 3
+};
+
+/*
+ * struct prev_params - Structure for all configuration
+ * @busy: Bitmask of busy parameters (being updated or used)
+ * @update: Bitmask of the parameters to be updated
+ * @features: Set of features enabled.
+ * @cfa: CFA coefficients.
+ * @csup: Chroma suppression coefficients.
+ * @luma: Luma enhancement coefficients.
+ * @nf: Noise filter coefficients.
+ * @dcor: Defect correction coefficients.
+ * @gamma: Gamma coefficients.
+ * @wbal: White Balance parameters.
+ * @blkadj: Black adjustment parameters.
+ * @rgb2rgb: RGB blending parameters.
+ * @csc: Color space conversion (RGB to YCbCr) parameters.
+ * @hmed: Horizontal median filter.
+ * @yclimit: YC limits parameters.
+ * @contrast: Contrast.
+ * @brightness: Brightness.
+ */
+struct prev_params {
+ u32 busy;
+ u32 update;
+ u32 features;
+ struct omap3isp_prev_cfa cfa;
+ struct omap3isp_prev_csup csup;
+ struct omap3isp_prev_luma luma;
+ struct omap3isp_prev_nf nf;
+ struct omap3isp_prev_dcor dcor;
+ struct omap3isp_prev_gtables gamma;
+ struct omap3isp_prev_wbal wbal;
+ struct omap3isp_prev_blkadj blkadj;
+ struct omap3isp_prev_rgbtorgb rgb2rgb;
+ struct omap3isp_prev_csc csc;
+ struct omap3isp_prev_hmed hmed;
+ struct omap3isp_prev_yclimit yclimit;
+ u8 contrast;
+ u8 brightness;
+};
+
+/* Sink and source previewer pads */
+#define PREV_PAD_SINK 0
+#define PREV_PAD_SOURCE 1
+#define PREV_PADS_NUM 2
+
+/*
+ * struct isp_prev_device - Structure for storing ISP Preview module information
+ * @subdev: V4L2 subdevice
+ * @pads: Media entity pads
+ * @formats: Active formats at the subdev pad
+ * @crop: Active crop rectangle
+ * @input: Module currently connected to the input pad
+ * @output: Bitmask of the active output
+ * @video_in: Input video entity
+ * @video_out: Output video entity
+ * @params.params : Active and shadow parameters sets
+ * @params.active: Bitmask of parameters active in set 0
+ * @params.lock: Parameters lock, protects params.active and the parameter sets
+ * @state: Current preview pipeline state
+ *
+ * This structure is used to store the OMAP ISP Preview module Information.
+ */
+struct isp_prev_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[PREV_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[PREV_PADS_NUM];
+ struct v4l2_rect crop;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ enum preview_input_entity input;
+ unsigned int output;
+ struct isp_video video_in;
+ struct isp_video video_out;
+
+ struct {
+ unsigned int cfa_order;
+ struct prev_params params[2];
+ u32 active;
+ spinlock_t lock;
+ } params;
+
+ enum isp_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+struct isp_device;
+
+int omap3isp_preview_init(struct isp_device *isp);
+void omap3isp_preview_cleanup(struct isp_device *isp);
+
+int omap3isp_preview_register_entities(struct isp_prev_device *prv,
+ struct v4l2_device *vdev);
+void omap3isp_preview_unregister_entities(struct isp_prev_device *prv);
+
+void omap3isp_preview_isr_frame_sync(struct isp_prev_device *prev);
+void omap3isp_preview_isr(struct isp_prev_device *prev);
+
+int omap3isp_preview_busy(struct isp_prev_device *isp_prev);
+
+void omap3isp_preview_restore_context(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_PREVIEW_H */
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
new file mode 100644
index 000000000..d08483919
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -0,0 +1,1521 @@
+/*
+ * ispreg.h
+ *
+ * TI OMAP3 ISP - Registers definitions
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_REG_H
+#define OMAP3_ISP_REG_H
+
+#define CM_CAM_MCLK_HZ 172800000 /* Hz */
+
+/* ISP module register offset */
+
+#define ISP_REVISION (0x000)
+#define ISP_SYSCONFIG (0x004)
+#define ISP_SYSSTATUS (0x008)
+#define ISP_IRQ0ENABLE (0x00C)
+#define ISP_IRQ0STATUS (0x010)
+#define ISP_IRQ1ENABLE (0x014)
+#define ISP_IRQ1STATUS (0x018)
+#define ISP_TCTRL_GRESET_LENGTH (0x030)
+#define ISP_TCTRL_PSTRB_REPLAY (0x034)
+#define ISP_CTRL (0x040)
+#define ISP_SECURE (0x044)
+#define ISP_TCTRL_CTRL (0x050)
+#define ISP_TCTRL_FRAME (0x054)
+#define ISP_TCTRL_PSTRB_DELAY (0x058)
+#define ISP_TCTRL_STRB_DELAY (0x05C)
+#define ISP_TCTRL_SHUT_DELAY (0x060)
+#define ISP_TCTRL_PSTRB_LENGTH (0x064)
+#define ISP_TCTRL_STRB_LENGTH (0x068)
+#define ISP_TCTRL_SHUT_LENGTH (0x06C)
+#define ISP_PING_PONG_ADDR (0x070)
+#define ISP_PING_PONG_MEM_RANGE (0x074)
+#define ISP_PING_PONG_BUF_SIZE (0x078)
+
+/* CCP2 receiver registers */
+
+#define ISPCCP2_REVISION (0x000)
+#define ISPCCP2_SYSCONFIG (0x004)
+#define ISPCCP2_SYSCONFIG_SOFT_RESET (1 << 1)
+#define ISPCCP2_SYSCONFIG_AUTO_IDLE 0x1
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT 12
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_FORCE \
+ (0x0 << ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_NO \
+ (0x1 << ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SMART \
+ (0x2 << ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCCP2_SYSSTATUS (0x008)
+#define ISPCCP2_SYSSTATUS_RESET_DONE (1 << 0)
+#define ISPCCP2_LC01_IRQENABLE (0x00C)
+#define ISPCCP2_LC01_IRQSTATUS (0x010)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ (1 << 11)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_LE_IRQ (1 << 10)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_LS_IRQ (1 << 9)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FE_IRQ (1 << 8)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_COUNT_IRQ (1 << 7)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ (1 << 5)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ (1 << 4)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ (1 << 3)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ (1 << 2)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ (1 << 1)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ (1 << 0)
+
+#define ISPCCP2_LC23_IRQENABLE (0x014)
+#define ISPCCP2_LC23_IRQSTATUS (0x018)
+#define ISPCCP2_LCM_IRQENABLE (0x02C)
+#define ISPCCP2_LCM_IRQSTATUS_EOF_IRQ (1 << 0)
+#define ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ (1 << 1)
+#define ISPCCP2_LCM_IRQSTATUS (0x030)
+#define ISPCCP2_CTRL (0x040)
+#define ISPCCP2_CTRL_IF_EN (1 << 0)
+#define ISPCCP2_CTRL_PHY_SEL (1 << 1)
+#define ISPCCP2_CTRL_PHY_SEL_CLOCK (0 << 1)
+#define ISPCCP2_CTRL_PHY_SEL_STROBE (1 << 1)
+#define ISPCCP2_CTRL_PHY_SEL_MASK 0x1
+#define ISPCCP2_CTRL_PHY_SEL_SHIFT 1
+#define ISPCCP2_CTRL_IO_OUT_SEL (1 << 2)
+#define ISPCCP2_CTRL_IO_OUT_SEL_MASK 0x1
+#define ISPCCP2_CTRL_IO_OUT_SEL_SHIFT 2
+#define ISPCCP2_CTRL_MODE (1 << 4)
+#define ISPCCP2_CTRL_VP_CLK_FORCE_ON (1 << 9)
+#define ISPCCP2_CTRL_INV (1 << 10)
+#define ISPCCP2_CTRL_INV_MASK 0x1
+#define ISPCCP2_CTRL_INV_SHIFT 10
+#define ISPCCP2_CTRL_VP_ONLY_EN (1 << 11)
+#define ISPCCP2_CTRL_VP_CLK_POL (1 << 12)
+#define ISPCCP2_CTRL_VP_CLK_POL_MASK 0x1
+#define ISPCCP2_CTRL_VP_CLK_POL_SHIFT 12
+#define ISPCCP2_CTRL_VPCLK_DIV_SHIFT 15
+#define ISPCCP2_CTRL_VPCLK_DIV_MASK 0x1ffff /* [31:15] */
+#define ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT 8 /* 3430 bits */
+#define ISPCCP2_CTRL_VP_OUT_CTRL_MASK 0x3 /* 3430 bits */
+#define ISPCCP2_DBG (0x044)
+#define ISPCCP2_GNQ (0x048)
+#define ISPCCP2_LCx_CTRL(x) ((0x050)+0x30*(x))
+#define ISPCCP2_LCx_CTRL_CHAN_EN (1 << 0)
+#define ISPCCP2_LCx_CTRL_CRC_EN (1 << 19)
+#define ISPCCP2_LCx_CTRL_CRC_MASK 0x1
+#define ISPCCP2_LCx_CTRL_CRC_SHIFT 2
+#define ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0 19
+#define ISPCCP2_LCx_CTRL_REGION_EN (1 << 1)
+#define ISPCCP2_LCx_CTRL_REGION_MASK 0x1
+#define ISPCCP2_LCx_CTRL_REGION_SHIFT 1
+#define ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0 0x3f
+#define ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0 0x2
+#define ISPCCP2_LCx_CTRL_FORMAT_MASK 0x1f
+#define ISPCCP2_LCx_CTRL_FORMAT_SHIFT 0x3
+#define ISPCCP2_LCx_CODE(x) ((0x054)+0x30*(x))
+#define ISPCCP2_LCx_STAT_START(x) ((0x058)+0x30*(x))
+#define ISPCCP2_LCx_STAT_SIZE(x) ((0x05C)+0x30*(x))
+#define ISPCCP2_LCx_SOF_ADDR(x) ((0x060)+0x30*(x))
+#define ISPCCP2_LCx_EOF_ADDR(x) ((0x064)+0x30*(x))
+#define ISPCCP2_LCx_DAT_START(x) ((0x068)+0x30*(x))
+#define ISPCCP2_LCx_DAT_SIZE(x) ((0x06C)+0x30*(x))
+#define ISPCCP2_LCx_DAT_MASK 0xFFF
+#define ISPCCP2_LCx_DAT_SHIFT 16
+#define ISPCCP2_LCx_DAT_PING_ADDR(x) ((0x070)+0x30*(x))
+#define ISPCCP2_LCx_DAT_PONG_ADDR(x) ((0x074)+0x30*(x))
+#define ISPCCP2_LCx_DAT_OFST(x) ((0x078)+0x30*(x))
+#define ISPCCP2_LCM_CTRL (0x1D0)
+#define ISPCCP2_LCM_CTRL_CHAN_EN (1 << 0)
+#define ISPCCP2_LCM_CTRL_DST_PORT (1 << 2)
+#define ISPCCP2_LCM_CTRL_DST_PORT_SHIFT 2
+#define ISPCCP2_LCM_CTRL_READ_THROTTLE_SHIFT 3
+#define ISPCCP2_LCM_CTRL_READ_THROTTLE_MASK 0x11
+#define ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT 5
+#define ISPCCP2_LCM_CTRL_BURST_SIZE_MASK 0x7
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT 16
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_MASK 0x7
+#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT 20
+#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_MASK 0x3
+#define ISPCCP2_LCM_CTRL_SRC_DPCM_PRED (1 << 22)
+#define ISPCCP2_LCM_CTRL_SRC_PACK (1 << 23)
+#define ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT 24
+#define ISPCCP2_LCM_CTRL_DST_FORMAT_MASK 0x7
+#define ISPCCP2_LCM_VSIZE (0x1D4)
+#define ISPCCP2_LCM_VSIZE_SHIFT 16
+#define ISPCCP2_LCM_HSIZE (0x1D8)
+#define ISPCCP2_LCM_HSIZE_SHIFT 16
+#define ISPCCP2_LCM_PREFETCH (0x1DC)
+#define ISPCCP2_LCM_PREFETCH_SHIFT 3
+#define ISPCCP2_LCM_SRC_ADDR (0x1E0)
+#define ISPCCP2_LCM_SRC_OFST (0x1E4)
+#define ISPCCP2_LCM_DST_ADDR (0x1E8)
+#define ISPCCP2_LCM_DST_OFST (0x1EC)
+
+/* CCDC module register offset */
+
+#define ISPCCDC_PID (0x000)
+#define ISPCCDC_PCR (0x004)
+#define ISPCCDC_SYN_MODE (0x008)
+#define ISPCCDC_HD_VD_WID (0x00C)
+#define ISPCCDC_PIX_LINES (0x010)
+#define ISPCCDC_HORZ_INFO (0x014)
+#define ISPCCDC_VERT_START (0x018)
+#define ISPCCDC_VERT_LINES (0x01C)
+#define ISPCCDC_CULLING (0x020)
+#define ISPCCDC_HSIZE_OFF (0x024)
+#define ISPCCDC_SDOFST (0x028)
+#define ISPCCDC_SDR_ADDR (0x02C)
+#define ISPCCDC_CLAMP (0x030)
+#define ISPCCDC_DCSUB (0x034)
+#define ISPCCDC_COLPTN (0x038)
+#define ISPCCDC_BLKCMP (0x03C)
+#define ISPCCDC_FPC (0x040)
+#define ISPCCDC_FPC_ADDR (0x044)
+#define ISPCCDC_VDINT (0x048)
+#define ISPCCDC_ALAW (0x04C)
+#define ISPCCDC_REC656IF (0x050)
+#define ISPCCDC_CFG (0x054)
+#define ISPCCDC_FMTCFG (0x058)
+#define ISPCCDC_FMT_HORZ (0x05C)
+#define ISPCCDC_FMT_VERT (0x060)
+#define ISPCCDC_FMT_ADDR0 (0x064)
+#define ISPCCDC_FMT_ADDR1 (0x068)
+#define ISPCCDC_FMT_ADDR2 (0x06C)
+#define ISPCCDC_FMT_ADDR3 (0x070)
+#define ISPCCDC_FMT_ADDR4 (0x074)
+#define ISPCCDC_FMT_ADDR5 (0x078)
+#define ISPCCDC_FMT_ADDR6 (0x07C)
+#define ISPCCDC_FMT_ADDR7 (0x080)
+#define ISPCCDC_PRGEVEN0 (0x084)
+#define ISPCCDC_PRGEVEN1 (0x088)
+#define ISPCCDC_PRGODD0 (0x08C)
+#define ISPCCDC_PRGODD1 (0x090)
+#define ISPCCDC_VP_OUT (0x094)
+
+#define ISPCCDC_LSC_CONFIG (0x098)
+#define ISPCCDC_LSC_INITIAL (0x09C)
+#define ISPCCDC_LSC_TABLE_BASE (0x0A0)
+#define ISPCCDC_LSC_TABLE_OFFSET (0x0A4)
+
+/* SBL */
+#define ISPSBL_PCR 0x4
+#define ISPSBL_PCR_H3A_AEAWB_WBL_OVF (1 << 16)
+#define ISPSBL_PCR_H3A_AF_WBL_OVF (1 << 17)
+#define ISPSBL_PCR_RSZ4_WBL_OVF (1 << 18)
+#define ISPSBL_PCR_RSZ3_WBL_OVF (1 << 19)
+#define ISPSBL_PCR_RSZ2_WBL_OVF (1 << 20)
+#define ISPSBL_PCR_RSZ1_WBL_OVF (1 << 21)
+#define ISPSBL_PCR_PRV_WBL_OVF (1 << 22)
+#define ISPSBL_PCR_CCDC_WBL_OVF (1 << 23)
+#define ISPSBL_PCR_CCDCPRV_2_RSZ_OVF (1 << 24)
+#define ISPSBL_PCR_CSIA_WBL_OVF (1 << 25)
+#define ISPSBL_PCR_CSIB_WBL_OVF (1 << 26)
+#define ISPSBL_CCDC_WR_0 (0x028)
+#define ISPSBL_CCDC_WR_0_DATA_READY (1 << 21)
+#define ISPSBL_CCDC_WR_1 (0x02C)
+#define ISPSBL_CCDC_WR_2 (0x030)
+#define ISPSBL_CCDC_WR_3 (0x034)
+
+#define ISPSBL_SDR_REQ_EXP 0xF8
+#define ISPSBL_SDR_REQ_HIST_EXP_SHIFT 0
+#define ISPSBL_SDR_REQ_HIST_EXP_MASK (0x3FF)
+#define ISPSBL_SDR_REQ_RSZ_EXP_SHIFT 10
+#define ISPSBL_SDR_REQ_RSZ_EXP_MASK (0x3FF << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT)
+#define ISPSBL_SDR_REQ_PRV_EXP_SHIFT 20
+#define ISPSBL_SDR_REQ_PRV_EXP_MASK (0x3FF << ISPSBL_SDR_REQ_PRV_EXP_SHIFT)
+
+/* Histogram registers */
+#define ISPHIST_PID (0x000)
+#define ISPHIST_PCR (0x004)
+#define ISPHIST_CNT (0x008)
+#define ISPHIST_WB_GAIN (0x00C)
+#define ISPHIST_R0_HORZ (0x010)
+#define ISPHIST_R0_VERT (0x014)
+#define ISPHIST_R1_HORZ (0x018)
+#define ISPHIST_R1_VERT (0x01C)
+#define ISPHIST_R2_HORZ (0x020)
+#define ISPHIST_R2_VERT (0x024)
+#define ISPHIST_R3_HORZ (0x028)
+#define ISPHIST_R3_VERT (0x02C)
+#define ISPHIST_ADDR (0x030)
+#define ISPHIST_DATA (0x034)
+#define ISPHIST_RADD (0x038)
+#define ISPHIST_RADD_OFF (0x03C)
+#define ISPHIST_H_V_INFO (0x040)
+
+/* H3A module registers */
+#define ISPH3A_PID (0x000)
+#define ISPH3A_PCR (0x004)
+#define ISPH3A_AEWWIN1 (0x04C)
+#define ISPH3A_AEWINSTART (0x050)
+#define ISPH3A_AEWINBLK (0x054)
+#define ISPH3A_AEWSUBWIN (0x058)
+#define ISPH3A_AEWBUFST (0x05C)
+#define ISPH3A_AFPAX1 (0x008)
+#define ISPH3A_AFPAX2 (0x00C)
+#define ISPH3A_AFPAXSTART (0x010)
+#define ISPH3A_AFIIRSH (0x014)
+#define ISPH3A_AFBUFST (0x018)
+#define ISPH3A_AFCOEF010 (0x01C)
+#define ISPH3A_AFCOEF032 (0x020)
+#define ISPH3A_AFCOEF054 (0x024)
+#define ISPH3A_AFCOEF076 (0x028)
+#define ISPH3A_AFCOEF098 (0x02C)
+#define ISPH3A_AFCOEF0010 (0x030)
+#define ISPH3A_AFCOEF110 (0x034)
+#define ISPH3A_AFCOEF132 (0x038)
+#define ISPH3A_AFCOEF154 (0x03C)
+#define ISPH3A_AFCOEF176 (0x040)
+#define ISPH3A_AFCOEF198 (0x044)
+#define ISPH3A_AFCOEF1010 (0x048)
+
+#define ISPPRV_PCR (0x004)
+#define ISPPRV_HORZ_INFO (0x008)
+#define ISPPRV_VERT_INFO (0x00C)
+#define ISPPRV_RSDR_ADDR (0x010)
+#define ISPPRV_RADR_OFFSET (0x014)
+#define ISPPRV_DSDR_ADDR (0x018)
+#define ISPPRV_DRKF_OFFSET (0x01C)
+#define ISPPRV_WSDR_ADDR (0x020)
+#define ISPPRV_WADD_OFFSET (0x024)
+#define ISPPRV_AVE (0x028)
+#define ISPPRV_HMED (0x02C)
+#define ISPPRV_NF (0x030)
+#define ISPPRV_WB_DGAIN (0x034)
+#define ISPPRV_WBGAIN (0x038)
+#define ISPPRV_WBSEL (0x03C)
+#define ISPPRV_CFA (0x040)
+#define ISPPRV_BLKADJOFF (0x044)
+#define ISPPRV_RGB_MAT1 (0x048)
+#define ISPPRV_RGB_MAT2 (0x04C)
+#define ISPPRV_RGB_MAT3 (0x050)
+#define ISPPRV_RGB_MAT4 (0x054)
+#define ISPPRV_RGB_MAT5 (0x058)
+#define ISPPRV_RGB_OFF1 (0x05C)
+#define ISPPRV_RGB_OFF2 (0x060)
+#define ISPPRV_CSC0 (0x064)
+#define ISPPRV_CSC1 (0x068)
+#define ISPPRV_CSC2 (0x06C)
+#define ISPPRV_CSC_OFFSET (0x070)
+#define ISPPRV_CNT_BRT (0x074)
+#define ISPPRV_CSUP (0x078)
+#define ISPPRV_SETUP_YC (0x07C)
+#define ISPPRV_SET_TBL_ADDR (0x080)
+#define ISPPRV_SET_TBL_DATA (0x084)
+#define ISPPRV_CDC_THR0 (0x090)
+#define ISPPRV_CDC_THR1 (ISPPRV_CDC_THR0 + (0x4))
+#define ISPPRV_CDC_THR2 (ISPPRV_CDC_THR0 + (0x4) * 2)
+#define ISPPRV_CDC_THR3 (ISPPRV_CDC_THR0 + (0x4) * 3)
+
+#define ISPPRV_REDGAMMA_TABLE_ADDR 0x0000
+#define ISPPRV_GREENGAMMA_TABLE_ADDR 0x0400
+#define ISPPRV_BLUEGAMMA_TABLE_ADDR 0x0800
+#define ISPPRV_NF_TABLE_ADDR 0x0C00
+#define ISPPRV_YENH_TABLE_ADDR 0x1000
+#define ISPPRV_CFA_TABLE_ADDR 0x1400
+
+#define ISPRSZ_MIN_OUTPUT 64
+#define ISPRSZ_MAX_OUTPUT 3312
+
+/* Resizer module register offset */
+#define ISPRSZ_PID (0x000)
+#define ISPRSZ_PCR (0x004)
+#define ISPRSZ_CNT (0x008)
+#define ISPRSZ_OUT_SIZE (0x00C)
+#define ISPRSZ_IN_START (0x010)
+#define ISPRSZ_IN_SIZE (0x014)
+#define ISPRSZ_SDR_INADD (0x018)
+#define ISPRSZ_SDR_INOFF (0x01C)
+#define ISPRSZ_SDR_OUTADD (0x020)
+#define ISPRSZ_SDR_OUTOFF (0x024)
+#define ISPRSZ_HFILT10 (0x028)
+#define ISPRSZ_HFILT32 (0x02C)
+#define ISPRSZ_HFILT54 (0x030)
+#define ISPRSZ_HFILT76 (0x034)
+#define ISPRSZ_HFILT98 (0x038)
+#define ISPRSZ_HFILT1110 (0x03C)
+#define ISPRSZ_HFILT1312 (0x040)
+#define ISPRSZ_HFILT1514 (0x044)
+#define ISPRSZ_HFILT1716 (0x048)
+#define ISPRSZ_HFILT1918 (0x04C)
+#define ISPRSZ_HFILT2120 (0x050)
+#define ISPRSZ_HFILT2322 (0x054)
+#define ISPRSZ_HFILT2524 (0x058)
+#define ISPRSZ_HFILT2726 (0x05C)
+#define ISPRSZ_HFILT2928 (0x060)
+#define ISPRSZ_HFILT3130 (0x064)
+#define ISPRSZ_VFILT10 (0x068)
+#define ISPRSZ_VFILT32 (0x06C)
+#define ISPRSZ_VFILT54 (0x070)
+#define ISPRSZ_VFILT76 (0x074)
+#define ISPRSZ_VFILT98 (0x078)
+#define ISPRSZ_VFILT1110 (0x07C)
+#define ISPRSZ_VFILT1312 (0x080)
+#define ISPRSZ_VFILT1514 (0x084)
+#define ISPRSZ_VFILT1716 (0x088)
+#define ISPRSZ_VFILT1918 (0x08C)
+#define ISPRSZ_VFILT2120 (0x090)
+#define ISPRSZ_VFILT2322 (0x094)
+#define ISPRSZ_VFILT2524 (0x098)
+#define ISPRSZ_VFILT2726 (0x09C)
+#define ISPRSZ_VFILT2928 (0x0A0)
+#define ISPRSZ_VFILT3130 (0x0A4)
+#define ISPRSZ_YENH (0x0A8)
+
+#define ISP_INT_CLR 0xFF113F11
+#define ISPPRV_PCR_EN 1
+#define ISPPRV_PCR_BUSY (1 << 1)
+#define ISPPRV_PCR_SOURCE (1 << 2)
+#define ISPPRV_PCR_ONESHOT (1 << 3)
+#define ISPPRV_PCR_WIDTH (1 << 4)
+#define ISPPRV_PCR_INVALAW (1 << 5)
+#define ISPPRV_PCR_DRKFEN (1 << 6)
+#define ISPPRV_PCR_DRKFCAP (1 << 7)
+#define ISPPRV_PCR_HMEDEN (1 << 8)
+#define ISPPRV_PCR_NFEN (1 << 9)
+#define ISPPRV_PCR_CFAEN (1 << 10)
+#define ISPPRV_PCR_CFAFMT_SHIFT 11
+#define ISPPRV_PCR_CFAFMT_MASK 0x7800
+#define ISPPRV_PCR_CFAFMT_BAYER (0 << 11)
+#define ISPPRV_PCR_CFAFMT_SONYVGA (1 << 11)
+#define ISPPRV_PCR_CFAFMT_RGBFOVEON (2 << 11)
+#define ISPPRV_PCR_CFAFMT_DNSPL (3 << 11)
+#define ISPPRV_PCR_CFAFMT_HONEYCOMB (4 << 11)
+#define ISPPRV_PCR_CFAFMT_RRGGBBFOVEON (5 << 11)
+#define ISPPRV_PCR_YNENHEN (1 << 15)
+#define ISPPRV_PCR_SUPEN (1 << 16)
+#define ISPPRV_PCR_YCPOS_SHIFT 17
+#define ISPPRV_PCR_YCPOS_YCrYCb (0 << 17)
+#define ISPPRV_PCR_YCPOS_YCbYCr (1 << 17)
+#define ISPPRV_PCR_YCPOS_CbYCrY (2 << 17)
+#define ISPPRV_PCR_YCPOS_CrYCbY (3 << 17)
+#define ISPPRV_PCR_RSZPORT (1 << 19)
+#define ISPPRV_PCR_SDRPORT (1 << 20)
+#define ISPPRV_PCR_SCOMP_EN (1 << 21)
+#define ISPPRV_PCR_SCOMP_SFT_SHIFT (22)
+#define ISPPRV_PCR_SCOMP_SFT_MASK (7 << 22)
+#define ISPPRV_PCR_GAMMA_BYPASS (1 << 26)
+#define ISPPRV_PCR_DCOREN (1 << 27)
+#define ISPPRV_PCR_DCCOUP (1 << 28)
+#define ISPPRV_PCR_DRK_FAIL (1 << 31)
+
+#define ISPPRV_HORZ_INFO_EPH_SHIFT 0
+#define ISPPRV_HORZ_INFO_EPH_MASK 0x3fff
+#define ISPPRV_HORZ_INFO_SPH_SHIFT 16
+#define ISPPRV_HORZ_INFO_SPH_MASK 0x3fff0
+
+#define ISPPRV_VERT_INFO_ELV_SHIFT 0
+#define ISPPRV_VERT_INFO_ELV_MASK 0x3fff
+#define ISPPRV_VERT_INFO_SLV_SHIFT 16
+#define ISPPRV_VERT_INFO_SLV_MASK 0x3fff0
+
+#define ISPPRV_AVE_EVENDIST_SHIFT 2
+#define ISPPRV_AVE_EVENDIST_1 0x0
+#define ISPPRV_AVE_EVENDIST_2 0x1
+#define ISPPRV_AVE_EVENDIST_3 0x2
+#define ISPPRV_AVE_EVENDIST_4 0x3
+#define ISPPRV_AVE_ODDDIST_SHIFT 4
+#define ISPPRV_AVE_ODDDIST_1 0x0
+#define ISPPRV_AVE_ODDDIST_2 0x1
+#define ISPPRV_AVE_ODDDIST_3 0x2
+#define ISPPRV_AVE_ODDDIST_4 0x3
+
+#define ISPPRV_HMED_THRESHOLD_SHIFT 0
+#define ISPPRV_HMED_EVENDIST (1 << 8)
+#define ISPPRV_HMED_ODDDIST (1 << 9)
+
+#define ISPPRV_WBGAIN_COEF0_SHIFT 0
+#define ISPPRV_WBGAIN_COEF1_SHIFT 8
+#define ISPPRV_WBGAIN_COEF2_SHIFT 16
+#define ISPPRV_WBGAIN_COEF3_SHIFT 24
+
+#define ISPPRV_WBSEL_COEF0 0x0
+#define ISPPRV_WBSEL_COEF1 0x1
+#define ISPPRV_WBSEL_COEF2 0x2
+#define ISPPRV_WBSEL_COEF3 0x3
+
+#define ISPPRV_WBSEL_N0_0_SHIFT 0
+#define ISPPRV_WBSEL_N0_1_SHIFT 2
+#define ISPPRV_WBSEL_N0_2_SHIFT 4
+#define ISPPRV_WBSEL_N0_3_SHIFT 6
+#define ISPPRV_WBSEL_N1_0_SHIFT 8
+#define ISPPRV_WBSEL_N1_1_SHIFT 10
+#define ISPPRV_WBSEL_N1_2_SHIFT 12
+#define ISPPRV_WBSEL_N1_3_SHIFT 14
+#define ISPPRV_WBSEL_N2_0_SHIFT 16
+#define ISPPRV_WBSEL_N2_1_SHIFT 18
+#define ISPPRV_WBSEL_N2_2_SHIFT 20
+#define ISPPRV_WBSEL_N2_3_SHIFT 22
+#define ISPPRV_WBSEL_N3_0_SHIFT 24
+#define ISPPRV_WBSEL_N3_1_SHIFT 26
+#define ISPPRV_WBSEL_N3_2_SHIFT 28
+#define ISPPRV_WBSEL_N3_3_SHIFT 30
+
+#define ISPPRV_CFA_GRADTH_HOR_SHIFT 0
+#define ISPPRV_CFA_GRADTH_VER_SHIFT 8
+
+#define ISPPRV_BLKADJOFF_B_SHIFT 0
+#define ISPPRV_BLKADJOFF_G_SHIFT 8
+#define ISPPRV_BLKADJOFF_R_SHIFT 16
+
+#define ISPPRV_RGB_MAT1_MTX_RR_SHIFT 0
+#define ISPPRV_RGB_MAT1_MTX_GR_SHIFT 16
+
+#define ISPPRV_RGB_MAT2_MTX_BR_SHIFT 0
+#define ISPPRV_RGB_MAT2_MTX_RG_SHIFT 16
+
+#define ISPPRV_RGB_MAT3_MTX_GG_SHIFT 0
+#define ISPPRV_RGB_MAT3_MTX_BG_SHIFT 16
+
+#define ISPPRV_RGB_MAT4_MTX_RB_SHIFT 0
+#define ISPPRV_RGB_MAT4_MTX_GB_SHIFT 16
+
+#define ISPPRV_RGB_MAT5_MTX_BB_SHIFT 0
+
+#define ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT 0
+#define ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT 16
+
+#define ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT 0
+
+#define ISPPRV_CSC0_RY_SHIFT 0
+#define ISPPRV_CSC0_GY_SHIFT 10
+#define ISPPRV_CSC0_BY_SHIFT 20
+
+#define ISPPRV_CSC1_RCB_SHIFT 0
+#define ISPPRV_CSC1_GCB_SHIFT 10
+#define ISPPRV_CSC1_BCB_SHIFT 20
+
+#define ISPPRV_CSC2_RCR_SHIFT 0
+#define ISPPRV_CSC2_GCR_SHIFT 10
+#define ISPPRV_CSC2_BCR_SHIFT 20
+
+#define ISPPRV_CSC_OFFSET_CR_SHIFT 0
+#define ISPPRV_CSC_OFFSET_CB_SHIFT 8
+#define ISPPRV_CSC_OFFSET_Y_SHIFT 16
+
+#define ISPPRV_CNT_BRT_BRT_SHIFT 0
+#define ISPPRV_CNT_BRT_CNT_SHIFT 8
+
+#define ISPPRV_CONTRAST_MAX 0x10
+#define ISPPRV_CONTRAST_MIN 0xFF
+#define ISPPRV_BRIGHT_MIN 0x00
+#define ISPPRV_BRIGHT_MAX 0xFF
+
+#define ISPPRV_CSUP_CSUPG_SHIFT 0
+#define ISPPRV_CSUP_THRES_SHIFT 8
+#define ISPPRV_CSUP_HPYF_SHIFT 16
+
+#define ISPPRV_SETUP_YC_MINC_SHIFT 0
+#define ISPPRV_SETUP_YC_MAXC_SHIFT 8
+#define ISPPRV_SETUP_YC_MINY_SHIFT 16
+#define ISPPRV_SETUP_YC_MAXY_SHIFT 24
+#define ISPPRV_YC_MAX 0xFF
+#define ISPPRV_YC_MIN 0x0
+
+/* Define bit fields within selected registers */
+#define ISP_REVISION_SHIFT 0
+
+#define ISP_SYSCONFIG_AUTOIDLE (1 << 0)
+#define ISP_SYSCONFIG_SOFTRESET (1 << 1)
+#define ISP_SYSCONFIG_MIDLEMODE_SHIFT 12
+#define ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY 0x0
+#define ISP_SYSCONFIG_MIDLEMODE_NOSTANBY 0x1
+#define ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY 0x2
+
+#define ISP_SYSSTATUS_RESETDONE 0
+
+#define IRQ0ENABLE_CSIA_IRQ (1 << 0)
+#define IRQ0ENABLE_CSIC_IRQ (1 << 1)
+#define IRQ0ENABLE_CCP2_LCM_IRQ (1 << 3)
+#define IRQ0ENABLE_CCP2_LC0_IRQ (1 << 4)
+#define IRQ0ENABLE_CCP2_LC1_IRQ (1 << 5)
+#define IRQ0ENABLE_CCP2_LC2_IRQ (1 << 6)
+#define IRQ0ENABLE_CCP2_LC3_IRQ (1 << 7)
+#define IRQ0ENABLE_CSIB_IRQ (IRQ0ENABLE_CCP2_LCM_IRQ | \
+ IRQ0ENABLE_CCP2_LC0_IRQ | \
+ IRQ0ENABLE_CCP2_LC1_IRQ | \
+ IRQ0ENABLE_CCP2_LC2_IRQ | \
+ IRQ0ENABLE_CCP2_LC3_IRQ)
+
+#define IRQ0ENABLE_CCDC_VD0_IRQ (1 << 8)
+#define IRQ0ENABLE_CCDC_VD1_IRQ (1 << 9)
+#define IRQ0ENABLE_CCDC_VD2_IRQ (1 << 10)
+#define IRQ0ENABLE_CCDC_ERR_IRQ (1 << 11)
+#define IRQ0ENABLE_H3A_AF_DONE_IRQ (1 << 12)
+#define IRQ0ENABLE_H3A_AWB_DONE_IRQ (1 << 13)
+#define IRQ0ENABLE_HIST_DONE_IRQ (1 << 16)
+#define IRQ0ENABLE_CCDC_LSC_DONE_IRQ (1 << 17)
+#define IRQ0ENABLE_CCDC_LSC_PREF_COMP_IRQ (1 << 18)
+#define IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ (1 << 19)
+#define IRQ0ENABLE_PRV_DONE_IRQ (1 << 20)
+#define IRQ0ENABLE_RSZ_DONE_IRQ (1 << 24)
+#define IRQ0ENABLE_OVF_IRQ (1 << 25)
+#define IRQ0ENABLE_PING_IRQ (1 << 26)
+#define IRQ0ENABLE_PONG_IRQ (1 << 27)
+#define IRQ0ENABLE_MMU_ERR_IRQ (1 << 28)
+#define IRQ0ENABLE_OCP_ERR_IRQ (1 << 29)
+#define IRQ0ENABLE_SEC_ERR_IRQ (1 << 30)
+#define IRQ0ENABLE_HS_VS_IRQ (1 << 31)
+
+#define IRQ0STATUS_CSIA_IRQ (1 << 0)
+#define IRQ0STATUS_CSI2C_IRQ (1 << 1)
+#define IRQ0STATUS_CCP2_LCM_IRQ (1 << 3)
+#define IRQ0STATUS_CCP2_LC0_IRQ (1 << 4)
+#define IRQ0STATUS_CSIB_IRQ (IRQ0STATUS_CCP2_LCM_IRQ | \
+ IRQ0STATUS_CCP2_LC0_IRQ)
+
+#define IRQ0STATUS_CSIB_LC1_IRQ (1 << 5)
+#define IRQ0STATUS_CSIB_LC2_IRQ (1 << 6)
+#define IRQ0STATUS_CSIB_LC3_IRQ (1 << 7)
+#define IRQ0STATUS_CCDC_VD0_IRQ (1 << 8)
+#define IRQ0STATUS_CCDC_VD1_IRQ (1 << 9)
+#define IRQ0STATUS_CCDC_VD2_IRQ (1 << 10)
+#define IRQ0STATUS_CCDC_ERR_IRQ (1 << 11)
+#define IRQ0STATUS_H3A_AF_DONE_IRQ (1 << 12)
+#define IRQ0STATUS_H3A_AWB_DONE_IRQ (1 << 13)
+#define IRQ0STATUS_HIST_DONE_IRQ (1 << 16)
+#define IRQ0STATUS_CCDC_LSC_DONE_IRQ (1 << 17)
+#define IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ (1 << 18)
+#define IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ (1 << 19)
+#define IRQ0STATUS_PRV_DONE_IRQ (1 << 20)
+#define IRQ0STATUS_RSZ_DONE_IRQ (1 << 24)
+#define IRQ0STATUS_OVF_IRQ (1 << 25)
+#define IRQ0STATUS_PING_IRQ (1 << 26)
+#define IRQ0STATUS_PONG_IRQ (1 << 27)
+#define IRQ0STATUS_MMU_ERR_IRQ (1 << 28)
+#define IRQ0STATUS_OCP_ERR_IRQ (1 << 29)
+#define IRQ0STATUS_SEC_ERR_IRQ (1 << 30)
+#define IRQ0STATUS_HS_VS_IRQ (1 << 31)
+
+#define TCTRL_GRESET_LEN 0
+
+#define TCTRL_PSTRB_REPLAY_DELAY 0
+#define TCTRL_PSTRB_REPLAY_COUNTER_SHIFT 25
+
+#define ISPCTRL_PAR_SER_CLK_SEL_PARALLEL 0x0
+#define ISPCTRL_PAR_SER_CLK_SEL_CSIA 0x1
+#define ISPCTRL_PAR_SER_CLK_SEL_CSIB 0x2
+#define ISPCTRL_PAR_SER_CLK_SEL_CSIC 0x3
+#define ISPCTRL_PAR_SER_CLK_SEL_MASK 0x3
+
+#define ISPCTRL_PAR_BRIDGE_SHIFT 2
+#define ISPCTRL_PAR_BRIDGE_DISABLE (0x0 << 2)
+#define ISPCTRL_PAR_BRIDGE_LENDIAN (0x2 << 2)
+#define ISPCTRL_PAR_BRIDGE_BENDIAN (0x3 << 2)
+#define ISPCTRL_PAR_BRIDGE_MASK (0x3 << 2)
+
+#define ISPCTRL_PAR_CLK_POL_SHIFT 4
+#define ISPCTRL_PAR_CLK_POL_INV (1 << 4)
+#define ISPCTRL_PING_PONG_EN (1 << 5)
+#define ISPCTRL_SHIFT_SHIFT 6
+#define ISPCTRL_SHIFT_0 (0x0 << 6)
+#define ISPCTRL_SHIFT_2 (0x1 << 6)
+#define ISPCTRL_SHIFT_4 (0x2 << 6)
+#define ISPCTRL_SHIFT_MASK (0x3 << 6)
+
+#define ISPCTRL_CCDC_CLK_EN (1 << 8)
+#define ISPCTRL_SCMP_CLK_EN (1 << 9)
+#define ISPCTRL_H3A_CLK_EN (1 << 10)
+#define ISPCTRL_HIST_CLK_EN (1 << 11)
+#define ISPCTRL_PREV_CLK_EN (1 << 12)
+#define ISPCTRL_RSZ_CLK_EN (1 << 13)
+#define ISPCTRL_SYNC_DETECT_SHIFT 14
+#define ISPCTRL_SYNC_DETECT_HSFALL (0x0 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_HSRISE (0x1 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_VSFALL (0x2 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_VSRISE (0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_MASK (0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
+
+#define ISPCTRL_CCDC_RAM_EN (1 << 16)
+#define ISPCTRL_PREV_RAM_EN (1 << 17)
+#define ISPCTRL_SBL_RD_RAM_EN (1 << 18)
+#define ISPCTRL_SBL_WR1_RAM_EN (1 << 19)
+#define ISPCTRL_SBL_WR0_RAM_EN (1 << 20)
+#define ISPCTRL_SBL_AUTOIDLE (1 << 21)
+#define ISPCTRL_SBL_SHARED_WPORTC (1 << 26)
+#define ISPCTRL_SBL_SHARED_RPORTA (1 << 27)
+#define ISPCTRL_SBL_SHARED_RPORTB (1 << 28)
+#define ISPCTRL_JPEG_FLUSH (1 << 30)
+#define ISPCTRL_CCDC_FLUSH (1 << 31)
+
+#define ISPSECURE_SECUREMODE 0
+
+#define ISPTCTRL_CTRL_DIV_LOW 0x0
+#define ISPTCTRL_CTRL_DIV_HIGH 0x1
+#define ISPTCTRL_CTRL_DIV_BYPASS 0x1F
+
+#define ISPTCTRL_CTRL_DIVA_SHIFT 0
+#define ISPTCTRL_CTRL_DIVA_MASK (0x1F << ISPTCTRL_CTRL_DIVA_SHIFT)
+
+#define ISPTCTRL_CTRL_DIVB_SHIFT 5
+#define ISPTCTRL_CTRL_DIVB_MASK (0x1F << ISPTCTRL_CTRL_DIVB_SHIFT)
+
+#define ISPTCTRL_CTRL_DIVC_SHIFT 10
+#define ISPTCTRL_CTRL_DIVC_NOCLOCK (0x0 << 10)
+
+#define ISPTCTRL_CTRL_SHUTEN (1 << 21)
+#define ISPTCTRL_CTRL_PSTRBEN (1 << 22)
+#define ISPTCTRL_CTRL_STRBEN (1 << 23)
+#define ISPTCTRL_CTRL_SHUTPOL (1 << 24)
+#define ISPTCTRL_CTRL_STRBPSTRBPOL (1 << 26)
+
+#define ISPTCTRL_CTRL_INSEL_SHIFT 27
+#define ISPTCTRL_CTRL_INSEL_PARALLEL (0x0 << 27)
+#define ISPTCTRL_CTRL_INSEL_CSIA (0x1 << 27)
+#define ISPTCTRL_CTRL_INSEL_CSIB (0x2 << 27)
+
+#define ISPTCTRL_CTRL_GRESETEn (1 << 29)
+#define ISPTCTRL_CTRL_GRESETPOL (1 << 30)
+#define ISPTCTRL_CTRL_GRESETDIR (1 << 31)
+
+#define ISPTCTRL_FRAME_SHUT_SHIFT 0
+#define ISPTCTRL_FRAME_PSTRB_SHIFT 6
+#define ISPTCTRL_FRAME_STRB_SHIFT 12
+
+#define ISPCCDC_PID_PREV_SHIFT 0
+#define ISPCCDC_PID_CID_SHIFT 8
+#define ISPCCDC_PID_TID_SHIFT 16
+
+#define ISPCCDC_PCR_EN 1
+#define ISPCCDC_PCR_BUSY (1 << 1)
+
+#define ISPCCDC_SYN_MODE_VDHDOUT 0x1
+#define ISPCCDC_SYN_MODE_FLDOUT (1 << 1)
+#define ISPCCDC_SYN_MODE_VDPOL (1 << 2)
+#define ISPCCDC_SYN_MODE_HDPOL (1 << 3)
+#define ISPCCDC_SYN_MODE_FLDPOL (1 << 4)
+#define ISPCCDC_SYN_MODE_EXWEN (1 << 5)
+#define ISPCCDC_SYN_MODE_DATAPOL (1 << 6)
+#define ISPCCDC_SYN_MODE_FLDMODE (1 << 7)
+#define ISPCCDC_SYN_MODE_DATSIZ_MASK (0x7 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_8_16 (0x0 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_12 (0x4 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_11 (0x5 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_10 (0x6 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_8 (0x7 << 8)
+#define ISPCCDC_SYN_MODE_PACK8 (1 << 11)
+#define ISPCCDC_SYN_MODE_INPMOD_MASK (3 << 12)
+#define ISPCCDC_SYN_MODE_INPMOD_RAW (0 << 12)
+#define ISPCCDC_SYN_MODE_INPMOD_YCBCR16 (1 << 12)
+#define ISPCCDC_SYN_MODE_INPMOD_YCBCR8 (2 << 12)
+#define ISPCCDC_SYN_MODE_LPF (1 << 14)
+#define ISPCCDC_SYN_MODE_FLDSTAT (1 << 15)
+#define ISPCCDC_SYN_MODE_VDHDEN (1 << 16)
+#define ISPCCDC_SYN_MODE_WEN (1 << 17)
+#define ISPCCDC_SYN_MODE_VP2SDR (1 << 18)
+#define ISPCCDC_SYN_MODE_SDR2RSZ (1 << 19)
+
+#define ISPCCDC_HD_VD_WID_VDW_SHIFT 0
+#define ISPCCDC_HD_VD_WID_HDW_SHIFT 16
+
+#define ISPCCDC_PIX_LINES_HLPRF_SHIFT 0
+#define ISPCCDC_PIX_LINES_PPLN_SHIFT 16
+
+#define ISPCCDC_HORZ_INFO_NPH_SHIFT 0
+#define ISPCCDC_HORZ_INFO_NPH_MASK 0x00007fff
+#define ISPCCDC_HORZ_INFO_SPH_SHIFT 16
+#define ISPCCDC_HORZ_INFO_SPH_MASK 0x7fff0000
+
+#define ISPCCDC_VERT_START_SLV1_SHIFT 0
+#define ISPCCDC_VERT_START_SLV0_SHIFT 16
+#define ISPCCDC_VERT_START_SLV0_MASK 0x7fff0000
+
+#define ISPCCDC_VERT_LINES_NLV_SHIFT 0
+#define ISPCCDC_VERT_LINES_NLV_MASK 0x00007fff
+
+#define ISPCCDC_CULLING_CULV_SHIFT 0
+#define ISPCCDC_CULLING_CULHODD_SHIFT 16
+#define ISPCCDC_CULLING_CULHEVN_SHIFT 24
+
+#define ISPCCDC_HSIZE_OFF_SHIFT 0
+
+#define ISPCCDC_SDOFST_FIINV (1 << 14)
+#define ISPCCDC_SDOFST_FOFST_SHIFT 12
+#define ISPCCDC_SDOFST_FOFST_MASK (3 << 12)
+#define ISPCCDC_SDOFST_LOFST3_SHIFT 0
+#define ISPCCDC_SDOFST_LOFST2_SHIFT 3
+#define ISPCCDC_SDOFST_LOFST1_SHIFT 6
+#define ISPCCDC_SDOFST_LOFST0_SHIFT 9
+
+#define ISPCCDC_CLAMP_OBGAIN_SHIFT 0
+#define ISPCCDC_CLAMP_OBST_SHIFT 10
+#define ISPCCDC_CLAMP_OBSLN_SHIFT 25
+#define ISPCCDC_CLAMP_OBSLEN_SHIFT 28
+#define ISPCCDC_CLAMP_CLAMPEN (1 << 31)
+
+#define ISPCCDC_COLPTN_R_Ye 0x0
+#define ISPCCDC_COLPTN_Gr_Cy 0x1
+#define ISPCCDC_COLPTN_Gb_G 0x2
+#define ISPCCDC_COLPTN_B_Mg 0x3
+#define ISPCCDC_COLPTN_CP0PLC0_SHIFT 0
+#define ISPCCDC_COLPTN_CP0PLC1_SHIFT 2
+#define ISPCCDC_COLPTN_CP0PLC2_SHIFT 4
+#define ISPCCDC_COLPTN_CP0PLC3_SHIFT 6
+#define ISPCCDC_COLPTN_CP1PLC0_SHIFT 8
+#define ISPCCDC_COLPTN_CP1PLC1_SHIFT 10
+#define ISPCCDC_COLPTN_CP1PLC2_SHIFT 12
+#define ISPCCDC_COLPTN_CP1PLC3_SHIFT 14
+#define ISPCCDC_COLPTN_CP2PLC0_SHIFT 16
+#define ISPCCDC_COLPTN_CP2PLC1_SHIFT 18
+#define ISPCCDC_COLPTN_CP2PLC2_SHIFT 20
+#define ISPCCDC_COLPTN_CP2PLC3_SHIFT 22
+#define ISPCCDC_COLPTN_CP3PLC0_SHIFT 24
+#define ISPCCDC_COLPTN_CP3PLC1_SHIFT 26
+#define ISPCCDC_COLPTN_CP3PLC2_SHIFT 28
+#define ISPCCDC_COLPTN_CP3PLC3_SHIFT 30
+
+#define ISPCCDC_BLKCMP_B_MG_SHIFT 0
+#define ISPCCDC_BLKCMP_GB_G_SHIFT 8
+#define ISPCCDC_BLKCMP_GR_CY_SHIFT 16
+#define ISPCCDC_BLKCMP_R_YE_SHIFT 24
+
+#define ISPCCDC_FPC_FPNUM_SHIFT 0
+#define ISPCCDC_FPC_FPCEN (1 << 15)
+#define ISPCCDC_FPC_FPERR (1 << 16)
+
+#define ISPCCDC_VDINT_1_SHIFT 0
+#define ISPCCDC_VDINT_1_MASK 0x00007fff
+#define ISPCCDC_VDINT_0_SHIFT 16
+#define ISPCCDC_VDINT_0_MASK 0x7fff0000
+
+#define ISPCCDC_ALAW_GWDI_12_3 (0x3 << 0)
+#define ISPCCDC_ALAW_GWDI_11_2 (0x4 << 0)
+#define ISPCCDC_ALAW_GWDI_10_1 (0x5 << 0)
+#define ISPCCDC_ALAW_GWDI_9_0 (0x6 << 0)
+#define ISPCCDC_ALAW_CCDTBL (1 << 3)
+
+#define ISPCCDC_REC656IF_R656ON 1
+#define ISPCCDC_REC656IF_ECCFVH (1 << 1)
+
+#define ISPCCDC_CFG_BW656 (1 << 5)
+#define ISPCCDC_CFG_FIDMD_SHIFT 6
+#define ISPCCDC_CFG_WENLOG (1 << 8)
+#define ISPCCDC_CFG_WENLOG_AND (0 << 8)
+#define ISPCCDC_CFG_WENLOG_OR (1 << 8)
+#define ISPCCDC_CFG_Y8POS (1 << 11)
+#define ISPCCDC_CFG_BSWD (1 << 12)
+#define ISPCCDC_CFG_MSBINVI (1 << 13)
+#define ISPCCDC_CFG_VDLC (1 << 15)
+
+#define ISPCCDC_FMTCFG_FMTEN 0x1
+#define ISPCCDC_FMTCFG_LNALT (1 << 1)
+#define ISPCCDC_FMTCFG_LNUM_SHIFT 2
+#define ISPCCDC_FMTCFG_PLEN_ODD_SHIFT 4
+#define ISPCCDC_FMTCFG_PLEN_EVEN_SHIFT 8
+#define ISPCCDC_FMTCFG_VPIN_MASK 0x00007000
+#define ISPCCDC_FMTCFG_VPIN_12_3 (0x3 << 12)
+#define ISPCCDC_FMTCFG_VPIN_11_2 (0x4 << 12)
+#define ISPCCDC_FMTCFG_VPIN_10_1 (0x5 << 12)
+#define ISPCCDC_FMTCFG_VPIN_9_0 (0x6 << 12)
+#define ISPCCDC_FMTCFG_VPEN (1 << 15)
+
+#define ISPCCDC_FMTCFG_VPIF_FRQ_MASK 0x003f0000
+#define ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT 16
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY2 (0x0 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY3 (0x1 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY4 (0x2 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY5 (0x3 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY6 (0x4 << 16)
+
+#define ISPCCDC_FMT_HORZ_FMTLNH_SHIFT 0
+#define ISPCCDC_FMT_HORZ_FMTSPH_SHIFT 16
+
+#define ISPCCDC_FMT_VERT_FMTLNV_SHIFT 0
+#define ISPCCDC_FMT_VERT_FMTSLV_SHIFT 16
+
+#define ISPCCDC_FMT_HORZ_FMTSPH_MASK 0x1fff0000
+#define ISPCCDC_FMT_HORZ_FMTLNH_MASK 0x00001fff
+
+#define ISPCCDC_FMT_VERT_FMTSLV_MASK 0x1fff0000
+#define ISPCCDC_FMT_VERT_FMTLNV_MASK 0x00001fff
+
+#define ISPCCDC_VP_OUT_HORZ_ST_SHIFT 0
+#define ISPCCDC_VP_OUT_HORZ_NUM_SHIFT 4
+#define ISPCCDC_VP_OUT_VERT_NUM_SHIFT 17
+
+#define ISPRSZ_PID_PREV_SHIFT 0
+#define ISPRSZ_PID_CID_SHIFT 8
+#define ISPRSZ_PID_TID_SHIFT 16
+
+#define ISPRSZ_PCR_ENABLE (1 << 0)
+#define ISPRSZ_PCR_BUSY (1 << 1)
+#define ISPRSZ_PCR_ONESHOT (1 << 2)
+
+#define ISPRSZ_CNT_HRSZ_SHIFT 0
+#define ISPRSZ_CNT_HRSZ_MASK \
+ (0x3FF << ISPRSZ_CNT_HRSZ_SHIFT)
+#define ISPRSZ_CNT_VRSZ_SHIFT 10
+#define ISPRSZ_CNT_VRSZ_MASK \
+ (0x3FF << ISPRSZ_CNT_VRSZ_SHIFT)
+#define ISPRSZ_CNT_HSTPH_SHIFT 20
+#define ISPRSZ_CNT_HSTPH_MASK (0x7 << ISPRSZ_CNT_HSTPH_SHIFT)
+#define ISPRSZ_CNT_VSTPH_SHIFT 23
+#define ISPRSZ_CNT_VSTPH_MASK (0x7 << ISPRSZ_CNT_VSTPH_SHIFT)
+#define ISPRSZ_CNT_YCPOS (1 << 26)
+#define ISPRSZ_CNT_INPTYP (1 << 27)
+#define ISPRSZ_CNT_INPSRC (1 << 28)
+#define ISPRSZ_CNT_CBILIN (1 << 29)
+
+#define ISPRSZ_OUT_SIZE_HORZ_SHIFT 0
+#define ISPRSZ_OUT_SIZE_HORZ_MASK \
+ (0xFFF << ISPRSZ_OUT_SIZE_HORZ_SHIFT)
+#define ISPRSZ_OUT_SIZE_VERT_SHIFT 16
+#define ISPRSZ_OUT_SIZE_VERT_MASK \
+ (0xFFF << ISPRSZ_OUT_SIZE_VERT_SHIFT)
+
+#define ISPRSZ_IN_START_HORZ_ST_SHIFT 0
+#define ISPRSZ_IN_START_HORZ_ST_MASK \
+ (0x1FFF << ISPRSZ_IN_START_HORZ_ST_SHIFT)
+#define ISPRSZ_IN_START_VERT_ST_SHIFT 16
+#define ISPRSZ_IN_START_VERT_ST_MASK \
+ (0x1FFF << ISPRSZ_IN_START_VERT_ST_SHIFT)
+
+#define ISPRSZ_IN_SIZE_HORZ_SHIFT 0
+#define ISPRSZ_IN_SIZE_HORZ_MASK \
+ (0x1FFF << ISPRSZ_IN_SIZE_HORZ_SHIFT)
+#define ISPRSZ_IN_SIZE_VERT_SHIFT 16
+#define ISPRSZ_IN_SIZE_VERT_MASK \
+ (0x1FFF << ISPRSZ_IN_SIZE_VERT_SHIFT)
+
+#define ISPRSZ_SDR_INADD_ADDR_SHIFT 0
+#define ISPRSZ_SDR_INADD_ADDR_MASK 0xFFFFFFFF
+
+#define ISPRSZ_SDR_INOFF_OFFSET_SHIFT 0
+#define ISPRSZ_SDR_INOFF_OFFSET_MASK \
+ (0xFFFF << ISPRSZ_SDR_INOFF_OFFSET_SHIFT)
+
+#define ISPRSZ_SDR_OUTADD_ADDR_SHIFT 0
+#define ISPRSZ_SDR_OUTADD_ADDR_MASK 0xFFFFFFFF
+
+#define ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT 0
+#define ISPRSZ_SDR_OUTOFF_OFFSET_MASK \
+ (0xFFFF << ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT)
+
+#define ISPRSZ_HFILT_COEF0_SHIFT 0
+#define ISPRSZ_HFILT_COEF0_MASK \
+ (0x3FF << ISPRSZ_HFILT_COEF0_SHIFT)
+#define ISPRSZ_HFILT_COEF1_SHIFT 16
+#define ISPRSZ_HFILT_COEF1_MASK \
+ (0x3FF << ISPRSZ_HFILT_COEF1_SHIFT)
+
+#define ISPRSZ_HFILT32_COEF2_SHIFT 0
+#define ISPRSZ_HFILT32_COEF2_MASK 0x3FF
+#define ISPRSZ_HFILT32_COEF3_SHIFT 16
+#define ISPRSZ_HFILT32_COEF3_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT54_COEF4_SHIFT 0
+#define ISPRSZ_HFILT54_COEF4_MASK 0x3FF
+#define ISPRSZ_HFILT54_COEF5_SHIFT 16
+#define ISPRSZ_HFILT54_COEF5_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT76_COEFF6_SHIFT 0
+#define ISPRSZ_HFILT76_COEFF6_MASK 0x3FF
+#define ISPRSZ_HFILT76_COEFF7_SHIFT 16
+#define ISPRSZ_HFILT76_COEFF7_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT98_COEFF8_SHIFT 0
+#define ISPRSZ_HFILT98_COEFF8_MASK 0x3FF
+#define ISPRSZ_HFILT98_COEFF9_SHIFT 16
+#define ISPRSZ_HFILT98_COEFF9_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT1110_COEF10_SHIFT 0
+#define ISPRSZ_HFILT1110_COEF10_MASK 0x3FF
+#define ISPRSZ_HFILT1110_COEF11_SHIFT 16
+#define ISPRSZ_HFILT1110_COEF11_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT1312_COEFF12_SHIFT 0
+#define ISPRSZ_HFILT1312_COEFF12_MASK 0x3FF
+#define ISPRSZ_HFILT1312_COEFF13_SHIFT 16
+#define ISPRSZ_HFILT1312_COEFF13_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT1514_COEFF14_SHIFT 0
+#define ISPRSZ_HFILT1514_COEFF14_MASK 0x3FF
+#define ISPRSZ_HFILT1514_COEFF15_SHIFT 16
+#define ISPRSZ_HFILT1514_COEFF15_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT1716_COEF16_SHIFT 0
+#define ISPRSZ_HFILT1716_COEF16_MASK 0x3FF
+#define ISPRSZ_HFILT1716_COEF17_SHIFT 16
+#define ISPRSZ_HFILT1716_COEF17_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT1918_COEF18_SHIFT 0
+#define ISPRSZ_HFILT1918_COEF18_MASK 0x3FF
+#define ISPRSZ_HFILT1918_COEF19_SHIFT 16
+#define ISPRSZ_HFILT1918_COEF19_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT2120_COEF20_SHIFT 0
+#define ISPRSZ_HFILT2120_COEF20_MASK 0x3FF
+#define ISPRSZ_HFILT2120_COEF21_SHIFT 16
+#define ISPRSZ_HFILT2120_COEF21_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT2322_COEF22_SHIFT 0
+#define ISPRSZ_HFILT2322_COEF22_MASK 0x3FF
+#define ISPRSZ_HFILT2322_COEF23_SHIFT 16
+#define ISPRSZ_HFILT2322_COEF23_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT2524_COEF24_SHIFT 0
+#define ISPRSZ_HFILT2524_COEF24_MASK 0x3FF
+#define ISPRSZ_HFILT2524_COEF25_SHIFT 16
+#define ISPRSZ_HFILT2524_COEF25_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT2726_COEF26_SHIFT 0
+#define ISPRSZ_HFILT2726_COEF26_MASK 0x3FF
+#define ISPRSZ_HFILT2726_COEF27_SHIFT 16
+#define ISPRSZ_HFILT2726_COEF27_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT2928_COEF28_SHIFT 0
+#define ISPRSZ_HFILT2928_COEF28_MASK 0x3FF
+#define ISPRSZ_HFILT2928_COEF29_SHIFT 16
+#define ISPRSZ_HFILT2928_COEF29_MASK 0x3FF0000
+
+#define ISPRSZ_HFILT3130_COEF30_SHIFT 0
+#define ISPRSZ_HFILT3130_COEF30_MASK 0x3FF
+#define ISPRSZ_HFILT3130_COEF31_SHIFT 16
+#define ISPRSZ_HFILT3130_COEF31_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT_COEF0_SHIFT 0
+#define ISPRSZ_VFILT_COEF0_MASK \
+ (0x3FF << ISPRSZ_VFILT_COEF0_SHIFT)
+#define ISPRSZ_VFILT_COEF1_SHIFT 16
+#define ISPRSZ_VFILT_COEF1_MASK \
+ (0x3FF << ISPRSZ_VFILT_COEF1_SHIFT)
+
+#define ISPRSZ_VFILT10_COEF0_SHIFT 0
+#define ISPRSZ_VFILT10_COEF0_MASK 0x3FF
+#define ISPRSZ_VFILT10_COEF1_SHIFT 16
+#define ISPRSZ_VFILT10_COEF1_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT32_COEF2_SHIFT 0
+#define ISPRSZ_VFILT32_COEF2_MASK 0x3FF
+#define ISPRSZ_VFILT32_COEF3_SHIFT 16
+#define ISPRSZ_VFILT32_COEF3_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT54_COEF4_SHIFT 0
+#define ISPRSZ_VFILT54_COEF4_MASK 0x3FF
+#define ISPRSZ_VFILT54_COEF5_SHIFT 16
+#define ISPRSZ_VFILT54_COEF5_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT76_COEFF6_SHIFT 0
+#define ISPRSZ_VFILT76_COEFF6_MASK 0x3FF
+#define ISPRSZ_VFILT76_COEFF7_SHIFT 16
+#define ISPRSZ_VFILT76_COEFF7_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT98_COEFF8_SHIFT 0
+#define ISPRSZ_VFILT98_COEFF8_MASK 0x3FF
+#define ISPRSZ_VFILT98_COEFF9_SHIFT 16
+#define ISPRSZ_VFILT98_COEFF9_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT1110_COEF10_SHIFT 0
+#define ISPRSZ_VFILT1110_COEF10_MASK 0x3FF
+#define ISPRSZ_VFILT1110_COEF11_SHIFT 16
+#define ISPRSZ_VFILT1110_COEF11_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT1312_COEFF12_SHIFT 0
+#define ISPRSZ_VFILT1312_COEFF12_MASK 0x3FF
+#define ISPRSZ_VFILT1312_COEFF13_SHIFT 16
+#define ISPRSZ_VFILT1312_COEFF13_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT1514_COEFF14_SHIFT 0
+#define ISPRSZ_VFILT1514_COEFF14_MASK 0x3FF
+#define ISPRSZ_VFILT1514_COEFF15_SHIFT 16
+#define ISPRSZ_VFILT1514_COEFF15_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT1716_COEF16_SHIFT 0
+#define ISPRSZ_VFILT1716_COEF16_MASK 0x3FF
+#define ISPRSZ_VFILT1716_COEF17_SHIFT 16
+#define ISPRSZ_VFILT1716_COEF17_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT1918_COEF18_SHIFT 0
+#define ISPRSZ_VFILT1918_COEF18_MASK 0x3FF
+#define ISPRSZ_VFILT1918_COEF19_SHIFT 16
+#define ISPRSZ_VFILT1918_COEF19_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT2120_COEF20_SHIFT 0
+#define ISPRSZ_VFILT2120_COEF20_MASK 0x3FF
+#define ISPRSZ_VFILT2120_COEF21_SHIFT 16
+#define ISPRSZ_VFILT2120_COEF21_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT2322_COEF22_SHIFT 0
+#define ISPRSZ_VFILT2322_COEF22_MASK 0x3FF
+#define ISPRSZ_VFILT2322_COEF23_SHIFT 16
+#define ISPRSZ_VFILT2322_COEF23_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT2524_COEF24_SHIFT 0
+#define ISPRSZ_VFILT2524_COEF24_MASK 0x3FF
+#define ISPRSZ_VFILT2524_COEF25_SHIFT 16
+#define ISPRSZ_VFILT2524_COEF25_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT2726_COEF26_SHIFT 0
+#define ISPRSZ_VFILT2726_COEF26_MASK 0x3FF
+#define ISPRSZ_VFILT2726_COEF27_SHIFT 16
+#define ISPRSZ_VFILT2726_COEF27_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT2928_COEF28_SHIFT 0
+#define ISPRSZ_VFILT2928_COEF28_MASK 0x3FF
+#define ISPRSZ_VFILT2928_COEF29_SHIFT 16
+#define ISPRSZ_VFILT2928_COEF29_MASK 0x3FF0000
+
+#define ISPRSZ_VFILT3130_COEF30_SHIFT 0
+#define ISPRSZ_VFILT3130_COEF30_MASK 0x3FF
+#define ISPRSZ_VFILT3130_COEF31_SHIFT 16
+#define ISPRSZ_VFILT3130_COEF31_MASK 0x3FF0000
+
+#define ISPRSZ_YENH_CORE_SHIFT 0
+#define ISPRSZ_YENH_CORE_MASK \
+ (0xFF << ISPRSZ_YENH_CORE_SHIFT)
+#define ISPRSZ_YENH_SLOP_SHIFT 8
+#define ISPRSZ_YENH_SLOP_MASK \
+ (0xF << ISPRSZ_YENH_SLOP_SHIFT)
+#define ISPRSZ_YENH_GAIN_SHIFT 12
+#define ISPRSZ_YENH_GAIN_MASK \
+ (0xF << ISPRSZ_YENH_GAIN_SHIFT)
+#define ISPRSZ_YENH_ALGO_SHIFT 16
+#define ISPRSZ_YENH_ALGO_MASK \
+ (0x3 << ISPRSZ_YENH_ALGO_SHIFT)
+
+#define ISPH3A_PCR_AEW_ALAW_EN_SHIFT 1
+#define ISPH3A_PCR_AF_MED_TH_SHIFT 3
+#define ISPH3A_PCR_AF_RGBPOS_SHIFT 11
+#define ISPH3A_PCR_AEW_AVE2LMT_SHIFT 22
+#define ISPH3A_PCR_AEW_AVE2LMT_MASK 0xFFC00000
+#define ISPH3A_PCR_BUSYAF (1 << 15)
+#define ISPH3A_PCR_BUSYAEAWB (1 << 18)
+
+#define ISPH3A_AEWWIN1_WINHC_SHIFT 0
+#define ISPH3A_AEWWIN1_WINHC_MASK 0x3F
+#define ISPH3A_AEWWIN1_WINVC_SHIFT 6
+#define ISPH3A_AEWWIN1_WINVC_MASK 0x1FC0
+#define ISPH3A_AEWWIN1_WINW_SHIFT 13
+#define ISPH3A_AEWWIN1_WINW_MASK 0xFE000
+#define ISPH3A_AEWWIN1_WINH_SHIFT 24
+#define ISPH3A_AEWWIN1_WINH_MASK 0x7F000000
+
+#define ISPH3A_AEWINSTART_WINSH_SHIFT 0
+#define ISPH3A_AEWINSTART_WINSH_MASK 0x0FFF
+#define ISPH3A_AEWINSTART_WINSV_SHIFT 16
+#define ISPH3A_AEWINSTART_WINSV_MASK 0x0FFF0000
+
+#define ISPH3A_AEWINBLK_WINH_SHIFT 0
+#define ISPH3A_AEWINBLK_WINH_MASK 0x7F
+#define ISPH3A_AEWINBLK_WINSV_SHIFT 16
+#define ISPH3A_AEWINBLK_WINSV_MASK 0x0FFF0000
+
+#define ISPH3A_AEWSUBWIN_AEWINCH_SHIFT 0
+#define ISPH3A_AEWSUBWIN_AEWINCH_MASK 0x0F
+#define ISPH3A_AEWSUBWIN_AEWINCV_SHIFT 8
+#define ISPH3A_AEWSUBWIN_AEWINCV_MASK 0x0F00
+
+#define ISPHIST_PCR_ENABLE_SHIFT 0
+#define ISPHIST_PCR_ENABLE_MASK 0x01
+#define ISPHIST_PCR_ENABLE (1 << ISPHIST_PCR_ENABLE_SHIFT)
+#define ISPHIST_PCR_BUSY 0x02
+
+#define ISPHIST_CNT_DATASIZE_SHIFT 8
+#define ISPHIST_CNT_DATASIZE_MASK 0x0100
+#define ISPHIST_CNT_CLEAR_SHIFT 7
+#define ISPHIST_CNT_CLEAR_MASK 0x080
+#define ISPHIST_CNT_CLEAR (1 << ISPHIST_CNT_CLEAR_SHIFT)
+#define ISPHIST_CNT_CFA_SHIFT 6
+#define ISPHIST_CNT_CFA_MASK 0x040
+#define ISPHIST_CNT_BINS_SHIFT 4
+#define ISPHIST_CNT_BINS_MASK 0x030
+#define ISPHIST_CNT_SOURCE_SHIFT 3
+#define ISPHIST_CNT_SOURCE_MASK 0x08
+#define ISPHIST_CNT_SHIFT_SHIFT 0
+#define ISPHIST_CNT_SHIFT_MASK 0x07
+
+#define ISPHIST_WB_GAIN_WG00_SHIFT 24
+#define ISPHIST_WB_GAIN_WG00_MASK 0xFF000000
+#define ISPHIST_WB_GAIN_WG01_SHIFT 16
+#define ISPHIST_WB_GAIN_WG01_MASK 0xFF0000
+#define ISPHIST_WB_GAIN_WG02_SHIFT 8
+#define ISPHIST_WB_GAIN_WG02_MASK 0xFF00
+#define ISPHIST_WB_GAIN_WG03_SHIFT 0
+#define ISPHIST_WB_GAIN_WG03_MASK 0xFF
+
+#define ISPHIST_REG_START_END_MASK 0x3FFF
+#define ISPHIST_REG_START_SHIFT 16
+#define ISPHIST_REG_END_SHIFT 0
+#define ISPHIST_REG_START_MASK (ISPHIST_REG_START_END_MASK << \
+ ISPHIST_REG_START_SHIFT)
+#define ISPHIST_REG_END_MASK (ISPHIST_REG_START_END_MASK << \
+ ISPHIST_REG_END_SHIFT)
+
+#define ISPHIST_REG_MASK (ISPHIST_REG_START_MASK | \
+ ISPHIST_REG_END_MASK)
+
+#define ISPHIST_ADDR_SHIFT 0
+#define ISPHIST_ADDR_MASK 0x3FF
+
+#define ISPHIST_DATA_SHIFT 0
+#define ISPHIST_DATA_MASK 0xFFFFF
+
+#define ISPHIST_RADD_SHIFT 0
+#define ISPHIST_RADD_MASK 0xFFFFFFFF
+
+#define ISPHIST_RADD_OFF_SHIFT 0
+#define ISPHIST_RADD_OFF_MASK 0xFFFF
+
+#define ISPHIST_HV_INFO_HSIZE_SHIFT 16
+#define ISPHIST_HV_INFO_HSIZE_MASK 0x3FFF0000
+#define ISPHIST_HV_INFO_VSIZE_SHIFT 0
+#define ISPHIST_HV_INFO_VSIZE_MASK 0x3FFF
+
+#define ISPHIST_HV_INFO_MASK 0x3FFF3FFF
+
+#define ISPCCDC_LSC_ENABLE 1
+#define ISPCCDC_LSC_BUSY (1 << 7)
+#define ISPCCDC_LSC_GAIN_MODE_N_MASK 0x700
+#define ISPCCDC_LSC_GAIN_MODE_N_SHIFT 8
+#define ISPCCDC_LSC_GAIN_MODE_M_MASK 0x3800
+#define ISPCCDC_LSC_GAIN_MODE_M_SHIFT 12
+#define ISPCCDC_LSC_GAIN_FORMAT_MASK 0xE
+#define ISPCCDC_LSC_GAIN_FORMAT_SHIFT 1
+#define ISPCCDC_LSC_AFTER_REFORMATTER_MASK (1<<6)
+
+#define ISPCCDC_LSC_INITIAL_X_MASK 0x3F
+#define ISPCCDC_LSC_INITIAL_X_SHIFT 0
+#define ISPCCDC_LSC_INITIAL_Y_MASK 0x3F0000
+#define ISPCCDC_LSC_INITIAL_Y_SHIFT 16
+
+/* -----------------------------------------------------------------------------
+ * CSI2 receiver registers (ES2.0)
+ */
+
+#define ISPCSI2_REVISION (0x000)
+#define ISPCSI2_SYSCONFIG (0x010)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT 12
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK \
+ (0x3 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_FORCE \
+ (0x0 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO \
+ (0x1 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART \
+ (0x2 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_SOFT_RESET (1 << 1)
+#define ISPCSI2_SYSCONFIG_AUTO_IDLE (1 << 0)
+
+#define ISPCSI2_SYSSTATUS (0x014)
+#define ISPCSI2_SYSSTATUS_RESET_DONE (1 << 0)
+
+#define ISPCSI2_IRQSTATUS (0x018)
+#define ISPCSI2_IRQSTATUS_OCP_ERR_IRQ (1 << 14)
+#define ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ (1 << 13)
+#define ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 12)
+#define ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ (1 << 11)
+#define ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ (1 << 10)
+#define ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ (1 << 9)
+#define ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ (1 << 8)
+#define ISPCSI2_IRQSTATUS_CONTEXT(n) (1 << (n))
+
+#define ISPCSI2_IRQENABLE (0x01c)
+#define ISPCSI2_CTRL (0x040)
+#define ISPCSI2_CTRL_VP_CLK_EN (1 << 15)
+#define ISPCSI2_CTRL_VP_ONLY_EN (1 << 11)
+#define ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT 8
+#define ISPCSI2_CTRL_VP_OUT_CTRL_MASK \
+ (3 << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
+#define ISPCSI2_CTRL_DBG_EN (1 << 7)
+#define ISPCSI2_CTRL_BURST_SIZE_SHIFT 5
+#define ISPCSI2_CTRL_BURST_SIZE_MASK \
+ (3 << ISPCSI2_CTRL_BURST_SIZE_SHIFT)
+#define ISPCSI2_CTRL_FRAME (1 << 3)
+#define ISPCSI2_CTRL_ECC_EN (1 << 2)
+#define ISPCSI2_CTRL_SECURE (1 << 1)
+#define ISPCSI2_CTRL_IF_EN (1 << 0)
+
+#define ISPCSI2_DBG_H (0x044)
+#define ISPCSI2_GNQ (0x048)
+#define ISPCSI2_PHY_CFG (0x050)
+#define ISPCSI2_PHY_CFG_RESET_CTRL (1 << 30)
+#define ISPCSI2_PHY_CFG_RESET_DONE (1 << 29)
+#define ISPCSI2_PHY_CFG_PWR_CMD_SHIFT 27
+#define ISPCSI2_PHY_CFG_PWR_CMD_MASK \
+ (0x3 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_CMD_OFF \
+ (0x0 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_CMD_ON \
+ (0x1 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_CMD_ULPW \
+ (0x2 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT 25
+#define ISPCSI2_PHY_CFG_PWR_STATUS_MASK \
+ (0x3 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_OFF \
+ (0x0 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_ON \
+ (0x1 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_ULPW \
+ (0x2 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_AUTO (1 << 24)
+
+#define ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n) (3 + ((n) * 4))
+#define ISPCSI2_PHY_CFG_DATA_POL_MASK(n) \
+ (0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POL_PN(n) \
+ (0x0 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POL_NP(n) \
+ (0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
+
+#define ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n) ((n) * 4)
+#define ISPCSI2_PHY_CFG_DATA_POSITION_MASK(n) \
+ (0x7 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_NC(n) \
+ (0x0 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_1(n) \
+ (0x1 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_2(n) \
+ (0x2 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_3(n) \
+ (0x3 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_4(n) \
+ (0x4 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_5(n) \
+ (0x5 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+
+#define ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT 3
+#define ISPCSI2_PHY_CFG_CLOCK_POL_MASK \
+ (0x1 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POL_PN \
+ (0x0 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POL_NP \
+ (0x1 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
+
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT 0
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK \
+ (0x7 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_1 \
+ (0x1 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_2 \
+ (0x2 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_3 \
+ (0x3 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_4 \
+ (0x4 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_5 \
+ (0x5 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+
+#define ISPCSI2_PHY_IRQSTATUS (0x054)
+#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMEXIT (1 << 26)
+#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMENTER (1 << 25)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM5 (1 << 24)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM4 (1 << 23)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM3 (1 << 22)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM2 (1 << 21)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM1 (1 << 20)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL5 (1 << 19)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL4 (1 << 18)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL3 (1 << 17)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL2 (1 << 16)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL1 (1 << 15)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC5 (1 << 14)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC4 (1 << 13)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC3 (1 << 12)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC2 (1 << 11)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC1 (1 << 10)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS5 (1 << 9)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS4 (1 << 8)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS3 (1 << 7)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS2 (1 << 6)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS1 (1 << 5)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS5 (1 << 4)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS4 (1 << 3)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS3 (1 << 2)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS2 (1 << 1)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS1 1
+
+#define ISPCSI2_SHORT_PACKET (0x05c)
+#define ISPCSI2_PHY_IRQENABLE (0x060)
+#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT (1 << 26)
+#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER (1 << 25)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM5 (1 << 24)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM4 (1 << 23)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM3 (1 << 22)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM2 (1 << 21)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM1 (1 << 20)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 (1 << 19)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 (1 << 18)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 (1 << 17)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 (1 << 16)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 (1 << 15)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC5 (1 << 14)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC4 (1 << 13)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC3 (1 << 12)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC2 (1 << 11)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC1 (1 << 10)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 (1 << 9)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 (1 << 8)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 (1 << 7)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 (1 << 6)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 (1 << 5)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 (1 << 4)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 (1 << 3)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 (1 << 2)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 (1 << 1)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS1 (1 << 0)
+
+#define ISPCSI2_DBG_P (0x068)
+#define ISPCSI2_TIMING (0x06c)
+#define ISPCSI2_TIMING_FORCE_RX_MODE_IO(n) (1 << ((16 * ((n) - 1)) + 15))
+#define ISPCSI2_TIMING_STOP_STATE_X16_IO(n) (1 << ((16 * ((n) - 1)) + 14))
+#define ISPCSI2_TIMING_STOP_STATE_X4_IO(n) (1 << ((16 * ((n) - 1)) + 13))
+#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n) (16 * ((n) - 1))
+#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(n) \
+ (0x1fff << ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n))
+
+#define ISPCSI2_CTX_CTRL1(n) ((0x070) + 0x20 * (n))
+#define ISPCSI2_CTX_CTRL1_COUNT_SHIFT 8
+#define ISPCSI2_CTX_CTRL1_COUNT_MASK \
+ (0xff << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
+#define ISPCSI2_CTX_CTRL1_EOF_EN (1 << 7)
+#define ISPCSI2_CTX_CTRL1_EOL_EN (1 << 6)
+#define ISPCSI2_CTX_CTRL1_CS_EN (1 << 5)
+#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK (1 << 4)
+#define ISPCSI2_CTX_CTRL1_PING_PONG (1 << 3)
+#define ISPCSI2_CTX_CTRL1_CTX_EN (1 << 0)
+
+#define ISPCSI2_CTX_CTRL2(n) ((0x074) + 0x20 * (n))
+#define ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT 13
+#define ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK \
+ (0x3 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
+#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT 11
+#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK \
+ (0x3 << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT)
+#define ISPCSI2_CTX_CTRL2_DPCM_PRED (1 << 10)
+#define ISPCSI2_CTX_CTRL2_FORMAT_SHIFT 0
+#define ISPCSI2_CTX_CTRL2_FORMAT_MASK \
+ (0x3ff << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT)
+#define ISPCSI2_CTX_CTRL2_FRAME_SHIFT 16
+#define ISPCSI2_CTX_CTRL2_FRAME_MASK \
+ (0xffff << ISPCSI2_CTX_CTRL2_FRAME_SHIFT)
+
+#define ISPCSI2_CTX_DAT_OFST(n) ((0x078) + 0x20 * (n))
+#define ISPCSI2_CTX_DAT_OFST_OFST_SHIFT 0
+#define ISPCSI2_CTX_DAT_OFST_OFST_MASK \
+ (0x1ffe0 << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT)
+
+#define ISPCSI2_CTX_DAT_PING_ADDR(n) ((0x07c) + 0x20 * (n))
+#define ISPCSI2_CTX_DAT_PONG_ADDR(n) ((0x080) + 0x20 * (n))
+#define ISPCSI2_CTX_IRQENABLE(n) ((0x084) + 0x20 * (n))
+#define ISPCSI2_CTX_IRQENABLE_ECC_CORRECTION_IRQ (1 << 8)
+#define ISPCSI2_CTX_IRQENABLE_LINE_NUMBER_IRQ (1 << 7)
+#define ISPCSI2_CTX_IRQENABLE_FRAME_NUMBER_IRQ (1 << 6)
+#define ISPCSI2_CTX_IRQENABLE_CS_IRQ (1 << 5)
+#define ISPCSI2_CTX_IRQENABLE_LE_IRQ (1 << 3)
+#define ISPCSI2_CTX_IRQENABLE_LS_IRQ (1 << 2)
+#define ISPCSI2_CTX_IRQENABLE_FE_IRQ (1 << 1)
+#define ISPCSI2_CTX_IRQENABLE_FS_IRQ (1 << 0)
+
+#define ISPCSI2_CTX_IRQSTATUS(n) ((0x088) + 0x20 * (n))
+#define ISPCSI2_CTX_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 8)
+#define ISPCSI2_CTX_IRQSTATUS_LINE_NUMBER_IRQ (1 << 7)
+#define ISPCSI2_CTX_IRQSTATUS_FRAME_NUMBER_IRQ (1 << 6)
+#define ISPCSI2_CTX_IRQSTATUS_CS_IRQ (1 << 5)
+#define ISPCSI2_CTX_IRQSTATUS_LE_IRQ (1 << 3)
+#define ISPCSI2_CTX_IRQSTATUS_LS_IRQ (1 << 2)
+#define ISPCSI2_CTX_IRQSTATUS_FE_IRQ (1 << 1)
+#define ISPCSI2_CTX_IRQSTATUS_FS_IRQ (1 << 0)
+
+#define ISPCSI2_CTX_CTRL3(n) ((0x08c) + 0x20 * (n))
+#define ISPCSI2_CTX_CTRL3_ALPHA_SHIFT 5
+#define ISPCSI2_CTX_CTRL3_ALPHA_MASK \
+ (0x3fff << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT)
+
+/* This instance is for OMAP3630 only */
+#define ISPCSI2_CTX_TRANSCODEH(n) (0x000 + 0x8 * (n))
+#define ISPCSI2_CTX_TRANSCODEH_HCOUNT_SHIFT 16
+#define ISPCSI2_CTX_TRANSCODEH_HCOUNT_MASK \
+ (0x1fff << ISPCSI2_CTX_TRANSCODEH_HCOUNT_SHIFT)
+#define ISPCSI2_CTX_TRANSCODEH_HSKIP_SHIFT 0
+#define ISPCSI2_CTX_TRANSCODEH_HSKIP_MASK \
+ (0x1fff << ISPCSI2_CTX_TRANSCODEH_HSKIP_SHIFT)
+#define ISPCSI2_CTX_TRANSCODEV(n) (0x004 + 0x8 * (n))
+#define ISPCSI2_CTX_TRANSCODEV_VCOUNT_SHIFT 16
+#define ISPCSI2_CTX_TRANSCODEV_VCOUNT_MASK \
+ (0x1fff << ISPCSI2_CTX_TRANSCODEV_VCOUNT_SHIFT)
+#define ISPCSI2_CTX_TRANSCODEV_VSKIP_SHIFT 0
+#define ISPCSI2_CTX_TRANSCODEV_VSKIP_MASK \
+ (0x1fff << ISPCSI2_CTX_TRANSCODEV_VSKIP_SHIFT)
+
+/* -----------------------------------------------------------------------------
+ * CSI PHY registers
+ */
+
+#define ISPCSIPHY_REG0 (0x000)
+#define ISPCSIPHY_REG0_THS_TERM_SHIFT 8
+#define ISPCSIPHY_REG0_THS_TERM_MASK \
+ (0xff << ISPCSIPHY_REG0_THS_TERM_SHIFT)
+#define ISPCSIPHY_REG0_THS_SETTLE_SHIFT 0
+#define ISPCSIPHY_REG0_THS_SETTLE_MASK \
+ (0xff << ISPCSIPHY_REG0_THS_SETTLE_SHIFT)
+
+#define ISPCSIPHY_REG1 (0x004)
+#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK (1 << 29)
+/* This field is for OMAP3630 only */
+#define ISPCSIPHY_REG1_CLOCK_MISS_DETECTOR_STATUS (1 << 25)
+#define ISPCSIPHY_REG1_TCLK_TERM_SHIFT 18
+#define ISPCSIPHY_REG1_TCLK_TERM_MASK \
+ (0x7f << ISPCSIPHY_REG1_TCLK_TERM_SHIFT)
+#define ISPCSIPHY_REG1_DPHY_HS_SYNC_PATTERN_SHIFT 10
+#define ISPCSIPHY_REG1_DPHY_HS_SYNC_PATTERN_MASK \
+ (0xff << ISPCSIPHY_REG1_DPHY_HS_SYNC_PATTERN_SHIFT)
+/* This field is for OMAP3430 only */
+#define ISPCSIPHY_REG1_TCLK_MISS_SHIFT 8
+#define ISPCSIPHY_REG1_TCLK_MISS_MASK \
+ (0x3 << ISPCSIPHY_REG1_TCLK_MISS_SHIFT)
+/* This field is for OMAP3630 only */
+#define ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_SHIFT 8
+#define ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_MASK \
+ (0x3 << ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_SHIFT)
+#define ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT 0
+#define ISPCSIPHY_REG1_TCLK_SETTLE_MASK \
+ (0xff << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT)
+
+/* This register is for OMAP3630 only */
+#define ISPCSIPHY_REG2 (0x008)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_SHIFT 30
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK \
+ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_SHIFT)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_SHIFT 28
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK \
+ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_SHIFT)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_SHIFT 26
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK \
+ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_SHIFT)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_SHIFT 24
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK \
+ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_SHIFT)
+#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT 0
+#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_MASK \
+ (0x7fffff << ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT)
+
+/* -----------------------------------------------------------------------------
+ * CONTROL registers for CSI-2 phy routing
+ */
+
+/* OMAP343X_CONTROL_CSIRXFE */
+#define OMAP343X_CONTROL_CSIRXFE_CSIB_INV (1 << 7)
+#define OMAP343X_CONTROL_CSIRXFE_RESENABLE (1 << 8)
+#define OMAP343X_CONTROL_CSIRXFE_SELFORM (1 << 10)
+#define OMAP343X_CONTROL_CSIRXFE_PWRDNZ (1 << 12)
+#define OMAP343X_CONTROL_CSIRXFE_RESET (1 << 13)
+
+/* OMAP3630_CONTROL_CAMERA_PHY_CTRL */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT 2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT 0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY 0x0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE 0x1
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK 0x2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_GPI 0x3
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK 0x3
+/* CCP2B: set to receive data from PHY2 instead of PHY1 */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2 (1 << 4)
+
+#endif /* OMAP3_ISP_REG_H */
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
new file mode 100644
index 000000000..2035e3c6a
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -0,0 +1,1796 @@
+/*
+ * ispresizer.c
+ *
+ * TI OMAP3 ISP - Resizer module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispresizer.h"
+
+/*
+ * Resizer Constants
+ */
+#define MIN_RESIZE_VALUE 64
+#define MID_RESIZE_VALUE 512
+#define MAX_RESIZE_VALUE 1024
+
+#define MIN_IN_WIDTH 32
+#define MIN_IN_HEIGHT 32
+#define MAX_IN_WIDTH_MEMORY_MODE 4095
+#define MAX_IN_WIDTH_ONTHEFLY_MODE_ES1 1280
+#define MAX_IN_WIDTH_ONTHEFLY_MODE_ES2 4095
+#define MAX_IN_HEIGHT 4095
+
+#define MIN_OUT_WIDTH 16
+#define MIN_OUT_HEIGHT 2
+#define MAX_OUT_HEIGHT 4095
+
+/*
+ * Resizer Use Constraints
+ * "TRM ES3.1, table 12-46"
+ */
+#define MAX_4TAP_OUT_WIDTH_ES1 1280
+#define MAX_7TAP_OUT_WIDTH_ES1 640
+#define MAX_4TAP_OUT_WIDTH_ES2 3312
+#define MAX_7TAP_OUT_WIDTH_ES2 1650
+#define MAX_4TAP_OUT_WIDTH_3630 4096
+#define MAX_7TAP_OUT_WIDTH_3630 2048
+
+/*
+ * Constants for ratio calculation
+ */
+#define RESIZE_DIVISOR 256
+#define DEFAULT_PHASE 1
+
+/*
+ * Default (and only) configuration of filter coefficients.
+ * 7-tap mode is for scale factors 0.25x to 0.5x.
+ * 4-tap mode is for scale factors 0.5x to 4.0x.
+ * There shouldn't be any reason to recalculate these, EVER.
+ */
+static const struct isprsz_coef filter_coefs = {
+ /* For 8-phase 4-tap horizontal filter: */
+ {
+ 0x0000, 0x0100, 0x0000, 0x0000,
+ 0x03FA, 0x00F6, 0x0010, 0x0000,
+ 0x03F9, 0x00DB, 0x002C, 0x0000,
+ 0x03FB, 0x00B3, 0x0053, 0x03FF,
+ 0x03FD, 0x0082, 0x0084, 0x03FD,
+ 0x03FF, 0x0053, 0x00B3, 0x03FB,
+ 0x0000, 0x002C, 0x00DB, 0x03F9,
+ 0x0000, 0x0010, 0x00F6, 0x03FA
+ },
+ /* For 8-phase 4-tap vertical filter: */
+ {
+ 0x0000, 0x0100, 0x0000, 0x0000,
+ 0x03FA, 0x00F6, 0x0010, 0x0000,
+ 0x03F9, 0x00DB, 0x002C, 0x0000,
+ 0x03FB, 0x00B3, 0x0053, 0x03FF,
+ 0x03FD, 0x0082, 0x0084, 0x03FD,
+ 0x03FF, 0x0053, 0x00B3, 0x03FB,
+ 0x0000, 0x002C, 0x00DB, 0x03F9,
+ 0x0000, 0x0010, 0x00F6, 0x03FA
+ },
+ /* For 4-phase 7-tap horizontal filter: */
+ #define DUMMY 0
+ {
+ 0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY,
+ 0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY,
+ 0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY,
+ 0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY
+ },
+ /* For 4-phase 7-tap vertical filter: */
+ {
+ 0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY,
+ 0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY,
+ 0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY,
+ 0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY
+ }
+ /*
+ * The dummy padding is required in 7-tap mode because of how the
+ * registers are arranged physically.
+ */
+ #undef DUMMY
+};
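+
+/*
+ * Illustrative note (not part of the original code): with RESIZE_DIVISOR set
+ * to 256, a ratio value of 256 corresponds roughly to a 1:1 scale (the exact
+ * TRM formula also accounts for the filter taps and phase), so ratios in
+ * [MIN_RESIZE_VALUE, MID_RESIZE_VALUE] cover 4.0x down to 0.5x scaling
+ * (4-tap mode) and ratios above MID_RESIZE_VALUE up to MAX_RESIZE_VALUE
+ * cover 0.5x down to 0.25x (7-tap mode). A hypothetical helper selecting the
+ * filter mode from a ratio value could therefore look like:
+ *
+ *	static bool resizer_ratio_needs_7tap(unsigned int ratio)
+ *	{
+ *		return ratio > MID_RESIZE_VALUE;
+ *	}
+ */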
+
+/*
+ * __resizer_get_format - helper function for getting resizer format
+ * @res : pointer to resizer private structure
+ * @cfg : V4L2 subdev pad configuration
+ * @pad : pad number
+ * @which : wanted subdev format (active or try)
+ *
+ * Return a pointer to the requested format.
+ */
+static struct v4l2_mbus_framefmt *
+__resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&res->subdev, cfg, pad);
+ else
+ return &res->formats[pad];
+}
+
+/*
+ * __resizer_get_crop - helper function for getting resizer crop rectangle
+ * @res : pointer to resizer private structure
+ * @cfg: V4L2 subdev pad configuration
+ * @which : wanted subdev crop rectangle (active or try)
+ *
+ * Return a pointer to the requested crop rectangle on the sink pad.
+ */
+static struct v4l2_rect *
+__resizer_get_crop(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&res->subdev, cfg, RESZ_PAD_SINK);
+ else
+ return &res->crop.request;
+}
+
+/*
+ * resizer_set_filters - Set resizer filters
+ * @res: Device context.
+ * @h_coeff: horizontal coefficient
+ * @v_coeff: vertical coefficient
+ * Return none
+ */
+static void resizer_set_filters(struct isp_res_device *res, const u16 *h_coeff,
+ const u16 *v_coeff)
+{
+ struct isp_device *isp = to_isp_device(res);
+ u32 startaddr_h, startaddr_v, tmp_h, tmp_v;
+ int i;
+
+ startaddr_h = ISPRSZ_HFILT10;
+ startaddr_v = ISPRSZ_VFILT10;
+
+ for (i = 0; i < COEFF_CNT; i += 2) {
+ tmp_h = h_coeff[i] |
+ (h_coeff[i + 1] << ISPRSZ_HFILT_COEF1_SHIFT);
+ tmp_v = v_coeff[i] |
+ (v_coeff[i + 1] << ISPRSZ_VFILT_COEF1_SHIFT);
+ isp_reg_writel(isp, tmp_h, OMAP3_ISP_IOMEM_RESZ, startaddr_h);
+ isp_reg_writel(isp, tmp_v, OMAP3_ISP_IOMEM_RESZ, startaddr_v);
+ startaddr_h += 4;
+ startaddr_v += 4;
+ }
+}
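+
+/*
+ * Worked example (illustrative): for the first 4-tap horizontal phase in
+ * filter_coefs above, h_coeff[0] = 0x0000 and h_coeff[1] = 0x0100, so the
+ * value written to ISPRSZ_HFILT10 is
+ * 0x0000 | (0x0100 << ISPRSZ_HFILT_COEF1_SHIFT) = 0x01000000,
+ * i.e. two 10-bit coefficients packed into bits [9:0] and [25:16].
+ */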
+
+/*
+ * resizer_set_bilinear - Chrominance horizontal algorithm select
+ * @res: Device context.
+ * @type: Filtering interpolation type.
+ *
+ * Filtering that is the same as the luminance processing is intended
+ * only for downsampling; bilinear interpolation is intended only for
+ * upsampling.
+ */
+static void resizer_set_bilinear(struct isp_res_device *res,
+ enum resizer_chroma_algo type)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ if (type == RSZ_BILINEAR)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_CBILIN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_CBILIN);
+}
+
+/*
+ * resizer_set_ycpos - Luminance and chrominance order
+ * @res: Device context.
+ * @pixelcode: pixel code.
+ */
+static void resizer_set_ycpos(struct isp_res_device *res, u32 pixelcode)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ switch (pixelcode) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_YCPOS);
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_YCPOS);
+ break;
+ default:
+ return;
+ }
+}
+
+/*
+ * resizer_set_phase - Setup horizontal and vertical starting phase
+ * @res: Device context.
+ * @h_phase: horizontal phase parameters.
+ * @v_phase: vertical phase parameters.
+ *
+ * Horizontal and vertical phase range is 0 to 7
+ */
+static void resizer_set_phase(struct isp_res_device *res, u32 h_phase,
+ u32 v_phase)
+{
+ struct isp_device *isp = to_isp_device(res);
+ u32 rgval;
+
+ rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
+ ~(ISPRSZ_CNT_HSTPH_MASK | ISPRSZ_CNT_VSTPH_MASK);
+ rgval |= (h_phase << ISPRSZ_CNT_HSTPH_SHIFT) & ISPRSZ_CNT_HSTPH_MASK;
+ rgval |= (v_phase << ISPRSZ_CNT_VSTPH_SHIFT) & ISPRSZ_CNT_VSTPH_MASK;
+
+ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT);
+}
+
+/*
+ * resizer_set_luma - Setup luminance enhancer parameters
+ * @res: Device context.
+ * @luma: Structure for luminance enhancer parameters.
+ *
+ * Algorithm select:
+ * 0x0: Disable
+ * 0x1: [-1 2 -1]/2 high-pass filter
+ * 0x2: [-1 -2 6 -2 -1]/4 high-pass filter
+ *
+ * Maximum gain:
+ * The data is coded in U4Q4 representation.
+ *
+ * Slope:
+ * The data is coded in U4Q4 representation.
+ *
+ * Coring offset:
+ * The data is coded in U8Q0 representation.
+ *
+ * The new luminance value is computed as:
+ * Y += HPF(Y) x max(GAIN, (HPF(Y) - CORE) x SLOP + 8) >> 4.
+ */
+static void resizer_set_luma(struct isp_res_device *res,
+ struct resizer_luma_yenh *luma)
+{
+ struct isp_device *isp = to_isp_device(res);
+ u32 rgval;
+
+ rgval = (luma->algo << ISPRSZ_YENH_ALGO_SHIFT)
+ & ISPRSZ_YENH_ALGO_MASK;
+ rgval |= (luma->gain << ISPRSZ_YENH_GAIN_SHIFT)
+ & ISPRSZ_YENH_GAIN_MASK;
+ rgval |= (luma->slope << ISPRSZ_YENH_SLOP_SHIFT)
+ & ISPRSZ_YENH_SLOP_MASK;
+ rgval |= (luma->core << ISPRSZ_YENH_CORE_SHIFT)
+ & ISPRSZ_YENH_CORE_MASK;
+
+ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_YENH);
+}
+
+/*
+ * resizer_set_source - Input source select
+ * @res: Device context.
+ * @source: Input source type
+ *
+ * If this field is set to RESIZER_INPUT_VP, the resizer input is fed from
+ * Preview/CCDC engine, otherwise from memory.
+ */
+static void resizer_set_source(struct isp_res_device *res,
+ enum resizer_input_entity source)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ if (source == RESIZER_INPUT_MEMORY)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_INPSRC);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_INPSRC);
+}
+
+/*
+ * resizer_set_ratio - Setup horizontal and vertical resizing value
+ * @res: Device context.
+ * @ratio: Structure for ratio parameters.
+ *
+ * Resizing range from 64 to 1024
+ */
+static void resizer_set_ratio(struct isp_res_device *res,
+ const struct resizer_ratio *ratio)
+{
+ struct isp_device *isp = to_isp_device(res);
+ const u16 *h_filter, *v_filter;
+ u32 rgval;
+
+ rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
+ ~(ISPRSZ_CNT_HRSZ_MASK | ISPRSZ_CNT_VRSZ_MASK);
+ rgval |= ((ratio->horz - 1) << ISPRSZ_CNT_HRSZ_SHIFT)
+ & ISPRSZ_CNT_HRSZ_MASK;
+ rgval |= ((ratio->vert - 1) << ISPRSZ_CNT_VRSZ_SHIFT)
+ & ISPRSZ_CNT_VRSZ_MASK;
+ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT);
+
+ /* prepare horizontal filter coefficients */
+ if (ratio->horz > MID_RESIZE_VALUE)
+ h_filter = &filter_coefs.h_filter_coef_7tap[0];
+ else
+ h_filter = &filter_coefs.h_filter_coef_4tap[0];
+
+ /* prepare vertical filter coefficients */
+ if (ratio->vert > MID_RESIZE_VALUE)
+ v_filter = &filter_coefs.v_filter_coef_7tap[0];
+ else
+ v_filter = &filter_coefs.v_filter_coef_4tap[0];
+
+ resizer_set_filters(res, h_filter, v_filter);
+}
+
+/*
+ * resizer_set_dst_size - Setup the output height and width
+ * @res: Device context.
+ * @width: Output width.
+ * @height: Output height.
+ *
+ * Width :
+ * The value must be EVEN.
+ *
+ * Height:
+ * The number of bytes written to SDRAM must be
+ * a multiple of 16-bytes if the vertical resizing factor
+ * is greater than 1x (upsizing)
+ */
+static void resizer_set_output_size(struct isp_res_device *res,
+ u32 width, u32 height)
+{
+ struct isp_device *isp = to_isp_device(res);
+ u32 rgval;
+
+ rgval = (width << ISPRSZ_OUT_SIZE_HORZ_SHIFT)
+ & ISPRSZ_OUT_SIZE_HORZ_MASK;
+ rgval |= (height << ISPRSZ_OUT_SIZE_VERT_SHIFT)
+ & ISPRSZ_OUT_SIZE_VERT_MASK;
+ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_OUT_SIZE);
+}
+
+/*
+ * resizer_set_output_offset - Setup memory offset for the output lines.
+ * @res: Device context.
+ * @offset: Memory offset.
+ *
+ * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte
+ * boundary; the 5 LSBs are read-only. For optimal use of SDRAM bandwidth,
+ * the SDRAM line offset must be set on a 256-byte boundary
+ */
+static void resizer_set_output_offset(struct isp_res_device *res, u32 offset)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF);
+}
+
+/*
+ * resizer_set_start - Setup vertical and horizontal start position
+ * @res: Device context.
+ * @left: Horizontal start position.
+ * @top: Vertical start position.
+ *
+ * Vertical start line:
+ * This field makes sense only when the resizer obtains its input
+ * from the preview engine/CCDC
+ *
+ * Horizontal start pixel:
+ * Pixels are coded on 16 bits for YUV and 8 bits for color separate data.
+ * When the resizer gets its input from SDRAM, this field must be set
+ * to <= 15 for YUV 16-bit data and <= 31 for 8-bit color separate data
+ */
+static void resizer_set_start(struct isp_res_device *res, u32 left, u32 top)
+{
+ struct isp_device *isp = to_isp_device(res);
+ u32 rgval;
+
+ rgval = (left << ISPRSZ_IN_START_HORZ_ST_SHIFT)
+ & ISPRSZ_IN_START_HORZ_ST_MASK;
+ rgval |= (top << ISPRSZ_IN_START_VERT_ST_SHIFT)
+ & ISPRSZ_IN_START_VERT_ST_MASK;
+
+ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START);
+}
+
+/*
+ * resizer_set_input_size - Setup the input size
+ * @res: Device context.
+ * @width: The range is 0 to 4095 pixels
+ * @height: The range is 0 to 4095 lines
+ */
+static void resizer_set_input_size(struct isp_res_device *res,
+ u32 width, u32 height)
+{
+ struct isp_device *isp = to_isp_device(res);
+ u32 rgval;
+
+ rgval = (width << ISPRSZ_IN_SIZE_HORZ_SHIFT)
+ & ISPRSZ_IN_SIZE_HORZ_MASK;
+ rgval |= (height << ISPRSZ_IN_SIZE_VERT_SHIFT)
+ & ISPRSZ_IN_SIZE_VERT_MASK;
+
+ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_SIZE);
+}
+
+/*
+ * resizer_set_src_offs - Setup the memory offset for the input lines
+ * @res: Device context.
+ * @offset: Memory offset.
+ *
+ * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte
+ * boundary; the 5 LSBs are read-only. This field must be programmed to be
+ * 0x0 if the resizer input is from preview engine/CCDC.
+ */
+static void resizer_set_input_offset(struct isp_res_device *res, u32 offset)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF);
+}
+
+/*
+ * resizer_set_intype - Input type select
+ * @res: Device context.
+ * @type: Pixel format type.
+ */
+static void resizer_set_intype(struct isp_res_device *res,
+ enum resizer_colors_type type)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ if (type == RSZ_COLOR8)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_INPTYP);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+ ISPRSZ_CNT_INPTYP);
+}
+
+/*
+ * __resizer_set_inaddr - Helper function for set input address
+ * @res : pointer to resizer private data structure
+ * @addr: input address
+ * return none
+ */
+static void __resizer_set_inaddr(struct isp_res_device *res, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD);
+}
+
+/*
+ * The data rate at the horizontal resizer output must not exceed half the
+ * functional clock or 100 MP/s, whichever is lower. According to the TRM
+ * there's no similar requirement for the vertical resizer output. However,
+ * experience has shown that vertical upscaling by 4 leads to SBL overflows (with
+ * data rates at the resizer output exceeding 300 MP/s). Limiting the resizer
+ * output data rate to the functional clock or 200 MP/s, whichever is lower,
+ * seems to get rid of SBL overflows.
+ *
+ * The maximum data rate at the output of the horizontal resizer can thus be
+ * computed with
+ *
+ * max intermediate rate <= L3 clock * input height / output height
+ * max intermediate rate <= L3 clock / 2
+ *
+ * The maximum data rate at the resizer input is then
+ *
+ * max input rate <= max intermediate rate * input width / output width
+ *
+ * where the input width and height are the resizer input crop rectangle size.
+ * The TRM doesn't clearly explain if that's a maximum instant data rate or a
+ * maximum average data rate.
+ */
+void omap3isp_resizer_max_rate(struct isp_res_device *res,
+ unsigned int *max_rate)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
+ const struct v4l2_mbus_framefmt *ofmt = &res->formats[RESZ_PAD_SOURCE];
+ unsigned long limit = min(pipe->l3_ick, 200000000UL);
+ unsigned long clock;
+
+ clock = div_u64((u64)limit * res->crop.active.height, ofmt->height);
+ clock = min(clock, limit / 2);
+ *max_rate = div_u64((u64)clock * res->crop.active.width, ofmt->width);
+}
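+
+/*
+ * Worked example (illustrative figures, not taken from the TRM): with a
+ * 200 MHz L3 clock the limit above is min(200 MHz, 200 MP/s) = 200 MP/s.
+ * Scaling a 1024x768 input crop rectangle down to a 640x480 output gives
+ *
+ * clock = min(200 MP/s * 768 / 480, 200 MP/s / 2) = 100 MP/s
+ * max rate = 100 MP/s * 1024 / 640 = 160 MP/s
+ *
+ * so, for this configuration, the module feeding the resizer must not exceed
+ * 160 MP/s.
+ */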
+
+/*
+ * When the resizer processes images from memory, the driver must slow down read
+ * requests on the input to at least comply with the internal data rate
+ * requirements. If the application real-time requirements can cope with slower
+ * processing, the resizer can be slowed down even more to put less pressure on
+ * the overall system.
+ *
+ * When the resizer processes images on the fly (either from the CCDC or the
+ * preview module), the same data rate requirements apply but they can't be
+ * enforced at the resizer level. The image input module (sensor, CCP2 or
+ * preview module) must not provide image data faster than the resizer can
+ * process.
+ *
+ * For live image pipelines, the data rate is set by the frame format, size and
+ * rate. The sensor output frame rate must not exceed the maximum resizer data
+ * rate.
+ *
+ * The resizer slows down read requests by inserting wait cycles in the SBL
+ * requests. The maximum number of 256-byte requests per second can be computed
+ * as (the data rate is multiplied by 2 to convert from pixels per second to
+ * bytes per second)
+ *
+ * requests per second = data rate * 2 / 256
+ * cycles per request = cycles per second / requests per second
+ *
+ * The number of cycles per second is controlled by the L3 clock, leading to
+ *
+ * cycles per request = L3 frequency / 2 * 256 / data rate
+ */
+static void resizer_adjust_bandwidth(struct isp_res_device *res)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
+ struct isp_device *isp = to_isp_device(res);
+ unsigned long l3_ick = pipe->l3_ick;
+ struct v4l2_fract *timeperframe;
+ unsigned int cycles_per_frame;
+ unsigned int requests_per_frame;
+ unsigned int cycles_per_request;
+ unsigned int granularity;
+ unsigned int minimum;
+ unsigned int maximum;
+ unsigned int value;
+
+ if (res->input != RESIZER_INPUT_MEMORY) {
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+ ISPSBL_SDR_REQ_RSZ_EXP_MASK);
+ return;
+ }
+
+ switch (isp->revision) {
+ case ISP_REVISION_1_0:
+ case ISP_REVISION_2_0:
+ default:
+ granularity = 1024;
+ break;
+
+ case ISP_REVISION_15_0:
+ granularity = 32;
+ break;
+ }
+
+ /* Compute the minimum number of cycles per request, based on the
+ * pipeline maximum data rate. This is an absolute lower bound if we
+ * don't want SBL overflows, so round the value up.
+ */
+ cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1,
+ pipe->max_rate);
+ minimum = DIV_ROUND_UP(cycles_per_request, granularity);
+
+ /* Compute the maximum number of cycles per request, based on the
+ * requested frame rate. This is a soft upper bound to achieve a frame
+ * rate equal or higher than the requested value, so round the value
+ * down.
+ */
+ timeperframe = &pipe->max_timeperframe;
+
+ requests_per_frame = DIV_ROUND_UP(res->crop.active.width * 2, 256)
+ * res->crop.active.height;
+ cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator,
+ timeperframe->denominator);
+ cycles_per_request = cycles_per_frame / requests_per_frame;
+
+ maximum = cycles_per_request / granularity;
+
+ value = max(minimum, maximum);
+
+ dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value);
+ isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+ ISPSBL_SDR_REQ_RSZ_EXP_MASK,
+ value << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT);
+}
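+
+/*
+ * Worked example (assumed figures, for illustration only): with a 200 MHz L3
+ * clock and a 160 MP/s pipeline maximum data rate, the lower bound is
+ *
+ * cycles per request = 200e6 / 2 * 256 / 160e6 = 160
+ *
+ * which, on an ISP revision with a granularity of 32, yields a minimum
+ * register value of DIV_ROUND_UP(160, 32) = 5. The upper bound is derived
+ * the same way from the requested frame period, and the larger of the two
+ * values is programmed into ISPSBL_SDR_REQ_EXP.
+ */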
+
+/*
+ * omap3isp_resizer_busy - Checks if ISP resizer is busy.
+ *
+ * Returns busy field from ISPRSZ_PCR register.
+ */
+int omap3isp_resizer_busy(struct isp_res_device *res)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR) &
+ ISPRSZ_PCR_BUSY;
+}
+
+/*
+ * resizer_set_inaddr - Sets the memory address of the input frame.
+ * @addr: 32bit memory address aligned on 32byte boundary.
+ */
+static void resizer_set_inaddr(struct isp_res_device *res, u32 addr)
+{
+ res->addr_base = addr;
+
+ /* This will handle crop settings in stream off state */
+ if (res->crop_offset)
+ addr += res->crop_offset & ~0x1f;
+
+ __resizer_set_inaddr(res, addr);
+}
+
+/*
+ * Configures the memory address to which the output frame is written.
+ * @addr: 32bit memory address aligned on 32byte boundary.
+ * Note: For SBL efficiency reasons the address should be on a 256-byte
+ * boundary.
+ */
+static void resizer_set_outaddr(struct isp_res_device *res, u32 addr)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ /*
+ * Set output address. This needs to be in its own function
+ * because it changes often.
+ */
+ isp_reg_writel(isp, addr << ISPRSZ_SDR_OUTADD_ADDR_SHIFT,
+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD);
+}
+
+/*
+ * resizer_print_status - Prints the values of the resizer module registers.
+ */
+#define RSZ_PRINT_REGISTER(isp, name)\
+ dev_dbg(isp->dev, "###RSZ " #name "=0x%08x\n", \
+ isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_##name))
+
+static void resizer_print_status(struct isp_res_device *res)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ dev_dbg(isp->dev, "-------------Resizer Register dump----------\n");
+
+ RSZ_PRINT_REGISTER(isp, PCR);
+ RSZ_PRINT_REGISTER(isp, CNT);
+ RSZ_PRINT_REGISTER(isp, OUT_SIZE);
+ RSZ_PRINT_REGISTER(isp, IN_START);
+ RSZ_PRINT_REGISTER(isp, IN_SIZE);
+ RSZ_PRINT_REGISTER(isp, SDR_INADD);
+ RSZ_PRINT_REGISTER(isp, SDR_INOFF);
+ RSZ_PRINT_REGISTER(isp, SDR_OUTADD);
+ RSZ_PRINT_REGISTER(isp, SDR_OUTOFF);
+ RSZ_PRINT_REGISTER(isp, YENH);
+
+ dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * resizer_calc_ratios - Helper function for calculating resizer ratios
+ * @res: pointer to resizer private data structure
+ * @input: input frame size
+ * @output: output frame size
+ * @ratio : return calculated ratios
+ * return none
+ *
+ * The resizer uses a polyphase sample rate converter. The upsampling filter
+ * has a fixed number of phases that depend on the resizing ratio. As the ratio
+ * computation depends on the number of phases, we need to compute a first
+ * approximation and then refine it.
+ *
+ * The input/output/ratio relationship is given by the OMAP34xx TRM:
+ *
+ * - 8-phase, 4-tap mode (RSZ = 64 ~ 512)
+ * iw = (32 * sph + (ow - 1) * hrsz + 16) >> 8 + 7
+ * ih = (32 * spv + (oh - 1) * vrsz + 16) >> 8 + 4
+ * - 4-phase, 7-tap mode (RSZ = 513 ~ 1024)
+ * iw = (64 * sph + (ow - 1) * hrsz + 32) >> 8 + 7
+ * ih = (64 * spv + (oh - 1) * vrsz + 32) >> 8 + 7
+ *
+ * iw and ih are the input width and height after cropping. Those equations need
+ * to be satisfied exactly for the resizer to work correctly.
+ *
+ * The equations can't be easily inverted, as the >> 8 operation is not linear.
+ * In addition, not all input sizes can be achieved for a given output size. To
+ * get the highest input size lower than or equal to the requested input size,
+ * we need to compute the highest resizing ratio that satisfies the following
+ * inequality (taking the 4-tap mode width equation as an example)
+ *
+ * iw >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 - 7
+ *
+ * (where iw is the requested input width) which can be rewritten as
+ *
+ * iw - 7 >= (32 * sph + (ow - 1) * hrsz + 16) >> 8
+ * (iw - 7) << 8 >= 32 * sph + (ow - 1) * hrsz + 16 - b
+ * ((iw - 7) << 8) + b >= 32 * sph + (ow - 1) * hrsz + 16
+ *
+ * where b is the value of the 8 least significant bits of the right hand side
+ * expression of the last inequality. The highest resizing ratio value will be
+ * achieved when b is equal to its maximum value of 255. That resizing ratio
+ * value will still satisfy the original inequality, as b will disappear when
+ * the expression will be shifted right by 8.
+ *
+ * The inverted equations thus become
+ *
+ * - 8-phase, 4-tap mode
+ * hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1)
+ * vrsz = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / (oh - 1)
+ * - 4-phase, 7-tap mode
+ * hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1)
+ * vrsz = ((ih - 7) * 256 + 255 - 32 - 64 * spv) / (oh - 1)
+ *
+ * The ratios are integer values, and are rounded down to ensure that the
+ * cropped input size is not bigger than the uncropped input size.
+ *
+ * As the number of phases/taps, used to select the correct equations to compute
+ * the ratio, depends on the ratio, we start with the 4-tap mode equations to
+ * compute an approximation of the ratio, and switch to the 7-tap mode equations
+ * if the approximation is higher than the ratio threshold.
+ *
+ * As the 7-tap mode equations will return a ratio smaller than or equal to the
+ * 4-tap mode equations, the resulting ratio could become lower than or equal to
+ * the ratio threshold. This 'ratio loop' isn't an issue as long as the
+ * correct equations are used to compute the final input size. Starting with the
+ * 4-tap mode equations ensures that, in case of values resulting in a 'ratio
+ * loop', the smallest of the ratio values will be used, never exceeding the
+ * requested input size.
+ *
+ * We first clamp the output size according to the hardware capability to avoid
+ * auto-cropping the input more than required to satisfy the TRM equations. The
+ * minimum output size is achieved with a scaling factor of 1024. It is thus
+ * computed using the 7-tap equations.
+ *
+ * min ow = ((iw - 7) * 256 - 32 - 64 * sph) / 1024 + 1
+ * min oh = ((ih - 7) * 256 - 32 - 64 * spv) / 1024 + 1
+ *
+ * Similarly, the maximum output size is achieved with a scaling factor of 64
+ * and computed using the 4-tap equations.
+ *
+ * max ow = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / 64 + 1
+ * max oh = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / 64 + 1
+ *
+ * The additional +255 term compensates for the round down operation performed
+ * by the TRM equations when shifting the value right by 8 bits.
+ *
+ * We then compute and clamp the ratios (x1/4 ~ x4). Clamping the output size to
+ * the maximum value guarantees that the ratio value will never be smaller than
+ * the minimum, but it could still slightly exceed the maximum. Clamping the
+ * ratio will thus result in a resizing factor slightly larger than the
+ * requested value.
+ *
+ * To accommodate that, and make sure the TRM equations are satisfied exactly, we
+ * compute the input crop rectangle as the last step.
+ *
+ * As if the situation wasn't complex enough, the maximum output width depends
+ * on the vertical resizing ratio. Fortunately, the output height doesn't
+ * depend on the horizontal resizing ratio. We can then start by computing the
+ * output height and the vertical ratio, and then move to computing the output
+ * width and the horizontal ratio.
+ */
+static void resizer_calc_ratios(struct isp_res_device *res,
+ struct v4l2_rect *input,
+ struct v4l2_mbus_framefmt *output,
+ struct resizer_ratio *ratio)
+{
+ struct isp_device *isp = to_isp_device(res);
+ const unsigned int spv = DEFAULT_PHASE;
+ const unsigned int sph = DEFAULT_PHASE;
+ unsigned int upscaled_width;
+ unsigned int upscaled_height;
+ unsigned int min_width;
+ unsigned int min_height;
+ unsigned int max_width;
+ unsigned int max_height;
+ unsigned int width_alignment;
+ unsigned int width;
+ unsigned int height;
+
+ /*
+ * Clamp the output height based on the hardware capabilities and
+ * compute the vertical resizing ratio.
+ */
+ min_height = ((input->height - 7) * 256 - 32 - 64 * spv) / 1024 + 1;
+ min_height = max_t(unsigned int, min_height, MIN_OUT_HEIGHT);
+ max_height = ((input->height - 4) * 256 + 255 - 16 - 32 * spv) / 64 + 1;
+ max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT);
+ output->height = clamp(output->height, min_height, max_height);
+
+ ratio->vert = ((input->height - 4) * 256 + 255 - 16 - 32 * spv)
+ / (output->height - 1);
+ if (ratio->vert > MID_RESIZE_VALUE)
+ ratio->vert = ((input->height - 7) * 256 + 255 - 32 - 64 * spv)
+ / (output->height - 1);
+ ratio->vert = clamp_t(unsigned int, ratio->vert,
+ MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
+
+ if (ratio->vert <= MID_RESIZE_VALUE) {
+ upscaled_height = (output->height - 1) * ratio->vert
+ + 32 * spv + 16;
+ height = (upscaled_height >> 8) + 4;
+ } else {
+ upscaled_height = (output->height - 1) * ratio->vert
+ + 64 * spv + 32;
+ height = (upscaled_height >> 8) + 7;
+ }
+
+ /*
+ * Compute the minimum and maximum output widths based on the hardware
+ * capabilities. The maximum depends on the vertical resizing ratio.
+ */
+ min_width = ((input->width - 7) * 256 - 32 - 64 * sph) / 1024 + 1;
+ min_width = max_t(unsigned int, min_width, MIN_OUT_WIDTH);
+
+ if (ratio->vert <= MID_RESIZE_VALUE) {
+ switch (isp->revision) {
+ case ISP_REVISION_1_0:
+ max_width = MAX_4TAP_OUT_WIDTH_ES1;
+ break;
+
+ case ISP_REVISION_2_0:
+ default:
+ max_width = MAX_4TAP_OUT_WIDTH_ES2;
+ break;
+
+ case ISP_REVISION_15_0:
+ max_width = MAX_4TAP_OUT_WIDTH_3630;
+ break;
+ }
+ } else {
+ switch (isp->revision) {
+ case ISP_REVISION_1_0:
+ max_width = MAX_7TAP_OUT_WIDTH_ES1;
+ break;
+
+ case ISP_REVISION_2_0:
+ default:
+ max_width = MAX_7TAP_OUT_WIDTH_ES2;
+ break;
+
+ case ISP_REVISION_15_0:
+ max_width = MAX_7TAP_OUT_WIDTH_3630;
+ break;
+ }
+ }
+ max_width = min(((input->width - 7) * 256 + 255 - 16 - 32 * sph) / 64
+ + 1, max_width);
+
+ /*
+ * The output width must be even, and must be a multiple of 8 pixels (16
+ * bytes per line) when upscaling vertically. Clamp the output width to the
+ * valid range.
+ * Take the alignment into account (the maximum width in 7-tap mode on
+ * ES2 isn't a multiple of 8) and align the result up to make sure it
+ * won't be smaller than the minimum.
+ */
+ width_alignment = ratio->vert < 256 ? 8 : 2;
+ output->width = clamp(output->width, min_width,
+ max_width & ~(width_alignment - 1));
+ output->width = ALIGN(output->width, width_alignment);
+
+ ratio->horz = ((input->width - 7) * 256 + 255 - 16 - 32 * sph)
+ / (output->width - 1);
+ if (ratio->horz > MID_RESIZE_VALUE)
+ ratio->horz = ((input->width - 7) * 256 + 255 - 32 - 64 * sph)
+ / (output->width - 1);
+ ratio->horz = clamp_t(unsigned int, ratio->horz,
+ MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
+
+ if (ratio->horz <= MID_RESIZE_VALUE) {
+ upscaled_width = (output->width - 1) * ratio->horz
+ + 32 * sph + 16;
+ width = (upscaled_width >> 8) + 7;
+ } else {
+ upscaled_width = (output->width - 1) * ratio->horz
+ + 64 * sph + 32;
+ width = (upscaled_width >> 8) + 7;
+ }
+
+ /* Center the new crop rectangle. */
+ input->left += (input->width - width) / 2;
+ input->top += (input->height - height) / 2;
+ input->width = width;
+ input->height = height;
+}
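+
+/*
+ * Worked example (hypothetical sizes, with a starting phase of 0 to keep the
+ * arithmetic simple): scaling a 1024 pixel wide input down to 640 pixels uses
+ * the 4-tap equations:
+ *
+ * hrsz = ((1024 - 7) * 256 + 255 - 16 - 0) / (640 - 1) = 407 (<= 512)
+ * iw = ((0 + (640 - 1) * 407 + 16) >> 8) + 7 = 1022
+ *
+ * so the crop rectangle is narrowed to 1022 pixels and re-centered in order
+ * to satisfy the TRM equation exactly.
+ */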
+
+/*
+ * resizer_set_crop_params - Setup hardware with cropping parameters
+ * @res : resizer private structure
+ * @input : format on sink pad
+ * @output : format on source pad
+ * return none
+ */
+static void resizer_set_crop_params(struct isp_res_device *res,
+ const struct v4l2_mbus_framefmt *input,
+ const struct v4l2_mbus_framefmt *output)
+{
+ resizer_set_ratio(res, &res->ratio);
+
+ /* Set chrominance horizontal algorithm */
+ if (res->ratio.horz >= RESIZE_DIVISOR)
+ resizer_set_bilinear(res, RSZ_THE_SAME);
+ else
+ resizer_set_bilinear(res, RSZ_BILINEAR);
+
+ resizer_adjust_bandwidth(res);
+
+ if (res->input == RESIZER_INPUT_MEMORY) {
+ /* Calculate additional offset for crop */
+ res->crop_offset = (res->crop.active.top * input->width +
+ res->crop.active.left) * 2;
+ /*
+ * Write lowest 4 bits of horizontal pixel offset (in pixels),
+ * vertical start must be 0.
+ */
+ resizer_set_start(res, (res->crop_offset / 2) & 0xf, 0);
+
+ /*
+ * Set start (read) address for cropping, in bytes.
+ * Lowest 5 bits must be zero.
+ */
+ __resizer_set_inaddr(res,
+ res->addr_base + (res->crop_offset & ~0x1f));
+ } else {
+ /*
+ * Set vertical start line and horizontal starting pixel.
+ * If the input is from CCDC/PREV, horizontal start field is
+ * in bytes (twice number of pixels).
+ */
+ resizer_set_start(res, res->crop.active.left * 2,
+ res->crop.active.top);
+ /* Input address and offset must be 0 for preview/ccdc input */
+ __resizer_set_inaddr(res, 0);
+ resizer_set_input_offset(res, 0);
+ }
+
+ /* Set the input size */
+ resizer_set_input_size(res, res->crop.active.width,
+ res->crop.active.height);
+}
+
+static void resizer_configure(struct isp_res_device *res)
+{
+ struct v4l2_mbus_framefmt *informat, *outformat;
+ struct resizer_luma_yenh luma = {0, 0, 0, 0};
+
+ resizer_set_source(res, res->input);
+
+ informat = &res->formats[RESZ_PAD_SINK];
+ outformat = &res->formats[RESZ_PAD_SOURCE];
+
+ /* RESZ_PAD_SINK */
+ if (res->input == RESIZER_INPUT_VP)
+ resizer_set_input_offset(res, 0);
+ else
+ resizer_set_input_offset(res, ALIGN(informat->width, 0x10) * 2);
+
+ /* YUV422 interleaved, default phase, no luma enhancement */
+ resizer_set_intype(res, RSZ_YUV422);
+ resizer_set_ycpos(res, informat->code);
+ resizer_set_phase(res, DEFAULT_PHASE, DEFAULT_PHASE);
+ resizer_set_luma(res, &luma);
+
+ /* RESZ_PAD_SOURCE */
+ resizer_set_output_offset(res, ALIGN(outformat->width * 2, 32));
+ resizer_set_output_size(res, outformat->width, outformat->height);
+
+ resizer_set_crop_params(res, informat, outformat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void resizer_enable_oneshot(struct isp_res_device *res)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR,
+ ISPRSZ_PCR_ENABLE | ISPRSZ_PCR_ONESHOT);
+}
+
+void omap3isp_resizer_isr_frame_sync(struct isp_res_device *res)
+{
+ /*
+ * If ISP_VIDEO_DMAQUEUE_QUEUED is set, DMA queue had an underrun
+ * condition, the module was paused and now we have a buffer queued
+ * on the output again. Restart the pipeline if running in continuous
+ * mode.
+ */
+ if (res->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+ res->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
+ resizer_enable_oneshot(res);
+ isp_video_dmaqueue_flags_clr(&res->video_out);
+ }
+}
+
+static void resizer_isr_buffer(struct isp_res_device *res)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
+ struct isp_buffer *buffer;
+ int restart = 0;
+
+ if (res->state == ISP_PIPELINE_STREAM_STOPPED)
+ return;
+
+ /* Complete the output buffer and, if reading from memory, the input
+ * buffer.
+ */
+ buffer = omap3isp_video_buffer_next(&res->video_out);
+ if (buffer != NULL) {
+ resizer_set_outaddr(res, buffer->dma);
+ restart = 1;
+ }
+
+ pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
+
+ if (res->input == RESIZER_INPUT_MEMORY) {
+ buffer = omap3isp_video_buffer_next(&res->video_in);
+ if (buffer != NULL)
+ resizer_set_inaddr(res, buffer->dma);
+ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+ }
+
+ if (res->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
+ if (isp_pipeline_ready(pipe))
+ omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_SINGLESHOT);
+ } else {
+ /* If an underrun occurs, the video queue operation handler will
+ * restart the resizer. Otherwise restart it immediately.
+ */
+ if (restart)
+ resizer_enable_oneshot(res);
+ }
+}
+
+/*
+ * omap3isp_resizer_isr - ISP resizer interrupt handler
+ *
+ * Manage the resizer video buffers and configure shadowed and busy-locked
+ * registers.
+ */
+void omap3isp_resizer_isr(struct isp_res_device *res)
+{
+ struct v4l2_mbus_framefmt *informat, *outformat;
+ unsigned long flags;
+
+ if (omap3isp_module_sync_is_stopping(&res->wait, &res->stopping))
+ return;
+
+ spin_lock_irqsave(&res->lock, flags);
+
+ if (res->applycrop) {
+ outformat = __resizer_get_format(res, NULL, RESZ_PAD_SOURCE,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ informat = __resizer_get_format(res, NULL, RESZ_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ resizer_set_crop_params(res, informat, outformat);
+ res->applycrop = 0;
+ }
+
+ spin_unlock_irqrestore(&res->lock, flags);
+
+ resizer_isr_buffer(res);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int resizer_video_queue(struct isp_video *video,
+ struct isp_buffer *buffer)
+{
+ struct isp_res_device *res = &video->isp->isp_res;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ resizer_set_inaddr(res, buffer->dma);
+
+ /*
+ * We now have a buffer queued on the output. Despite what the
+ * TRM says, the resizer can't be restarted immediately.
+ * Enabling it in one shot mode in the middle of a frame (or at
+ * least asynchronously to the frame) results in the output
+ * being shifted randomly left/right and up/down, as if the
+ * hardware didn't synchronize itself to the beginning of the
+ * frame correctly.
+ *
+ * Restart the resizer on the next sync interrupt if running in
+ * continuous mode or when starting the stream.
+ */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ resizer_set_outaddr(res, buffer->dma);
+
+ return 0;
+}
+
+static const struct isp_video_operations resizer_video_ops = {
+ .queue = resizer_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * resizer_set_stream - Enable/Disable streaming on resizer subdev
+ * @sd: ISP resizer V4L2 subdev
+ * @enable: 1 == Enable, 0 == Disable
+ *
+ * The resizer hardware can't be enabled without a memory buffer to write to.
+ * As the s_stream operation is called in response to a STREAMON call without
+ * any buffer queued yet, just update the state field and return immediately.
+ * The resizer will be enabled in resizer_video_queue().
+ */
+static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct isp_video *video_out = &res->video_out;
+ struct isp_device *isp = to_isp_device(res);
+ struct device *dev = to_device(res);
+
+ if (res->state == ISP_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISP_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_RESIZER);
+ resizer_configure(res);
+ resizer_print_status(res);
+ }
+
+ switch (enable) {
+ case ISP_PIPELINE_STREAM_CONTINUOUS:
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
+ if (video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
+ resizer_enable_oneshot(res);
+ isp_video_dmaqueue_flags_clr(video_out);
+ }
+ break;
+
+ case ISP_PIPELINE_STREAM_SINGLESHOT:
+ if (res->input == RESIZER_INPUT_MEMORY)
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_READ);
+ omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
+
+ resizer_enable_oneshot(res);
+ break;
+
+ case ISP_PIPELINE_STREAM_STOPPED:
+ if (omap3isp_module_sync_idle(&sd->entity, &res->wait,
+ &res->stopping))
+ dev_dbg(dev, "%s: module stop timeout.\n", sd->name);
+ omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_RESIZER_READ |
+ OMAP3_ISP_SBL_RESIZER_WRITE);
+ omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_RESIZER);
+ isp_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ res->state = enable;
+ return 0;
+}
+
+/*
+ * resizer_try_crop - mangles crop parameters.
+ */
+static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
+ const struct v4l2_mbus_framefmt *source,
+ struct v4l2_rect *crop)
+{
+ const unsigned int spv = DEFAULT_PHASE;
+ const unsigned int sph = DEFAULT_PHASE;
+
+ /* Crop rectangle is constrained by the output size so that zoom ratio
+ * cannot exceed +/-4.0.
+ */
+ unsigned int min_width =
+ ((32 * sph + (source->width - 1) * 64 + 16) >> 8) + 7;
+ unsigned int min_height =
+ ((32 * spv + (source->height - 1) * 64 + 16) >> 8) + 4;
+ unsigned int max_width =
+ ((64 * sph + (source->width - 1) * 1024 + 32) >> 8) + 7;
+ unsigned int max_height =
+ ((64 * spv + (source->height - 1) * 1024 + 32) >> 8) + 7;
+
+ crop->width = clamp_t(u32, crop->width, min_width, max_width);
+ crop->height = clamp_t(u32, crop->height, min_height, max_height);
+
+ /* The crop rectangle cannot go beyond the input rectangle */
+ crop->left = clamp_t(u32, crop->left, 0, sink->width - MIN_IN_WIDTH);
+ crop->width = clamp_t(u32, crop->width, MIN_IN_WIDTH,
+ sink->width - crop->left);
+ crop->top = clamp_t(u32, crop->top, 0, sink->height - MIN_IN_HEIGHT);
+ crop->height = clamp_t(u32, crop->height, MIN_IN_HEIGHT,
+ sink->height - crop->top);
+}
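+
+/*
+ * Worked example (illustrative numbers, starting phase of 0): for a 640
+ * pixel wide source format,
+ *
+ * min width = ((0 + 639 * 64 + 16) >> 8) + 7 = 166
+ * max width = ((0 + 639 * 1024 + 32) >> 8) + 7 = 2563
+ *
+ * i.e. the sink crop width is kept roughly between a quarter of and four
+ * times the source width before being clamped to the sink format.
+ */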
+
+/*
+ * resizer_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int resizer_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format_source;
+ struct v4l2_mbus_framefmt *format_sink;
+ struct resizer_ratio ratio;
+
+ if (sel->pad != RESZ_PAD_SINK)
+ return -EINVAL;
+
+ format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ sel->which);
+ format_source = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ sel->which);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ resizer_try_crop(format_sink, format_source, &sel->r);
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__resizer_get_crop(res, cfg, sel->which);
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * resizer_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
+ *
+ * FIXME: This function currently behaves as if the KEEP_CONFIG selection flag
+ * was always set.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int resizer_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct isp_device *isp = to_isp_device(res);
+ const struct v4l2_mbus_framefmt *format_sink;
+ struct v4l2_mbus_framefmt format_source;
+ struct resizer_ratio ratio;
+ unsigned long flags;
+
+ if (sel->target != V4L2_SEL_TGT_CROP ||
+ sel->pad != RESZ_PAD_SINK)
+ return -EINVAL;
+
+ format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ sel->which);
+ format_source = *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ sel->which);
+
+ dev_dbg(isp->dev, "%s(%s): req %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
+ __func__, sel->which == V4L2_SUBDEV_FORMAT_TRY ? "try" : "act",
+ format_sink->width, format_sink->height,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ format_source.width, format_source.height);
+
+ /* Clamp the crop rectangle to the bounds, and then mangle it further to
+ * fulfill the TRM equations. Store the clamped but otherwise unmangled
+ * rectangle to avoid cropping the input multiple times: when an
+ * application sets the output format, the current crop rectangle is
+ * mangled during crop rectangle computation, which would lead to a new,
+ * smaller input crop rectangle every time the output size is set if we
+ * stored the mangled rectangle.
+ */
+ resizer_try_crop(format_sink, &format_source, &sel->r);
+ *__resizer_get_crop(res, cfg, sel->which) = sel->r;
+ resizer_calc_ratios(res, &sel->r, &format_source, &ratio);
+
+ dev_dbg(isp->dev, "%s(%s): got %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
+ __func__, sel->which == V4L2_SUBDEV_FORMAT_TRY ? "try" : "act",
+ format_sink->width, format_sink->height,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ format_source.width, format_source.height);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
+ format_source;
+ return 0;
+ }
+
+ /* Update the source format, resizing ratios and crop rectangle. If
+ * streaming is on, the IRQ handler will reprogram the resizer after the
+ * current frame. We thus need to protect against race conditions.
+ */
+ spin_lock_irqsave(&res->lock, flags);
+
+ *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
+ format_source;
+
+ res->ratio = ratio;
+ res->crop.active = sel->r;
+
+ if (res->state != ISP_PIPELINE_STREAM_STOPPED)
+ res->applycrop = 1;
+
+ spin_unlock_irqrestore(&res->lock, flags);
+
+ return 0;
+}
+
+/* resizer pixel formats */
+static const unsigned int resizer_formats[] = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+};
+
+static unsigned int resizer_max_in_width(struct isp_res_device *res)
+{
+ struct isp_device *isp = to_isp_device(res);
+
+ if (res->input == RESIZER_INPUT_MEMORY) {
+ return MAX_IN_WIDTH_MEMORY_MODE;
+ } else {
+ if (isp->revision == ISP_REVISION_1_0)
+ return MAX_IN_WIDTH_ONTHEFLY_MODE_ES1;
+ else
+ return MAX_IN_WIDTH_ONTHEFLY_MODE_ES2;
+ }
+}
+
+/*
+ * resizer_try_format - Handle try format by pad subdev method
+ * @res : ISP resizer device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad : pad num
+ * @fmt : pointer to v4l2 format structure
+ * @which : wanted subdev format
+ */
+static void resizer_try_format(struct isp_res_device *res,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct resizer_ratio ratio;
+ struct v4l2_rect crop;
+
+ switch (pad) {
+ case RESZ_PAD_SINK:
+ if (fmt->code != MEDIA_BUS_FMT_YUYV8_1X16 &&
+ fmt->code != MEDIA_BUS_FMT_UYVY8_1X16)
+ fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
+ resizer_max_in_width(res));
+ fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT,
+ MAX_IN_HEIGHT);
+ break;
+
+ case RESZ_PAD_SOURCE:
+ format = __resizer_get_format(res, cfg, RESZ_PAD_SINK, which);
+ fmt->code = format->code;
+
+ crop = *__resizer_get_crop(res, cfg, which);
+ resizer_calc_ratios(res, &crop, fmt, &ratio);
+ break;
+ }
+
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * resizer_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == RESZ_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(resizer_formats))
+ return -EINVAL;
+
+ code->code = resizer_formats[code->index];
+ } else {
+ if (code->index != 0)
+ return -EINVAL;
+
+ format = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ code->which);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int resizer_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ resizer_try_format(res, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ resizer_try_format(res, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * resizer_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * resizer_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ resizer_try_format(res, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->pad == RESZ_PAD_SINK) {
+ /* reset crop rectangle */
+ crop = __resizer_get_crop(res, cfg, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
+ /* Propagate the format from sink to source */
+ format = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ resizer_try_format(res, cfg, RESZ_PAD_SOURCE, format,
+ fmt->which);
+ }
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Compute and store the active crop rectangle and resizer
+ * ratios. format already points to the source pad active
+ * format.
+ */
+ res->crop.active = res->crop.request;
+ resizer_calc_ratios(res, &res->crop.active, format,
+ &res->ratio);
+ }
+
+ return 0;
+}
+
+static int resizer_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct isp_pipeline *pipe = to_isp_pipeline(&sd->entity);
+
+ omap3isp_resizer_max_rate(res, &pipe->max_rate);
+
+ return v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+}
+
+/*
+ * resizer_init_formats - Initialize formats on all pads
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int resizer_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESZ_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = MEDIA_BUS_FMT_YUYV8_1X16;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ resizer_set_format(sd, fh ? fh->pad : NULL, &format);
+
+ return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
+ .s_stream = resizer_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
+ .enum_mbus_code = resizer_enum_mbus_code,
+ .enum_frame_size = resizer_enum_frame_size,
+ .get_fmt = resizer_get_format,
+ .set_fmt = resizer_set_format,
+ .get_selection = resizer_get_selection,
+ .set_selection = resizer_set_selection,
+ .link_validate = resizer_link_validate,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops resizer_v4l2_ops = {
+ .video = &resizer_v4l2_video_ops,
+ .pad = &resizer_v4l2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
+ .open = resizer_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * resizer_link_setup - Setup resizer connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags : Link flags
+ * return -EINVAL or zero on success
+ */
+static int resizer_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ unsigned int index = local->index;
+
+ /* FIXME: this is actually a hack! */
+ if (is_media_entity_v4l2_subdev(remote->entity))
+ index |= 2 << 16;
+
+ switch (index) {
+ case RESZ_PAD_SINK:
+ /* read from memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (res->input == RESIZER_INPUT_VP)
+ return -EBUSY;
+ res->input = RESIZER_INPUT_MEMORY;
+ } else {
+ if (res->input == RESIZER_INPUT_MEMORY)
+ res->input = RESIZER_INPUT_NONE;
+ }
+ break;
+
+ case RESZ_PAD_SINK | 2 << 16:
+ /* read from ccdc or previewer */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (res->input == RESIZER_INPUT_MEMORY)
+ return -EBUSY;
+ res->input = RESIZER_INPUT_VP;
+ } else {
+ if (res->input == RESIZER_INPUT_VP)
+ res->input = RESIZER_INPUT_NONE;
+ }
+ break;
+
+ case RESZ_PAD_SOURCE:
+ /* the resizer always writes to memory */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations resizer_media_ops = {
+ .link_setup = resizer_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_resizer_unregister_entities(struct isp_res_device *res)
+{
+ v4l2_device_unregister_subdev(&res->subdev);
+ omap3isp_video_unregister(&res->video_in);
+ omap3isp_video_unregister(&res->video_out);
+}
+
+int omap3isp_resizer_register_entities(struct isp_res_device *res,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ res->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &res->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&res->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3isp_video_register(&res->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap3isp_resizer_unregister_entities(res);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP resizer initialization and cleanup
+ */
+
+/*
+ * resizer_init_entities - Initialize resizer subdev and media entity.
+ * @res : Pointer to resizer device structure
+ * return -ENOMEM or zero on success
+ */
+static int resizer_init_entities(struct isp_res_device *res)
+{
+ struct v4l2_subdev *sd = &res->subdev;
+ struct media_pad *pads = res->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ res->input = RESIZER_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP3 ISP resizer", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
+ v4l2_set_subdevdata(sd, res);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ pads[RESZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &resizer_media_ops;
+ ret = media_entity_pads_init(me, RESZ_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ resizer_init_formats(sd, NULL);
+
+ res->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ res->video_in.ops = &resizer_video_ops;
+ res->video_in.isp = to_isp_device(res);
+ res->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+ res->video_in.bpl_alignment = 32;
+ res->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ res->video_out.ops = &resizer_video_ops;
+ res->video_out.isp = to_isp_device(res);
+ res->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+ res->video_out.bpl_alignment = 32;
+
+ ret = omap3isp_video_init(&res->video_in, "resizer");
+ if (ret < 0)
+ goto error_video_in;
+
+ ret = omap3isp_video_init(&res->video_out, "resizer");
+ if (ret < 0)
+ goto error_video_out;
+
+ res->video_out.video.entity.flags |= MEDIA_ENT_FL_DEFAULT;
+
+ return 0;
+
+error_video_out:
+ omap3isp_video_cleanup(&res->video_in);
+error_video_in:
+ media_entity_cleanup(&res->subdev.entity);
+ return ret;
+}
+
+/*
+ * isp_resizer_init - Resizer initialization.
+ * @isp : Pointer to ISP device
+ * return -ENOMEM or zero on success
+ */
+int omap3isp_resizer_init(struct isp_device *isp)
+{
+ struct isp_res_device *res = &isp->isp_res;
+
+ init_waitqueue_head(&res->wait);
+ atomic_set(&res->stopping, 0);
+ spin_lock_init(&res->lock);
+
+ return resizer_init_entities(res);
+}
+
+void omap3isp_resizer_cleanup(struct isp_device *isp)
+{
+ struct isp_res_device *res = &isp->isp_res;
+
+ omap3isp_video_cleanup(&res->video_in);
+ omap3isp_video_cleanup(&res->video_out);
+ media_entity_cleanup(&res->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/ispresizer.h b/drivers/media/platform/omap3isp/ispresizer.h
new file mode 100644
index 000000000..541454291
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispresizer.h
@@ -0,0 +1,139 @@
+/*
+ * ispresizer.h
+ *
+ * TI OMAP3 ISP - Resizer module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_RESIZER_H
+#define OMAP3_ISP_RESIZER_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/*
+ * Constants for filter coefficients count
+ */
+#define COEFF_CNT 32
+
+/*
+ * struct isprsz_coef - Structure for resizer filter coefficients.
+ * @h_filter_coef_4tap: Horizontal filter coefficients for 8-phase/4-tap
+ * mode (.5x-4x)
+ * @v_filter_coef_4tap: Vertical filter coefficients for 8-phase/4-tap
+ * mode (.5x-4x)
+ * @h_filter_coef_7tap: Horizontal filter coefficients for 4-phase/7-tap
+ * mode (.25x-.5x)
+ * @v_filter_coef_7tap: Vertical filter coefficients for 4-phase/7-tap
+ * mode (.25x-.5x)
+ */
+struct isprsz_coef {
+ u16 h_filter_coef_4tap[32];
+ u16 v_filter_coef_4tap[32];
+ /* Every 8th value is a dummy value in the following arrays: */
+ u16 h_filter_coef_7tap[32];
+ u16 v_filter_coef_7tap[32];
+};
+
+/* Chrominance horizontal algorithm */
+enum resizer_chroma_algo {
+ RSZ_THE_SAME = 0, /* Chrominance the same as Luminance */
+ RSZ_BILINEAR = 1, /* Chrominance uses bilinear interpolation */
+};
+
+/* Resizer input type select */
+enum resizer_colors_type {
+ RSZ_YUV422 = 0, /* YUV422 color is interleaved */
+ RSZ_COLOR8 = 1, /* Color separate data on 8 bits */
+};
+
+/*
+ * Structure for horizontal and vertical resizing value
+ */
+struct resizer_ratio {
+ u32 horz;
+ u32 vert;
+};
+
+/*
+ * Structure for luminance enhancer parameters.
+ */
+struct resizer_luma_yenh {
+ u8 algo; /* algorithm select. */
+ u8 gain; /* maximum gain. */
+ u8 slope; /* slope. */
+ u8 core; /* core offset. */
+};
+
+enum resizer_input_entity {
+ RESIZER_INPUT_NONE,
+ RESIZER_INPUT_VP, /* input video port - prev or ccdc */
+ RESIZER_INPUT_MEMORY,
+};
+
+/* Sink and source resizer pads */
+#define RESZ_PAD_SINK 0
+#define RESZ_PAD_SOURCE 1
+#define RESZ_PADS_NUM 2
+
+/*
+ * struct isp_res_device - OMAP3 ISP resizer module
+ * @lock: Protects formats and crop rectangles between set_selection and IRQ
+ * @crop.request: Crop rectangle requested by the user
+ * @crop.active: Active crop rectangle (based on hardware requirements)
+ */
+struct isp_res_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RESZ_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[RESZ_PADS_NUM];
+
+ enum resizer_input_entity input;
+ struct isp_video video_in;
+ struct isp_video video_out;
+
+ u32 addr_base; /* stored source buffer address in memory mode */
+ u32 crop_offset; /* additional offset for crop in memory mode */
+ struct resizer_ratio ratio;
+ int pm_state;
+ unsigned int applycrop:1;
+ enum isp_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+ spinlock_t lock;
+
+ struct {
+ struct v4l2_rect request;
+ struct v4l2_rect active;
+ } crop;
+};
+
+struct isp_device;
+
+int omap3isp_resizer_init(struct isp_device *isp);
+void omap3isp_resizer_cleanup(struct isp_device *isp);
+
+int omap3isp_resizer_register_entities(struct isp_res_device *res,
+ struct v4l2_device *vdev);
+void omap3isp_resizer_unregister_entities(struct isp_res_device *res);
+void omap3isp_resizer_isr_frame_sync(struct isp_res_device *res);
+void omap3isp_resizer_isr(struct isp_res_device *isp_res);
+
+void omap3isp_resizer_max_rate(struct isp_res_device *res,
+ unsigned int *max_rate);
+
+void omap3isp_resizer_suspend(struct isp_res_device *isp_res);
+
+void omap3isp_resizer_resume(struct isp_res_device *isp_res);
+
+int omap3isp_resizer_busy(struct isp_res_device *isp_res);
+
+#endif /* OMAP3_ISP_RESIZER_H */
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
new file mode 100644
index 000000000..bfa2d0504
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -0,0 +1,1083 @@
+/*
+ * ispstat.c
+ *
+ * TI OMAP3 ISP - Statistics core
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+
+#define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch != NULL)
+
+/*
+ * MAGIC_SIZE must always be the greatest common divisor of
+ * AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
+ */
+#define MAGIC_SIZE 16
+#define MAGIC_NUM 0x55
+
+/* HACK: The AF module seems to write one paxel of data more than it should. */
+#define AF_EXTRA_DATA OMAP3ISP_AF_PAXEL_SIZE
+
+/*
+ * HACK: The H3A modules go into an invalid state after an SBL overflow. The
+ * next buffer then starts to be written at the point where the overflow
+ * occurred instead of at the configured address. The only known way to bring
+ * the module back to a valid state is to let it process a valid buffer. That
+ * requires at least a doubled buffer size to avoid accesses to an invalid
+ * memory region, but it still does not fix everything: more than one
+ * consecutive SBL overflow may happen, in which case it is unpredictable how
+ * many buffers the allocated memory needs to fit. For that case a recovery
+ * configuration was created. It produces the minimum buffer size for each H3A
+ * module and decreases the chance of further SBL overflows. This recovery
+ * state is enabled every time an SBL overflow occurs. As the output buffer
+ * size isn't big, it's possible to reserve enough extra space for many
+ * recovery buffers, making an access to an invalid memory region extremely
+ * unlikely.
+ */
+#define NUM_H3A_RECOVER_BUFS 10
+
+/*
+ * HACK: Because of HW issues the generic layer sometimes needs to behave
+ * differently for different statistics modules.
+ */
+#define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af)
+#define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb)
+#define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
+
+static void __isp_stat_buf_sync_magic(struct ispstat *stat,
+ struct ispstat_buffer *buf,
+ u32 buf_size, enum dma_data_direction dir,
+ void (*dma_sync)(struct device *,
+ dma_addr_t, unsigned long, size_t,
+ enum dma_data_direction))
+{
+ /* Sync the initial and final magic words. */
+ dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
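+ /*
+ * The final magic word starts at offset buf_size. Split that offset into a
+ * page-aligned base added to the DMA address and an in-page offset, as
+ * expected by the ranged sync helpers below.
+ */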
+ dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
+ buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
+}
+
+static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
+ struct ispstat_buffer *buf,
+ u32 buf_size,
+ enum dma_data_direction dir)
+{
+ if (ISP_STAT_USES_DMAENGINE(stat))
+ return;
+
+ __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
+ dma_sync_single_range_for_device);
+}
+
+static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
+ struct ispstat_buffer *buf,
+ u32 buf_size,
+ enum dma_data_direction dir)
+{
+ if (ISP_STAT_USES_DMAENGINE(stat))
+ return;
+
+ __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
+ dma_sync_single_range_for_cpu);
+}
+
+static int isp_stat_buf_check_magic(struct ispstat *stat,
+ struct ispstat_buffer *buf)
+{
+ const u32 buf_size = IS_H3A_AF(stat) ?
+ buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
+ u8 *w;
+ u8 *end;
+ int ret = -EINVAL;
+
+ isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
+
+ /*
+ * Check the initial magic words. If the engine has written statistics
+ * data they should no longer be present, so any byte that differs from
+ * MAGIC_NUM clears the error.
+ */
+ for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
+ if (likely(*w != MAGIC_NUM))
+ ret = 0;
+
+ if (ret) {
+ dev_dbg(stat->isp->dev,
+ "%s: beginning magic check does not match.\n",
+ stat->subdev.name);
+ return ret;
+ }
+
+ /* Check the magic words at the end. They must still be intact. */
+ for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
+ w < end; w++) {
+ if (unlikely(*w != MAGIC_NUM)) {
+ dev_dbg(stat->isp->dev,
+ "%s: ending magic check does not match.\n",
+ stat->subdev.name);
+ return -EINVAL;
+ }
+ }
+
+ isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
+ DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static void isp_stat_buf_insert_magic(struct ispstat *stat,
+ struct ispstat_buffer *buf)
+{
+ const u32 buf_size = IS_H3A_AF(stat) ?
+ stat->buf_size + AF_EXTRA_DATA : stat->buf_size;
+
+ isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
+
+ /*
+ * Insert MAGIC_NUM at the beginning and at the end of the buffer.
+ * buf->buf_size is set only after the buffer is queued; until then the
+ * correct buffer size for the current configuration is given by
+ * stat->buf_size.
+ */
+ memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
+ memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
+
+ isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
+ DMA_BIDIRECTIONAL);
+}
+
+static void isp_stat_buf_sync_for_device(struct ispstat *stat,
+ struct ispstat_buffer *buf)
+{
+ if (ISP_STAT_USES_DMAENGINE(stat))
+ return;
+
+ dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
+ buf->sgt.nents, DMA_FROM_DEVICE);
+}
+
+static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
+ struct ispstat_buffer *buf)
+{
+ if (ISP_STAT_USES_DMAENGINE(stat))
+ return;
+
+ dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
+ buf->sgt.nents, DMA_FROM_DEVICE);
+}
+
+static void isp_stat_buf_clear(struct ispstat *stat)
+{
+ int i;
+
+ for (i = 0; i < STAT_MAX_BUFS; i++)
+ stat->buf[i].empty = 1;
+}
+
+static struct ispstat_buffer *
+__isp_stat_buf_find(struct ispstat *stat, int look_empty)
+{
+ struct ispstat_buffer *found = NULL;
+ int i;
+
+ for (i = 0; i < STAT_MAX_BUFS; i++) {
+ struct ispstat_buffer *curr = &stat->buf[i];
+
+ /*
+ * Don't select the buffer which is being copied to
+ * userspace or used by the module.
+ */
+ if (curr == stat->locked_buf || curr == stat->active_buf)
+ continue;
+
+ /* Don't select uninitialised buffers unless explicitly requested */
+ if (!look_empty && curr->empty)
+ continue;
+
+ /* Pick uninitialised buffer over anything else if look_empty */
+ if (curr->empty) {
+ found = curr;
+ break;
+ }
+
+ /* Choose the oldest buffer. The signed difference keeps the
+ * comparison valid across frame number wrap-around.
+ */
+ if (!found ||
+ (s32)curr->frame_number - (s32)found->frame_number < 0)
+ found = curr;
+ }
+
+ return found;
+}
+
+static inline struct ispstat_buffer *
+isp_stat_buf_find_oldest(struct ispstat *stat)
+{
+ return __isp_stat_buf_find(stat, 0);
+}
+
+static inline struct ispstat_buffer *
+isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
+{
+ return __isp_stat_buf_find(stat, 1);
+}
+
+static int isp_stat_buf_queue(struct ispstat *stat)
+{
+ if (!stat->active_buf)
+ return STAT_NO_BUF;
+
+ ktime_get_ts64(&stat->active_buf->ts);
+
+ stat->active_buf->buf_size = stat->buf_size;
+ if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
+ dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
+ stat->subdev.name);
+ return STAT_NO_BUF;
+ }
+ stat->active_buf->config_counter = stat->config_counter;
+ stat->active_buf->frame_number = stat->frame_number;
+ stat->active_buf->empty = 0;
+ stat->active_buf = NULL;
+
+ return STAT_BUF_DONE;
+}
+
+/* Get next free buffer to write the statistics to and mark it active. */
+static void isp_stat_buf_next(struct ispstat *stat)
+{
+ if (unlikely(stat->active_buf))
+ /* Overwriting unused active buffer */
+ dev_dbg(stat->isp->dev,
+ "%s: new buffer requested without queuing active one.\n",
+ stat->subdev.name);
+ else
+ stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
+}
+
+static void isp_stat_buf_release(struct ispstat *stat)
+{
+ unsigned long flags;
+
+ isp_stat_buf_sync_for_device(stat, stat->locked_buf);
+ spin_lock_irqsave(&stat->isp->stat_lock, flags);
+ stat->locked_buf = NULL;
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+}
+
+/* Get a buffer and copy its contents to userspace. */
+static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
+ struct omap3isp_stat_data *data)
+{
+ int rval = 0;
+ unsigned long flags;
+ struct ispstat_buffer *buf;
+
+ spin_lock_irqsave(&stat->isp->stat_lock, flags);
+
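+ /*
+ * Look for the oldest buffer with valid data. Buffers whose magic word
+ * check fails are marked empty and skipped, and the search is retried.
+ */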
+ while (1) {
+ buf = isp_stat_buf_find_oldest(stat);
+ if (!buf) {
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+ dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
+ stat->subdev.name);
+ return ERR_PTR(-EBUSY);
+ }
+ if (isp_stat_buf_check_magic(stat, buf)) {
+ dev_dbg(stat->isp->dev,
+ "%s: current buffer has corrupted data\n.",
+ stat->subdev.name);
+ /* Mark empty because it doesn't have valid data. */
+ buf->empty = 1;
+ } else {
+ /* Buffer isn't corrupted. */
+ break;
+ }
+ }
+
+ stat->locked_buf = buf;
+
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+
+ if (buf->buf_size > data->buf_size) {
+ dev_warn(stat->isp->dev,
+ "%s: userspace's buffer size is not enough.\n",
+ stat->subdev.name);
+ isp_stat_buf_release(stat);
+ return ERR_PTR(-EINVAL);
+ }
+
+ isp_stat_buf_sync_for_cpu(stat, buf);
+
+ rval = copy_to_user(data->buf,
+ buf->virt_addr,
+ buf->buf_size);
+
+ if (rval) {
+ dev_info(stat->isp->dev,
+ "%s: failed copying %d bytes of stat data\n",
+ stat->subdev.name, rval);
+ buf = ERR_PTR(-EFAULT);
+ isp_stat_buf_release(stat);
+ }
+
+ return buf;
+}
+
+static void isp_stat_bufs_free(struct ispstat *stat)
+{
+ struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
+ ? NULL : stat->isp->dev;
+ unsigned int i;
+
+ for (i = 0; i < STAT_MAX_BUFS; i++) {
+ struct ispstat_buffer *buf = &stat->buf[i];
+
+ if (!buf->virt_addr)
+ continue;
+
+ sg_free_table(&buf->sgt);
+
+ dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
+ buf->dma_addr);
+
+ buf->dma_addr = 0;
+ buf->virt_addr = NULL;
+ buf->empty = 1;
+ }
+
+ dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
+ stat->subdev.name);
+
+ stat->buf_alloc_size = 0;
+ stat->active_buf = NULL;
+}
+
+static int isp_stat_bufs_alloc_one(struct device *dev,
+ struct ispstat_buffer *buf,
+ unsigned int size)
+{
+ int ret;
+
+ buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
+ GFP_KERNEL);
+ if (!buf->virt_addr)
+ return -ENOMEM;
+
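+ /*
+ * Build a scatterlist view of the buffer so it can later be synced with
+ * dma_sync_sg_for_cpu() and dma_sync_sg_for_device().
+ */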
+ ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
+ size);
+ if (ret < 0) {
+ dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
+ buf->virt_addr = NULL;
+ buf->dma_addr = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The device passed to the DMA API depends on whether the statistics block uses
+ * ISP DMA, external DMA or PIO to transfer data.
+ *
+ * The first case (for the AEWB and AF engines) passes the ISP device, resulting
+ * in the DMA buffers being mapped through the ISP IOMMU.
+ *
+ * The second case (for the histogram engine) should pass the DMA engine device.
+ * As that device isn't accessible through the OMAP DMA engine API the driver
+ * passes NULL instead, resulting in the buffers being mapped directly as
+ * physical pages.
+ *
+ * The third case (for the histogram engine) doesn't require any mapping. The
+ * buffers could be allocated with kmalloc/vmalloc, but we still use
+ * dma_alloc_coherent() for consistency purposes.
+ */
+static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
+{
+ struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
+ ? NULL : stat->isp->dev;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&stat->isp->stat_lock, flags);
+
+ BUG_ON(stat->locked_buf != NULL);
+
+ /* Are the old buffers big enough? */
+ if (stat->buf_alloc_size >= size) {
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+ return 0;
+ }
+
+ if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
+ dev_info(stat->isp->dev,
+ "%s: trying to allocate memory when busy\n",
+ stat->subdev.name);
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+ return -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+
+ isp_stat_bufs_free(stat);
+
+ stat->buf_alloc_size = size;
+
+ for (i = 0; i < STAT_MAX_BUFS; i++) {
+ struct ispstat_buffer *buf = &stat->buf[i];
+ int ret;
+
+ ret = isp_stat_bufs_alloc_one(dev, buf, size);
+ if (ret < 0) {
+ dev_err(stat->isp->dev,
+ "%s: Failed to allocate DMA buffer %u\n",
+ stat->subdev.name, i);
+ isp_stat_bufs_free(stat);
+ return ret;
+ }
+
+ buf->empty = 1;
+
+ dev_dbg(stat->isp->dev,
+ "%s: buffer[%u] allocated. dma=%pad virt=%p",
+ stat->subdev.name, i, &buf->dma_addr, buf->virt_addr);
+ }
+
+ return 0;
+}
+
+static void isp_stat_queue_event(struct ispstat *stat, int err)
+{
+ struct video_device *vdev = stat->subdev.devnode;
+ struct v4l2_event event;
+ struct omap3isp_stat_event_status *status = (void *)event.u.data;
+
+ memset(&event, 0, sizeof(event));
+ if (!err) {
+ status->frame_number = stat->frame_number;
+ status->config_counter = stat->config_counter;
+ } else {
+ status->buf_err = 1;
+ }
+ event.type = stat->event_type;
+ v4l2_event_queue(vdev, &event);
+}
+
+
+/*
+ * omap3isp_stat_request_statistics - Request statistics.
+ * @data: Pointer to return statistics data.
+ *
+ * Returns 0 if successful.
+ */
+int omap3isp_stat_request_statistics(struct ispstat *stat,
+ struct omap3isp_stat_data *data)
+{
+ struct ispstat_buffer *buf;
+
+ if (stat->state != ISPSTAT_ENABLED) {
+ dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
+ stat->subdev.name);
+ return -EINVAL;
+ }
+
+ mutex_lock(&stat->ioctl_lock);
+ buf = isp_stat_buf_get(stat, data);
+ if (IS_ERR(buf)) {
+ mutex_unlock(&stat->ioctl_lock);
+ return PTR_ERR(buf);
+ }
+
+ data->ts.tv_sec = buf->ts.tv_sec;
+ data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
+ data->config_counter = buf->config_counter;
+ data->frame_number = buf->frame_number;
+ data->buf_size = buf->buf_size;
+
+ buf->empty = 1;
+ isp_stat_buf_release(stat);
+ mutex_unlock(&stat->ioctl_lock);
+
+ return 0;
+}
+
+int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
+ struct omap3isp_stat_data_time32 *data)
+{
+ struct omap3isp_stat_data data64;
+ int ret;
+
+ ret = omap3isp_stat_request_statistics(stat, &data64);
+ if (ret)
+ return ret;
+
+ data->ts.tv_sec = data64.ts.tv_sec;
+ data->ts.tv_usec = data64.ts.tv_usec;
+ memcpy(&data->buf, &data64.buf, sizeof(*data) - sizeof(data->ts));
+
+ return 0;
+}
+
+/*
+ * omap3isp_stat_config - Receives new statistic engine configuration.
+ * @new_conf: Pointer to config structure.
+ *
+ * Returns 0 if successful, -EINVAL if the new_conf pointer is NULL, -ENOMEM
+ * if memory for the buffer couldn't be allocated, or another error code if
+ * the parameters are invalid.
+ */
+int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
+{
+ int ret;
+ unsigned long irqflags;
+ struct ispstat_generic_config *user_cfg = new_conf;
+ u32 buf_size = user_cfg->buf_size;
+
+ mutex_lock(&stat->ioctl_lock);
+
+ dev_dbg(stat->isp->dev,
+ "%s: configuring module with buffer size=0x%08lx\n",
+ stat->subdev.name, (unsigned long)buf_size);
+
+ ret = stat->ops->validate_params(stat, new_conf);
+ if (ret) {
+ mutex_unlock(&stat->ioctl_lock);
+ dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n",
+ stat->subdev.name);
+ return ret;
+ }
+
+ if (buf_size != user_cfg->buf_size)
+ dev_dbg(stat->isp->dev,
+ "%s: driver has corrected buffer size request to 0x%08lx\n",
+ stat->subdev.name,
+ (unsigned long)user_cfg->buf_size);
+
+ /*
+ * Hack: H3A modules may need a doubled buffer size to avoid accessing
+ * an invalid memory address after an SBL overflow.
+ * The buffer size is always page aligned.
+ * Hack 2: MAGIC_SIZE is added to buf_size so magic words can be
+ * inserted at the end for data integrity checking.
+ * Hack 3: The AF module writes one paxel of data more than it should,
+ * so the buffer allocation must account for that to avoid invalid
+ * memory accesses.
+ * Hack 4: H3A needs extra space allocated for the recovery state.
+ */
+ if (IS_H3A(stat)) {
+ buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
+ if (IS_H3A_AF(stat))
+ /*
+ * Add one extra paxel of data for each recovery
+ * buffer plus the 2 regular ones.
+ */
+ buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
+ if (stat->recover_priv) {
+ struct ispstat_generic_config *recover_cfg =
+ stat->recover_priv;
+ buf_size += recover_cfg->buf_size *
+ NUM_H3A_RECOVER_BUFS;
+ }
+ buf_size = PAGE_ALIGN(buf_size);
+ } else { /* Histogram */
+ buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
+ }
+
+ ret = isp_stat_bufs_alloc(stat, buf_size);
+ if (ret) {
+ mutex_unlock(&stat->ioctl_lock);
+ return ret;
+ }
+
+ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+ stat->ops->set_params(stat, new_conf);
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+
+ /*
+ * Return the config_counter this configuration will receive when it is
+ * applied, so userspace can *know* when that has happened.
+ */
+ user_cfg->config_counter = stat->config_counter + stat->inc_config;
+
+ /* Module has a valid configuration. */
+ stat->configured = 1;
+ dev_dbg(stat->isp->dev,
+ "%s: module has been successfully configured.\n",
+ stat->subdev.name);
+
+ mutex_unlock(&stat->ioctl_lock);
+
+ return 0;
+}
+
+/*
+ * isp_stat_buf_process - Process statistics buffers.
+ * @buf_state: indicates whether the buffer is ready to be processed. This is
+ * needed because the histogram module must copy the data from its
+ * internal memory before the buffer can be processed.
+ */
+static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
+{
+ int ret = STAT_NO_BUF;
+
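+ /*
+ * atomic_add_unless() decrements buf_err and returns non-zero when it was
+ * set; in that case this buffer is dropped to recover from the earlier
+ * error instead of being queued.
+ */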
+ if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
+ buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
+ ret = isp_stat_buf_queue(stat);
+ isp_stat_buf_next(stat);
+ }
+
+ return ret;
+}
+
+int omap3isp_stat_pcr_busy(struct ispstat *stat)
+{
+ return stat->ops->busy(stat);
+}
+
+int omap3isp_stat_busy(struct ispstat *stat)
+{
+ return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
+ (stat->state != ISPSTAT_DISABLED);
+}
+
+/*
+ * isp_stat_pcr_enable - Disables/Enables statistic engines.
+ * @pcr_enable: 0/1 - Disables/Enables the engine.
+ *
+ * Must be called from ISP driver when the module is idle and synchronized
+ * with CCDC.
+ */
+static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
+{
+ if ((stat->state != ISPSTAT_ENABLING &&
+ stat->state != ISPSTAT_ENABLED) && pcr_enable)
+ /* Userspace has disabled the module. Aborting. */
+ return;
+
+ stat->ops->enable(stat, pcr_enable);
+ if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
+ stat->state = ISPSTAT_DISABLED;
+ else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
+ stat->state = ISPSTAT_ENABLED;
+}
+
+void omap3isp_stat_suspend(struct ispstat *stat)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&stat->isp->stat_lock, flags);
+
+ if (stat->state != ISPSTAT_DISABLED)
+ stat->ops->enable(stat, 0);
+ if (stat->state == ISPSTAT_ENABLED)
+ stat->state = ISPSTAT_SUSPENDED;
+
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+}
+
+void omap3isp_stat_resume(struct ispstat *stat)
+{
+ /* Module will be re-enabled with its pipeline */
+ if (stat->state == ISPSTAT_SUSPENDED)
+ stat->state = ISPSTAT_ENABLING;
+}
+
+static void isp_stat_try_enable(struct ispstat *stat)
+{
+ unsigned long irqflags;
+
+ if (stat->priv == NULL)
+ /* driver wasn't initialised */
+ return;
+
+ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+ if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
+ stat->buf_alloc_size) {
+ /*
+ * Userspace has requested to enable the engine but it hasn't been
+ * enabled yet. Do that now.
+ */
+ stat->update = 1;
+ isp_stat_buf_next(stat);
+ stat->ops->setup_regs(stat, stat->priv);
+ isp_stat_buf_insert_magic(stat, stat->active_buf);
+
+ /*
+ * The H3A modules have hardware issues which force the driver to
+ * ignore the next buffers even if the module was disabled in the
+ * meantime. The histogram, on the other hand, should stop ignoring
+ * buffers when it is being enabled.
+ */
+ if (!IS_H3A(stat))
+ atomic_set(&stat->buf_err, 0);
+
+ isp_stat_pcr_enable(stat, 1);
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
+ stat->subdev.name);
+ } else {
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ }
+}
+
+void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
+{
+ isp_stat_try_enable(stat);
+}
+
+void omap3isp_stat_sbl_overflow(struct ispstat *stat)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+ /*
+ * Due to an H3A hardware issue that prevents the next buffer from
+ * starting at the correct memory address, 2 buffers must be ignored.
+ */
+ atomic_set(&stat->buf_err, 2);
+
+ /*
+ * If more than one SBL overflow happens in a row, the H3A module may
+ * access an invalid memory region.
+ * stat->sbl_ovl_recover is set to tell the driver to temporarily use
+ * the recovery configuration, which helps to avoid consecutive
+ * overflows.
+ */
+ if (stat->recover_priv)
+ stat->sbl_ovl_recover = 1;
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+}
+
+/*
+ * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
+ * @enable: 0/1 - Disables/Enables the engine.
+ *
+ * Client should configure all the module registers before this.
+ * This function can be called from a userspace request.
+ */
+int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
+{
+ unsigned long irqflags;
+
+ dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
+ stat->subdev.name, enable ? "enable" : "disable");
+
+ /* Prevent enabling while configuring */
+ mutex_lock(&stat->ioctl_lock);
+
+ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+
+ if (!stat->configured && enable) {
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ mutex_unlock(&stat->ioctl_lock);
+ dev_dbg(stat->isp->dev,
+ "%s: cannot enable module as it's never been successfully configured so far.\n",
+ stat->subdev.name);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ if (stat->state == ISPSTAT_DISABLING)
+ /* Previous disabling request wasn't done yet */
+ stat->state = ISPSTAT_ENABLED;
+ else if (stat->state == ISPSTAT_DISABLED)
+ /* Module is now being enabled */
+ stat->state = ISPSTAT_ENABLING;
+ } else {
+ if (stat->state == ISPSTAT_ENABLING) {
+ /* Previous enabling request wasn't done yet */
+ stat->state = ISPSTAT_DISABLED;
+ } else if (stat->state == ISPSTAT_ENABLED) {
+ /* Module is now being disabled */
+ stat->state = ISPSTAT_DISABLING;
+ isp_stat_buf_clear(stat);
+ }
+ }
+
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ mutex_unlock(&stat->ioctl_lock);
+
+ return 0;
+}
+
+int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct ispstat *stat = v4l2_get_subdevdata(subdev);
+
+ if (enable) {
+ /*
+ * Only set the PCR enable bit if the module was previously
+ * enabled through the ioctl.
+ */
+ isp_stat_try_enable(stat);
+ } else {
+ unsigned long flags;
+ /* Disable PCR bit and config enable field */
+ omap3isp_stat_enable(stat, 0);
+ spin_lock_irqsave(&stat->isp->stat_lock, flags);
+ stat->ops->enable(stat, 0);
+ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+
+ /*
+ * If the module isn't busy, another interrupt that would set the
+ * state to DISABLED may or may not arrive. As the histogram needs
+ * to read its internal memory to clear it, let the interrupt
+ * handler be responsible for changing the state to DISABLED. If a
+ * last interrupt does arrive this is still safe, as the handler
+ * ignores a second call once the state is already DISABLED.
+ * Synchronizing the histogram with streamoff is necessary because
+ * the module could otherwise be considered idle before the last
+ * SDMA transfer starts if we returned here.
+ */
+ if (!omap3isp_stat_pcr_busy(stat))
+ omap3isp_stat_isr(stat);
+
+ dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
+ stat->subdev.name);
+ }
+
+ return 0;
+}
+
+/*
+ * __stat_isr - Interrupt handler for statistic drivers
+ */
+static void __stat_isr(struct ispstat *stat, int from_dma)
+{
+ int ret = STAT_BUF_DONE;
+ int buf_processing;
+ unsigned long irqflags;
+ struct isp_pipeline *pipe;
+
+ /*
+ * stat->buf_processing must be set before disabling the module, so
+ * that the buffers aren't reported as idle too early in case SDMA is
+ * going to be used.
+ */
+ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+ if (stat->state == ISPSTAT_DISABLED) {
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ return;
+ }
+ buf_processing = stat->buf_processing;
+ stat->buf_processing = 1;
+ stat->ops->enable(stat, 0);
+
+ if (buf_processing && !from_dma) {
+ if (stat->state == ISPSTAT_ENABLED) {
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ dev_err(stat->isp->dev,
+ "%s: interrupt occurred when module was still processing a buffer.\n",
+ stat->subdev.name);
+ ret = STAT_NO_BUF;
+ goto out;
+ } else {
+ /*
+ * The interrupt handler was called from streamoff when
+ * the module wasn't busy anymore, to ensure it is
+ * disabled after processing the last buffer. If that
+ * buffer processing has already started, there is
+ * nothing else to do.
+ */
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+
+ /* If it's busy we can't process this buffer anymore */
+ if (!omap3isp_stat_pcr_busy(stat)) {
+ if (!from_dma && stat->ops->buf_process)
+ /* The module still needs to copy data to the buffer. */
+ ret = stat->ops->buf_process(stat);
+ if (ret == STAT_BUF_WAITING_DMA)
+ /* Buffer is not ready yet */
+ return;
+
+ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+
+ /*
+ * The histogram needs to read its internal memory to clear it
+ * before being disabled. For that reason, the common statistics
+ * layer can only return after calling the module's buf_process()
+ * operator.
+ */
+ if (stat->state == ISPSTAT_DISABLING) {
+ stat->state = ISPSTAT_DISABLED;
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ stat->buf_processing = 0;
+ return;
+ }
+ pipe = to_isp_pipeline(&stat->subdev.entity);
+ stat->frame_number = atomic_read(&pipe->frame_number);
+
+ /*
+ * Up to this point 'ret' indicates whether the buffer is ready
+ * to be processed. Afterwards, it indicates whether the buffer
+ * was processed successfully.
+ */
+ ret = isp_stat_buf_process(stat, ret);
+
+ if (likely(!stat->sbl_ovl_recover)) {
+ stat->ops->setup_regs(stat, stat->priv);
+ } else {
+ /*
+ * Use the recovery configuration to increase the chance
+ * of processing a good buffer and bringing the H3A module
+ * back to a valid state.
+ */
+ stat->update = 1;
+ stat->ops->setup_regs(stat, stat->recover_priv);
+ stat->sbl_ovl_recover = 0;
+
+ /*
+ * Set 'update' in case the module needs to switch back
+ * to the regular configuration after the next buffer.
+ */
+ stat->update = 1;
+ }
+
+ isp_stat_buf_insert_magic(stat, stat->active_buf);
+
+ /*
+ * Hack: The H3A modules may access an invalid memory address or
+ * send corrupted data to userspace if more than one SBL overflow
+ * happens in a row without the buffer start address being
+ * rewritten in the meantime. Such a situation is avoided if the
+ * module is not immediately re-enabled when the ISR misses the
+ * timing to process the buffer and set up the registers. Because
+ * of that, pcr_enable(1) was moved inside this 'if' block. The
+ * next interrupt will still happen, as the module was busy during
+ * pcr_enable(0).
+ */
+ isp_stat_pcr_enable(stat, 1);
+ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+ } else {
+ /*
+ * If an SBL overflow occurs and the H3A driver misses the timing
+ * to process the buffer, stat->buf_err is set and is not cleared
+ * now, so the next buffer will correctly be ignored. This is
+ * necessary due to a hardware issue that makes the next H3A
+ * buffer start from the memory address where the previous one
+ * stopped, instead of the configured address.
+ * Do not "stat->buf_err = 0" here.
+ */
+
+ if (stat->ops->buf_process)
+ /*
+ * The driver may need to erase the current data before
+ * processing a new buffer. If it misses the timing, the
+ * next buffer might be wrong and should therefore be
+ * ignored. This only happens for the histogram.
+ */
+ atomic_set(&stat->buf_err, 1);
+
+ ret = STAT_NO_BUF;
+ dev_dbg(stat->isp->dev,
+ "%s: cannot process buffer, device is busy.\n",
+ stat->subdev.name);
+ }
+
+out:
+ stat->buf_processing = 0;
+ isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
+}
+
+void omap3isp_stat_isr(struct ispstat *stat)
+{
+ __stat_isr(stat, 0);
+}
+
+void omap3isp_stat_dma_isr(struct ispstat *stat)
+{
+ __stat_isr(stat, 1);
+}
+
+int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct ispstat *stat = v4l2_get_subdevdata(subdev);
+
+ if (sub->type != stat->event_type)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
+}
+
+int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+void omap3isp_stat_unregister_entities(struct ispstat *stat)
+{
+ v4l2_device_unregister_subdev(&stat->subdev);
+}
+
+int omap3isp_stat_register_entities(struct ispstat *stat,
+ struct v4l2_device *vdev)
+{
+ stat->subdev.dev = vdev->mdev->dev;
+
+ return v4l2_device_register_subdev(vdev, &stat->subdev);
+}
+
+static int isp_stat_init_entities(struct ispstat *stat, const char *name,
+ const struct v4l2_subdev_ops *sd_ops)
+{
+ struct v4l2_subdev *subdev = &stat->subdev;
+ struct media_entity *me = &subdev->entity;
+
+ v4l2_subdev_init(subdev, sd_ops);
+ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
+ subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ v4l2_set_subdevdata(subdev, stat);
+
+ stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+ me->ops = NULL;
+
+ return media_entity_pads_init(me, 1, &stat->pad);
+}
+
+int omap3isp_stat_init(struct ispstat *stat, const char *name,
+ const struct v4l2_subdev_ops *sd_ops)
+{
+ int ret;
+
+ stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
+ if (!stat->buf)
+ return -ENOMEM;
+
+ isp_stat_buf_clear(stat);
+ mutex_init(&stat->ioctl_lock);
+ atomic_set(&stat->buf_err, 0);
+
+ ret = isp_stat_init_entities(stat, name, sd_ops);
+ if (ret < 0) {
+ mutex_destroy(&stat->ioctl_lock);
+ kfree(stat->buf);
+ }
+
+ return ret;
+}
+
+void omap3isp_stat_cleanup(struct ispstat *stat)
+{
+ media_entity_cleanup(&stat->subdev.entity);
+ mutex_destroy(&stat->ioctl_lock);
+ isp_stat_bufs_free(stat);
+ kfree(stat->buf);
+}
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
new file mode 100644
index 000000000..923b38cfc
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -0,0 +1,159 @@
+/*
+ * ispstat.h
+ *
+ * TI OMAP3 ISP - Statistics core
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_STAT_H
+#define OMAP3_ISP_STAT_H
+
+#include <linux/types.h>
+#include <linux/omap3isp.h>
+#include <media/v4l2-event.h>
+
+#include "isp.h"
+#include "ispvideo.h"
+
+#define STAT_MAX_BUFS 5
+#define STAT_NEVENTS 8
+
+#define STAT_BUF_DONE 0 /* Buffer is ready */
+#define STAT_NO_BUF 1 /* An error has occurred */
+#define STAT_BUF_WAITING_DMA 2 /* Histogram only: DMA is running */
+
+struct dma_chan;
+struct ispstat;
+
+struct ispstat_buffer {
+ struct sg_table sgt;
+ void *virt_addr;
+ dma_addr_t dma_addr;
+ struct timespec64 ts;
+ u32 buf_size;
+ u32 frame_number;
+ u16 config_counter;
+ u8 empty;
+};
+
+struct ispstat_ops {
+ /*
+ * Validate new params configuration.
+ * new_conf->buf_size value must be changed to the exact buffer size
+ * necessary for the new configuration if it's smaller.
+ */
+ int (*validate_params)(struct ispstat *stat, void *new_conf);
+
+ /*
+ * Save new params configuration.
+ * stat->priv->buf_size value must be set to the exact buffer size for
+ * the new configuration.
+ * stat->update is set to 1 if the new configuration differs from the
+ * current one.
+ */
+ void (*set_params)(struct ispstat *stat, void *new_conf);
+
+ /* Apply stored configuration. */
+ void (*setup_regs)(struct ispstat *stat, void *priv);
+
+ /* Enable/Disable module. */
+ void (*enable)(struct ispstat *stat, int enable);
+
+ /* Check whether the module is busy. */
+ int (*busy)(struct ispstat *stat);
+
+ /* Used for specific operations during generic buf process task. */
+ int (*buf_process)(struct ispstat *stat);
+};
+
+enum ispstat_state_t {
+ ISPSTAT_DISABLED = 0,
+ ISPSTAT_DISABLING,
+ ISPSTAT_ENABLED,
+ ISPSTAT_ENABLING,
+ ISPSTAT_SUSPENDED,
+};
+
+struct ispstat {
+ struct v4l2_subdev subdev;
+ struct media_pad pad; /* sink pad */
+
+ /* Control */
+ unsigned configured:1;
+ unsigned update:1;
+ unsigned buf_processing:1;
+ unsigned sbl_ovl_recover:1;
+ u8 inc_config;
+ atomic_t buf_err;
+ enum ispstat_state_t state; /* enabling/disabling state */
+ struct isp_device *isp;
+ void *priv; /* pointer to priv config struct */
+ void *recover_priv; /* pointer to recover priv configuration */
+ struct mutex ioctl_lock; /* serialize private ioctl */
+
+ const struct ispstat_ops *ops;
+
+ /* Buffer */
+ u8 wait_acc_frames;
+ u16 config_counter;
+ u32 frame_number;
+ u32 buf_size;
+ u32 buf_alloc_size;
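+ /* DMA engine channel, set only when the module (histogram) uses it */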
+ struct dma_chan *dma_ch;
+ unsigned long event_type;
+ struct ispstat_buffer *buf;
+ struct ispstat_buffer *active_buf;
+ struct ispstat_buffer *locked_buf;
+};
+
+struct ispstat_generic_config {
+ /*
+ * Fields must be in the same order as in:
+ * - omap3isp_h3a_aewb_config
+ * - omap3isp_h3a_af_config
+ * - omap3isp_hist_config
+ */
+ u32 buf_size;
+ u16 config_counter;
+};
+
+int omap3isp_stat_config(struct ispstat *stat, void *new_conf);
+int omap3isp_stat_request_statistics(struct ispstat *stat,
+ struct omap3isp_stat_data *data);
+int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
+ struct omap3isp_stat_data_time32 *data);
+int omap3isp_stat_init(struct ispstat *stat, const char *name,
+ const struct v4l2_subdev_ops *sd_ops);
+void omap3isp_stat_cleanup(struct ispstat *stat);
+int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable);
+
+int omap3isp_stat_busy(struct ispstat *stat);
+int omap3isp_stat_pcr_busy(struct ispstat *stat);
+void omap3isp_stat_suspend(struct ispstat *stat);
+void omap3isp_stat_resume(struct ispstat *stat);
+int omap3isp_stat_enable(struct ispstat *stat, u8 enable);
+void omap3isp_stat_sbl_overflow(struct ispstat *stat);
+void omap3isp_stat_isr(struct ispstat *stat);
+void omap3isp_stat_isr_frame_sync(struct ispstat *stat);
+void omap3isp_stat_dma_isr(struct ispstat *stat);
+int omap3isp_stat_register_entities(struct ispstat *stat,
+ struct v4l2_device *vdev);
+void omap3isp_stat_unregister_entities(struct ispstat *stat);
+
+#endif /* OMAP3_ISP_STAT_H */
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
new file mode 100644
index 000000000..9d228eac2
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -0,0 +1,1500 @@
+/*
+ * ispvideo.c
+ *
+ * TI OMAP3 ISP - Generic video node
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "ispvideo.h"
+#include "isp.h"
+
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+/*
+ * NOTE: When adding new media bus codes, always remember to add
+ * corresponding in-memory formats to the table below!!!
+ */
+static struct isp_format_info formats[] = {
+ { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_GREY, 8, 1, },
+ { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_Y10, 10, 2, },
+ { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_Y12, 12, 2, },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR8, 8, 1, },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG8, 8, 1, },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG8, 8, 1, },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB8, 8, 1, },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10, 0,
+ V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_1X10, 0,
+ V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_1X10, 0,
+ V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_1X10, 0,
+ V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR10, 10, 2, },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG10, 10, 2, },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG10, 10, 2, },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB10, 10, 2, },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR12, 12, 2, },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG12, 12, 2, },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG12, 12, 2, },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB12, 12, 2, },
+ { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16, 0,
+ V4L2_PIX_FMT_UYVY, 16, 2, },
+ { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16, 0,
+ V4L2_PIX_FMT_YUYV, 16, 2, },
+ { MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8, 0,
+ V4L2_PIX_FMT_UYVY, 8, 2, },
+ { MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8, 0,
+ V4L2_PIX_FMT_YUYV, 8, 2, },
+ /* Empty entry to catch the unsupported pixel code (0) used by the CCDC
+ * module and avoid NULL pointer dereferences.
+ */
+ { 0, }
+};
+
+const struct isp_format_info *omap3isp_video_format_info(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
+ * @video: ISP video instance
+ * @mbus: v4l2_mbus_framefmt format (input)
+ * @pix: v4l2_pix_format format (output)
+ *
+ * Fill the output pix structure with information from the input mbus format.
+ * The bytesperline and sizeimage fields are computed from the requested bytes
+ * per line value in the pix format and information from the video instance.
+ *
+ * Return the number of padding bytes at end of line.
+ */
+static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
+ const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix)
+{
+ unsigned int bpl = pix->bytesperline;
+ unsigned int min_bpl;
+ unsigned int i;
+
+ memset(pix, 0, sizeof(*pix));
+ pix->width = mbus->width;
+ pix->height = mbus->height;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].code == mbus->code)
+ break;
+ }
+
+ if (WARN_ON(i == ARRAY_SIZE(formats)))
+ return 0;
+
+ min_bpl = pix->width * formats[i].bpp;
+
+ /* Clamp the requested bytes per line value. If the maximum bytes per
+ * line value is zero, the module doesn't support user configurable line
+ * sizes. Override the requested value with the minimum in that case.
+ */
+ if (video->bpl_max)
+ bpl = clamp(bpl, min_bpl, video->bpl_max);
+ else
+ bpl = min_bpl;
+
+ if (!video->bpl_zero_padding || bpl != min_bpl)
+ bpl = ALIGN(bpl, video->bpl_alignment);
+
+ pix->pixelformat = formats[i].pixelformat;
+ pix->bytesperline = bpl;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->colorspace = mbus->colorspace;
+ pix->field = mbus->field;
+
+ return bpl - min_bpl;
+}
+
+static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
+ struct v4l2_mbus_framefmt *mbus)
+{
+ unsigned int i;
+
+ memset(mbus, 0, sizeof(*mbus));
+ mbus->width = pix->width;
+ mbus->height = pix->height;
+
+ /* Skip the last format in the loop so that it will be selected if no
+ * match is found.
+ */
+ for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
+ if (formats[i].pixelformat == pix->pixelformat)
+ break;
+ }
+
+ mbus->code = formats[i].code;
+ mbus->colorspace = pix->colorspace;
+ mbus->field = pix->field;
+}
+
+static struct v4l2_subdev *
+isp_video_remote_subdev(struct isp_video *video, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(&video->pad);
+
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/*
+ * Walk the media graph to find the ISP video node at the far end of the
+ * pipeline and fill in the pipeline input and output video nodes.
+ */
+static int isp_video_get_graph_data(struct isp_video *video,
+ struct isp_pipeline *pipe)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &video->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct isp_video *far_end = NULL;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct isp_video *__video;
+
+ media_entity_enum_set(&pipe->ent_enum, entity);
+
+ if (far_end != NULL)
+ continue;
+
+ if (entity == &video->video.entity)
+ continue;
+
+ if (!is_media_entity_v4l2_video_device(entity))
+ continue;
+
+ __video = to_isp_video(media_entity_to_video_device(entity));
+ if (__video->type != video->type)
+ far_end = __video;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ media_graph_walk_cleanup(&graph);
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ pipe->input = far_end;
+ pipe->output = video;
+ } else {
+ if (far_end == NULL)
+ return -EPIPE;
+
+ pipe->input = video;
+ pipe->output = far_end;
+ }
+
+ return 0;
+}
+
+static int
+__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ subdev = isp_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ fmt.pad = pad;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ mutex_unlock(&video->mutex);
+
+ if (ret)
+ return ret;
+
+ format->type = video->type;
+ return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
+}
+
+static int
+isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
+{
+ struct v4l2_format format;
+ int ret;
+
+ memcpy(&format, &vfh->format, sizeof(format));
+ ret = __isp_video_get_format(video, &format);
+ if (ret < 0)
+ return ret;
+
+ if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
+ vfh->format.fmt.pix.height != format.fmt.pix.height ||
+ vfh->format.fmt.pix.width != format.fmt.pix.width ||
+ vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
+ vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
+ vfh->format.fmt.pix.field != format.fmt.pix.field)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static int isp_video_queue_setup(struct vb2_queue *queue,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
+ struct isp_video *video = vfh->video;
+
+ *num_planes = 1;
+
+ sizes[0] = vfh->format.fmt.pix.sizeimage;
+ if (sizes[0] == 0)
+ return -EINVAL;
+
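+ /* Limit the buffer count so the total allocation stays within the
+ * capture_mem budget.
+ */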
+ *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
+
+ return 0;
+}
+
+static int isp_video_buffer_prepare(struct vb2_buffer *buf)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
+ struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
+ struct isp_buffer *buffer = to_isp_buffer(vbuf);
+ struct isp_video *video = vfh->video;
+ dma_addr_t addr;
+
+ /* Refuse to prepare the buffer if the video node has registered an
+ * error. We don't need to take any lock here as the operation is
+ * inherently racy. The authoritative check will be performed in the
+ * queue handler, which can't return an error; this check is just a
+ * best-effort attempt to notify userspace as early as possible.
+ */
+ if (unlikely(video->error))
+ return -EIO;
+
+ addr = vb2_dma_contig_plane_dma_addr(buf, 0);
+ if (!IS_ALIGNED(addr, 32)) {
+ dev_dbg(video->isp->dev,
+ "Buffer address must be aligned to 32 bytes boundary.\n");
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
+ vfh->format.fmt.pix.sizeimage);
+ buffer->dma = addr;
+
+ return 0;
+}
+
+/*
+ * isp_video_buffer_queue - Add buffer to streaming queue
+ * @buf: Video buffer
+ *
+ * In memory-to-memory mode, start streaming on the pipeline if buffers are
+ * queued on both the input and the output, if the pipeline isn't already busy.
+ * If the pipeline is busy, it will be restarted in the output module interrupt
+ * handler.
+ */
+static void isp_video_buffer_queue(struct vb2_buffer *buf)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
+ struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
+ struct isp_buffer *buffer = to_isp_buffer(vbuf);
+ struct isp_video *video = vfh->video;
+ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+ enum isp_pipeline_state state;
+ unsigned long flags;
+ unsigned int empty;
+ unsigned int start;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+
+ if (unlikely(video->error)) {
+ vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return;
+ }
+
+ empty = list_empty(&video->dmaqueue);
+ list_add_tail(&buffer->irqlist, &video->dmaqueue);
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
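+ /* If the DMA queue was empty, the hardware isn't working on a buffer
+ * from this node yet: hand this one to the module and start the
+ * pipeline in single-shot mode if both ends are now ready.
+ */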
+ if (empty) {
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISP_PIPELINE_QUEUE_OUTPUT;
+ else
+ state = ISP_PIPELINE_QUEUE_INPUT;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state |= state;
+ video->ops->queue(video, buffer);
+ video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
+
+ start = isp_pipeline_ready(pipe);
+ if (start)
+ pipe->state |= ISP_PIPELINE_STREAM;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ if (start)
+ omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_SINGLESHOT);
+ }
+}
+
+/*
+ * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
+ * @video: ISP video object
+ * @state: new state for the returned buffers
+ *
+ * Return all buffers queued on the video node to videobuf2 in the given state.
+ * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
+ * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
+ *
+ * The function must be called with the video irqlock held.
+ */
+static void omap3isp_video_return_buffers(struct isp_video *video,
+ enum vb2_buffer_state state)
+{
+ while (!list_empty(&video->dmaqueue)) {
+ struct isp_buffer *buf;
+
+ buf = list_first_entry(&video->dmaqueue,
+ struct isp_buffer, irqlist);
+ list_del(&buf->irqlist);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+}
+
+static int isp_video_start_streaming(struct vb2_queue *queue,
+ unsigned int count)
+{
+ struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
+ struct isp_video *video = vfh->video;
+ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+ unsigned long flags;
+ int ret;
+
+ /* In sensor-to-memory mode, the stream can be started synchronously
+ * with the stream on command. In memory-to-memory mode, it will be
+ * started when buffers are queued on both the input and output.
+ */
+ if (pipe->input)
+ return 0;
+
+ ret = omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_CONTINUOUS);
+ if (ret < 0) {
+ spin_lock_irqsave(&video->irqlock, flags);
+ omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return ret;
+ }
+
+ spin_lock_irqsave(&video->irqlock, flags);
+ if (list_empty(&video->dmaqueue))
+ video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ return 0;
+}
+
+static const struct vb2_ops isp_video_queue_ops = {
+ .queue_setup = isp_video_queue_setup,
+ .buf_prepare = isp_video_buffer_prepare,
+ .buf_queue = isp_video_buffer_queue,
+ .start_streaming = isp_video_start_streaming,
+};
+
+/*
+ * omap3isp_video_buffer_next - Complete the current buffer and return the next
+ * @video: ISP video object
+ *
+ * Remove the current video buffer from the DMA queue and fill its timestamp and
+ * field count before handing it back to videobuf2.
+ *
+ * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
+ * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
+ * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
+ *
+ * The DMA queue is expected to contain at least one buffer.
+ *
+ * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
+ * empty.
+ */
+struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+ enum vb2_buffer_state vb_state;
+ struct isp_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+ if (WARN_ON(list_empty(&video->dmaqueue))) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return NULL;
+ }
+
+ buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
+ irqlist);
+ list_del(&buf->irqlist);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+
+ /* Do frame number propagation only if this is the output video node.
+ * Frame number either comes from the CSI receivers or it gets
+ * incremented here if H3A is not active.
+ * Note: There is no guarantee that the output buffer will finish
+ * first, so the input number might lag behind by 1 in some cases.
+ */
+ if (video == pipe->output && !pipe->do_propagation)
+ buf->vb.sequence =
+ atomic_inc_return(&pipe->frame_number);
+ else
+ buf->vb.sequence = atomic_read(&pipe->frame_number);
+
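+ /* For interlaced pipelines the counter above advances once per field,
+ * so halve it to expose a per-frame sequence number.
+ */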
+ if (pipe->field != V4L2_FIELD_NONE)
+ buf->vb.sequence /= 2;
+
+ buf->vb.field = pipe->field;
+
+ /* Report pipeline errors to userspace on the capture device side. */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
+ vb_state = VB2_BUF_STATE_ERROR;
+ pipe->error = false;
+ } else {
+ vb_state = VB2_BUF_STATE_DONE;
+ }
+
+ vb2_buffer_done(&buf->vb.vb2_buf, vb_state);
+
+ spin_lock_irqsave(&video->irqlock, flags);
+
+ if (list_empty(&video->dmaqueue)) {
+ enum isp_pipeline_state state;
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISP_PIPELINE_QUEUE_OUTPUT
+ | ISP_PIPELINE_STREAM;
+ else
+ state = ISP_PIPELINE_QUEUE_INPUT
+ | ISP_PIPELINE_STREAM;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~state;
+ if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
+ video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+ return NULL;
+ }
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
+ spin_lock(&pipe->lock);
+ pipe->state &= ~ISP_PIPELINE_STREAM;
+ spin_unlock(&pipe->lock);
+ }
+
+ buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
+ irqlist);
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ return buf;
+}
+
+/*
+ * omap3isp_video_cancel_stream - Cancel stream on a video node
+ * @video: ISP video object
+ *
+ * Cancelling a stream returns all buffers queued on the video node to
+ * videobuf2 in the error state and makes sure no new buffer can be queued.
+ */
+void omap3isp_video_cancel_stream(struct isp_video *video)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+ omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
+ video->error = true;
+ spin_unlock_irqrestore(&video->irqlock, flags);
+}
+
+/*
+ * omap3isp_video_resume - Perform resume operation on the buffers
+ * @video: ISP video object
+ * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
+ *
+ * This function is intended to be used in suspend/resume scenarios. It
+ * requests the video queue layer to discard buffers marked as DONE if the
+ * pipeline is in continuous mode, and requests the ISP modules to queue the
+ * ACTIVE buffer again, if any.
+ */
+void omap3isp_video_resume(struct isp_video *video, int continuous)
+{
+ struct isp_buffer *buf = NULL;
+
+ if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ mutex_lock(&video->queue_lock);
+ vb2_discard_done(video->queue);
+ mutex_unlock(&video->queue_lock);
+ }
+
+ if (!list_empty(&video->dmaqueue)) {
+ buf = list_first_entry(&video->dmaqueue,
+ struct isp_buffer, irqlist);
+ video->ops->queue(video, buf);
+ video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
+ } else {
+ if (continuous)
+ video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct isp_video *video = video_drvdata(file);
+
+ strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, video->video.name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int
+isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+ *format = vfh->format;
+ mutex_unlock(&video->mutex);
+
+ return 0;
+}
+
+static int
+isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ struct v4l2_mbus_framefmt fmt;
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ /* Replace unsupported field orders with sane defaults. */
+ switch (format->fmt.pix.field) {
+ case V4L2_FIELD_NONE:
+ /* Progressive is supported everywhere. */
+ break;
+ case V4L2_FIELD_ALTERNATE:
+ /* ALTERNATE is not supported on output nodes. */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ format->fmt.pix.field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ /* The ISP has no concept of video standard, select the
+ * top-bottom order when the unqualified interlaced order is
+ * requested.
+ */
+ format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
+ /* Fall-through */
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ /* Interlaced orders are only supported at the CCDC output. */
+ if (video != &video->isp->isp_ccdc.video_out)
+ format->fmt.pix.field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ default:
+ /* All other field orders are currently unsupported, default to
+ * progressive.
+ */
+ format->fmt.pix.field = V4L2_FIELD_NONE;
+ break;
+ }
+
+ /* Fill the bytesperline and sizeimage fields by converting to media bus
+ * format and back to pixel format.
+ */
+ isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
+ isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
+
+ mutex_lock(&video->mutex);
+ vfh->format = *format;
+ mutex_unlock(&video->mutex);
+
+ return 0;
+}
+
+static int
+isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct isp_video *video = video_drvdata(file);
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ subdev = isp_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
+
+ fmt.pad = pad;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+
+ isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
+ return 0;
+}
+
+static int
+isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct isp_video *video = video_drvdata(file);
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ };
+ u32 pad;
+ int ret;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ subdev = isp_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ /* Try the get selection operation first and fall back to get format if
+ * not implemented.
+ */
+ sdsel.pad = pad;
+ ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+ if (!ret)
+ sel->r = sdsel.r;
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ format.pad = pad;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format.format.width;
+ sel->r.height = format.format.height;
+
+ return 0;
+}
+
+static int
+isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct isp_video *video = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+ u32 pad;
+ int ret;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ subdev = isp_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ sdsel.pad = pad;
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
+ mutex_unlock(&video->mutex);
+ if (!ret)
+ sel->r = sdsel.r;
+
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
+isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+
+ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ video->type != a->type)
+ return -EINVAL;
+
+ memset(a, 0, sizeof(*a));
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe = vfh->timeperframe;
+
+ return 0;
+}
+
+static int
+isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+
+ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ video->type != a->type)
+ return -EINVAL;
+
+ if (a->parm.output.timeperframe.denominator == 0)
+ a->parm.output.timeperframe.denominator = 1;
+
+ vfh->timeperframe = a->parm.output.timeperframe;
+
+ return 0;
+}
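+
+/* Example (illustrative userspace sketch, not part of the driver): requesting
+ * a maximum time per frame on the output (memory-to-ISP) video node before
+ * streaming; the 30 fps value is only an example.
+ *
+ *	struct v4l2_streamparm parm = {
+ *		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ *	};
+ *
+ *	parm.parm.output.timeperframe.numerator = 1;
+ *	parm.parm.output.timeperframe.denominator = 30;
+ *	ioctl(fd, VIDIOC_S_PARM, &parm);
+ */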
+
+static int
+isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_reqbufs(&vfh->queue, rb);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
+}
+
+static int
+isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_querybuf(&vfh->queue, b);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
+}
+
+static int
+isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_qbuf(&vfh->queue, b);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
+}
+
+static int
+isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
+}
+
+static int isp_video_check_external_subdevs(struct isp_video *video,
+ struct isp_pipeline *pipe)
+{
+ struct isp_device *isp = video->isp;
+ struct media_entity *ents[] = {
+ &isp->isp_csi2a.subdev.entity,
+ &isp->isp_csi2c.subdev.entity,
+ &isp->isp_ccp2.subdev.entity,
+ &isp->isp_ccdc.subdev.entity
+ };
+ struct media_pad *source_pad;
+ struct media_entity *source = NULL;
+ struct media_entity *sink;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+ unsigned int i;
+ int ret;
+
+ /* Memory-to-memory pipelines have no external subdev. */
+ if (pipe->input != NULL)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ents); i++) {
+ /* Is the entity part of the pipeline? */
+ if (!media_entity_enum_test(&pipe->ent_enum, ents[i]))
+ continue;
+
+ /* ISP entities always have sink pad == 0. Find the source. */
+ source_pad = media_entity_remote_pad(&ents[i]->pads[0]);
+ if (source_pad == NULL)
+ continue;
+
+ source = source_pad->entity;
+ sink = ents[i];
+ break;
+ }
+
+ if (!source) {
+ dev_warn(isp->dev, "can't find source, failing now\n");
+ return -EINVAL;
+ }
+
+ if (!is_media_entity_v4l2_subdev(source))
+ return 0;
+
+ pipe->external = media_entity_to_v4l2_subdev(source);
+
+ fmt.pad = source_pad->index;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
+ pad, get_fmt, NULL, &fmt);
+ if (unlikely(ret < 0)) {
+ dev_warn(isp->dev, "get_fmt returned null!\n");
+ return ret;
+ }
+
+ pipe->external_width =
+ omap3isp_video_format_info(fmt.format.code)->width;
+
+ memset(&ctrls, 0, sizeof(ctrls));
+ memset(&ctrl, 0, sizeof(ctrl));
+
+ ctrl.id = V4L2_CID_PIXEL_RATE;
+
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+
+ ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
+ if (ret < 0) {
+ dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
+ pipe->external->name);
+ return ret;
+ }
+
+ pipe->external_rate = ctrl.value64;
+
+ if (media_entity_enum_test(&pipe->ent_enum,
+ &isp->isp_ccdc.subdev.entity)) {
+ unsigned int rate = UINT_MAX;
+ /*
+ * Check that maximum allowed CCDC pixel rate isn't
+ * exceeded by the pixel rate.
+ */
+ omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
+ if (pipe->external_rate > rate)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/*
+ * Stream management
+ *
+ * Every ISP pipeline has a single input and a single output. The input can be
+ * either a sensor or a video node. The output is always a video node.
+ *
+ * As every pipeline has an output video node, the ISP video object at the
+ * pipeline output stores the pipeline state. It tracks the streaming state of
+ * both the input and output, as well as the availability of buffers.
+ *
+ * In sensor-to-memory mode, frames are always available at the pipeline input.
+ * Starting the sensor usually requires I2C transfers and must be done in
+ * interruptible context. The pipeline is started and stopped synchronously
+ * to the stream on/off commands. All modules in the pipeline will get their
+ * subdev set stream handler called. The module at the end of the pipeline must
+ * delay starting the hardware until buffers are available at its output.
+ *
+ * In memory-to-memory mode, starting/stopping the stream requires
+ * synchronization between the input and output. ISP modules can't be stopped
+ * in the middle of a frame, and at least some of the modules seem to become
+ * busy as soon as they're started, even if they don't receive a frame start
+ * event. For that reason frames need to be processed in single-shot mode. The
+ * driver needs to wait until a frame is completely processed and written to
+ * memory before restarting the pipeline for the next frame. Pipelined
+ * processing might be possible but requires more testing.
+ *
+ * Stream start must be delayed until buffers are available at both the input
+ * and output. The pipeline must be started in the videobuf queue callback with
+ * the buffer queue spinlock held. The modules' subdev set stream operation
+ * must not sleep.
+ */
+static int
+isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ enum isp_pipeline_state state;
+ struct isp_pipeline *pipe;
+ unsigned long flags;
+ int ret;
+
+ if (type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->stream_lock);
+
+ /* Start streaming on the pipeline. No link touching an entity in the
+ * pipeline can be activated or deactivated once streaming is started.
+ */
+ pipe = video->video.entity.pipe
+ ? to_isp_pipeline(&video->video.entity) : &video->pipe;
+
+ ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
+ if (ret)
+ goto err_enum_init;
+
+ /* TODO: Implement PM QoS */
+ pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
+ pipe->max_rate = pipe->l3_ick;
+
+ ret = media_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto err_pipeline_start;
+
+ /* Verify that the currently configured format matches the output of
+ * the connected subdev.
+ */
+ ret = isp_video_check_format(video, vfh);
+ if (ret < 0)
+ goto err_check_format;
+
+ video->bpl_padding = ret;
+ video->bpl_value = vfh->format.fmt.pix.bytesperline;
+
+ ret = isp_video_get_graph_data(video, pipe);
+ if (ret < 0)
+ goto err_check_format;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
+ else
+ state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
+
+ ret = isp_video_check_external_subdevs(video, pipe);
+ if (ret < 0)
+ goto err_check_format;
+
+ pipe->error = false;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~ISP_PIPELINE_STREAM;
+ pipe->state |= state;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ /* Set the maximum time per frame as the value requested by userspace.
+ * This is a soft limit that can be overridden if the hardware doesn't
+ * support the requested limit.
+ */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pipe->max_timeperframe = vfh->timeperframe;
+
+ video->queue = &vfh->queue;
+ INIT_LIST_HEAD(&video->dmaqueue);
+ atomic_set(&pipe->frame_number, -1);
+ pipe->field = vfh->format.fmt.pix.field;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_streamon(&vfh->queue, type);
+ mutex_unlock(&video->queue_lock);
+ if (ret < 0)
+ goto err_check_format;
+
+ mutex_unlock(&video->stream_lock);
+
+ return 0;
+
+err_check_format:
+ media_pipeline_stop(&video->video.entity);
+err_pipeline_start:
+ /* TODO: Implement PM QoS */
+ /* The DMA queue must be emptied here, otherwise CCDC interrupts that
+ * will get triggered the next time the CCDC is powered up will try to
+ * access buffers that might have been freed but are still present in the
+ * DMA queue. This can easily get triggered if the above
+ * omap3isp_pipeline_set_stream() call fails on a system with a
+ * free-running sensor.
+ */
+ INIT_LIST_HEAD(&video->dmaqueue);
+ video->queue = NULL;
+
+ media_entity_enum_cleanup(&pipe->ent_enum);
+
+err_enum_init:
+ mutex_unlock(&video->stream_lock);
+
+ return ret;
+}
+
+static int
+isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+ enum isp_pipeline_state state;
+ unsigned int streaming;
+ unsigned long flags;
+
+ if (type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->stream_lock);
+
+ /* Nothing to do if the video node is not streaming. */
+ mutex_lock(&video->queue_lock);
+ streaming = vb2_is_streaming(&vfh->queue);
+ mutex_unlock(&video->queue_lock);
+
+ if (!streaming)
+ goto done;
+
+ /* Update the pipeline state. */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISP_PIPELINE_STREAM_OUTPUT
+ | ISP_PIPELINE_QUEUE_OUTPUT;
+ else
+ state = ISP_PIPELINE_STREAM_INPUT
+ | ISP_PIPELINE_QUEUE_INPUT;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~state;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ /* Stop the stream. */
+ omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
+ omap3isp_video_cancel_stream(video);
+
+ mutex_lock(&video->queue_lock);
+ vb2_streamoff(&vfh->queue, type);
+ mutex_unlock(&video->queue_lock);
+ video->queue = NULL;
+ video->error = false;
+
+ /* TODO: Implement PM QoS */
+ media_pipeline_stop(&video->video.entity);
+
+ media_entity_enum_cleanup(&pipe->ent_enum);
+
+done:
+ mutex_unlock(&video->stream_lock);
+ return 0;
+}
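+
+/* Example (illustrative userspace sketch, not part of the driver): the usual
+ * call sequence driving the ioctls above on a capture node, with buffer
+ * mapping and error handling omitted; fd and the buffer count are assumptions.
+ *
+ *	struct v4l2_requestbuffers req = {
+ *		.count = 4,
+ *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *		.memory = V4L2_MEMORY_MMAP,
+ *	};
+ *	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *
+ *	ioctl(fd, VIDIOC_REQBUFS, &req);
+ *	// VIDIOC_QUERYBUF + mmap() + VIDIOC_QBUF for each buffer
+ *	ioctl(fd, VIDIOC_STREAMON, &type);
+ *	// VIDIOC_DQBUF / process / VIDIOC_QBUF loop
+ *	ioctl(fd, VIDIOC_STREAMOFF, &type);
+ */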
+
+static int
+isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+
+ strlcpy(input->name, "camera", sizeof(input->name));
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int
+isp_video_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ *input = 0;
+
+ return 0;
+}
+
+static int
+isp_video_s_input(struct file *file, void *fh, unsigned int input)
+{
+ return input == 0 ? 0 : -EINVAL;
+}
+
+static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
+ .vidioc_querycap = isp_video_querycap,
+ .vidioc_g_fmt_vid_cap = isp_video_get_format,
+ .vidioc_s_fmt_vid_cap = isp_video_set_format,
+ .vidioc_try_fmt_vid_cap = isp_video_try_format,
+ .vidioc_g_fmt_vid_out = isp_video_get_format,
+ .vidioc_s_fmt_vid_out = isp_video_set_format,
+ .vidioc_try_fmt_vid_out = isp_video_try_format,
+ .vidioc_g_selection = isp_video_get_selection,
+ .vidioc_s_selection = isp_video_set_selection,
+ .vidioc_g_parm = isp_video_get_param,
+ .vidioc_s_parm = isp_video_set_param,
+ .vidioc_reqbufs = isp_video_reqbufs,
+ .vidioc_querybuf = isp_video_querybuf,
+ .vidioc_qbuf = isp_video_qbuf,
+ .vidioc_dqbuf = isp_video_dqbuf,
+ .vidioc_streamon = isp_video_streamon,
+ .vidioc_streamoff = isp_video_streamoff,
+ .vidioc_enum_input = isp_video_enum_input,
+ .vidioc_g_input = isp_video_g_input,
+ .vidioc_s_input = isp_video_s_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static int isp_video_open(struct file *file)
+{
+ struct isp_video *video = video_drvdata(file);
+ struct isp_video_fh *handle;
+ struct vb2_queue *queue;
+ int ret = 0;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (handle == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(&handle->vfh, &video->video);
+ v4l2_fh_add(&handle->vfh);
+
+ /* If this is the first user, initialise the pipeline. */
+ if (omap3isp_get(video->isp) == NULL) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ ret = v4l2_pipeline_pm_use(&video->video.entity, 1);
+ if (ret < 0) {
+ omap3isp_put(video->isp);
+ goto done;
+ }
+
+ queue = &handle->queue;
+ queue->type = video->type;
+ queue->io_modes = VB2_MMAP | VB2_USERPTR;
+ queue->drv_priv = handle;
+ queue->ops = &isp_video_queue_ops;
+ queue->mem_ops = &vb2_dma_contig_memops;
+ queue->buf_struct_size = sizeof(struct isp_buffer);
+ queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ queue->dev = video->isp->dev;
+
+ ret = vb2_queue_init(&handle->queue);
+ if (ret < 0) {
+ omap3isp_put(video->isp);
+ goto done;
+ }
+
+ memset(&handle->format, 0, sizeof(handle->format));
+ handle->format.type = video->type;
+ handle->timeperframe.denominator = 1;
+
+ handle->video = video;
+ file->private_data = &handle->vfh;
+
+done:
+ if (ret < 0) {
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
+ kfree(handle);
+ }
+
+ return ret;
+}
+
+static int isp_video_release(struct file *file)
+{
+ struct isp_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+ struct isp_video_fh *handle = to_isp_video_fh(vfh);
+
+ /* Disable streaming and free the buffers queue resources. */
+ isp_video_streamoff(file, vfh, video->type);
+
+ mutex_lock(&video->queue_lock);
+ vb2_queue_release(&handle->queue);
+ mutex_unlock(&video->queue_lock);
+
+ v4l2_pipeline_pm_use(&video->video.entity, 0);
+
+ /* Release the file handle. */
+ v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
+ kfree(handle);
+ file->private_data = NULL;
+
+ omap3isp_put(video->isp);
+
+ return 0;
+}
+
+static __poll_t isp_video_poll(struct file *file, poll_table *wait)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+ struct isp_video *video = video_drvdata(file);
+ __poll_t ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_poll(&vfh->queue, file, wait);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
+}
+
+static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+
+ return vb2_mmap(&vfh->queue, vma);
+}
+
+static const struct v4l2_file_operations isp_video_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = isp_video_open,
+ .release = isp_video_release,
+ .poll = isp_video_poll,
+ .mmap = isp_video_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * ISP video core
+ */
+
+static const struct isp_video_operations isp_video_dummy_ops = {
+};
+
+int omap3isp_video_init(struct isp_video *video, const char *name)
+{
+ const char *direction;
+ int ret;
+
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ video->video.vfl_dir = VFL_DIR_TX;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&video->mutex);
+ atomic_set(&video->active, 0);
+
+ spin_lock_init(&video->pipe.lock);
+ mutex_init(&video->stream_lock);
+ mutex_init(&video->queue_lock);
+ spin_lock_init(&video->irqlock);
+
+ /* Initialize the video device. */
+ if (video->ops == NULL)
+ video->ops = &isp_video_dummy_ops;
+
+ video->video.fops = &isp_video_fops;
+ snprintf(video->video.name, sizeof(video->video.name),
+ "OMAP3 ISP %s %s", name, direction);
+ video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.release = video_device_release_empty;
+ video->video.ioctl_ops = &isp_video_ioctl_ops;
+ video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
+
+ video_set_drvdata(&video->video, video);
+
+ return 0;
+}
+
+void omap3isp_video_cleanup(struct isp_video *video)
+{
+ media_entity_cleanup(&video->video.entity);
+ mutex_destroy(&video->queue_lock);
+ mutex_destroy(&video->stream_lock);
+ mutex_destroy(&video->mutex);
+}
+
+int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
+{
+ int ret;
+
+ video->video.v4l2_dev = vdev;
+
+ ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ dev_err(video->isp->dev,
+ "%s: could not register video device (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+void omap3isp_video_unregister(struct isp_video *video)
+{
+ if (video_is_registered(&video->video))
+ video_unregister_device(&video->video);
+}
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
new file mode 100644
index 000000000..f6a2082b4
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -0,0 +1,210 @@
+/*
+ * ispvideo.h
+ *
+ * TI OMAP3 ISP - Generic video node
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP3_ISP_VIDEO_H
+#define OMAP3_ISP_VIDEO_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+
+#define ISP_VIDEO_DRIVER_NAME "ispvideo"
+#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
+
+struct isp_device;
+struct isp_video;
+struct v4l2_mbus_framefmt;
+struct v4l2_pix_format;
+
+/*
+ * struct isp_format_info - ISP media bus format information
+ * @code: V4L2 media bus format code
+ * @truncated: V4L2 media bus format code for the same format truncated to 10
+ * bits. Identical to @code if the format is 10 bits wide or less.
+ * @uncompressed: V4L2 media bus format code for the corresponding uncompressed
+ * format. Identical to @code if the format is not DPCM compressed.
+ * @flavor: V4L2 media bus format code for the same pixel layout but
+ * shifted to be 8 bits per pixel. =0 if format is not shiftable.
+ * @pixelformat: V4L2 pixel format FCC identifier
+ * @width: Bits per pixel (when transferred over a bus)
+ * @bpp: Bytes per pixel (when stored in memory)
+ */
+struct isp_format_info {
+ u32 code;
+ u32 truncated;
+ u32 uncompressed;
+ u32 flavor;
+ u32 pixelformat;
+ unsigned int width;
+ unsigned int bpp;
+};
+
+enum isp_pipeline_stream_state {
+ ISP_PIPELINE_STREAM_STOPPED = 0,
+ ISP_PIPELINE_STREAM_CONTINUOUS = 1,
+ ISP_PIPELINE_STREAM_SINGLESHOT = 2,
+};
+
+enum isp_pipeline_state {
+ /* The stream has been started on the input video node. */
+ ISP_PIPELINE_STREAM_INPUT = 1,
+ /* The stream has been started on the output video node. */
+ ISP_PIPELINE_STREAM_OUTPUT = 2,
+ /* At least one buffer is queued on the input video node. */
+ ISP_PIPELINE_QUEUE_INPUT = 4,
+ /* At least one buffer is queued on the output video node. */
+ ISP_PIPELINE_QUEUE_OUTPUT = 8,
+ /* The input entity is idle, ready to be started. */
+ ISP_PIPELINE_IDLE_INPUT = 16,
+ /* The output entity is idle, ready to be started. */
+ ISP_PIPELINE_IDLE_OUTPUT = 32,
+ /* The pipeline is currently streaming. */
+ ISP_PIPELINE_STREAM = 64,
+};
+
+/*
+ * struct isp_pipeline - An ISP hardware pipeline
+ * @field: The field being processed by the pipeline
+ * @error: A hardware error occurred during capture
+ * @ent_enum: Entities in the pipeline
+ */
+struct isp_pipeline {
+ struct media_pipeline pipe;
+ spinlock_t lock; /* Pipeline state and queue flags */
+ unsigned int state;
+ enum isp_pipeline_stream_state stream_state;
+ struct isp_video *input;
+ struct isp_video *output;
+ struct media_entity_enum ent_enum;
+ unsigned long l3_ick;
+ unsigned int max_rate;
+ enum v4l2_field field;
+ atomic_t frame_number;
+ bool do_propagation; /* of frame number */
+ bool error;
+ struct v4l2_fract max_timeperframe;
+ struct v4l2_subdev *external;
+ unsigned int external_rate;
+ unsigned int external_width;
+};
+
+#define to_isp_pipeline(__e) \
+ container_of((__e)->pipe, struct isp_pipeline, pipe)
+
+static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
+{
+ return pipe->state == (ISP_PIPELINE_STREAM_INPUT |
+ ISP_PIPELINE_STREAM_OUTPUT |
+ ISP_PIPELINE_QUEUE_INPUT |
+ ISP_PIPELINE_QUEUE_OUTPUT |
+ ISP_PIPELINE_IDLE_INPUT |
+ ISP_PIPELINE_IDLE_OUTPUT);
+}
+
+/**
+ * struct isp_buffer - ISP video buffer
+ * @vb: videobuf2 buffer
+ * @irqlist: List head for insertion into IRQ queue
+ * @dma: DMA address
+ */
+struct isp_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head irqlist;
+ dma_addr_t dma;
+};
+
+#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, vb)
+
+enum isp_video_dmaqueue_flags {
+ /* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */
+ ISP_VIDEO_DMAQUEUE_UNDERRUN = (1 << 0),
+ /* Set when queuing buffer to an empty DMA queue */
+ ISP_VIDEO_DMAQUEUE_QUEUED = (1 << 1),
+};
+
+#define isp_video_dmaqueue_flags_clr(video) \
+ ({ (video)->dmaqueue_flags = 0; })
+
+/*
+ * struct isp_video_operations - ISP video operations
+ * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
+ * if there was no buffer previously queued.
+ */
+struct isp_video_operations {
+ int (*queue)(struct isp_video *video, struct isp_buffer *buffer);
+};
+
+struct isp_video {
+ struct video_device video;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+
+ struct mutex mutex; /* format and crop settings */
+ atomic_t active;
+
+ struct isp_device *isp;
+
+ unsigned int capture_mem;
+ unsigned int bpl_alignment; /* alignment value */
+ unsigned int bpl_zero_padding; /* whether the alignment is optional */
+ unsigned int bpl_max; /* maximum bytes per line value */
+ unsigned int bpl_value; /* bytes per line value */
+ unsigned int bpl_padding; /* padding at end of line */
+
+ /* Pipeline state */
+ struct isp_pipeline pipe;
+ struct mutex stream_lock; /* pipeline and stream states */
+ bool error;
+
+ /* Video buffers queue */
+ struct vb2_queue *queue;
+ struct mutex queue_lock; /* protects the queue */
+ spinlock_t irqlock; /* protects dmaqueue */
+ struct list_head dmaqueue;
+ enum isp_video_dmaqueue_flags dmaqueue_flags;
+
+ const struct isp_video_operations *ops;
+};
+
+#define to_isp_video(vdev) container_of(vdev, struct isp_video, video)
+
+struct isp_video_fh {
+ struct v4l2_fh vfh;
+ struct isp_video *video;
+ struct vb2_queue queue;
+ struct v4l2_format format;
+ struct v4l2_fract timeperframe;
+};
+
+#define to_isp_video_fh(fh) container_of(fh, struct isp_video_fh, vfh)
+#define isp_video_queue_to_isp_video_fh(q) \
+ container_of(q, struct isp_video_fh, queue)
+
+int omap3isp_video_init(struct isp_video *video, const char *name);
+void omap3isp_video_cleanup(struct isp_video *video);
+int omap3isp_video_register(struct isp_video *video,
+ struct v4l2_device *vdev);
+void omap3isp_video_unregister(struct isp_video *video);
+struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video);
+void omap3isp_video_cancel_stream(struct isp_video *video);
+void omap3isp_video_resume(struct isp_video *video, int continuous);
+struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
+
+const struct isp_format_info *
+omap3isp_video_format_info(u32 code);
+
+#endif /* OMAP3_ISP_VIDEO_H */
diff --git a/drivers/media/platform/omap3isp/luma_enhance_table.h b/drivers/media/platform/omap3isp/luma_enhance_table.h
new file mode 100644
index 000000000..81c5b1566
--- /dev/null
+++ b/drivers/media/platform/omap3isp/luma_enhance_table.h
@@ -0,0 +1,32 @@
+/*
+ * luma_enhance_table.h
+ *
+ * TI OMAP3 ISP - Luminance enhancement table
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552,
+1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552,
+1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552,
+1047552, 1047552, 1047552, 1047552, 1048575, 1047551, 1046527, 1045503,
+1044479, 1043455, 1042431, 1041407, 1040383, 1039359, 1038335, 1037311,
+1036287, 1035263, 1034239, 1033215, 1032191, 1031167, 1030143, 1028096,
+1028096, 1028096, 1028096, 1028096, 1028096, 1028096, 1028096, 1028096,
+1028096, 1028100, 1032196, 1036292, 1040388, 1044484, 0, 0,
+ 0, 5, 5125, 10245, 15365, 20485, 25605, 30720,
+ 30720, 30720, 30720, 30720, 30720, 30720, 30720, 30720,
+ 30720, 30720, 31743, 30719, 29695, 28671, 27647, 26623,
+ 25599, 24575, 23551, 22527, 21503, 20479, 19455, 18431,
+ 17407, 16383, 15359, 14335, 13311, 12287, 11263, 10239,
+ 9215, 8191, 7167, 6143, 5119, 4095, 3071, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024
diff --git a/drivers/media/platform/omap3isp/noise_filter_table.h b/drivers/media/platform/omap3isp/noise_filter_table.h
new file mode 100644
index 000000000..5073f9847
--- /dev/null
+++ b/drivers/media/platform/omap3isp/noise_filter_table.h
@@ -0,0 +1,20 @@
+/*
+ * noise_filter_table.h
+ *
+ * TI OMAP3 ISP - Noise filter table
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31
diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h
new file mode 100644
index 000000000..9fb4d5bce
--- /dev/null
+++ b/drivers/media/platform/omap3isp/omap3isp.h
@@ -0,0 +1,138 @@
+/*
+ * omap3isp.h
+ *
+ * TI OMAP3 ISP - Bus Configuration
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __OMAP3ISP_H__
+#define __OMAP3ISP_H__
+
+enum isp_interface_type {
+ ISP_INTERFACE_PARALLEL,
+ ISP_INTERFACE_CSI2A_PHY2,
+ ISP_INTERFACE_CCP2B_PHY1,
+ ISP_INTERFACE_CCP2B_PHY2,
+ ISP_INTERFACE_CSI2C_PHY1,
+};
+
+/**
+ * struct isp_parallel_cfg - Parallel interface configuration
+ * @data_lane_shift: Data lane shifter
+ * 0 - CAMEXT[13:0] -> CAM[13:0]
+ * 2 - CAMEXT[13:2] -> CAM[11:0]
+ * 4 - CAMEXT[13:4] -> CAM[9:0]
+ * 6 - CAMEXT[13:6] -> CAM[7:0]
+ * @clk_pol: Pixel clock polarity
+ * 0 - Sample on rising edge, 1 - Sample on falling edge
+ * @hs_pol: Horizontal synchronization polarity
+ * 0 - Active high, 1 - Active low
+ * @vs_pol: Vertical synchronization polarity
+ * 0 - Active high, 1 - Active low
+ * @fld_pol: Field signal polarity
+ * 0 - Positive, 1 - Negative
+ * @data_pol: Data polarity
+ * 0 - Normal, 1 - One's complement
+ * @bt656: Data contains BT.656 embedded synchronization
+ */
+struct isp_parallel_cfg {
+ unsigned int data_lane_shift:3;
+ unsigned int clk_pol:1;
+ unsigned int hs_pol:1;
+ unsigned int vs_pol:1;
+ unsigned int fld_pol:1;
+ unsigned int data_pol:1;
+ unsigned int bt656:1;
+};
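+
+/* Example (illustrative sketch, not from any real board): how platform code
+ * might fill in a parallel bus configuration. The values are placeholders
+ * chosen to match the field descriptions above, not a recommendation for any
+ * particular sensor.
+ *
+ *	static const struct isp_parallel_cfg example_parallel_cfg = {
+ *		.data_lane_shift = 0,	// CAMEXT[13:0] -> CAM[13:0]
+ *		.clk_pol = 0,		// sample on rising edge
+ *		.hs_pol = 0,		// HS active high
+ *		.vs_pol = 0,		// VS active high
+ *		.bt656 = 0,		// separate sync signals
+ *	};
+ */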
+
+enum {
+ ISP_CCP2_PHY_DATA_CLOCK = 0,
+ ISP_CCP2_PHY_DATA_STROBE = 1,
+};
+
+enum {
+ ISP_CCP2_MODE_MIPI = 0,
+ ISP_CCP2_MODE_CCP2 = 1,
+};
+
+/**
+ * struct isp_csiphy_lane - CCP2/CSI2 lane position and polarity
+ * @pos: position of the lane
+ * @pol: polarity of the lane
+ */
+struct isp_csiphy_lane {
+ u8 pos;
+ u8 pol;
+};
+
+#define ISP_CSIPHY1_NUM_DATA_LANES 1
+#define ISP_CSIPHY2_NUM_DATA_LANES 2
+
+/**
+ * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
+ * @data: Configuration of one or two data lanes
+ * @clk: Clock lane configuration
+ */
+struct isp_csiphy_lanes_cfg {
+ struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
+ struct isp_csiphy_lane clk;
+};
+
+/**
+ * struct isp_ccp2_cfg - CCP2 interface configuration
+ * @strobe_clk_pol: Strobe/clock polarity
+ * 0 - Non Inverted, 1 - Inverted
+ * @crc: Enable the cyclic redundancy check
+ * @ccp2_mode: Enable CCP2 compatibility mode
+ * ISP_CCP2_MODE_MIPI - MIPI-CSI1 mode
+ * ISP_CCP2_MODE_CCP2 - CCP2 mode
+ * @phy_layer: Physical layer selection
+ * ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
+ * ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
+ * @vpclk_div: Video port output clock control
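+ * @vp_clk_pol: Video port output clock polarity
+ * @lanecfg: CCP2/CSI2 lane configuration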
+ */
+struct isp_ccp2_cfg {
+ unsigned int strobe_clk_pol:1;
+ unsigned int crc:1;
+ unsigned int ccp2_mode:1;
+ unsigned int phy_layer:1;
+ unsigned int vpclk_div:2;
+ unsigned int vp_clk_pol:1;
+ struct isp_csiphy_lanes_cfg lanecfg;
+};
+
+/**
+ * struct isp_csi2_cfg - CSI2 interface configuration
+ * @crc: Enable the cyclic redundancy check
+ * @lanecfg: CSI-2 lane configuration
+ * @num_data_lanes: The number of data lanes in use
+ */
+struct isp_csi2_cfg {
+ unsigned crc:1;
+ struct isp_csiphy_lanes_cfg lanecfg;
+ u8 num_data_lanes;
+};
+
+struct isp_bus_cfg {
+ enum isp_interface_type interface;
+ union {
+ struct isp_parallel_cfg parallel;
+ struct isp_ccp2_cfg ccp2;
+ struct isp_csi2_cfg csi2;
+ } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
+};
+
+#endif /* __OMAP3ISP_H__ */
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
new file mode 100644
index 000000000..0281b8e53
--- /dev/null
+++ b/drivers/media/platform/pxa_camera.c
@@ -0,0 +1,2596 @@
+/*
+ * V4L2 Driver for PXA camera host
+ *
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ * Copyright (C) 2016, Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-clk.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+
+#include <media/videobuf2-dma-sg.h>
+
+#include <linux/videodev2.h>
+
+#include <linux/platform_data/media/camera-pxa.h>
+
+#define PXA_CAM_VERSION "0.0.6"
+#define PXA_CAM_DRV_NAME "pxa27x-camera"
+
+#define DEFAULT_WIDTH 640
+#define DEFAULT_HEIGHT 480
+
+/* Camera Interface */
+#define CICR0 0x0000
+#define CICR1 0x0004
+#define CICR2 0x0008
+#define CICR3 0x000C
+#define CICR4 0x0010
+#define CISR 0x0014
+#define CIFR 0x0018
+#define CITOR 0x001C
+#define CIBR0 0x0028
+#define CIBR1 0x0030
+#define CIBR2 0x0038
+
+#define CICR0_DMAEN (1 << 31) /* DMA request enable */
+#define CICR0_PAR_EN (1 << 30) /* Parity enable */
+#define CICR0_SL_CAP_EN (1 << 29) /* Capture enable for slave mode */
+#define CICR0_ENB (1 << 28) /* Camera interface enable */
+#define CICR0_DIS (1 << 27) /* Camera interface disable */
+#define CICR0_SIM (0x7 << 24) /* Sensor interface mode mask */
+#define CICR0_TOM (1 << 9) /* Time-out mask */
+#define CICR0_RDAVM (1 << 8) /* Receive-data-available mask */
+#define CICR0_FEM (1 << 7) /* FIFO-empty mask */
+#define CICR0_EOLM (1 << 6) /* End-of-line mask */
+#define CICR0_PERRM (1 << 5) /* Parity-error mask */
+#define CICR0_QDM (1 << 4) /* Quick-disable mask */
+#define CICR0_CDM (1 << 3) /* Disable-done mask */
+#define CICR0_SOFM (1 << 2) /* Start-of-frame mask */
+#define CICR0_EOFM (1 << 1) /* End-of-frame mask */
+#define CICR0_FOM (1 << 0) /* FIFO-overrun mask */
+
+#define CICR1_TBIT (1 << 31) /* Transparency bit */
+#define CICR1_RGBT_CONV (0x3 << 29) /* RGBT conversion mask */
+#define CICR1_PPL (0x7ff << 15) /* Pixels per line mask */
+#define CICR1_RGB_CONV (0x7 << 12) /* RGB conversion mask */
+#define CICR1_RGB_F (1 << 11) /* RGB format */
+#define CICR1_YCBCR_F (1 << 10) /* YCbCr format */
+#define CICR1_RGB_BPP (0x7 << 7) /* RGB bits per pixel mask */
+#define CICR1_RAW_BPP (0x3 << 5) /* Raw bits per pixel mask */
+#define CICR1_COLOR_SP (0x3 << 3) /* Color space mask */
+#define CICR1_DW (0x7 << 0) /* Data width mask */
+
+#define CICR2_BLW (0xff << 24) /* Beginning-of-line pixel clock
+ wait count mask */
+#define CICR2_ELW (0xff << 16) /* End-of-line pixel clock
+ wait count mask */
+#define CICR2_HSW (0x3f << 10) /* Horizontal sync pulse width mask */
+#define CICR2_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
+ wait count mask */
+#define CICR2_FSW (0x7 << 0) /* Frame stabilization
+ wait count mask */
+
+#define CICR3_BFW (0xff << 24) /* Beginning-of-frame line clock
+ wait count mask */
+#define CICR3_EFW (0xff << 16) /* End-of-frame line clock
+ wait count mask */
+#define CICR3_VSW (0x3f << 10) /* Vertical sync pulse width mask */
+#define CICR3_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
+ wait count mask */
+#define CICR3_LPF (0x7ff << 0) /* Lines per frame mask */
+
+#define CICR4_MCLK_DLY (0x3 << 24) /* MCLK Data Capture Delay mask */
+#define CICR4_PCLK_EN (1 << 23) /* Pixel clock enable */
+#define CICR4_PCP (1 << 22) /* Pixel clock polarity */
+#define CICR4_HSP (1 << 21) /* Horizontal sync polarity */
+#define CICR4_VSP (1 << 20) /* Vertical sync polarity */
+#define CICR4_MCLK_EN (1 << 19) /* MCLK enable */
+#define CICR4_FR_RATE (0x7 << 8) /* Frame rate mask */
+#define CICR4_DIV (0xff << 0) /* Clock divisor mask */
+
+#define CISR_FTO (1 << 15) /* FIFO time-out */
+#define CISR_RDAV_2 (1 << 14) /* Channel 2 receive data available */
+#define CISR_RDAV_1 (1 << 13) /* Channel 1 receive data available */
+#define CISR_RDAV_0 (1 << 12) /* Channel 0 receive data available */
+#define CISR_FEMPTY_2 (1 << 11) /* Channel 2 FIFO empty */
+#define CISR_FEMPTY_1 (1 << 10) /* Channel 1 FIFO empty */
+#define CISR_FEMPTY_0 (1 << 9) /* Channel 0 FIFO empty */
+#define CISR_EOL (1 << 8) /* End of line */
+#define CISR_PAR_ERR (1 << 7) /* Parity error */
+#define CISR_CQD (1 << 6) /* Camera interface quick disable */
+#define CISR_CDD (1 << 5) /* Camera interface disable done */
+#define CISR_SOF (1 << 4) /* Start of frame */
+#define CISR_EOF (1 << 3) /* End of frame */
+#define CISR_IFO_2 (1 << 2) /* FIFO overrun for Channel 2 */
+#define CISR_IFO_1 (1 << 1) /* FIFO overrun for Channel 1 */
+#define CISR_IFO_0 (1 << 0) /* FIFO overrun for Channel 0 */
+
+#define CIFR_FLVL2 (0x7f << 23) /* FIFO 2 level mask */
+#define CIFR_FLVL1 (0x7f << 16) /* FIFO 1 level mask */
+#define CIFR_FLVL0 (0xff << 8) /* FIFO 0 level mask */
+#define CIFR_THL_0 (0x3 << 4) /* Threshold Level for Channel 0 FIFO */
+#define CIFR_RESET_F (1 << 3) /* Reset input FIFOs */
+#define CIFR_FEN2 (1 << 2) /* FIFO enable for channel 2 */
+#define CIFR_FEN1 (1 << 1) /* FIFO enable for channel 1 */
+#define CIFR_FEN0 (1 << 0) /* FIFO enable for channel 0 */
+
+#define CICR0_SIM_MP (0 << 24)
+#define CICR0_SIM_SP (1 << 24)
+#define CICR0_SIM_MS (2 << 24)
+#define CICR0_SIM_EP (3 << 24)
+#define CICR0_SIM_ES (4 << 24)
+
+#define CICR1_DW_VAL(x) ((x) & CICR1_DW) /* Data bus width */
+#define CICR1_PPL_VAL(x) (((x) << 15) & CICR1_PPL) /* Pixels per line */
+#define CICR1_COLOR_SP_VAL(x) (((x) << 3) & CICR1_COLOR_SP) /* color space */
+#define CICR1_RGB_BPP_VAL(x) (((x) << 7) & CICR1_RGB_BPP) /* bpp for rgb */
+#define CICR1_RGBT_CONV_VAL(x) (((x) << 29) & CICR1_RGBT_CONV) /* rgbt conv */
+
+#define CICR2_BLW_VAL(x) (((x) << 24) & CICR2_BLW) /* Beginning-of-line pixel clock wait count */
+#define CICR2_ELW_VAL(x) (((x) << 16) & CICR2_ELW) /* End-of-line pixel clock wait count */
+#define CICR2_HSW_VAL(x) (((x) << 10) & CICR2_HSW) /* Horizontal sync pulse width */
+#define CICR2_BFPW_VAL(x) (((x) << 3) & CICR2_BFPW) /* Beginning-of-frame pixel clock wait count */
+#define CICR2_FSW_VAL(x) (((x) << 0) & CICR2_FSW) /* Frame stabilization wait count */
+
+#define CICR3_BFW_VAL(x) (((x) << 24) & CICR3_BFW) /* Beginning-of-frame line clock wait count */
+#define CICR3_EFW_VAL(x) (((x) << 16) & CICR3_EFW) /* End-of-frame line clock wait count */
+#define CICR3_VSW_VAL(x) (((x) << 11) & CICR3_VSW) /* Vertical sync pulse width */
+#define CICR3_LPF_VAL(x) (((x) << 0) & CICR3_LPF) /* Lines per frame */
+
+#define CICR0_IRQ_MASK (CICR0_TOM | CICR0_RDAVM | CICR0_FEM | CICR0_EOLM | \
+ CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \
+ CICR0_EOFM | CICR0_FOM)
+
+#define sensor_call(cam, o, f, args...) \
+ v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+/*
+ * Format handling
+ */
+
+/**
+ * enum pxa_mbus_packing - data packing types on the media-bus
+ * @PXA_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM, one
+ * sample represents one pixel
+ * @PXA_MBUS_PACKING_2X8_PADHI: 16 bits transferred in 2 8-bit samples; in the
+ * possibly incomplete byte, the high bits are padding
+ * @PXA_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended
+ * to 16 bits
+ */
+enum pxa_mbus_packing {
+ PXA_MBUS_PACKING_NONE,
+ PXA_MBUS_PACKING_2X8_PADHI,
+ PXA_MBUS_PACKING_EXTEND16,
+};
+
+/**
+ * enum pxa_mbus_order - sample order on the media bus
+ * @PXA_MBUS_ORDER_LE: least significant sample first
+ * @PXA_MBUS_ORDER_BE: most significant sample first
+ */
+enum pxa_mbus_order {
+ PXA_MBUS_ORDER_LE,
+ PXA_MBUS_ORDER_BE,
+};
+
+/**
+ * enum pxa_mbus_layout - planes layout in memory
+ * @PXA_MBUS_LAYOUT_PACKED: color components packed
+ * @PXA_MBUS_LAYOUT_PLANAR_2Y_U_V: YUV components stored in 3 planes (4:2:2)
+ * @PXA_MBUS_LAYOUT_PLANAR_2Y_C: YUV components stored in a luma and a
+ * chroma plane (C plane is half the size
+ * of Y plane)
+ * @PXA_MBUS_LAYOUT_PLANAR_Y_C: YUV components stored in a luma and a
+ * chroma plane (C plane is the same size
+ * as Y plane)
+ */
+enum pxa_mbus_layout {
+ PXA_MBUS_LAYOUT_PACKED = 0,
+ PXA_MBUS_LAYOUT_PLANAR_2Y_U_V,
+ PXA_MBUS_LAYOUT_PLANAR_2Y_C,
+ PXA_MBUS_LAYOUT_PLANAR_Y_C,
+};
+
+/**
+ * struct pxa_mbus_pixelfmt - Data format on the media bus
+ * @name: Name of the format
+ * @fourcc: Fourcc code that is obtained when the data is
+ * stored in memory according to @packing, @order and @layout
+ * @packing: Type of sample packing that has to be used
+ * @order: Sample order when storing in memory
+ * @layout: Planes layout in memory
+ * @bits_per_sample: How many bits the bridge has to sample
+ */
+struct pxa_mbus_pixelfmt {
+ const char *name;
+ u32 fourcc;
+ enum pxa_mbus_packing packing;
+ enum pxa_mbus_order order;
+ enum pxa_mbus_layout layout;
+ u8 bits_per_sample;
+};
+
+/**
+ * struct pxa_mbus_lookup - Lookup FOURCC IDs by mediabus codes for pass-through
+ * @code: mediabus pixel-code
+ * @fmt: pixel format description
+ */
+struct pxa_mbus_lookup {
+ u32 code;
+ struct pxa_mbus_pixelfmt fmt;
+};
+
+static const struct pxa_mbus_lookup mbus_fmt[] = {
+{
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .name = "Bayer 8 BGGR",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .name = "Bayer 8 GBRG",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .name = "Bayer 8 GRBG",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .name = "Bayer 8 RGGB",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .name = "Grey",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .name = "Grey 10bit",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .name = "RGB444",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .name = "Bayer 10 BGGR DPCM 8",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .name = "Bayer 10 GBRG",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .name = "Bayer 10 GRBG",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .name = "Bayer 10 RGGB",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .name = "Bayer 12 BGGR",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .name = "Bayer 12 GBRG",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .name = "Bayer 12 GRBG",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .name = "Bayer 12 RGGB",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+},
+};
+
+static s32 pxa_mbus_bytes_per_line(u32 width, const struct pxa_mbus_pixelfmt *mf)
+{
+ if (mf->layout != PXA_MBUS_LAYOUT_PACKED)
+ return width * mf->bits_per_sample / 8;
+
+ switch (mf->packing) {
+ case PXA_MBUS_PACKING_NONE:
+ return width * mf->bits_per_sample / 8;
+ case PXA_MBUS_PACKING_2X8_PADHI:
+ case PXA_MBUS_PACKING_EXTEND16:
+ return width * 2;
+ }
+ return -EINVAL;
+}
+
+static s32 pxa_mbus_image_size(const struct pxa_mbus_pixelfmt *mf,
+ u32 bytes_per_line, u32 height)
+{
+ if (mf->layout == PXA_MBUS_LAYOUT_PACKED)
+ return bytes_per_line * height;
+
+ switch (mf->packing) {
+ case PXA_MBUS_PACKING_2X8_PADHI:
+ return bytes_per_line * height * 2;
+ default:
+ return -EINVAL;
+ }
+}
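+
+/* Worked example (illustrative): a 640x480 frame carried as
+ * MEDIA_BUS_FMT_YUYV8_2X8 (8 bits per sample, PXA_MBUS_PACKING_2X8_PADHI,
+ * packed layout) yields pxa_mbus_bytes_per_line(640, mf) = 640 * 2 = 1280
+ * bytes and pxa_mbus_image_size(mf, 1280, 480) = 1280 * 480 = 614400 bytes.
+ */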
+
+static const struct pxa_mbus_pixelfmt *pxa_mbus_find_fmtdesc(
+ u32 code,
+ const struct pxa_mbus_lookup *lookup,
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (lookup[i].code == code)
+ return &lookup[i].fmt;
+
+ return NULL;
+}
+
+static const struct pxa_mbus_pixelfmt *pxa_mbus_get_fmtdesc(
+ u32 code)
+{
+ return pxa_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
+}
+
+static unsigned int pxa_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
+ unsigned int flags)
+{
+ unsigned long common_flags;
+ bool hsync = true, vsync = true, pclk, data, mode;
+ bool mipi_lanes, mipi_clock;
+
+ common_flags = cfg->flags & flags;
+
+ switch (cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ /* fall through */
+ case V4L2_MBUS_BT656:
+ pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
+ mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
+ return (!hsync || !vsync || !pclk || !data || !mode) ?
+ 0 : common_flags;
+ case V4L2_MBUS_CSI2:
+ mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
+ mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
+ return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * struct pxa_camera_format_xlate - match between host and sensor formats
+ * @code: code of a sensor provided format
+ * @host_fmt: host format after host translation from code
+ *
+ * Host and sensor translation structure. Used in the table of host and sensor
+ * format matches in pxa_camera_device. A host can override the generic list
+ * generation by implementing get_formats(), and use it for format checks and
+ * format setup.
+ */
+struct pxa_camera_format_xlate {
+ u32 code;
+ const struct pxa_mbus_pixelfmt *host_fmt;
+};
+
+/*
+ * Structures
+ */
+enum pxa_camera_active_dma {
+ DMA_Y = 0x1,
+ DMA_U = 0x2,
+ DMA_V = 0x4,
+};
+
+/* buffer for one video frame */
+struct pxa_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head queue;
+ u32 code;
+ int nb_planes;
+ /* our descriptor lists for Y, U and V channels */
+ struct dma_async_tx_descriptor *descs[3];
+ dma_cookie_t cookie[3];
+ struct scatterlist *sg[3];
+ int sg_len[3];
+ size_t plane_sizes[3];
+ int inwork;
+ enum pxa_camera_active_dma active_dma;
+};
+
+struct pxa_camera_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_async_notifier notifier;
+ struct vb2_queue vb2_vq;
+ struct v4l2_subdev *sensor;
+ struct pxa_camera_format_xlate *user_formats;
+ const struct pxa_camera_format_xlate *current_fmt;
+ struct v4l2_pix_format current_pix;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_subdev *asds[1];
+
+ /*
+ * PXA27x is only supposed to handle one camera on its Quick Capture
+ * interface. If anyone ever builds hardware to enable more than
+ * one camera, they will have to modify this driver too
+ */
+ struct clk *clk;
+
+ unsigned int irq;
+ void __iomem *base;
+
+ int channels;
+ struct dma_chan *dma_chans[3];
+
+ struct pxacamera_platform_data *pdata;
+ struct resource *res;
+ unsigned long platform_flags;
+ unsigned long ciclk;
+ unsigned long mclk;
+ u32 mclk_divisor;
+ struct v4l2_clk *mclk_clk;
+ u16 width_flags; /* max 10 bits */
+
+ struct list_head capture;
+
+ spinlock_t lock;
+ struct mutex mlock;
+ unsigned int buf_sequence;
+
+ struct pxa_buffer *active;
+ struct tasklet_struct task_eof;
+
+ u32 save_cicr[5];
+};
+
+struct pxa_cam {
+ unsigned long flags;
+};
+
+static const char *pxa_cam_driver_description = "PXA_Camera";
+
+/*
+ * Format translation functions
+ */
+static const struct pxa_camera_format_xlate
+*pxa_mbus_xlate_by_fourcc(struct pxa_camera_format_xlate *user_formats,
+ unsigned int fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; user_formats[i].code; i++)
+ if (user_formats[i].host_fmt->fourcc == fourcc)
+ return user_formats + i;
+ return NULL;
+}
+
+static struct pxa_camera_format_xlate *pxa_mbus_build_fmts_xlate(
+ struct v4l2_device *v4l2_dev, struct v4l2_subdev *subdev,
+ int (*get_formats)(struct v4l2_device *, unsigned int,
+ struct pxa_camera_format_xlate *xlate))
+{
+ unsigned int i, fmts = 0, raw_fmts = 0;
+ int ret;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct pxa_camera_format_xlate *user_formats;
+
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
+ raw_fmts++;
+ code.index++;
+ }
+
+ /*
+ * First pass - only count formats this host-sensor
+ * configuration can provide
+ */
+ for (i = 0; i < raw_fmts; i++) {
+ ret = get_formats(v4l2_dev, i, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ fmts += ret;
+ }
+
+ if (!fmts)
+ return ERR_PTR(-ENXIO);
+
+ user_formats = kcalloc(fmts + 1, sizeof(*user_formats), GFP_KERNEL);
+ if (!user_formats)
+ return ERR_PTR(-ENOMEM);
+
+ /* Second pass - actually fill data formats */
+ fmts = 0;
+ for (i = 0; i < raw_fmts; i++) {
+ ret = get_formats(v4l2_dev, i, user_formats + fmts);
+ if (ret < 0)
+ goto egfmt;
+ fmts += ret;
+ }
+ user_formats[fmts].code = 0;
+
+ return user_formats;
+egfmt:
+ kfree(user_formats);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Videobuf operations
+ */
+static struct pxa_buffer *vb2_to_pxa_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct pxa_buffer, vbuf);
+}
+
+static struct device *pcdev_to_dev(struct pxa_camera_dev *pcdev)
+{
+ return pcdev->v4l2_dev.dev;
+}
+
+static struct pxa_camera_dev *v4l2_dev_to_pcdev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct pxa_camera_dev, v4l2_dev);
+}
+
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
+ enum pxa_camera_active_dma act_dma);
+
+static void pxa_camera_dma_irq_y(void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+
+ pxa_camera_dma_irq(pcdev, DMA_Y);
+}
+
+static void pxa_camera_dma_irq_u(void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+
+ pxa_camera_dma_irq(pcdev, DMA_U);
+}
+
+static void pxa_camera_dma_irq_v(void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+
+ pxa_camera_dma_irq(pcdev, DMA_V);
+}
+
+/**
+ * pxa_init_dma_channel - init dma descriptors
+ * @pcdev: pxa camera device
+ * @buf: pxa camera buffer
+ * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
+ * @sg: dma scatter list
+ * @sglen: dma scatter list length
+ *
+ * Prepares the pxa dma descriptors to transfer one camera channel.
+ *
+ * Returns 0 on success or -ENOMEM if the DMA descriptor could not be prepared
+ */
+static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf, int channel,
+ struct scatterlist *sg, int sglen)
+{
+ struct dma_chan *dma_chan = pcdev->dma_chans[channel];
+ struct dma_async_tx_descriptor *tx;
+
+ tx = dmaengine_prep_slave_sg(dma_chan, sg, sglen, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
+ if (!tx) {
+ dev_err(pcdev_to_dev(pcdev),
+ "dmaengine_prep_slave_sg failed\n");
+ goto fail;
+ }
+
+ tx->callback_param = pcdev;
+ switch (channel) {
+ case 0:
+ tx->callback = pxa_camera_dma_irq_y;
+ break;
+ case 1:
+ tx->callback = pxa_camera_dma_irq_u;
+ break;
+ case 2:
+ tx->callback = pxa_camera_dma_irq_v;
+ break;
+ }
+
+ buf->descs[channel] = tx;
+ return 0;
+fail:
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s (vb=%p) dma_tx=%p\n",
+ __func__, buf, tx);
+
+ return -ENOMEM;
+}
+
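+/*
+ * Mark which DMA channels must complete before the buffer is considered
+ * done: only the Y channel for packed formats, Y, U and V for the planar
+ * YUV422P format.
+ */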
+static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf)
+{
+ buf->active_dma = DMA_Y;
+ if (buf->nb_planes == 3)
+ buf->active_dma |= DMA_U | DMA_V;
+}
+
+/**
+ * pxa_dma_start_channels - start DMA channel for active buffer
+ * @pcdev: pxa camera device
+ *
+ * Initialize DMA channels to the beginning of the active video buffer, and
+ * start these channels.
+ */
+static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
+{
+ int i;
+
+ for (i = 0; i < pcdev->channels; i++) {
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s (channel=%d)\n", __func__, i);
+ dma_async_issue_pending(pcdev->dma_chans[i]);
+ }
+}
+
+static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
+{
+ int i;
+
+ for (i = 0; i < pcdev->channels; i++) {
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s (channel=%d)\n", __func__, i);
+ dmaengine_terminate_all(pcdev->dma_chans[i]);
+ }
+}
+
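+/*
+ * Append a buffer to the running DMA chain by submitting its prepared
+ * descriptors on every active channel; the returned cookies are used later
+ * in the DMA irq handler to check for completion.
+ */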
+static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf)
+{
+ int i;
+
+ for (i = 0; i < pcdev->channels; i++) {
+ buf->cookie[i] = dmaengine_submit(buf->descs[i]);
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s (channel=%d) : submit vb=%p cookie=%d\n",
+ __func__, i, buf, buf->descs[i]->cookie);
+ }
+}
+
+/**
+ * pxa_camera_start_capture - start video capturing
+ * @pcdev: camera device
+ *
+ * Launch capturing. DMA channels should not be active yet. They should get
+ * activated at the end of frame interrupt, to capture only whole frames, and
+ * never begin the capture of a partial frame.
+ */
+static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
+{
+ unsigned long cicr0;
+
+ dev_dbg(pcdev_to_dev(pcdev), "%s\n", __func__);
+ __raw_writel(__raw_readl(pcdev->base + CISR), pcdev->base + CISR);
+ /* Enable End-Of-Frame Interrupt */
+ cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
+ cicr0 &= ~CICR0_EOFM;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
+{
+ unsigned long cicr0;
+
+ pxa_dma_stop_channels(pcdev);
+
+ cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+
+ pcdev->active = NULL;
+ dev_dbg(pcdev_to_dev(pcdev), "%s\n", __func__);
+}
+
+static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ /* _init is used to debug races, see comment in pxa_camera_reqbufs() */
+ list_del_init(&buf->queue);
+ vb->timestamp = ktime_get_ns();
+ vbuf->sequence = pcdev->buf_sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ dev_dbg(pcdev_to_dev(pcdev), "%s dequeued buffer (buf=0x%p)\n",
+ __func__, buf);
+
+ if (list_empty(&pcdev->capture)) {
+ pxa_camera_stop_capture(pcdev);
+ return;
+ }
+
+ pcdev->active = list_entry(pcdev->capture.next,
+ struct pxa_buffer, queue);
+}
+
+/**
+ * pxa_camera_check_link_miss - check missed DMA linking
+ * @pcdev: camera device
+ * @last_submitted: an opaque DMA cookie for the last submitted transfer
+ * @last_issued: an opaque DMA cookie for the last issued transfer
+ *
+ * The DMA chaining is done while DMA is running. This leaves a tiny temporal
+ * window in which a buffer can be queued on the chain while the chain is
+ * already stopped, so the newly queued tail buffer would never be transferred
+ * by DMA. This function restarts the capture for this corner case, where:
+ * - DADR() == DADDR_STOP
+ * - a videobuffer is queued on the pcdev->capture list
+ *
+ * Please check the "DMA hot chaining timeslice issue" in
+ * Documentation/media/v4l-drivers/pxa_camera.rst
+ *
+ * Context: should only be called within the dma irq handler
+ */
+static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev,
+ dma_cookie_t last_submitted,
+ dma_cookie_t last_issued)
+{
+ bool is_dma_stopped = last_submitted != last_issued;
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s : top queued buffer=%p, is_dma_stopped=%d\n",
+ __func__, pcdev->active, is_dma_stopped);
+
+ if (pcdev->active && is_dma_stopped)
+ pxa_camera_start_capture(pcdev);
+}
+
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
+ enum pxa_camera_active_dma act_dma)
+{
+ struct pxa_buffer *buf, *last_buf;
+ unsigned long flags;
+ u32 camera_status, overrun;
+ int chan;
+ enum dma_status last_status;
+ dma_cookie_t last_issued;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ camera_status = __raw_readl(pcdev->base + CISR);
+ dev_dbg(pcdev_to_dev(pcdev), "camera dma irq, cisr=0x%x dma=%d\n",
+ camera_status, act_dma);
+ overrun = CISR_IFO_0;
+ if (pcdev->channels == 3)
+ overrun |= CISR_IFO_1 | CISR_IFO_2;
+
+ /*
+ * pcdev->active should not be NULL in DMA irq handler.
+ *
+ * But there is one corner case: if capture was stopped due to an
+ * overrun of channel 1, and at that same time channel 2 completed.
+ *
+ * When handling the overrun in DMA irq for channel 1, we'll stop the
+ * capture and restart it (and thus set pcdev->active to NULL). But the
+ * DMA irq handler will already be pending for channel 2. So on entering
+ * the DMA irq handler for channel 2 there will be no active buffer, yet
+ * that is normal.
+ */
+ if (!pcdev->active)
+ goto out;
+
+ buf = pcdev->active;
+ WARN_ON(buf->inwork || list_empty(&buf->queue));
+
+ /*
+ * It's normal if the last frame creates an overrun, as there
+ * are no more DMA descriptors to fetch from QCI fifos
+ */
+ switch (act_dma) {
+ case DMA_U:
+ chan = 1;
+ break;
+ case DMA_V:
+ chan = 2;
+ break;
+ default:
+ chan = 0;
+ break;
+ }
+ last_buf = list_entry(pcdev->capture.prev,
+ struct pxa_buffer, queue);
+ last_status = dma_async_is_tx_complete(pcdev->dma_chans[chan],
+ last_buf->cookie[chan],
+ NULL, &last_issued);
+ if (camera_status & overrun &&
+ last_status != DMA_COMPLETE) {
+ dev_dbg(pcdev_to_dev(pcdev), "FIFO overrun! CISR: %x\n",
+ camera_status);
+ pxa_camera_stop_capture(pcdev);
+ list_for_each_entry(buf, &pcdev->capture, queue)
+ pxa_dma_add_tail_buf(pcdev, buf);
+ pxa_camera_start_capture(pcdev);
+ goto out;
+ }
+ buf->active_dma &= ~act_dma;
+ if (!buf->active_dma) {
+ pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_DONE);
+ pxa_camera_check_link_miss(pcdev, last_buf->cookie[chan],
+ last_issued);
+ }
+
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
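+/*
+ * Compute the CICR4 MCLK divisor so that MCLK never exceeds CICLK / 4, and
+ * record the rate actually generated when the host supplies MCLK.
+ */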
+static u32 mclk_get_divisor(struct platform_device *pdev,
+ struct pxa_camera_dev *pcdev)
+{
+ unsigned long mclk = pcdev->mclk;
+ u32 div;
+ unsigned long lcdclk;
+
+ lcdclk = clk_get_rate(pcdev->clk);
+ pcdev->ciclk = lcdclk;
+
+ /* mclk <= ciclk / 4 (27.4.2) */
+ if (mclk > lcdclk / 4) {
+ mclk = lcdclk / 4;
+ dev_warn(&pdev->dev,
+ "Limiting master clock to %lu\n", mclk);
+ }
+
+ /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
+ div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
+
+ /* If we're not supplying MCLK, leave it at 0 */
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ pcdev->mclk = lcdclk / (2 * (div + 1));
+
+ dev_dbg(&pdev->dev, "LCD clock %luHz, target freq %luHz, divisor %u\n",
+ lcdclk, mclk, div);
+
+ return div;
+}
+
+static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev,
+ unsigned long pclk)
+{
+ /* We want a timeout > 1 pixel time, not ">=" */
+ u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1;
+
+ __raw_writel(ciclk_per_pixel, pcdev->base + CITOR);
+}
+
+static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
+{
+ u32 cicr4 = 0;
+
+ /* disable all interrupts */
+ __raw_writel(0x3ff, pcdev->base + CICR0);
+
+ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+ cicr4 |= CICR4_PCLK_EN;
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ cicr4 |= CICR4_MCLK_EN;
+ if (pcdev->platform_flags & PXA_CAMERA_PCP)
+ cicr4 |= CICR4_PCP;
+ if (pcdev->platform_flags & PXA_CAMERA_HSP)
+ cicr4 |= CICR4_HSP;
+ if (pcdev->platform_flags & PXA_CAMERA_VSP)
+ cicr4 |= CICR4_VSP;
+
+ __raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4);
+
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ /* Initialise the timeout under the assumption pclk = mclk */
+ recalculate_fifo_timeout(pcdev, pcdev->mclk);
+ else
+ /* "Safe default" - 13MHz */
+ recalculate_fifo_timeout(pcdev, 13000000);
+
+ clk_prepare_enable(pcdev->clk);
+}
+
+static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
+{
+ clk_disable_unprepare(pcdev->clk);
+}
+
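+/*
+ * End-of-frame tasklet: reset the QCI FIFOs, make the first queued buffer
+ * the active one and start the DMA channels on a frame boundary, so that
+ * only whole frames are captured.
+ */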
+static void pxa_camera_eof(unsigned long arg)
+{
+ struct pxa_camera_dev *pcdev = (struct pxa_camera_dev *)arg;
+ unsigned long cifr;
+ struct pxa_buffer *buf;
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "Camera interrupt status 0x%x\n",
+ __raw_readl(pcdev->base + CISR));
+
+ /* Reset the FIFOs */
+ cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
+ __raw_writel(cifr, pcdev->base + CIFR);
+
+ pcdev->active = list_first_entry(&pcdev->capture,
+ struct pxa_buffer, queue);
+ buf = pcdev->active;
+ pxa_videobuf_set_actdma(pcdev, buf);
+
+ pxa_dma_start_channels(pcdev);
+}
+
+static irqreturn_t pxa_camera_irq(int irq, void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+ unsigned long status, cicr0;
+
+ status = __raw_readl(pcdev->base + CISR);
+ dev_dbg(pcdev_to_dev(pcdev),
+ "Camera interrupt status 0x%lx\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ __raw_writel(status, pcdev->base + CISR);
+
+ if (status & CISR_EOF) {
+ cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+ tasklet_schedule(&pcdev->task_eof);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int test_platform_param(struct pxa_camera_dev *pcdev,
+ unsigned char buswidth, unsigned long *flags)
+{
+ /*
+ * Platform specified synchronization and pixel clock polarities are
+ * only a recommendation and are only used during probing. The PXA270
+ * quick capture interface supports both.
+ */
+ *flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+ V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
+ /* If requested data width is supported by the platform, use it */
+ if ((1 << (buswidth - 1)) & pcdev->width_flags)
+ return 0;
+
+ return -EINVAL;
+}
+
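+/*
+ * Program the CICR registers for the negotiated bus configuration and pixel
+ * format: data width, signal polarities, colour space, frame geometry and
+ * MCLK divisor.
+ */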
+static void pxa_camera_setup_cicr(struct pxa_camera_dev *pcdev,
+ unsigned long flags, __u32 pixfmt)
+{
+ unsigned long dw, bpp;
+ u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
+ int ret = sensor_call(pcdev, sensor, g_skip_top_lines, &y_skip_top);
+
+ if (ret < 0)
+ y_skip_top = 0;
+
+ /*
+ * The data width is now guaranteed to be one of the three supported values.
+ * We set bits-per-pixel equal to the data width...
+ */
+ switch (pcdev->current_fmt->host_fmt->bits_per_sample) {
+ case 10:
+ dw = 4;
+ bpp = 0x40;
+ break;
+ case 9:
+ dw = 3;
+ bpp = 0x20;
+ break;
+ default:
+ /*
+ * Actually it can only be 8 now,
+ * default is just to silence compiler warnings
+ */
+ case 8:
+ dw = 2;
+ bpp = 0;
+ }
+
+ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+ cicr4 |= CICR4_PCLK_EN;
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ cicr4 |= CICR4_MCLK_EN;
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cicr4 |= CICR4_PCP;
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cicr4 |= CICR4_HSP;
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cicr4 |= CICR4_VSP;
+
+ cicr0 = __raw_readl(pcdev->base + CICR0);
+ if (cicr0 & CICR0_ENB)
+ __raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0);
+
+ cicr1 = CICR1_PPL_VAL(pcdev->current_pix.width - 1) | bpp | dw;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_YUV422P:
+ pcdev->channels = 3;
+ cicr1 |= CICR1_YCBCR_F;
+ /*
+ * Normally the PXA bus expects UYVY as its input format. We allow all
+ * reorderings of the YUV422 format, as no processing is done and the
+ * YUV stream is just passed through without any transformation. Note
+ * that UYVY is the only format that should be used if the PXA
+ * framebuffer Overlay2 is used.
+ */
+ /* fall through */
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ cicr1 |= CICR1_COLOR_SP_VAL(2);
+ break;
+ case V4L2_PIX_FMT_RGB555:
+ cicr1 |= CICR1_RGB_BPP_VAL(1) | CICR1_RGBT_CONV_VAL(2) |
+ CICR1_TBIT | CICR1_COLOR_SP_VAL(1);
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ cicr1 |= CICR1_COLOR_SP_VAL(1) | CICR1_RGB_BPP_VAL(2);
+ break;
+ }
+
+ cicr2 = 0;
+ cicr3 = CICR3_LPF_VAL(pcdev->current_pix.height - 1) |
+ CICR3_BFW_VAL(min((u32)255, y_skip_top));
+ cicr4 |= pcdev->mclk_divisor;
+
+ __raw_writel(cicr1, pcdev->base + CICR1);
+ __raw_writel(cicr2, pcdev->base + CICR2);
+ __raw_writel(cicr3, pcdev->base + CICR3);
+ __raw_writel(cicr4, pcdev->base + CICR4);
+
+ /* CIF interrupts are not used, only DMA */
+ cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+ CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP));
+ cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+/*
+ * Videobuf2 section
+ */
+static void pxa_buffer_cleanup(struct pxa_buffer *buf)
+{
+ int i;
+
+ for (i = 0; i < 3 && buf->descs[i]; i++) {
+ dmaengine_desc_free(buf->descs[i]);
+ kfree(buf->sg[i]);
+ buf->descs[i] = NULL;
+ buf->sg[i] = NULL;
+ buf->sg_len[i] = 0;
+ buf->plane_sizes[i] = 0;
+ }
+ buf->nb_planes = 0;
+}
+
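+/*
+ * Split the single vb2 plane into per-channel scatterlists (Y only, or
+ * Y, U and V for planar capture) and prepare one reusable DMA descriptor
+ * per channel.
+ */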
+static int pxa_buffer_init(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf)
+{
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+ int nb_channels = pcdev->channels;
+ int i, ret = 0;
+ unsigned long size = vb2_plane_size(vb, 0);
+
+ switch (nb_channels) {
+ case 1:
+ buf->plane_sizes[0] = size;
+ break;
+ case 3:
+ buf->plane_sizes[0] = size / 2;
+ buf->plane_sizes[1] = size / 4;
+ buf->plane_sizes[2] = size / 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ buf->nb_planes = nb_channels;
+
+ ret = sg_split(sgt->sgl, sgt->nents, 0, nb_channels,
+ buf->plane_sizes, buf->sg, buf->sg_len, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(pcdev_to_dev(pcdev),
+ "sg_split failed: %d\n", ret);
+ return ret;
+ }
+ for (i = 0; i < nb_channels; i++) {
+ ret = pxa_init_dma_channel(pcdev, buf, i,
+ buf->sg[i], buf->sg_len[i]);
+ if (ret) {
+ pxa_buffer_cleanup(buf);
+ return ret;
+ }
+ }
+ INIT_LIST_HEAD(&buf->queue);
+
+ return ret;
+}
+
+static void pxac_vb2_cleanup(struct vb2_buffer *vb)
+{
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(vb=%p)\n", __func__, vb);
+ pxa_buffer_cleanup(buf);
+}
+
+static void pxac_vb2_queue(struct vb2_buffer *vb)
+{
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(vb=%p) nb_channels=%d size=%lu active=%p\n",
+ __func__, vb, pcdev->channels, vb2_get_plane_payload(vb, 0),
+ pcdev->active);
+
+ list_add_tail(&buf->queue, &pcdev->capture);
+
+ pxa_dma_add_tail_buf(pcdev, buf);
+}
+
+/*
+ * Please check the DMA prepared buffer structure in:
+ * Documentation/media/v4l-drivers/pxa_camera.rst
+ * Please also check pxa_camera_check_link_miss() to understand why modifying
+ * the DMA chain while it is running works anyway.
+ */
+static int pxac_vb2_prepare(struct vb2_buffer *vb)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ int ret = 0;
+#ifdef DEBUG
+ int i;
+#endif
+
+ switch (pcdev->channels) {
+ case 1:
+ case 3:
+ vb2_set_plane_payload(vb, 0, pcdev->current_pix.sizeimage);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s (vb=%p) nb_channels=%d size=%lu\n",
+ __func__, vb, pcdev->channels, vb2_get_plane_payload(vb, 0));
+
+ WARN_ON(!pcdev->current_fmt);
+
+#ifdef DEBUG
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
+ for (i = 0; i < vb->num_planes; i++)
+ memset((void *)vb2_plane_vaddr(vb, i),
+ 0xaa, vb2_get_plane_payload(vb, i));
+#endif
+
+ /*
+ * I think in buf_prepare you only have to protect global data;
+ * the actual buffer is yours.
+ */
+ buf->inwork = 0;
+ pxa_videobuf_set_actdma(pcdev, buf);
+
+ return ret;
+}
+
+static int pxac_vb2_init(struct vb2_buffer *vb)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(nb_channels=%d)\n",
+ __func__, pcdev->channels);
+
+ return pxa_buffer_init(pcdev, buf);
+}
+
+static int pxac_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbufs,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+ int size = pcdev->current_pix.sizeimage;
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(vq=%p nbufs=%d num_planes=%d size=%d)\n",
+ __func__, vq, *nbufs, *num_planes, size);
+ /*
+ * Called from VIDIOC_REQBUFS or in compatibility mode. For the YUV422P
+ * format, even if there are 3 planes Y, U and V, we reply that there is
+ * only one plane, containing Y, U and V data, one after the other.
+ */
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *num_planes = 1;
+ switch (pcdev->channels) {
+ case 1:
+ case 3:
+ sizes[0] = size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!*nbufs)
+ *nbufs = 1;
+
+ return 0;
+}
+
+static int pxac_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+
+ dev_dbg(pcdev_to_dev(pcdev), "%s(count=%d) active=%p\n",
+ __func__, count, pcdev->active);
+
+ pcdev->buf_sequence = 0;
+ if (!pcdev->active)
+ pxa_camera_start_capture(pcdev);
+
+ return 0;
+}
+
+static void pxac_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+ struct pxa_buffer *buf, *tmp;
+
+ dev_dbg(pcdev_to_dev(pcdev), "%s active=%p\n",
+ __func__, pcdev->active);
+ pxa_camera_stop_capture(pcdev);
+
+ list_for_each_entry_safe(buf, tmp, &pcdev->capture, queue)
+ pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops pxac_vb2_ops = {
+ .queue_setup = pxac_vb2_queue_setup,
+ .buf_init = pxac_vb2_init,
+ .buf_prepare = pxac_vb2_prepare,
+ .buf_queue = pxac_vb2_queue,
+ .buf_cleanup = pxac_vb2_cleanup,
+ .start_streaming = pxac_vb2_start_streaming,
+ .stop_streaming = pxac_vb2_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int pxa_camera_init_videobuf2(struct pxa_camera_dev *pcdev)
+{
+ int ret;
+ struct vb2_queue *vq = &pcdev->vb2_vq;
+
+ memset(vq, 0, sizeof(*vq));
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ vq->drv_priv = pcdev;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->buf_struct_size = sizeof(struct pxa_buffer);
+ vq->dev = pcdev->v4l2_dev.dev;
+
+ vq->ops = &pxac_vb2_ops;
+ vq->mem_ops = &vb2_dma_sg_memops;
+ vq->lock = &pcdev->mlock;
+
+ ret = vb2_queue_init(vq);
+ dev_dbg(pcdev_to_dev(pcdev),
+ "vb2_queue_init(vq=%p): %d\n", vq, ret);
+
+ return ret;
+}
+
+/*
+ * Video ioctls section
+ */
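+/*
+ * Negotiate the media bus configuration: intersect the sensor capabilities
+ * with what the QCI supports, resolve ambiguous signal polarities from the
+ * platform flags, push the result to the sensor and program the CICR
+ * registers accordingly.
+ */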
+static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev)
+{
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ u32 pixfmt = pcdev->current_fmt->host_fmt->fourcc;
+ unsigned long bus_flags, common_flags;
+ int ret;
+
+ ret = test_platform_param(pcdev,
+ pcdev->current_fmt->host_fmt->bits_per_sample,
+ &bus_flags);
+ if (ret < 0)
+ return ret;
+
+ ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = pxa_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(pcdev_to_dev(pcdev),
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = bus_flags;
+ }
+
+ pcdev->channels = 1;
+
+ /* Make choices based on platform preferences */
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (pcdev->platform_flags & PXA_CAMERA_HSP)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (pcdev->platform_flags & PXA_CAMERA_VSP)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+ if (pcdev->platform_flags & PXA_CAMERA_PCP)
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+ else
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ }
+
+ cfg.flags = common_flags;
+ ret = sensor_call(pcdev, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(pcdev_to_dev(pcdev),
+ "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ pxa_camera_setup_cicr(pcdev, common_flags, pixfmt);
+
+ return 0;
+}
+
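+/*
+ * Check that the sensor bus configuration is compatible with the QCI for
+ * the given bus width, without programming anything.
+ */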
+static int pxa_camera_try_bus_param(struct pxa_camera_dev *pcdev,
+ unsigned char buswidth)
+{
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long bus_flags, common_flags;
+ int ret = test_platform_param(pcdev, buswidth, &bus_flags);
+
+ if (ret < 0)
+ return ret;
+
+ ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = pxa_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(pcdev_to_dev(pcdev),
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret == -ENOIOCTLCMD) {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static const struct pxa_mbus_pixelfmt pxa_camera_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .name = "Planar YUV422 16 bit",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PLANAR_2Y_U_V,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool pxa_camera_packing_supported(const struct pxa_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == PXA_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == PXA_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == PXA_MBUS_PACKING_EXTEND16);
+}
+
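+/*
+ * Translate one sensor media bus code into host fourcc formats. Called by
+ * pxa_mbus_build_fmts_xlate() in two passes: first with xlate == NULL to
+ * count the formats, then again to fill the translation table.
+ */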
+static int pxa_camera_get_formats(struct v4l2_device *v4l2_dev,
+ unsigned int idx,
+ struct pxa_camera_format_xlate *xlate)
+{
+ struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(v4l2_dev);
+ int formats = 0, ret;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .index = idx,
+ };
+ const struct pxa_mbus_pixelfmt *fmt;
+
+ ret = sensor_call(pcdev, pad, enum_mbus_code, NULL, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = pxa_mbus_get_fmtdesc(code.code);
+ if (!fmt) {
+ dev_err(pcdev_to_dev(pcdev),
+ "Invalid format code #%u: %d\n", idx, code.code);
+ return 0;
+ }
+
+ /* This also checks support for the requested bits-per-sample */
+ ret = pxa_camera_try_bus_param(pcdev, fmt->bits_per_sample);
+ if (ret < 0)
+ return 0;
+
+ switch (code.code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &pxa_camera_formats[0];
+ xlate->code = code.code;
+ xlate++;
+ dev_dbg(pcdev_to_dev(pcdev),
+ "Providing format %s using code %d\n",
+ pxa_camera_formats[0].name, code.code);
+ }
+ /* fall through */
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ if (xlate)
+ dev_dbg(pcdev_to_dev(pcdev),
+ "Providing format %s packed\n",
+ fmt->name);
+ break;
+ default:
+ if (!pxa_camera_packing_supported(fmt))
+ return 0;
+ if (xlate)
+ dev_dbg(pcdev_to_dev(pcdev),
+ "Providing format %s in pass-through mode\n",
+ fmt->name);
+ break;
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code.code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+static int pxa_camera_build_formats(struct pxa_camera_dev *pcdev)
+{
+ struct pxa_camera_format_xlate *xlate;
+
+ xlate = pxa_mbus_build_fmts_xlate(&pcdev->v4l2_dev, pcdev->sensor,
+ pxa_camera_get_formats);
+ if (IS_ERR(xlate))
+ return PTR_ERR(xlate);
+
+ pcdev->user_formats = xlate;
+ return 0;
+}
+
+static void pxa_camera_destroy_formats(struct pxa_camera_dev *pcdev)
+{
+ kfree(pcdev->user_formats);
+}
+
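+/* Returns non-zero if the requested frame geometry is not supported */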
+static int pxa_camera_check_frame(u32 width, u32 height)
+{
+ /* limit to pxa hardware capabilities */
+ return height < 32 || height > 2048 || width < 48 || width > 2048 ||
+ (width & 0x01);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int pxac_vidioc_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(file);
+
+ if (reg->reg > CIBR2)
+ return -ERANGE;
+
+ reg->val = __raw_readl(pcdev->base + reg->reg);
+ reg->size = sizeof(__u32);
+ return 0;
+}
+
+static int pxac_vidioc_s_register(struct file *file, void *priv,
+ const struct v4l2_dbg_register *reg)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(file);
+
+ if (reg->reg > CIBR2)
+ return -ERANGE;
+ if (reg->size != sizeof(__u32))
+ return -EINVAL;
+ __raw_writel(reg->val, pcdev->base + reg->reg);
+ return 0;
+}
+#endif
+
+static int pxac_vidioc_enum_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ const struct pxa_mbus_pixelfmt *format;
+ unsigned int idx;
+
+ /* Count the available format translation entries */
+ for (idx = 0; pcdev->user_formats[idx].code; idx++);
+ if (f->index >= idx)
+ return -EINVAL;
+
+ format = pcdev->user_formats[f->index].host_fmt;
+ f->pixelformat = format->fourcc;
+ return 0;
+}
+
+static int pxac_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ pix->width = pcdev->current_pix.width;
+ pix->height = pcdev->current_pix.height;
+ pix->bytesperline = pcdev->current_pix.bytesperline;
+ pix->sizeimage = pcdev->current_pix.sizeimage;
+ pix->field = pcdev->current_pix.field;
+ pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
+ pix->colorspace = pcdev->current_pix.colorspace;
+ dev_dbg(pcdev_to_dev(pcdev), "current_fmt->fourcc: 0x%08x\n",
+ pcdev->current_fmt->host_fmt->fourcc);
+ return 0;
+}
+
+static int pxac_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ const struct pxa_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_mbus_framefmt *mf = &format.format;
+ __u32 pixfmt = pix->pixelformat;
+ int ret;
+
+ xlate = pxa_mbus_xlate_by_fourcc(pcdev->user_formats, pixfmt);
+ if (!xlate) {
+ dev_warn(pcdev_to_dev(pcdev), "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /*
+ * Limit to pxa hardware capabilities. YUV422P planar format requires
+ * the image size to be a multiple of 16 bytes. If not, zeros will be
+ * inserted between Y and U planes, and U and V planes, which violates
+ * the YUV422P standard.
+ */
+ v4l_bound_align_image(&pix->width, 48, 2048, 1,
+ &pix->height, 32, 2048, 0,
+ pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
+
+ v4l2_fill_mbus_format(mf, pix, xlate->code);
+ ret = sensor_call(pcdev, pad, set_fmt, &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pix, mf);
+
+ /* Only progressive video supported so far */
+ switch (mf->field) {
+ case V4L2_FIELD_ANY:
+ case V4L2_FIELD_NONE:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ default:
+ /* TODO: support interlaced at least in pass-through mode */
+ dev_err(pcdev_to_dev(pcdev), "Field type %d unsupported.\n",
+ mf->field);
+ return -EINVAL;
+ }
+
+ ret = pxa_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ pix->bytesperline = ret;
+ ret = pxa_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+ pix->height);
+ if (ret < 0)
+ return ret;
+
+ pix->sizeimage = ret;
+ return 0;
+}
+
+static int pxac_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ const struct pxa_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ unsigned long flags;
+ int ret, is_busy;
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "s_fmt_vid_cap(pix=%dx%d:%x)\n",
+ pix->width, pix->height, pix->pixelformat);
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+ is_busy = pcdev->active || vb2_is_busy(&pcdev->vb2_vq);
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+
+ if (is_busy)
+ return -EBUSY;
+
+ ret = pxac_vidioc_try_fmt_vid_cap(filp, priv, f);
+ if (ret)
+ return ret;
+
+ xlate = pxa_mbus_xlate_by_fourcc(pcdev->user_formats,
+ pix->pixelformat);
+ v4l2_fill_mbus_format(&format.format, pix, xlate->code);
+ ret = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+ if (ret < 0) {
+ dev_warn(pcdev_to_dev(pcdev),
+ "Failed to configure for format %x\n",
+ pix->pixelformat);
+ } else if (pxa_camera_check_frame(pix->width, pix->height)) {
+ dev_warn(pcdev_to_dev(pcdev),
+ "Camera driver produced an unsupported frame %dx%d\n",
+ pix->width, pix->height);
+ return -EINVAL;
+ }
+
+ pcdev->current_fmt = xlate;
+ pcdev->current_pix = *pix;
+
+ ret = pxa_camera_set_bus_param(pcdev);
+ return ret;
+}
+
+static int pxac_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->bus_info, "platform:pxa-camera", sizeof(cap->bus_info));
+ strlcpy(cap->driver, PXA_CAM_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int pxac_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index > 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, "Camera", sizeof(i->name));
+
+ return 0;
+}
+
+static int pxac_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int pxac_vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int pxac_sensor_set_power(struct pxa_camera_dev *pcdev, int on)
+{
+ int ret;
+
+ ret = sensor_call(pcdev, core, s_power, on);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret) {
+ dev_warn(pcdev_to_dev(pcdev),
+ "Failed to put subdevice in %s mode: %d\n",
+ on ? "normal operation" : "power saving", ret);
+ }
+
+ return ret;
+}
+
+static int pxac_fops_camera_open(struct file *filp)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ int ret;
+
+ mutex_lock(&pcdev->mlock);
+ ret = v4l2_fh_open(filp);
+ if (ret < 0)
+ goto out;
+
+ if (!v4l2_fh_is_singular_file(filp))
+ goto out;
+
+ ret = pxac_sensor_set_power(pcdev, 1);
+ if (ret)
+ v4l2_fh_release(filp);
+out:
+ mutex_unlock(&pcdev->mlock);
+ return ret;
+}
+
+static int pxac_fops_camera_release(struct file *filp)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ int ret;
+ bool fh_singular;
+
+ mutex_lock(&pcdev->mlock);
+
+ fh_singular = v4l2_fh_is_singular_file(filp);
+
+ ret = _vb2_fop_release(filp, NULL);
+
+ if (fh_singular)
+ ret = pxac_sensor_set_power(pcdev, 0);
+
+ mutex_unlock(&pcdev->mlock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations pxa_camera_fops = {
+ .owner = THIS_MODULE,
+ .open = pxac_fops_camera_open,
+ .release = pxac_fops_camera_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops pxa_camera_ioctl_ops = {
+ .vidioc_querycap = pxac_vidioc_querycap,
+
+ .vidioc_enum_input = pxac_vidioc_enum_input,
+ .vidioc_g_input = pxac_vidioc_g_input,
+ .vidioc_s_input = pxac_vidioc_s_input,
+
+ .vidioc_enum_fmt_vid_cap = pxac_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = pxac_vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = pxac_vidioc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = pxac_vidioc_try_fmt_vid_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = pxac_vidioc_g_register,
+ .vidioc_s_register = pxac_vidioc_s_register,
+#endif
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_clk_ops pxa_camera_mclk_ops = {
+};
+
+static const struct video_device pxa_camera_videodev_template = {
+ .name = "pxa-camera",
+ .minor = -1,
+ .fops = &pxa_camera_fops,
+ .ioctl_ops = &pxa_camera_ioctl_ops,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
+};
+
+static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int err;
+ struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
+ struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(v4l2_dev);
+ struct video_device *vdev = &pcdev->vdev;
+ struct v4l2_pix_format *pix = &pcdev->current_pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &format.format;
+
+ dev_info(pcdev_to_dev(pcdev), "%s(): trying to bind a device\n",
+ __func__);
+ mutex_lock(&pcdev->mlock);
+ *vdev = pxa_camera_videodev_template;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->lock = &pcdev->mlock;
+ pcdev->sensor = subdev;
+ pcdev->vdev.queue = &pcdev->vb2_vq;
+ pcdev->vdev.v4l2_dev = &pcdev->v4l2_dev;
+ pcdev->vdev.ctrl_handler = subdev->ctrl_handler;
+ video_set_drvdata(&pcdev->vdev, pcdev);
+
+ err = pxa_camera_build_formats(pcdev);
+ if (err) {
+ dev_err(pcdev_to_dev(pcdev), "building formats failed: %d\n",
+ err);
+ goto out;
+ }
+
+ pcdev->current_fmt = pcdev->user_formats;
+ pix->field = V4L2_FIELD_NONE;
+ pix->width = DEFAULT_WIDTH;
+ pix->height = DEFAULT_HEIGHT;
+ pix->bytesperline =
+ pxa_mbus_bytes_per_line(pix->width,
+ pcdev->current_fmt->host_fmt);
+ pix->sizeimage =
+ pxa_mbus_image_size(pcdev->current_fmt->host_fmt,
+ pix->bytesperline, pix->height);
+ pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
+ v4l2_fill_mbus_format(mf, pix, pcdev->current_fmt->code);
+
+ err = pxac_sensor_set_power(pcdev, 1);
+ if (err)
+ goto out;
+
+ err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+ if (err)
+ goto out_sensor_poweroff;
+
+ v4l2_fill_pix_format(pix, mf);
+ pr_info("%s(): colorspace=0x%x pixfmt=0x%x\n",
+ __func__, pix->colorspace, pix->pixelformat);
+
+ err = pxa_camera_init_videobuf2(pcdev);
+ if (err)
+ goto out_sensor_poweroff;
+
+ err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1);
+ if (err) {
+ v4l2_err(v4l2_dev, "register video device failed: %d\n", err);
+ pcdev->sensor = NULL;
+ } else {
+ dev_info(pcdev_to_dev(pcdev),
+ "PXA Camera driver attached to camera %s\n",
+ subdev->name);
+ }
+
+out_sensor_poweroff:
+ err = pxac_sensor_set_power(pcdev, 0);
+out:
+ mutex_unlock(&pcdev->mlock);
+ return err;
+}
+
+static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(notifier->v4l2_dev);
+
+ mutex_lock(&pcdev->mlock);
+ dev_info(pcdev_to_dev(pcdev),
+ "PXA Camera driver detached from camera %s\n",
+ subdev->name);
+
+ /* disable capture, disable interrupts */
+ __raw_writel(0x3ff, pcdev->base + CICR0);
+
+ /* Stop DMA engine */
+ pxa_dma_stop_channels(pcdev);
+
+ pxa_camera_destroy_formats(pcdev);
+
+ if (pcdev->mclk_clk) {
+ v4l2_clk_unregister(pcdev->mclk_clk);
+ pcdev->mclk_clk = NULL;
+ }
+
+ video_unregister_device(&pcdev->vdev);
+ pcdev->sensor = NULL;
+
+ mutex_unlock(&pcdev->mlock);
+}
+
+static const struct v4l2_async_notifier_operations pxa_camera_sensor_ops = {
+ .bound = pxa_camera_sensor_bound,
+ .unbind = pxa_camera_sensor_unbind,
+};
+
+/*
+ * Driver probe, remove, suspend and resume operations
+ */
+static int pxa_camera_suspend(struct device *dev)
+{
+ struct pxa_camera_dev *pcdev = dev_get_drvdata(dev);
+ int i = 0, ret = 0;
+
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
+
+ if (pcdev->sensor)
+ ret = pxac_sensor_set_power(pcdev, 0);
+
+ return ret;
+}
+
+static int pxa_camera_resume(struct device *dev)
+{
+ struct pxa_camera_dev *pcdev = dev_get_drvdata(dev);
+ int i = 0, ret = 0;
+
+ __raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
+
+ if (pcdev->sensor) {
+ ret = pxac_sensor_set_power(pcdev, 1);
+ }
+
+ /* Restart frame capture if active buffer exists */
+ if (!ret && pcdev->active)
+ pxa_camera_start_capture(pcdev);
+
+ return ret;
+}
+
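+/*
+ * Build the platform flags (MCLK rate, bus width, signal polarities) and the
+ * fwnode async subdev match from the device tree endpoint.
+ */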
+static int pxa_camera_pdata_from_dt(struct device *dev,
+ struct pxa_camera_dev *pcdev,
+ struct v4l2_async_subdev *asd)
+{
+ u32 mclk_rate;
+ struct device_node *remote, *np = dev->of_node;
+ struct v4l2_fwnode_endpoint ep;
+ int err = of_property_read_u32(np, "clock-frequency",
+ &mclk_rate);
+ if (!err) {
+ pcdev->platform_flags |= PXA_CAMERA_MCLK_EN;
+ pcdev->mclk = mclk_rate;
+ }
+
+ np = of_graph_get_next_endpoint(np, NULL);
+ if (!np) {
+ dev_err(dev, "could not find endpoint\n");
+ return -EINVAL;
+ }
+
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+ if (err) {
+ dev_err(dev, "could not parse endpoint\n");
+ goto out;
+ }
+
+ switch (ep.bus.parallel.bus_width) {
+ case 4:
+ pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_4;
+ break;
+ case 5:
+ pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_5;
+ break;
+ case 8:
+ pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_8;
+ break;
+ case 9:
+ pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_9;
+ break;
+ case 10:
+ pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
+ break;
+ default:
+ break;
+ }
+
+ if (ep.bus.parallel.flags & V4L2_MBUS_MASTER)
+ pcdev->platform_flags |= PXA_CAMERA_MASTER;
+ if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ pcdev->platform_flags |= PXA_CAMERA_HSP;
+ if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ pcdev->platform_flags |= PXA_CAMERA_VSP;
+ if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ pcdev->platform_flags |= PXA_CAMERA_PCLK_EN | PXA_CAMERA_PCP;
+ if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
+
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ remote = of_graph_get_remote_port(np);
+ if (remote) {
+ asd->match.fwnode = of_fwnode_handle(remote);
+ of_node_put(remote);
+ } else {
+ dev_notice(dev, "no remote for %pOF\n", np);
+ }
+
+out:
+ of_node_put(np);
+
+ return err;
+}
+
+static int pxa_camera_probe(struct platform_device *pdev)
+{
+ struct pxa_camera_dev *pcdev;
+ struct resource *res;
+ void __iomem *base;
+ struct dma_slave_config config = {
+ .src_addr_width = 0,
+ .src_maxburst = 8,
+ .direction = DMA_DEV_TO_MEM,
+ };
+ char clk_name[V4L2_CLK_NAME_SIZE];
+ int irq;
+ int err = 0, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0)
+ return -ENODEV;
+
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ return -ENOMEM;
+ }
+
+ pcdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pcdev->clk))
+ return PTR_ERR(pcdev->clk);
+
+ pcdev->res = res;
+
+ pcdev->pdata = pdev->dev.platform_data;
+ if (pdev->dev.of_node && !pcdev->pdata) {
+ err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
+ } else {
+ pcdev->platform_flags = pcdev->pdata->flags;
+ pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
+ pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+ pcdev->asd.match.i2c.adapter_id =
+ pcdev->pdata->sensor_i2c_adapter_id;
+ pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address;
+ }
+ if (err < 0)
+ return err;
+
+ if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
+ PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
+ /*
+ * Platform hasn't set available data widths. This is bad.
+ * Warn and use a default.
+ */
+ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available data widths, using default 10 bit\n");
+ pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
+ }
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
+ pcdev->width_flags = 1 << 7;
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9)
+ pcdev->width_flags |= 1 << 8;
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10)
+ pcdev->width_flags |= 1 << 9;
+ if (!pcdev->mclk) {
+ dev_warn(&pdev->dev,
+ "mclk == 0! Please, fix your platform data. Using default 20MHz\n");
+ pcdev->mclk = 20000000;
+ }
+
+ pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev);
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+ mutex_init(&pcdev->mlock);
+
+ /*
+ * Request the regions.
+ */
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pcdev->irq = irq;
+ pcdev->base = base;
+
+ /* request dma */
+ pcdev->dma_chans[0] = dma_request_slave_channel(&pdev->dev, "CI_Y");
+ if (!pcdev->dma_chans[0]) {
+ dev_err(&pdev->dev, "Can't request DMA for Y\n");
+ return -ENODEV;
+ }
+
+ pcdev->dma_chans[1] = dma_request_slave_channel(&pdev->dev, "CI_U");
+ if (!pcdev->dma_chans[1]) {
+ dev_err(&pdev->dev, "Can't request DMA for U\n");
+ err = -ENODEV;
+ goto exit_free_dma_y;
+ }
+
+ pcdev->dma_chans[2] = dma_request_slave_channel(&pdev->dev, "CI_V");
+ if (!pcdev->dma_chans[2]) {
+ dev_err(&pdev->dev, "Can't request DMA for V\n");
+ err = -ENODEV;
+ goto exit_free_dma_u;
+ }
+
+ for (i = 0; i < 3; i++) {
+ config.src_addr = pcdev->res->start + CIBR0 + i * 8;
+ err = dmaengine_slave_config(pcdev->dma_chans[i], &config);
+ if (err < 0) {
+ dev_err(&pdev->dev, "dma slave config failed: %d\n",
+ err);
+ goto exit_free_dma;
+ }
+ }
+
+ /* request irq */
+ err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
+ PXA_CAM_DRV_NAME, pcdev);
+ if (err) {
+ dev_err(&pdev->dev, "Camera interrupt register failed\n");
+ goto exit_free_dma;
+ }
+
+ tasklet_init(&pcdev->task_eof, pxa_camera_eof, (unsigned long)pcdev);
+
+ pxa_camera_activate(pcdev);
+
+ dev_set_drvdata(&pdev->dev, pcdev);
+ err = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+ if (err)
+ goto exit_deactivate;
+
+ pcdev->asds[0] = &pcdev->asd;
+ pcdev->notifier.subdevs = pcdev->asds;
+ pcdev->notifier.num_subdevs = 1;
+ pcdev->notifier.ops = &pxa_camera_sensor_ops;
+
+ if (!of_have_populated_dt())
+ pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+
+ err = pxa_camera_init_videobuf2(pcdev);
+ if (err)
+ goto exit_free_v4l2dev;
+
+ if (pcdev->mclk) {
+ v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+ pcdev->asd.match.i2c.adapter_id,
+ pcdev->asd.match.i2c.address);
+
+ pcdev->mclk_clk = v4l2_clk_register(&pxa_camera_mclk_ops,
+ clk_name, NULL);
+ if (IS_ERR(pcdev->mclk_clk)) {
+ err = PTR_ERR(pcdev->mclk_clk);
+ goto exit_free_v4l2dev;
+ }
+ }
+
+ err = v4l2_async_notifier_register(&pcdev->v4l2_dev, &pcdev->notifier);
+ if (err)
+ goto exit_free_clk;
+
+ return 0;
+exit_free_clk:
+ v4l2_clk_unregister(pcdev->mclk_clk);
+exit_free_v4l2dev:
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+exit_deactivate:
+ pxa_camera_deactivate(pcdev);
+exit_free_dma:
+ dma_release_channel(pcdev->dma_chans[2]);
+exit_free_dma_u:
+ dma_release_channel(pcdev->dma_chans[1]);
+exit_free_dma_y:
+ dma_release_channel(pcdev->dma_chans[0]);
+ return err;
+}
+
+static int pxa_camera_remove(struct platform_device *pdev)
+{
+ struct pxa_camera_dev *pcdev = dev_get_drvdata(&pdev->dev);
+
+ pxa_camera_deactivate(pcdev);
+ dma_release_channel(pcdev->dma_chans[0]);
+ dma_release_channel(pcdev->dma_chans[1]);
+ dma_release_channel(pcdev->dma_chans[2]);
+
+ v4l2_async_notifier_unregister(&pcdev->notifier);
+
+ if (pcdev->mclk_clk) {
+ v4l2_clk_unregister(pcdev->mclk_clk);
+ pcdev->mclk_clk = NULL;
+ }
+
+ v4l2_device_unregister(&pcdev->v4l2_dev);
+
+ dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
+
+ return 0;
+}
+
+static const struct dev_pm_ops pxa_camera_pm = {
+ .suspend = pxa_camera_suspend,
+ .resume = pxa_camera_resume,
+};
+
+static const struct of_device_id pxa_camera_of_match[] = {
+ { .compatible = "marvell,pxa270-qci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pxa_camera_of_match);
+
+static struct platform_driver pxa_camera_driver = {
+ .driver = {
+ .name = PXA_CAM_DRV_NAME,
+ .pm = &pxa_camera_pm,
+ .of_match_table = of_match_ptr(pxa_camera_of_match),
+ },
+ .probe = pxa_camera_probe,
+ .remove = pxa_camera_remove,
+};
+
+module_platform_driver(pxa_camera_driver);
+
+MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PXA_CAM_VERSION);
+MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
new file mode 100644
index 000000000..f5e6e255f
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -0,0 +1,15 @@
+# Makefile for Qualcomm CAMSS driver
+
+qcom-camss-objs += \
+ camss.o \
+ camss-csid.o \
+ camss-csiphy-2ph-1-0.o \
+ camss-csiphy-3ph-1-0.o \
+ camss-csiphy.o \
+ camss-ispif.o \
+ camss-vfe-4-1.o \
+ camss-vfe-4-7.o \
+ camss-vfe.o \
+ camss-video.o \
+
+obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom-camss.o
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
new file mode 100644
index 000000000..a5ae85674
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csid.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-csid.h"
+#include "camss.h"
+
+#define MSM_CSID_NAME "msm_csid"
+
+#define CAMSS_CSID_HW_VERSION 0x0
+#define CAMSS_CSID_CORE_CTRL_0 0x004
+#define CAMSS_CSID_CORE_CTRL_1 0x008
+#define CAMSS_CSID_RST_CMD(v) ((v) == CAMSS_8x16 ? 0x00c : 0x010)
+#define CAMSS_CSID_CID_LUT_VC_n(v, n) \
+ (((v) == CAMSS_8x16 ? 0x010 : 0x014) + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG(v, n) \
+ (((v) == CAMSS_8x16 ? 0x020 : 0x024) + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG_ISPIF_EN BIT(0)
+#define CAMSS_CSID_CID_n_CFG_RDI_EN BIT(1)
+#define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4
+#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_8 (0 << 8)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16 (1 << 8)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB (0 << 9)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_MSB (1 << 9)
+#define CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP (0 << 10)
+#define CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING (1 << 10)
+#define CAMSS_CSID_IRQ_CLEAR_CMD(v) ((v) == CAMSS_8x16 ? 0x060 : 0x064)
+#define CAMSS_CSID_IRQ_MASK(v) ((v) == CAMSS_8x16 ? 0x064 : 0x068)
+#define CAMSS_CSID_IRQ_STATUS(v) ((v) == CAMSS_8x16 ? 0x068 : 0x06c)
+#define CAMSS_CSID_TG_CTRL(v) ((v) == CAMSS_8x16 ? 0x0a0 : 0x0a8)
+#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436
+#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437
+#define CAMSS_CSID_TG_VC_CFG(v) ((v) == CAMSS_8x16 ? 0x0a4 : 0x0ac)
+#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
+#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
+#define CAMSS_CSID_TG_DT_n_CGG_0(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0ac : 0x0b4) + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_1(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0b0 : 0x0b8) + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_2(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0b4 : 0x0bc) + 0xc * (n))
+
+#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
+#define DATA_TYPE_YUV422_8BIT 0x1e
+#define DATA_TYPE_RAW_6BIT 0x28
+#define DATA_TYPE_RAW_8BIT 0x2a
+#define DATA_TYPE_RAW_10BIT 0x2b
+#define DATA_TYPE_RAW_12BIT 0x2c
+#define DATA_TYPE_RAW_14BIT 0x2d
+
+#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0
+#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
+#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
+#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3
+#define DECODE_FORMAT_UNCOMPRESSED_14_BIT 0x8
+
+#define CSID_RESET_TIMEOUT_MS 500
+
+struct csid_format {
+ u32 code;
+ u8 data_type;
+ u8 decode_format;
+ u8 bpp;
+ u8 spp; /* bus samples per pixel */
+};
+
+static const struct csid_format csid_formats_8x16[] = {
+ {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+};
+
+static const struct csid_format csid_formats_8x96[] = {
+ {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+};
+
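+/*
+ * Pick a media bus code from @code: return @req_code if it is present in the
+ * list, or the entry at @index when no specific code is requested (0 if
+ * @index is out of range); fall back to the first entry otherwise.
+ */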
+static u32 csid_find_code(u32 *code, unsigned int n_code,
+ unsigned int index, u32 req_code)
+{
+ int i;
+
+ if (!req_code && (index >= n_code))
+ return 0;
+
+ for (i = 0; i < n_code; i++)
+ if (req_code) {
+ if (req_code == code[i])
+ return req_code;
+ } else {
+ if (i == index)
+ return code[i];
+ }
+
+ return code[0];
+}
+
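+/*
+ * Enumerate the media bus codes available on the source pad for a given sink
+ * code. On 8x16 the source code always matches the sink; on 8x96 the 10-bit
+ * Bayer and greyscale formats can additionally be unpacked to their
+ * 2X8_PADHI_LE variants.
+ */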
+static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
+ unsigned int index, u32 src_req_code)
+{
+ if (csid->camss->version == CAMSS_8x16) {
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ } else if (csid->camss->version == CAMSS_8x96) {
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_Y10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ }
+ } else {
+ return 0;
+ }
+}
+
+static const struct csid_format *csid_get_fmt_entry(
+ const struct csid_format *formats,
+ unsigned int nformat,
+ u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < nformat; i++)
+ if (code == formats[i].code)
+ return &formats[i];
+
+ WARN(1, "Unknown format\n");
+
+ return &formats[0];
+}
+
+/*
+ * csid_isr - CSID module interrupt handler
+ * @irq: Interrupt line
+ * @dev: CSID device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ enum camss_version ver = csid->camss->version;
+ u32 value;
+
+ value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS(ver));
+ writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD(ver));
+
+ if ((value >> 11) & 0x1)
+ complete(&csid->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csid_set_clock_rates - Calculate and set clock rates on CSID module
+ * @csid: CSID device
+ */
+static int csid_set_clock_rates(struct csid_device *csid)
+{
+ struct device *dev = csid->camss->dev;
+ u32 pixel_clock;
+ int i, j;
+ int ret;
+
+ ret = camss_get_pixel_clock(&csid->subdev.entity, &pixel_clock);
+ if (ret)
+ pixel_clock = 0;
+
+ for (i = 0; i < csid->nclocks; i++) {
+ struct camss_clock *clock = &csid->clock[i];
+
+ if (!strcmp(clock->name, "csi0") ||
+ !strcmp(clock->name, "csi1") ||
+ !strcmp(clock->name, "csi2") ||
+ !strcmp(clock->name, "csi3")) {
+ const struct csid_format *f = csid_get_fmt_entry(
+ csid->formats,
+ csid->nformats,
+ csid->fmt[MSM_CSIPHY_PAD_SINK].code);
+ u8 num_lanes = csid->phy.lane_cnt;
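+			/*
+			 * Lower bound for the csiN clock: the per-lane MIPI
+			 * DDR bit clock, i.e. pixel_clock * bpp /
+			 * (2 * num_lanes), divided by 4.
+			 */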
+ u64 min_rate = pixel_clock * f->bpp /
+ (2 * num_lanes * 4);
+ long rate;
+
+ camss_add_clock_margin(&min_rate);
+
+ for (j = 0; j < clock->nfreqs; j++)
+ if (min_rate < clock->freq[j])
+ break;
+
+ if (j == clock->nfreqs) {
+ dev_err(dev,
+ "Pixel clock is too high for CSID\n");
+ return -EINVAL;
+ }
+
+ /* if sensor pixel clock is not available */
+ /* set highest possible CSID clock rate */
+ if (min_rate == 0)
+ j = clock->nfreqs - 1;
+
+ rate = clk_round_rate(clock->clk, clock->freq[j]);
+ if (rate < 0) {
+ dev_err(dev, "clk round rate failed: %ld\n",
+ rate);
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(clock->clk, rate);
+ if (ret < 0) {
+ dev_err(dev, "clk set rate failed: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * csid_reset - Trigger reset on CSID module and wait to complete
+ * @csid: CSID device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+
+ reinit_completion(&csid->reset_complete);
+
+ writel_relaxed(0x7fff, csid->base +
+ CAMSS_CSID_RST_CMD(csid->camss->version));
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(csid->camss->dev, "CSID reset timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * csid_set_power - Power on/off CSID module
+ * @sd: CSID V4L2 subdevice
+ * @on: Requested power state
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct device *dev = csid->camss->dev;
+ int ret;
+
+ if (on) {
+ u32 hw_version;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_enable(csid->vdda);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ ret = csid_set_clock_rates(csid);
+ if (ret < 0) {
+ regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
+ if (ret < 0) {
+ regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ enable_irq(csid->irq);
+
+ ret = csid_reset(csid);
+ if (ret < 0) {
+ disable_irq(csid->irq);
+ camss_disable_clocks(csid->nclocks, csid->clock);
+ regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
+ dev_dbg(dev, "CSID HW Version = 0x%08x\n", hw_version);
+ } else {
+ disable_irq(csid->irq);
+ camss_disable_clocks(csid->nclocks, csid->clock);
+ ret = regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
+ }
+
+ return ret;
+}
+
+/*
+ * csid_set_stream - Enable/disable streaming on CSID module
+ * @sd: CSID V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Main configuration of CSID module is also done here.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct csid_testgen_config *tg = &csid->testgen;
+ enum camss_version ver = csid->camss->version;
+ u32 val;
+
+ if (enable) {
+ u8 vc = 0; /* Virtual Channel 0 */
+ u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */
+ u8 dt, dt_shift, df;
+ int ret;
+
+ ret = v4l2_ctrl_handler_setup(&csid->ctrls);
+ if (ret < 0) {
+ dev_err(csid->camss->dev,
+ "could not sync v4l2 controls: %d\n", ret);
+ return ret;
+ }
+
+ if (!tg->enabled &&
+ !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
+ return -ENOLINK;
+
+ if (tg->enabled) {
+ /* Config Test Generator */
+ struct v4l2_mbus_framefmt *f =
+ &csid->fmt[MSM_CSID_PAD_SRC];
+ const struct csid_format *format = csid_get_fmt_entry(
+ csid->formats, csid->nformats, f->code);
+ u32 num_bytes_per_line =
+ f->width * format->bpp * format->spp / 8;
+ u32 num_lines = f->height;
+
+ /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */
+ /* 1:0 VC */
+ val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) |
+ ((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_VC_CFG(ver));
+
+			/* 28:16 bytes per line, 12:0 num of lines */
+ val = ((num_bytes_per_line & 0x1fff) << 16) |
+ (num_lines & 0x1fff);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_DT_n_CGG_0(ver, 0));
+
+ dt = format->data_type;
+
+ /* 5:0 data type */
+ val = dt;
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_DT_n_CGG_1(ver, 0));
+
+ /* 2:0 output test pattern */
+ val = tg->payload_mode;
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_DT_n_CGG_2(ver, 0));
+
+ df = format->decode_format;
+ } else {
+ struct v4l2_mbus_framefmt *f =
+ &csid->fmt[MSM_CSID_PAD_SINK];
+ const struct csid_format *format = csid_get_fmt_entry(
+ csid->formats, csid->nformats, f->code);
+ struct csid_phy_config *phy = &csid->phy;
+
+ val = phy->lane_cnt - 1;
+ val |= phy->lane_assign << 4;
+
+ writel_relaxed(val,
+ csid->base + CAMSS_CSID_CORE_CTRL_0);
+
+ val = phy->csiphy_id << 17;
+ val |= 0x9;
+
+ writel_relaxed(val,
+ csid->base + CAMSS_CSID_CORE_CTRL_1);
+
+ dt = format->data_type;
+ df = format->decode_format;
+ }
+
+ /* Config LUT */
+
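+		/*
+		 * Each CID_LUT_VC_n register holds four 8-bit data type
+		 * fields; CID cid occupies byte (cid % 4) of the register
+		 * for virtual channel vc.
+		 */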
+ dt_shift = (cid % 4) * 8;
+
+ val = readl_relaxed(csid->base +
+ CAMSS_CSID_CID_LUT_VC_n(ver, vc));
+ val &= ~(0xff << dt_shift);
+ val |= dt << dt_shift;
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_CID_LUT_VC_n(ver, vc));
+
+ val = CAMSS_CSID_CID_n_CFG_ISPIF_EN;
+ val |= CAMSS_CSID_CID_n_CFG_RDI_EN;
+ val |= df << CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT;
+ val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP;
+
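+		/*
+		 * On 8x96, when the source pad carries the 2X8_PADHI variant
+		 * of a 10-bit sink format, switch the RDI to plain packing so
+		 * that each sample is unpacked to 16 bits, LSB aligned.
+		 */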
+ if (csid->camss->version == CAMSS_8x96) {
+ u32 sink_code = csid->fmt[MSM_CSID_PAD_SINK].code;
+ u32 src_code = csid->fmt[MSM_CSID_PAD_SRC].code;
+
+ if ((sink_code == MEDIA_BUS_FMT_SBGGR10_1X10 &&
+ src_code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) ||
+ (sink_code == MEDIA_BUS_FMT_Y10_1X10 &&
+ src_code == MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)) {
+ val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING;
+ val |= CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16;
+ val |= CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB;
+ }
+ }
+
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_CID_n_CFG(ver, cid));
+
+ if (tg->enabled) {
+ val = CAMSS_CSID_TG_CTRL_ENABLE;
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_CTRL(ver));
+ }
+ } else {
+ if (tg->enabled) {
+ val = CAMSS_CSID_TG_CTRL_DISABLE;
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_CTRL(ver));
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * __csid_get_format - Get pointer to format structure
+ * @csid: CSID device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad from which format is requested
+ * @which: TRY or ACTIVE format
+ *
+ * Return pointer to TRY or ACTIVE format structure
+ */
+static struct v4l2_mbus_framefmt *
+__csid_get_format(struct csid_device *csid,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csid->subdev, cfg, pad);
+
+ return &csid->fmt[pad];
+}
+
+/*
+ * csid_try_format - Handle try format by pad subdev method
+ * @csid: CSID device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad on which format is requested
+ * @fmt: pointer to v4l2 format structure
+ * @which: wanted subdev format
+ */
+static void csid_try_format(struct csid_device *csid,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int i;
+
+ switch (pad) {
+ case MSM_CSID_PAD_SINK:
+ /* Set format on sink pad */
+
+ for (i = 0; i < csid->nformats; i++)
+ if (fmt->code == csid->formats[i].code)
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= csid->nformats)
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ break;
+
+ case MSM_CSID_PAD_SRC:
+ if (csid->testgen_mode->cur.val == 0) {
+ /* Test generator is disabled, */
+ /* keep pad formats in sync */
+ u32 code = fmt->code;
+
+ *fmt = *__csid_get_format(csid, cfg,
+ MSM_CSID_PAD_SINK, which);
+ fmt->code = csid_src_pad_code(csid, fmt->code, 0, code);
+ } else {
+ /* Test generator is enabled, set format on source */
+ /* pad to allow test generator usage */
+
+ for (i = 0; i < csid->nformats; i++)
+ if (csid->formats[i].code == fmt->code)
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= csid->nformats)
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+
+ fmt->field = V4L2_FIELD_NONE;
+ }
+ break;
+ }
+
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/*
+ * csid_enum_mbus_code - Handle pixel format enumeration
+ * @sd: CSID V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csid_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+
+ if (code->pad == MSM_CSID_PAD_SINK) {
+ if (code->index >= csid->nformats)
+ return -EINVAL;
+
+ code->code = csid->formats[code->index].code;
+ } else {
+ if (csid->testgen_mode->cur.val == 0) {
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ sink_fmt = __csid_get_format(csid, cfg,
+ MSM_CSID_PAD_SINK,
+ code->which);
+
+ code->code = csid_src_pad_code(csid, sink_fmt->code,
+ code->index, 0);
+ if (!code->code)
+ return -EINVAL;
+ } else {
+ if (code->index >= csid->nformats)
+ return -EINVAL;
+
+ code->code = csid->formats[code->index].code;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * csid_enum_frame_size - Handle frame size enumeration
+ * @sd: CSID V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csid_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ csid_try_format(csid, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ csid_try_format(csid, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * csid_get_format - Handle get format by pads subdev method
+ * @sd: CSID V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csid_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * csid_set_format - Handle set format by pads subdev method
+ * @sd: CSID V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csid_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ csid_try_format(csid, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == MSM_CSID_PAD_SINK) {
+ format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SRC,
+ fmt->which);
+
+ *format = fmt->format;
+ csid_try_format(csid, cfg, MSM_CSID_PAD_SRC, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * csid_init_formats - Initialize formats on all pads
+ * @sd: CSID V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format = {
+ .pad = MSM_CSID_PAD_SINK,
+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .width = 1920,
+ .height = 1080
+ }
+ };
+
+ return csid_set_format(sd, fh ? fh->pad : NULL, &format);
+}
+
+static const char * const csid_test_pattern_menu[] = {
+ "Disabled",
+ "Incrementing",
+ "Alternating 0x55/0xAA",
+ "All Zeros 0x00",
+ "All Ones 0xFF",
+ "Pseudo-random Data",
+};
+
+/*
+ * csid_set_test_pattern - Set test generator's pattern mode
+ * @csid: CSID device
+ * @value: desired test pattern mode
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_set_test_pattern(struct csid_device *csid, s32 value)
+{
+ struct csid_testgen_config *tg = &csid->testgen;
+
+ /* If CSID is linked to CSIPHY, do not allow to enable test generator */
+ if (value && media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
+ return -EBUSY;
+
+ tg->enabled = !!value;
+
+ switch (value) {
+ case 1:
+ tg->payload_mode = CSID_PAYLOAD_MODE_INCREMENTING;
+ break;
+ case 2:
+ tg->payload_mode = CSID_PAYLOAD_MODE_ALTERNATING_55_AA;
+ break;
+ case 3:
+ tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ZEROES;
+ break;
+ case 4:
+ tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ONES;
+ break;
+ case 5:
+ tg->payload_mode = CSID_PAYLOAD_MODE_RANDOM;
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * csid_s_ctrl - Handle set control subdev method
+ * @ctrl: pointer to v4l2 control structure
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct csid_device *csid = container_of(ctrl->handler,
+ struct csid_device, ctrls);
+ int ret = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ ret = csid_set_test_pattern(csid, ctrl->val);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops csid_ctrl_ops = {
+ .s_ctrl = csid_s_ctrl,
+};
+
+/*
+ * msm_csid_subdev_init - Initialize CSID device structure and resources
+ * @camss: Camera sub-system device
+ * @csid: CSID device
+ * @res: CSID module resources table
+ * @id: CSID module id
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
+ const struct resources *res, u8 id)
+{
+ struct device *dev = camss->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *r;
+ int i, j;
+ int ret;
+
+ csid->camss = camss;
+ csid->id = id;
+
+ if (camss->version == CAMSS_8x16) {
+ csid->formats = csid_formats_8x16;
+ csid->nformats =
+ ARRAY_SIZE(csid_formats_8x16);
+ } else if (camss->version == CAMSS_8x96) {
+ csid->formats = csid_formats_8x96;
+ csid->nformats =
+ ARRAY_SIZE(csid_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+
+ /* Memory */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
+ csid->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(csid->base)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(csid->base);
+ }
+
+ /* Interrupt */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ res->interrupt[0]);
+ if (!r) {
+ dev_err(dev, "missing IRQ\n");
+ return -EINVAL;
+ }
+
+ csid->irq = r->start;
+ snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d",
+ dev_name(dev), MSM_CSID_NAME, csid->id);
+ ret = devm_request_irq(dev, csid->irq, csid_isr,
+ IRQF_TRIGGER_RISING, csid->irq_name, csid);
+ if (ret < 0) {
+ dev_err(dev, "request_irq failed: %d\n", ret);
+ return ret;
+ }
+
+ disable_irq(csid->irq);
+
+ /* Clocks */
+
+ csid->nclocks = 0;
+ while (res->clock[csid->nclocks])
+ csid->nclocks++;
+
+ csid->clock = devm_kcalloc(dev, csid->nclocks, sizeof(*csid->clock),
+ GFP_KERNEL);
+ if (!csid->clock)
+ return -ENOMEM;
+
+ for (i = 0; i < csid->nclocks; i++) {
+ struct camss_clock *clock = &csid->clock[i];
+
+ clock->clk = devm_clk_get(dev, res->clock[i]);
+ if (IS_ERR(clock->clk))
+ return PTR_ERR(clock->clk);
+
+ clock->name = res->clock[i];
+
+ clock->nfreqs = 0;
+ while (res->clock_rate[i][clock->nfreqs])
+ clock->nfreqs++;
+
+ if (!clock->nfreqs) {
+ clock->freq = NULL;
+ continue;
+ }
+
+ clock->freq = devm_kcalloc(dev,
+ clock->nfreqs,
+ sizeof(*clock->freq),
+ GFP_KERNEL);
+ if (!clock->freq)
+ return -ENOMEM;
+
+ for (j = 0; j < clock->nfreqs; j++)
+ clock->freq[j] = res->clock_rate[i][j];
+ }
+
+ /* Regulator */
+
+ csid->vdda = devm_regulator_get(dev, res->regulator[0]);
+ if (IS_ERR(csid->vdda)) {
+ dev_err(dev, "could not get regulator\n");
+ return PTR_ERR(csid->vdda);
+ }
+
+ init_completion(&csid->reset_complete);
+
+ return 0;
+}
+
+/*
+ * msm_csid_get_csid_id - Get CSID HW module id
+ * @entity: Pointer to CSID media entity structure
+ * @id: Return CSID HW module id here
+ */
+void msm_csid_get_csid_id(struct media_entity *entity, u8 *id)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+
+ *id = csid->id;
+}
+
+/*
+ * csid_get_lane_assign - Calculate CSI2 lane assign configuration parameter
+ * @lane_cfg - CSI2 lane configuration
+ *
+ * Return lane assign
+ */
+static u32 csid_get_lane_assign(struct csiphy_lanes_cfg *lane_cfg)
+{
+ u32 lane_assign = 0;
+ int i;
+
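+	/*
+	 * Pack the physical position of each data lane into a 4-bit field:
+	 * lane i occupies bits [4 * i + 3 : 4 * i].  The result is written
+	 * (shifted by 4) into CAMSS_CSID_CORE_CTRL_0 by csid_set_stream().
+	 */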
+ for (i = 0; i < lane_cfg->num_data; i++)
+ lane_assign |= lane_cfg->data[i].pos << (i * 4);
+
+ return lane_assign;
+}
+
+/*
+ * csid_link_setup - Setup CSID connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad
+ * @remote: Pointer to remote pad
+ * @flags: Link flags
+ *
+ * Return 0 on success
+ */
+static int csid_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ if (media_entity_remote_pad(local))
+ return -EBUSY;
+
+ if ((local->flags & MEDIA_PAD_FL_SINK) &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+ struct v4l2_subdev *sd;
+ struct csid_device *csid;
+ struct csiphy_device *csiphy;
+ struct csiphy_lanes_cfg *lane_cfg;
+ struct v4l2_subdev_format format = { 0 };
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ csid = v4l2_get_subdevdata(sd);
+
+ /* If test generator is enabled */
+ /* do not allow a link from CSIPHY to CSID */
+ if (csid->testgen_mode->cur.val != 0)
+ return -EBUSY;
+
+ sd = media_entity_to_v4l2_subdev(remote->entity);
+ csiphy = v4l2_get_subdevdata(sd);
+
+ /* If a sensor is not linked to CSIPHY */
+		/* do not allow a link from CSIPHY to CSID */
+ if (!csiphy->cfg.csi2)
+ return -EPERM;
+
+ csid->phy.csiphy_id = csiphy->id;
+
+ lane_cfg = &csiphy->cfg.csi2->lane_cfg;
+ csid->phy.lane_cnt = lane_cfg->num_data;
+ csid->phy.lane_assign = csid_get_lane_assign(lane_cfg);
+
+ /* Reset format on source pad to sink pad format */
+ format.pad = MSM_CSID_PAD_SRC;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ csid_set_format(&csid->subdev, NULL, &format);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops csid_core_ops = {
+ .s_power = csid_set_power,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops csid_video_ops = {
+ .s_stream = csid_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csid_pad_ops = {
+ .enum_mbus_code = csid_enum_mbus_code,
+ .enum_frame_size = csid_enum_frame_size,
+ .get_fmt = csid_get_format,
+ .set_fmt = csid_set_format,
+};
+
+static const struct v4l2_subdev_ops csid_v4l2_ops = {
+ .core = &csid_core_ops,
+ .video = &csid_video_ops,
+ .pad = &csid_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csid_v4l2_internal_ops = {
+ .open = csid_init_formats,
+};
+
+static const struct media_entity_operations csid_media_ops = {
+ .link_setup = csid_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * msm_csid_register_entity - Register subdev node for CSID module
+ * @csid: CSID device
+ * @v4l2_dev: V4L2 device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csid_register_entity(struct csid_device *csid,
+ struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd = &csid->subdev;
+ struct media_pad *pads = csid->pads;
+ struct device *dev = csid->camss->dev;
+ int ret;
+
+ v4l2_subdev_init(sd, &csid_v4l2_ops);
+ sd->internal_ops = &csid_v4l2_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
+ MSM_CSID_NAME, csid->id);
+ v4l2_set_subdevdata(sd, csid);
+
+ ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
+ return ret;
+ }
+
+ csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
+ &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(csid_test_pattern_menu) - 1, 0, 0,
+ csid_test_pattern_menu);
+
+ if (csid->ctrls.error) {
+ dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
+ ret = csid->ctrls.error;
+ goto free_ctrl;
+ }
+
+ csid->subdev.ctrl_handler = &csid->ctrls;
+
+ ret = csid_init_formats(sd, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init format: %d\n", ret);
+ goto free_ctrl;
+ }
+
+ pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_IO_V4L;
+ sd->entity.ops = &csid_media_ops;
+ ret = media_entity_pads_init(&sd->entity, MSM_CSID_PADS_NUM, pads);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init media entity: %d\n", ret);
+ goto free_ctrl;
+ }
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+ goto media_cleanup;
+ }
+
+ return 0;
+
+media_cleanup:
+ media_entity_cleanup(&sd->entity);
+free_ctrl:
+ v4l2_ctrl_handler_free(&csid->ctrls);
+
+ return ret;
+}
+
+/*
+ * msm_csid_unregister_entity - Unregister CSID module subdev node
+ * @csid: CSID device
+ */
+void msm_csid_unregister_entity(struct csid_device *csid)
+{
+ v4l2_device_unregister_subdev(&csid->subdev);
+ media_entity_cleanup(&csid->subdev.entity);
+ v4l2_ctrl_handler_free(&csid->ctrls);
+}
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
new file mode 100644
index 000000000..1824b3745
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-csid.h
+ *
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
+ *
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_CSID_H
+#define QC_MSM_CAMSS_CSID_H
+
+#include <linux/clk.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define MSM_CSID_PAD_SINK 0
+#define MSM_CSID_PAD_SRC 1
+#define MSM_CSID_PADS_NUM 2
+
+enum csid_payload_mode {
+ CSID_PAYLOAD_MODE_INCREMENTING = 0,
+ CSID_PAYLOAD_MODE_ALTERNATING_55_AA = 1,
+ CSID_PAYLOAD_MODE_ALL_ZEROES = 2,
+ CSID_PAYLOAD_MODE_ALL_ONES = 3,
+ CSID_PAYLOAD_MODE_RANDOM = 4,
+ CSID_PAYLOAD_MODE_USER_SPECIFIED = 5,
+};
+
+struct csid_testgen_config {
+ u8 enabled;
+ enum csid_payload_mode payload_mode;
+};
+
+struct csid_phy_config {
+ u8 csiphy_id;
+ u8 lane_cnt;
+ u32 lane_assign;
+};
+
+struct csid_device {
+ struct camss *camss;
+ u8 id;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_CSID_PADS_NUM];
+ void __iomem *base;
+ u32 irq;
+ char irq_name[30];
+ struct camss_clock *clock;
+ int nclocks;
+ struct regulator *vdda;
+ struct completion reset_complete;
+ struct csid_testgen_config testgen;
+ struct csid_phy_config phy;
+ struct v4l2_mbus_framefmt fmt[MSM_CSID_PADS_NUM];
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *testgen_mode;
+ const struct csid_format *formats;
+ unsigned int nformats;
+};
+
+struct resources;
+
+int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
+ const struct resources *res, u8 id);
+
+int msm_csid_register_entity(struct csid_device *csid,
+ struct v4l2_device *v4l2_dev);
+
+void msm_csid_unregister_entity(struct csid_device *csid);
+
+void msm_csid_get_csid_id(struct media_entity *entity, u8 *id);
+
+#endif /* QC_MSM_CAMSS_CSID_H */
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
new file mode 100644
index 000000000..12bce391d
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csiphy-2ph-1-0.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module 2phase v1.0
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+
+#include "camss-csiphy.h"
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
+#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
+#define CAMSS_CSI_PHY_GLBL_RESET 0x140
+#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
+#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
+#define CAMSS_CSI_PHY_HW_VERSION 0x188
+#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n))
+#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
+#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
+
+static void csiphy_hw_version_read(struct csiphy_device *csiphy,
+ struct device *dev)
+{
+ u8 hw_version = readl_relaxed(csiphy->base +
+ CAMSS_CSI_PHY_HW_VERSION);
+
+ dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version);
+}
+
+/*
+ * csiphy_reset - Perform software reset on CSIPHY module
+ * @csiphy: CSIPHY device
+ */
+static void csiphy_reset(struct csiphy_device *csiphy)
+{
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+ usleep_range(5000, 8000);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+}
+
+/*
+ * csiphy_settle_cnt_calc - Calculate settle count value
+ *
+ * Helper function to calculate settle count value. This is
+ * based on the CSI2 T_hs_settle parameter which in turn
+ * is calculated based on the CSI2 transmitter pixel clock
+ * frequency.
+ *
+ * Return settle count value or 0 if the CSI2 pixel clock
+ * frequency is not available
+ */
+static u8 csiphy_settle_cnt_calc(u32 pixel_clock, u8 bpp, u8 num_lanes,
+ u32 timer_clk_rate)
+{
+ u32 mipi_clock; /* Hz */
+ u32 ui; /* ps */
+ u32 timer_period; /* ps */
+ u32 t_hs_prepare_max; /* ps */
+ u32 t_hs_prepare_zero_min; /* ps */
+ u32 t_hs_settle; /* ps */
+ u8 settle_cnt;
+
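+	/*
+	 * mipi_clock is the per-lane DDR clock; one UI (unit interval, one
+	 * bit time on the lane) is half of its period.  T_HS_SETTLE is set
+	 * midway between the D-PHY limits T_HS_PREPARE(max) = 85 ns + 6 UI
+	 * and T_HS_PREPARE + T_HS_ZERO(min) = 145 ns + 10 UI, then converted
+	 * to timer clock cycles.
+	 */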
+ mipi_clock = pixel_clock * bpp / (2 * num_lanes);
+ ui = div_u64(1000000000000LL, mipi_clock);
+ ui /= 2;
+ t_hs_prepare_max = 85000 + 6 * ui;
+ t_hs_prepare_zero_min = 145000 + 10 * ui;
+ t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2;
+
+ timer_period = div_u64(1000000000000LL, timer_clk_rate);
+ settle_cnt = t_hs_settle / timer_period - 1;
+
+ return settle_cnt;
+}
+
+static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 settle_cnt;
+ u8 val, l = 0;
+ int i = 0;
+
+ settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data,
+ csiphy->timer_clk_rate);
+
+ writel_relaxed(0x1, csiphy->base +
+ CAMSS_CSI_PHY_GLBL_T_INIT_CFG0);
+ writel_relaxed(0x1, csiphy->base +
+ CAMSS_CSI_PHY_T_WAKEUP_CFG0);
+
+ val = 0x1;
+ val |= lane_mask << 1;
+ writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+
+ val = cfg->combo_mode << 4;
+ writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = c->clk.pos;
+ else
+ l = c->data[i].pos;
+
+ writel_relaxed(0x10, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG2(l));
+ writel_relaxed(settle_cnt, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG3(l));
+ writel_relaxed(0x3f, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_MASKn(l));
+ writel_relaxed(0x3f, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(l));
+ }
+}
+
+static void csiphy_lanes_disable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 l = 0;
+ int i = 0;
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = c->clk.pos;
+ else
+ l = c->data[i].pos;
+
+ writel_relaxed(0x0, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG2(l));
+ }
+
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+}
+
+/*
+ * csiphy_isr - CSIPHY module interrupt handler
+ * @irq: Interrupt line
+ * @dev: CSIPHY device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csiphy_isr(int irq, void *dev)
+{
+ struct csiphy_device *csiphy = dev;
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 val = readl_relaxed(csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_STATUSn(i));
+ writel_relaxed(val, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ }
+
+ return IRQ_HANDLED;
+}
+
+const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
+ .hw_version_read = csiphy_hw_version_read,
+ .reset = csiphy_reset,
+ .lanes_enable = csiphy_lanes_enable,
+ .lanes_disable = csiphy_lanes_disable,
+ .isr = csiphy_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
new file mode 100644
index 000000000..2e65caf1e
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csiphy-3ph-1-0.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module 3phase v1.0
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+
+#include "camss-csiphy.h"
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
+#define CSIPHY_3PH_LNn_CFG2(n) (0x004 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT BIT(3)
+#define CSIPHY_3PH_LNn_CFG3(n) (0x008 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG4(n) (0x00c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS 0xa4
+#define CSIPHY_3PH_LNn_CFG5(n) (0x010 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG5_T_HS_DTERM 0x02
+#define CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT 0x50
+#define CSIPHY_3PH_LNn_TEST_IMP(n) (0x01c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP 0xa
+#define CSIPHY_3PH_LNn_MISC1(n) (0x028 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_MISC1_IS_CLKLANE BIT(2)
+#define CSIPHY_3PH_LNn_CFG6(n) (0x02c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT BIT(0)
+#define CSIPHY_3PH_LNn_CFG7(n) (0x030 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG7_SWI_T_INIT 0x2
+#define CSIPHY_3PH_LNn_CFG8(n) (0x034 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP BIT(0)
+#define CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE BIT(1)
+#define CSIPHY_3PH_LNn_CFG9(n) (0x038 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP 0x1
+#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8
+
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0)
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1)
+#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n))
+
+static void csiphy_hw_version_read(struct csiphy_device *csiphy,
+ struct device *dev)
+{
+ u32 hw_version;
+
+ writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID,
+ csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+
+ hw_version = readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(12));
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(13)) << 8;
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(14)) << 16;
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24;
+
+ dev_err(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
+}
+
+/*
+ * csiphy_reset - Perform software reset on CSIPHY module
+ * @csiphy: CSIPHY device
+ */
+static void csiphy_reset(struct csiphy_device *csiphy)
+{
+ writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ usleep_range(5000, 8000);
+ writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+}
+
+static irqreturn_t csiphy_isr(int irq, void *dev)
+{
+ struct csiphy_device *csiphy = dev;
+ int i;
+
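+	/*
+	 * Ack the interrupts: copy each status register into its clear
+	 * register (common CTRL 22..32), strobe the global clear command
+	 * (common CTRL 10), then zero the clear registers again.
+	 */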
+ for (i = 0; i < 11; i++) {
+ int c = i + 22;
+ u8 val = readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(i));
+
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c));
+ }
+
+ writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+ writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+
+ for (i = 22; i < 33; i++)
+ writel_relaxed(0x0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csiphy_settle_cnt_calc - Calculate settle count value
+ *
+ * Helper function to calculate settle count value. This is
+ * based on the CSI2 T_hs_settle parameter which in turn
+ * is calculated based on the CSI2 transmitter pixel clock
+ * frequency.
+ *
+ * Return settle count value or 0 if the CSI2 pixel clock
+ * frequency is not available
+ */
+static u8 csiphy_settle_cnt_calc(u32 pixel_clock, u8 bpp, u8 num_lanes,
+ u32 timer_clk_rate)
+{
+ u32 mipi_clock; /* Hz */
+ u32 ui; /* ps */
+ u32 timer_period; /* ps */
+ u32 t_hs_prepare_max; /* ps */
+ u32 t_hs_settle; /* ps */
+ u8 settle_cnt;
+
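+	/*
+	 * Same derivation as for the 2ph PHY, except that T_HS_SETTLE is set
+	 * directly to the D-PHY limit T_HS_PREPARE(max) = 85 ns + 6 UI (UI
+	 * being half the per-lane DDR clock period) before conversion to
+	 * timer clock cycles.
+	 */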
+ mipi_clock = pixel_clock * bpp / (2 * num_lanes);
+ ui = div_u64(1000000000000LL, mipi_clock);
+ ui /= 2;
+ t_hs_prepare_max = 85000 + 6 * ui;
+ t_hs_settle = t_hs_prepare_max;
+
+ timer_period = div_u64(1000000000000LL, timer_clk_rate);
+ settle_cnt = t_hs_settle / timer_period - 6;
+
+ return settle_cnt;
+}
+
+static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 settle_cnt;
+ u8 val, l = 0;
+ int i;
+
+ settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data,
+ csiphy->timer_clk_rate);
+
+ val = BIT(c->clk.pos);
+ for (i = 0; i < c->num_data; i++)
+ val |= BIT(c->data[i].pos * 2);
+
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+
+ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = 7;
+ else
+ l = c->data[i].pos * 2;
+
+ val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
+ val |= 0x17;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
+
+ val = CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG2(l));
+
+ val = settle_cnt;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG3(l));
+
+ val = CSIPHY_3PH_LNn_CFG5_T_HS_DTERM |
+ CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG5(l));
+
+ val = CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG6(l));
+
+ val = CSIPHY_3PH_LNn_CFG7_SWI_T_INIT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG7(l));
+
+ val = CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP |
+ CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG8(l));
+
+ val = CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG9(l));
+
+ val = CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_TEST_IMP(l));
+
+ val = CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL;
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_LNn_CSI_LANE_CTRL15(l));
+ }
+
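+	/* Extra configuration for the clock lane (l is 7 after the loop) */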
+ val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
+
+ val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG4(l));
+
+ val = CSIPHY_3PH_LNn_MISC1_IS_CLKLANE;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_MISC1(l));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(11));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(12));
+
+ val = 0xfb;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(13));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(14));
+
+ val = 0x7f;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(15));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(16));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(17));
+
+ val = 0xef;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(18));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(19));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(20));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(21));
+}
+
+static void csiphy_lanes_disable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg)
+{
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+}
+
+const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
+ .hw_version_read = csiphy_hw_version_read,
+ .reset = csiphy_reset,
+ .lanes_enable = csiphy_lanes_enable,
+ .lanes_disable = csiphy_lanes_disable,
+ .isr = csiphy_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
new file mode 100644
index 000000000..3c5b9082a
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csiphy.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-csiphy.h"
+#include "camss.h"
+
+#define MSM_CSIPHY_NAME "msm_csiphy"
+
+struct csiphy_format {
+ u32 code;
+ u8 bpp;
+};
+
+static const struct csiphy_format csiphy_formats_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+};
+
+static const struct csiphy_format csiphy_formats_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+};
+
+/*
+ * csiphy_get_bpp - map media bus format to bits per pixel
+ * @formats: supported media bus formats array
+ * @nformats: size of @formats array
+ * @code: media bus format code
+ *
+ * Return number of bits per pixel
+ */
+static u8 csiphy_get_bpp(const struct csiphy_format *formats,
+ unsigned int nformats, u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < nformats; i++)
+ if (code == formats[i].code)
+ return formats[i].bpp;
+
+ WARN(1, "Unknown format\n");
+
+ return formats[0].bpp;
+}
+
+/*
+ * csiphy_set_clock_rates - Calculate and set clock rates on CSIPHY module
+ * @csiphy: CSIPHY device
+ */
+static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
+{
+ struct device *dev = csiphy->camss->dev;
+ u32 pixel_clock;
+ int i, j;
+ int ret;
+
+ ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock);
+ if (ret)
+ pixel_clock = 0;
+
+ for (i = 0; i < csiphy->nclocks; i++) {
+ struct camss_clock *clock = &csiphy->clock[i];
+
+ if (!strcmp(clock->name, "csiphy0_timer") ||
+ !strcmp(clock->name, "csiphy1_timer") ||
+ !strcmp(clock->name, "csiphy2_timer")) {
+ u8 bpp = csiphy_get_bpp(csiphy->formats,
+ csiphy->nformats,
+ csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
+ u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
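+			/*
+			 * Lower bound for the csiphyN_timer clock: the
+			 * per-lane MIPI DDR bit clock, i.e. pixel_clock *
+			 * bpp / (2 * num_lanes), divided by 4.
+			 */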
+ u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4);
+ long round_rate;
+
+ camss_add_clock_margin(&min_rate);
+
+ for (j = 0; j < clock->nfreqs; j++)
+ if (min_rate < clock->freq[j])
+ break;
+
+ if (j == clock->nfreqs) {
+ dev_err(dev,
+ "Pixel clock is too high for CSIPHY\n");
+ return -EINVAL;
+ }
+
+ /* if sensor pixel clock is not available */
+ /* set highest possible CSIPHY clock rate */
+ if (min_rate == 0)
+ j = clock->nfreqs - 1;
+
+ round_rate = clk_round_rate(clock->clk, clock->freq[j]);
+ if (round_rate < 0) {
+ dev_err(dev, "clk round rate failed: %ld\n",
+ round_rate);
+ return -EINVAL;
+ }
+
+ csiphy->timer_clk_rate = round_rate;
+
+ ret = clk_set_rate(clock->clk, csiphy->timer_clk_rate);
+ if (ret < 0) {
+ dev_err(dev, "clk set rate failed: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * csiphy_set_power - Power on/off CSIPHY module
+ * @sd: CSIPHY V4L2 subdevice
+ * @on: Requested power state
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csiphy_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+ struct device *dev = csiphy->camss->dev;
+
+ if (on) {
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ ret = csiphy_set_clock_rates(csiphy);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ ret = camss_enable_clocks(csiphy->nclocks, csiphy->clock, dev);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ enable_irq(csiphy->irq);
+
+ csiphy->ops->reset(csiphy);
+
+ csiphy->ops->hw_version_read(csiphy, dev);
+ } else {
+ disable_irq(csiphy->irq);
+
+ camss_disable_clocks(csiphy->nclocks, csiphy->clock);
+
+ pm_runtime_put_sync(dev);
+ }
+
+ return 0;
+}
+
+/*
+ * csiphy_get_lane_mask - Calculate CSI2 lane mask configuration parameter
+ * @lane_cfg - CSI2 lane configuration
+ *
+ * Return lane mask
+ */
+static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
+{
+ u8 lane_mask;
+ int i;
+
+ lane_mask = 1 << lane_cfg->clk.pos;
+
+ for (i = 0; i < lane_cfg->num_data; i++)
+ lane_mask |= 1 << lane_cfg->data[i].pos;
+
+ return lane_mask;
+}
+
+/*
+ * csiphy_stream_on - Enable streaming on CSIPHY module
+ * @csiphy: CSIPHY device
+ *
+ * Helper function to enable streaming on CSIPHY module.
+ * Main configuration of CSIPHY module is also done here.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csiphy_stream_on(struct csiphy_device *csiphy)
+{
+ struct csiphy_config *cfg = &csiphy->cfg;
+ u32 pixel_clock;
+ u8 lane_mask = csiphy_get_lane_mask(&cfg->csi2->lane_cfg);
+ u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats,
+ csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
+ u8 val;
+ int ret;
+
+ ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock);
+ if (ret) {
+ dev_err(csiphy->camss->dev,
+ "Cannot get CSI2 transmitter's pixel clock\n");
+ return -EINVAL;
+ }
+ if (!pixel_clock) {
+ dev_err(csiphy->camss->dev,
+ "Got pixel clock == 0, cannot continue\n");
+ return -EINVAL;
+ }
+
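+	/*
+	 * Select the CSID fed by this PHY in the CSI clock mux register: the
+	 * low nibble is used normally; in combo mode, when the upper data
+	 * lanes (mask 0x18) are in use, the high nibble is programmed instead.
+	 */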
+ val = readl_relaxed(csiphy->base_clk_mux);
+ if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) {
+ val &= ~0xf0;
+ val |= cfg->csid_id << 4;
+ } else {
+ val &= ~0xf;
+ val |= cfg->csid_id;
+ }
+ writel_relaxed(val, csiphy->base_clk_mux);
+ wmb();
+
+ csiphy->ops->lanes_enable(csiphy, cfg, pixel_clock, bpp, lane_mask);
+
+ return 0;
+}
+
+/*
+ * csiphy_stream_off - Disable streaming on CSIPHY module
+ * @csiphy: CSIPHY device
+ *
+ * Helper function to disable streaming on CSIPHY module
+ */
+static void csiphy_stream_off(struct csiphy_device *csiphy)
+{
+ csiphy->ops->lanes_disable(csiphy, &csiphy->cfg);
+}
+
+/*
+ * csiphy_set_stream - Enable/disable streaming on CSIPHY module
+ * @sd: CSIPHY V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csiphy_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (enable)
+ ret = csiphy_stream_on(csiphy);
+ else
+ csiphy_stream_off(csiphy);
+
+ return ret;
+}
+
+/*
+ * __csiphy_get_format - Get pointer to format structure
+ * @csiphy: CSIPHY device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad from which format is requested
+ * @which: TRY or ACTIVE format
+ *
+ * Return pointer to TRY or ACTIVE format structure
+ */
+static struct v4l2_mbus_framefmt *
+__csiphy_get_format(struct csiphy_device *csiphy,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csiphy->subdev, cfg, pad);
+
+ return &csiphy->fmt[pad];
+}
+
+/*
+ * csiphy_try_format - Handle try format by pad subdev method
+ * @csiphy: CSIPHY device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad on which format is requested
+ * @fmt: pointer to v4l2 format structure
+ * @which: wanted subdev format
+ */
+static void csiphy_try_format(struct csiphy_device *csiphy,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int i;
+
+ switch (pad) {
+ case MSM_CSIPHY_PAD_SINK:
+ /* Set format on sink pad */
+
+ for (i = 0; i < csiphy->nformats; i++)
+ if (fmt->code == csiphy->formats[i].code)
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= csiphy->nformats)
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ break;
+
+ case MSM_CSIPHY_PAD_SRC:
+ /* Set and return a format same as sink pad */
+
+		*fmt = *__csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SINK,
+ which);
+
+ break;
+ }
+}
+
+/*
+ * csiphy_enum_mbus_code - Handle pixel format enumeration
+ * @sd: CSIPHY V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == MSM_CSIPHY_PAD_SINK) {
+ if (code->index >= csiphy->nformats)
+ return -EINVAL;
+
+ code->code = csiphy->formats[code->index].code;
+ } else {
+ if (code->index > 0)
+ return -EINVAL;
+
+ format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SINK,
+ code->which);
+
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+/*
+ * csiphy_enum_frame_size - Handle frame size enumeration
+ * @sd: CSIPHY V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * csiphy_get_format - Handle get format by pads subdev method
+ * @sd: CSIPHY V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csiphy_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * csiphy_set_format - Handle set format by pads subdev method
+ * @sd: CSIPHY V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csiphy_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ csiphy_try_format(csiphy, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == MSM_CSIPHY_PAD_SINK) {
+ format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC,
+ fmt->which);
+
+ *format = fmt->format;
+ csiphy_try_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * csiphy_init_formats - Initialize formats on all pads
+ * @sd: CSIPHY V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csiphy_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format = {
+ .pad = MSM_CSIPHY_PAD_SINK,
+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .width = 1920,
+ .height = 1080
+ }
+ };
+
+ return csiphy_set_format(sd, fh ? fh->pad : NULL, &format);
+}
+
+/*
+ * msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources
+ * @camss: Camera sub-system device
+ * @csiphy: CSIPHY device
+ * @res: CSIPHY module resources table
+ * @id: CSIPHY module id
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csiphy_subdev_init(struct camss *camss,
+ struct csiphy_device *csiphy,
+ const struct resources *res, u8 id)
+{
+ struct device *dev = camss->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *r;
+ int i, j;
+ int ret;
+
+ csiphy->camss = camss;
+ csiphy->id = id;
+ csiphy->cfg.combo_mode = 0;
+
+ if (camss->version == CAMSS_8x16) {
+ csiphy->ops = &csiphy_ops_2ph_1_0;
+ csiphy->formats = csiphy_formats_8x16;
+ csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x16);
+ } else if (camss->version == CAMSS_8x96) {
+ csiphy->ops = &csiphy_ops_3ph_1_0;
+ csiphy->formats = csiphy_formats_8x96;
+ csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+
+ /* Memory */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
+ csiphy->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(csiphy->base)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(csiphy->base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]);
+ csiphy->base_clk_mux = devm_ioremap_resource(dev, r);
+ if (IS_ERR(csiphy->base_clk_mux)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(csiphy->base_clk_mux);
+ }
+
+ /* Interrupt */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ res->interrupt[0]);
+ if (!r) {
+ dev_err(dev, "missing IRQ\n");
+ return -EINVAL;
+ }
+
+ csiphy->irq = r->start;
+ snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d",
+ dev_name(dev), MSM_CSIPHY_NAME, csiphy->id);
+
+ ret = devm_request_irq(dev, csiphy->irq, csiphy->ops->isr,
+ IRQF_TRIGGER_RISING, csiphy->irq_name, csiphy);
+ if (ret < 0) {
+ dev_err(dev, "request_irq failed: %d\n", ret);
+ return ret;
+ }
+
+ disable_irq(csiphy->irq);
+
+ /* Clocks */
+
+ csiphy->nclocks = 0;
+ while (res->clock[csiphy->nclocks])
+ csiphy->nclocks++;
+
+ csiphy->clock = devm_kcalloc(dev,
+ csiphy->nclocks, sizeof(*csiphy->clock),
+ GFP_KERNEL);
+ if (!csiphy->clock)
+ return -ENOMEM;
+
+ for (i = 0; i < csiphy->nclocks; i++) {
+ struct camss_clock *clock = &csiphy->clock[i];
+
+ clock->clk = devm_clk_get(dev, res->clock[i]);
+ if (IS_ERR(clock->clk))
+ return PTR_ERR(clock->clk);
+
+ clock->name = res->clock[i];
+
+ clock->nfreqs = 0;
+ while (res->clock_rate[i][clock->nfreqs])
+ clock->nfreqs++;
+
+ if (!clock->nfreqs) {
+ clock->freq = NULL;
+ continue;
+ }
+
+ clock->freq = devm_kcalloc(dev,
+ clock->nfreqs,
+ sizeof(*clock->freq),
+ GFP_KERNEL);
+ if (!clock->freq)
+ return -ENOMEM;
+
+ for (j = 0; j < clock->nfreqs; j++)
+ clock->freq[j] = res->clock_rate[i][j];
+ }
+
+ return 0;
+}
+
+/*
+ * csiphy_link_setup - Setup CSIPHY connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad
+ * @remote: Pointer to remote pad
+ * @flags: Link flags
+ *
+ * Return 0 on success
+ */
+static int csiphy_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ if ((local->flags & MEDIA_PAD_FL_SOURCE) &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+ struct v4l2_subdev *sd;
+ struct csiphy_device *csiphy;
+ struct csid_device *csid;
+
+ if (media_entity_remote_pad(local))
+ return -EBUSY;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ csiphy = v4l2_get_subdevdata(sd);
+
+ sd = media_entity_to_v4l2_subdev(remote->entity);
+ csid = v4l2_get_subdevdata(sd);
+
+ csiphy->cfg.csid_id = csid->id;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops csiphy_core_ops = {
+ .s_power = csiphy_set_power,
+};
+
+static const struct v4l2_subdev_video_ops csiphy_video_ops = {
+ .s_stream = csiphy_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csiphy_pad_ops = {
+ .enum_mbus_code = csiphy_enum_mbus_code,
+ .enum_frame_size = csiphy_enum_frame_size,
+ .get_fmt = csiphy_get_format,
+ .set_fmt = csiphy_set_format,
+};
+
+static const struct v4l2_subdev_ops csiphy_v4l2_ops = {
+ .core = &csiphy_core_ops,
+ .video = &csiphy_video_ops,
+ .pad = &csiphy_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csiphy_v4l2_internal_ops = {
+ .open = csiphy_init_formats,
+};
+
+static const struct media_entity_operations csiphy_media_ops = {
+ .link_setup = csiphy_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * msm_csiphy_register_entity - Register subdev node for CSIPHY module
+ * @csiphy: CSIPHY device
+ * @v4l2_dev: V4L2 device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csiphy_register_entity(struct csiphy_device *csiphy,
+ struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd = &csiphy->subdev;
+ struct media_pad *pads = csiphy->pads;
+ struct device *dev = csiphy->camss->dev;
+ int ret;
+
+ v4l2_subdev_init(sd, &csiphy_v4l2_ops);
+ sd->internal_ops = &csiphy_v4l2_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
+ MSM_CSIPHY_NAME, csiphy->id);
+ v4l2_set_subdevdata(sd, csiphy);
+
+ ret = csiphy_init_formats(sd, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init format: %d\n", ret);
+ return ret;
+ }
+
+ pads[MSM_CSIPHY_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[MSM_CSIPHY_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_IO_V4L;
+ sd->entity.ops = &csiphy_media_ops;
+ ret = media_entity_pads_init(&sd->entity, MSM_CSIPHY_PADS_NUM, pads);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init media entity: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+ media_entity_cleanup(&sd->entity);
+ }
+
+ return ret;
+}
+
+/*
+ * msm_csiphy_unregister_entity - Unregister CSIPHY module subdev node
+ * @csiphy: CSIPHY device
+ */
+void msm_csiphy_unregister_entity(struct csiphy_device *csiphy)
+{
+ v4l2_device_unregister_subdev(&csiphy->subdev);
+ media_entity_cleanup(&csiphy->subdev.entity);
+}
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
new file mode 100644
index 000000000..376f865ad
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-csiphy.h
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_CSIPHY_H
+#define QC_MSM_CAMSS_CSIPHY_H
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define MSM_CSIPHY_PAD_SINK 0
+#define MSM_CSIPHY_PAD_SRC 1
+#define MSM_CSIPHY_PADS_NUM 2
+
+struct csiphy_lane {
+ u8 pos;
+ u8 pol;
+};
+
+struct csiphy_lanes_cfg {
+ int num_data;
+ struct csiphy_lane *data;
+ struct csiphy_lane clk;
+};
+
+struct csiphy_csi2_cfg {
+ struct csiphy_lanes_cfg lane_cfg;
+};
+
+struct csiphy_config {
+ u8 combo_mode;
+ u8 csid_id;
+ struct csiphy_csi2_cfg *csi2;
+};
+
+struct csiphy_device;
+
+struct csiphy_hw_ops {
+ void (*hw_version_read)(struct csiphy_device *csiphy,
+ struct device *dev);
+ void (*reset)(struct csiphy_device *csiphy);
+ void (*lanes_enable)(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask);
+ void (*lanes_disable)(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg);
+ irqreturn_t (*isr)(int irq, void *dev);
+};
+
+struct csiphy_device {
+ struct camss *camss;
+ u8 id;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_CSIPHY_PADS_NUM];
+ void __iomem *base;
+ void __iomem *base_clk_mux;
+ u32 irq;
+ char irq_name[30];
+ struct camss_clock *clock;
+ int nclocks;
+ u32 timer_clk_rate;
+ struct csiphy_config cfg;
+ struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM];
+ const struct csiphy_hw_ops *ops;
+ const struct csiphy_format *formats;
+ unsigned int nformats;
+};
+
+struct resources;
+
+int msm_csiphy_subdev_init(struct camss *camss,
+ struct csiphy_device *csiphy,
+ const struct resources *res, u8 id);
+
+int msm_csiphy_register_entity(struct csiphy_device *csiphy,
+ struct v4l2_device *v4l2_dev);
+
+void msm_csiphy_unregister_entity(struct csiphy_device *csiphy);
+
+extern const struct csiphy_hw_ops csiphy_ops_2ph_1_0;
+extern const struct csiphy_hw_ops csiphy_ops_3ph_1_0;
+
+#endif /* QC_MSM_CAMSS_CSIPHY_H */
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
new file mode 100644
index 000000000..1f33b4eb1
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -0,0 +1,1373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-ispif.c
+ *
+ * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-ispif.h"
+#include "camss.h"
+
+#define MSM_ISPIF_NAME "msm_ispif"
+
+#define ISPIF_RST_CMD_0 0x008
+#define ISPIF_RST_CMD_0_STROBED_RST_EN (1 << 0)
+#define ISPIF_RST_CMD_0_MISC_LOGIC_RST (1 << 1)
+#define ISPIF_RST_CMD_0_SW_REG_RST (1 << 2)
+#define ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST (1 << 3)
+#define ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST (1 << 4)
+#define ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST (1 << 5)
+#define ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST (1 << 6)
+#define ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST (1 << 7)
+#define ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST (1 << 8)
+#define ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST (1 << 9)
+#define ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST (1 << 10)
+#define ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST (1 << 11)
+#define ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST (1 << 12)
+#define ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST (1 << 16)
+#define ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST (1 << 17)
+#define ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST (1 << 18)
+#define ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST (1 << 19)
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x01c
+#define ISPIF_VFE_m_CTRL_0(m) (0x200 + 0x200 * (m))
+#define ISPIF_VFE_m_CTRL_0_PIX0_LINE_BUF_EN (1 << 6)
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE 0x00001249
+#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK 0x00001fff
+#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE 0x02492000
+#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK 0x03ffe000
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20c + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE 0x00001249
+#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK 0x00001fff
+#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE 0x02492000
+#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK 0x03ffe000
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE 0x00001249
+#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK 0x00001fff
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21c + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW (1 << 12)
+#define ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW (1 << 25)
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW (1 << 12)
+#define ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW (1 << 25)
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW (1 << 12)
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + 0x200 * (m))
+#define ISPIF_VFE_m_INTF_INPUT_SEL(m) (0x244 + 0x200 * (m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + 0x200 * (m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24c + 0x200 * (m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) \
+ (0x254 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \
+ (0x264 + 0x200 * (m) + 0x4 * (n))
+/* PACK_CFG registers are 8x96 only */
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(m, n) \
+ (0x270 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(m, n) \
+ (0x27c + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(c) \
+ (1 << (((c) % 8) * 4))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \
+ (0x2c0 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \
+ (0x2d0 + 0x200 * (m) + 0x4 * (n))
+
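As a quick sanity check on the addressing scheme above: each VFE instance owns a 0x200-byte register block, and per-interface registers step by 4 bytes inside it. A small stand-alone check, with the two macros copied from the definitions above:

#include <assert.h>

#define ISPIF_VFE_m_IRQ_STATUS_0(m)		(0x21c + 0x200 * (m))
#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n)	(0x264 + 0x200 * (m) + 0x4 * (n))

int main(void)
{
	assert(ISPIF_VFE_m_IRQ_STATUS_0(0) == 0x21c);			/* VFE0 */
	assert(ISPIF_VFE_m_IRQ_STATUS_0(1) == 0x41c);			/* VFE1: +0x200 */
	assert(ISPIF_VFE_m_RDI_INTF_n_CID_MASK(0, 2) == 0x26c);	/* RDI2: +0x8 */
	return 0;
}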
+#define CSI_PIX_CLK_MUX_SEL 0x000
+#define CSI_RDI_CLK_MUX_SEL 0x008
+
+#define ISPIF_TIMEOUT_SLEEP_US 1000
+#define ISPIF_TIMEOUT_ALL_US 1000000
+#define ISPIF_RESET_TIMEOUT_MS 500
+
+enum ispif_intf_cmd {
+ CMD_DISABLE_FRAME_BOUNDARY = 0x0,
+ CMD_ENABLE_FRAME_BOUNDARY = 0x1,
+ CMD_DISABLE_IMMEDIATELY = 0x2,
+ CMD_ALL_DISABLE_IMMEDIATELY = 0xaaaaaaaa,
+ CMD_ALL_NO_CHANGE = 0xffffffff,
+};
+
+static const u32 ispif_formats_8x16[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_Y10_1X10,
+};
+
+static const u32 ispif_formats_8x96[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
+};
+
+/*
+ * ispif_isr_8x96 - ISPIF module interrupt handler for 8x96
+ * @irq: Interrupt line
+ * @dev: ISPIF device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t ispif_isr_8x96(int irq, void *dev)
+{
+ struct ispif_device *ispif = dev;
+ u32 value0, value1, value2, value3, value4, value5;
+
+ value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0));
+ value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0));
+ value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0));
+ value3 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(1));
+ value4 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(1));
+ value5 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(1));
+
+ writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0));
+ writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0));
+ writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0));
+ writel_relaxed(value3, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(1));
+ writel_relaxed(value4, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(1));
+ writel_relaxed(value5, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(1));
+
+ writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+
+ if ((value0 >> 27) & 0x1)
+ complete(&ispif->reset_complete);
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix0 overflow\n");
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi0 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix1 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi1 overflow\n");
+
+ if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi2 overflow\n");
+
+ if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 pix0 overflow\n");
+
+ if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi0 overflow\n");
+
+ if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 pix1 overflow\n");
+
+ if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi1 overflow\n");
+
+ if (unlikely(value5 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi2 overflow\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ispif_isr_8x16 - ISPIF module interrupt handler for 8x16
+ * @irq: Interrupt line
+ * @dev: ISPIF device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t ispif_isr_8x16(int irq, void *dev)
+{
+ struct ispif_device *ispif = dev;
+ u32 value0, value1, value2;
+
+ value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0));
+ value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0));
+ value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0));
+
+ writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0));
+ writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0));
+ writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0));
+
+ writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+
+ if ((value0 >> 27) & 0x1)
+ complete(&ispif->reset_complete);
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix0 overflow\n");
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi0 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix1 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi1 overflow\n");
+
+ if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi2 overflow\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ispif_reset - Trigger reset on ISPIF module and wait for it to complete
+ * @ispif: ISPIF device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_reset(struct ispif_device *ispif)
+{
+ unsigned long time;
+ u32 val;
+ int ret;
+
+ ret = camss_pm_domain_on(to_camss(ispif), PM_DOMAIN_VFE0);
+ if (ret < 0)
+ return ret;
+
+ ret = camss_pm_domain_on(to_camss(ispif), PM_DOMAIN_VFE1);
+ if (ret < 0)
+ return ret;
+
+ ret = camss_enable_clocks(ispif->nclocks_for_reset,
+ ispif->clock_for_reset,
+ to_device(ispif));
+ if (ret < 0)
+ return ret;
+
+ reinit_completion(&ispif->reset_complete);
+
+ val = ISPIF_RST_CMD_0_STROBED_RST_EN |
+ ISPIF_RST_CMD_0_MISC_LOGIC_RST |
+ ISPIF_RST_CMD_0_SW_REG_RST |
+ ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST |
+ ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST |
+ ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST |
+ ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST |
+ ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST |
+ ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST |
+ ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST |
+ ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST |
+ ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST |
+ ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST |
+ ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST |
+ ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST |
+ ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST |
+ ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST;
+
+ writel_relaxed(val, ispif->base + ISPIF_RST_CMD_0);
+
+ time = wait_for_completion_timeout(&ispif->reset_complete,
+ msecs_to_jiffies(ISPIF_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(to_device(ispif), "ISPIF reset timeout\n");
+ ret = -EIO;
+ }
+
+ camss_disable_clocks(ispif->nclocks_for_reset, ispif->clock_for_reset);
+
+ camss_pm_domain_off(to_camss(ispif), PM_DOMAIN_VFE0);
+ camss_pm_domain_off(to_camss(ispif), PM_DOMAIN_VFE1);
+
+ return ret;
+}
+
+/*
+ * ispif_set_power - Power on/off ISPIF module
+ * @sd: ISPIF V4L2 subdevice
+ * @on: Requested power state
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct ispif_line *line = v4l2_get_subdevdata(sd);
+ struct ispif_device *ispif = line->ispif;
+ struct device *dev = to_device(ispif);
+ int ret = 0;
+
+ mutex_lock(&ispif->power_lock);
+
+ if (on) {
+ if (ispif->power_count) {
+ /* Power is already on */
+ ispif->power_count++;
+ goto exit;
+ }
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto exit;
+
+ ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ goto exit;
+ }
+
+ ret = ispif_reset(ispif);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ camss_disable_clocks(ispif->nclocks, ispif->clock);
+ goto exit;
+ }
+
+ ispif->intf_cmd[line->vfe_id].cmd_0 = CMD_ALL_NO_CHANGE;
+ ispif->intf_cmd[line->vfe_id].cmd_1 = CMD_ALL_NO_CHANGE;
+
+ ispif->power_count++;
+ } else {
+ if (ispif->power_count == 0) {
+ dev_err(dev, "ispif power off on power_count == 0\n");
+ goto exit;
+ } else if (ispif->power_count == 1) {
+ camss_disable_clocks(ispif->nclocks, ispif->clock);
+ pm_runtime_put_sync(dev);
+ }
+
+ ispif->power_count--;
+ }
+
+exit:
+ mutex_unlock(&ispif->power_lock);
+
+ return ret;
+}
+
+/*
+ * ispif_select_clk_mux - Select clock for PIX/RDI interface
+ * @ispif: ISPIF device
+ * @intf: VFE interface
+ * @csid: CSID HW module id
+ * @vfe: VFE HW module id
+ * @enable: enable or disable the selected clock
+ */
+static void ispif_select_clk_mux(struct ispif_device *ispif,
+ enum ispif_intf intf, u8 csid,
+ u8 vfe, u8 enable)
+{
+ u32 val;
+
+ switch (intf) {
+ case PIX0:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
+ val &= ~(0xf << (vfe * 8));
+ if (enable)
+ val |= (csid << (vfe * 8));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
+ break;
+
+ case RDI0:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ val &= ~(0xf << (vfe * 12));
+ if (enable)
+ val |= (csid << (vfe * 12));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ break;
+
+ case PIX1:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
+ val &= ~(0xf << (4 + (vfe * 8)));
+ if (enable)
+ val |= (csid << (4 + (vfe * 8)));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
+ break;
+
+ case RDI1:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ val &= ~(0xf << (4 + (vfe * 12)));
+ if (enable)
+ val |= (csid << (4 + (vfe * 12)));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ break;
+
+ case RDI2:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ val &= ~(0xf << (8 + (vfe * 12)));
+ if (enable)
+ val |= (csid << (8 + (vfe * 12)));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ break;
+ }
+
+ mb();
+}
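The switch above encodes a regular field layout: in CSI_PIX_CLK_MUX_SEL each VFE owns an 8-bit group (one nibble per PIX interface), while in CSI_RDI_CLK_MUX_SEL each VFE owns a 12-bit group (one nibble per RDI interface), and the nibble holds the selected CSID id. A minimal user-space sketch of the RDI case, equivalent to the RDI0/RDI1/RDI2 branches but not part of the driver:

#include <assert.h>
#include <stdint.h>

static uint32_t rdi_clk_mux_set(uint32_t reg, unsigned int vfe,
				unsigned int rdi, unsigned int csid)
{
	unsigned int shift = vfe * 12 + rdi * 4;	/* RDI0/1/2 -> nibble 0/1/2 */

	reg &= ~(0xfu << shift);			/* clear previous CSID */
	reg |= (uint32_t)csid << shift;			/* select the new CSID */
	return reg;
}

int main(void)
{
	/* VFE1 + RDI2 + CSID3 lands in the nibble at bit 20 */
	assert(rdi_clk_mux_set(0, 1, 2, 3) == 0x00300000u);
	return 0;
}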
+
+/*
+ * ispif_validate_intf_status - Validate current status of PIX/RDI interface
+ * @ispif: ISPIF device
+ * @intf: VFE interface
+ * @vfe: VFE HW module id
+ *
+ * Return 0 when interface is idle or -EBUSY otherwise
+ */
+static int ispif_validate_intf_status(struct ispif_device *ispif,
+ enum ispif_intf intf, u8 vfe)
+{
+ int ret = 0;
+ u32 val = 0;
+
+ switch (intf) {
+ case PIX0:
+ val = readl_relaxed(ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0));
+ break;
+ case RDI0:
+ val = readl_relaxed(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0));
+ break;
+ case PIX1:
+ val = readl_relaxed(ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1));
+ break;
+ case RDI1:
+ val = readl_relaxed(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1));
+ break;
+ case RDI2:
+ val = readl_relaxed(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2));
+ break;
+ }
+
+ if ((val & 0xf) != 0xf) {
+ dev_err(to_device(ispif), "%s: ispif is busy: 0x%x\n",
+ __func__, val);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+/*
+ * ispif_wait_for_stop - Wait for PIX/RDI interface to stop
+ * @ispif: ISPIF device
+ * @intf: VFE interface
+ * @vfe: VFE HW module id
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_wait_for_stop(struct ispif_device *ispif,
+ enum ispif_intf intf, u8 vfe)
+{
+ u32 addr = 0;
+ u32 stop_flag = 0;
+ int ret;
+
+ switch (intf) {
+ case PIX0:
+ addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0);
+ break;
+ case RDI0:
+ addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0);
+ break;
+ case PIX1:
+ addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1);
+ break;
+ case RDI1:
+ addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1);
+ break;
+ case RDI2:
+ addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2);
+ break;
+ }
+
+ ret = readl_poll_timeout(ispif->base + addr,
+ stop_flag,
+ (stop_flag & 0xf) == 0xf,
+ ISPIF_TIMEOUT_SLEEP_US,
+ ISPIF_TIMEOUT_ALL_US);
+ if (ret < 0)
+ dev_err(to_device(ispif), "%s: ispif stop timeout\n",
+ __func__);
+
+ return ret;
+}
+
+/*
+ * ispif_select_csid - Select CSID HW module as input for a PIX/RDI interface
+ * @ispif: ISPIF device
+ * @intf: VFE interface
+ * @csid: CSID HW module id
+ * @vfe: VFE HW module id
+ * @enable: enable or disable the selected input
+ */
+static void ispif_select_csid(struct ispif_device *ispif, enum ispif_intf intf,
+ u8 csid, u8 vfe, u8 enable)
+{
+ u32 val;
+
+ val = readl_relaxed(ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe));
+ switch (intf) {
+ case PIX0:
+ val &= ~(BIT(1) | BIT(0));
+ if (enable)
+ val |= csid;
+ break;
+ case RDI0:
+ val &= ~(BIT(5) | BIT(4));
+ if (enable)
+ val |= (csid << 4);
+ break;
+ case PIX1:
+ val &= ~(BIT(9) | BIT(8));
+ if (enable)
+ val |= (csid << 8);
+ break;
+ case RDI1:
+ val &= ~(BIT(13) | BIT(12));
+ if (enable)
+ val |= (csid << 12);
+ break;
+ case RDI2:
+ val &= ~(BIT(21) | BIT(20));
+ if (enable)
+ val |= (csid << 20);
+ break;
+ }
+
+ writel(val, ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe));
+}
+
+/*
+ * ispif_select_cid - Enable/disable desired CID
+ * @ispif: ISPIF device
+ * @intf: VFE interface
+ * @cid: desired CID to enable/disable
+ * @vfe: VFE HW module id
+ * @enable: enable or disable the desired CID
+ */
+static void ispif_select_cid(struct ispif_device *ispif, enum ispif_intf intf,
+ u8 cid, u8 vfe, u8 enable)
+{
+ u32 cid_mask = 1 << cid;
+ u32 addr = 0;
+ u32 val;
+
+ switch (intf) {
+ case PIX0:
+ addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 0);
+ break;
+ case RDI0:
+ addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 0);
+ break;
+ case PIX1:
+ addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 1);
+ break;
+ case RDI1:
+ addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 1);
+ break;
+ case RDI2:
+ addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 2);
+ break;
+ }
+
+ val = readl_relaxed(ispif->base + addr);
+ if (enable)
+ val |= cid_mask;
+ else
+ val &= ~cid_mask;
+
+ writel(val, ispif->base + addr);
+}
+
+/*
+ * ispif_config_irq - Enable/disable interrupts for PIX/RDI interface
+ * @ispif: ISPIF device
+ * @intf: VFE interface
+ * @vfe: VFE HW module id
+ * @enable: enable or disable
+ */
+static void ispif_config_irq(struct ispif_device *ispif, enum ispif_intf intf,
+ u8 vfe, u8 enable)
+{
+ u32 val;
+
+ switch (intf) {
+ case PIX0:
+ val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
+ val &= ~ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK;
+ if (enable)
+ val |= ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE;
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
+ writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe));
+ break;
+ case RDI0:
+ val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
+ val &= ~ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK;
+ if (enable)
+ val |= ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE;
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
+ writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe));
+ break;
+ case PIX1:
+ val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
+ val &= ~ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK;
+ if (enable)
+ val |= ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE;
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
+ writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe));
+ break;
+ case RDI1:
+ val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
+ val &= ~ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK;
+ if (enable)
+ val |= ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE;
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
+ writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe));
+ break;
+ case RDI2:
+ val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe));
+ val &= ~ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK;
+ if (enable)
+ val |= ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE;
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe));
+ writel_relaxed(ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe));
+ break;
+ }
+
+ writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+}
+
+/*
+ * ispif_config_pack - Config packing for PRDI mode
+ * @ispif: ISPIF device
+ * @code: media bus format code
+ * @intf: VFE interface
+ * @cid: desired CID to handle
+ * @vfe: VFE HW module id
+ * @enable: enable or disable
+ */
+static void ispif_config_pack(struct ispif_device *ispif, u32 code,
+ enum ispif_intf intf, u8 cid, u8 vfe, u8 enable)
+{
+ u32 addr, val;
+
+ if (code != MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE &&
+ code != MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)
+ return;
+
+ switch (intf) {
+ case RDI0:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 0);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 0);
+ break;
+ case RDI1:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 1);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 1);
+ break;
+ case RDI2:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 2);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 2);
+ break;
+ default:
+ return;
+ }
+
+ if (enable)
+ val = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(cid);
+ else
+ val = 0;
+
+ writel_relaxed(val, ispif->base + addr);
+}
+
+/*
+ * ispif_set_intf_cmd - Set command to enable/disable interface
+ * @ispif: ISPIF device
+ * @cmd: interface command
+ * @intf: VFE interface
+ * @vfe: VFE HW module id
+ * @vc: virtual channel
+ */
+static void ispif_set_intf_cmd(struct ispif_device *ispif, u8 cmd,
+ enum ispif_intf intf, u8 vfe, u8 vc)
+{
+ u32 *val;
+
+ if (intf == RDI2) {
+ val = &ispif->intf_cmd[vfe].cmd_1;
+ *val &= ~(0x3 << (vc * 2 + 8));
+ *val |= (cmd << (vc * 2 + 8));
+ wmb();
+ writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe));
+ wmb();
+ } else {
+ val = &ispif->intf_cmd[vfe].cmd_0;
+ *val &= ~(0x3 << (vc * 2 + intf * 8));
+ *val |= (cmd << (vc * 2 + intf * 8));
+ wmb();
+ writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe));
+ wmb();
+ }
+}
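ispif_set_intf_cmd packs a 2-bit command per virtual channel, with one 8-bit group per interface: PIX0, RDI0, PIX1 and RDI1 (enum values 0..3) share INTF_CMD_0, while RDI2 is kept in INTF_CMD_1 at a fixed offset of 8. A small sketch of the INTF_CMD_0 packing used above, for illustration only:

#include <assert.h>
#include <stdint.h>

static uint32_t intf_cmd0_pack(uint32_t cmd0, unsigned int intf,
			       unsigned int vc, unsigned int cmd)
{
	unsigned int shift = vc * 2 + intf * 8;	/* 2 bits per VC, 8 per intf */

	cmd0 &= ~(0x3u << shift);
	cmd0 |= (uint32_t)cmd << shift;
	return cmd0;
}

int main(void)
{
	/* enable frame boundary (0x1) on RDI0 (intf 1), VC0 -> bits 9:8 */
	assert(intf_cmd0_pack(0xffffffffu, 1, 0, 0x1) == 0xfffffdffu);
	return 0;
}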
+
+/*
+ * ispif_set_stream - Enable/disable streaming on ISPIF module
+ * @sd: ISPIF V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Main configuration of ISPIF module is also done here.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ispif_line *line = v4l2_get_subdevdata(sd);
+ struct ispif_device *ispif = line->ispif;
+ enum ispif_intf intf = line->interface;
+ u8 csid = line->csid_id;
+ u8 vfe = line->vfe_id;
+ u8 vc = 0; /* Virtual Channel 0 */
+ u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */
+ int ret;
+
+ if (enable) {
+ if (!media_entity_remote_pad(&line->pads[MSM_ISPIF_PAD_SINK]))
+ return -ENOLINK;
+
+ /* Config */
+
+ mutex_lock(&ispif->config_lock);
+ ispif_select_clk_mux(ispif, intf, csid, vfe, 1);
+
+ ret = ispif_validate_intf_status(ispif, intf, vfe);
+ if (ret < 0) {
+ mutex_unlock(&ispif->config_lock);
+ return ret;
+ }
+
+ ispif_select_csid(ispif, intf, csid, vfe, 1);
+ ispif_select_cid(ispif, intf, cid, vfe, 1);
+ ispif_config_irq(ispif, intf, vfe, 1);
+ if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif_config_pack(ispif,
+ line->fmt[MSM_ISPIF_PAD_SINK].code,
+ intf, cid, vfe, 1);
+ ispif_set_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY,
+ intf, vfe, vc);
+ } else {
+ mutex_lock(&ispif->config_lock);
+ ispif_set_intf_cmd(ispif, CMD_DISABLE_FRAME_BOUNDARY,
+ intf, vfe, vc);
+ mutex_unlock(&ispif->config_lock);
+
+ ret = ispif_wait_for_stop(ispif, intf, vfe);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ispif->config_lock);
+ if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif_config_pack(ispif,
+ line->fmt[MSM_ISPIF_PAD_SINK].code,
+ intf, cid, vfe, 0);
+ ispif_config_irq(ispif, intf, vfe, 0);
+ ispif_select_cid(ispif, intf, cid, vfe, 0);
+ ispif_select_csid(ispif, intf, csid, vfe, 0);
+ ispif_select_clk_mux(ispif, intf, csid, vfe, 0);
+ }
+
+ mutex_unlock(&ispif->config_lock);
+
+ return 0;
+}
+
+/*
+ * __ispif_get_format - Get pointer to format structure
+ * @line: ISPIF line
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad from which format is requested
+ * @which: TRY or ACTIVE format
+ *
+ * Return pointer to TRY or ACTIVE format structure
+ */
+static struct v4l2_mbus_framefmt *
+__ispif_get_format(struct ispif_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
+
+ return &line->fmt[pad];
+}
+
+/*
+ * ispif_try_format - Handle try format by pad subdev method
+ * @line: ISPIF line
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad on which format is requested
+ * @fmt: pointer to v4l2 format structure
+ * @which: wanted subdev format
+ */
+static void ispif_try_format(struct ispif_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int i;
+
+ switch (pad) {
+ case MSM_ISPIF_PAD_SINK:
+ /* Set format on sink pad */
+
+ for (i = 0; i < line->nformats; i++)
+ if (fmt->code == line->formats[i])
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= line->nformats)
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ break;
+
+ case MSM_ISPIF_PAD_SRC:
+ /* Set and return a format same as sink pad */
+
+ *fmt = *__ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK,
+ which);
+
+ break;
+ }
+
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/*
+ * ispif_enum_mbus_code - Handle pixel format enumeration
+ * @sd: ISPIF V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ * Return -EINVAL or zero on success
+ */
+static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ispif_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == MSM_ISPIF_PAD_SINK) {
+ if (code->index >= line->nformats)
+ return -EINVAL;
+
+ code->code = line->formats[code->index];
+ } else {
+ if (code->index > 0)
+ return -EINVAL;
+
+ format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK,
+ code->which);
+
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+/*
+ * ispif_enum_frame_size - Handle frame size enumeration
+ * @sd: ISPIF V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure
+ * Return -EINVAL or zero on success
+ */
+static int ispif_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct ispif_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ispif_try_format(line, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ispif_try_format(line, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
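The min/max probing above relies on ispif_try_format clamping width and height into [1, 8191]: passing 1 returns the supported minimum, and passing -1 (which becomes U32_MAX in the u32 field) returns the maximum. A tiny check of that idea with a local helper, not the kernel's clamp_t:

#include <assert.h>

static unsigned int clamp_u32(unsigned int val, unsigned int lo, unsigned int hi)
{
	return val < lo ? lo : val > hi ? hi : val;
}

int main(void)
{
	assert(clamp_u32(1u, 1, 8191) == 1);			/* probe for minimum */
	assert(clamp_u32((unsigned int)-1, 1, 8191) == 8191);	/* probe for maximum */
	return 0;
}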
+
+/*
+ * ispif_get_format - Handle get format by pads subdev method
+ * @sd: ISPIF V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int ispif_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ispif_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ispif_get_format(line, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * ispif_set_format - Handle set format by pads subdev method
+ * @sd: ISPIF V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int ispif_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ispif_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ispif_get_format(line, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ispif_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == MSM_ISPIF_PAD_SINK) {
+ format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SRC,
+ fmt->which);
+
+ *format = fmt->format;
+ ispif_try_format(line, cfg, MSM_ISPIF_PAD_SRC, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+/*
+ * ispif_init_formats - Initialize formats on all pads
+ * @sd: ISPIF V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format = {
+ .pad = MSM_ISPIF_PAD_SINK,
+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .width = 1920,
+ .height = 1080
+ }
+ };
+
+ return ispif_set_format(sd, fh ? fh->pad : NULL, &format);
+}
+
+/*
+ * msm_ispif_subdev_init - Initialize ISPIF device structure and resources
+ * @ispif: ISPIF device
+ * @res: ISPIF module resources table
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_ispif_subdev_init(struct ispif_device *ispif,
+ const struct resources_ispif *res)
+{
+ struct device *dev = to_device(ispif);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *r;
+ int i;
+ int ret;
+
+ /* Number of ISPIF lines - same as number of CSID hardware modules */
+ if (to_camss(ispif)->version == CAMSS_8x16)
+ ispif->line_num = 2;
+ else if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif->line_num = 4;
+ else
+ return -EINVAL;
+
+ ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+ GFP_KERNEL);
+ if (!ispif->line)
+ return -ENOMEM;
+
+ for (i = 0; i < ispif->line_num; i++) {
+ ispif->line[i].ispif = ispif;
+ ispif->line[i].id = i;
+
+ if (to_camss(ispif)->version == CAMSS_8x16) {
+ ispif->line[i].formats = ispif_formats_8x16;
+ ispif->line[i].nformats =
+ ARRAY_SIZE(ispif_formats_8x16);
+ } else if (to_camss(ispif)->version == CAMSS_8x96) {
+ ispif->line[i].formats = ispif_formats_8x96;
+ ispif->line[i].nformats =
+ ARRAY_SIZE(ispif_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ /* Memory */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
+ ispif->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(ispif->base)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(ispif->base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]);
+ ispif->base_clk_mux = devm_ioremap_resource(dev, r);
+ if (IS_ERR(ispif->base_clk_mux)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(ispif->base_clk_mux);
+ }
+
+ /* Interrupt */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res->interrupt);
+
+ if (!r) {
+ dev_err(dev, "missing IRQ\n");
+ return -EINVAL;
+ }
+
+ ispif->irq = r->start;
+ snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s",
+ dev_name(dev), MSM_ISPIF_NAME);
+ if (to_camss(ispif)->version == CAMSS_8x16)
+ ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x16,
+ IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
+ else if (to_camss(ispif)->version == CAMSS_8x96)
+ ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x96,
+ IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
+ else
+ ret = -EINVAL;
+ if (ret < 0) {
+ dev_err(dev, "request_irq failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Clocks */
+
+ ispif->nclocks = 0;
+ while (res->clock[ispif->nclocks])
+ ispif->nclocks++;
+
+ ispif->clock = devm_kcalloc(dev,
+ ispif->nclocks, sizeof(*ispif->clock),
+ GFP_KERNEL);
+ if (!ispif->clock)
+ return -ENOMEM;
+
+ for (i = 0; i < ispif->nclocks; i++) {
+ struct camss_clock *clock = &ispif->clock[i];
+
+ clock->clk = devm_clk_get(dev, res->clock[i]);
+ if (IS_ERR(clock->clk))
+ return PTR_ERR(clock->clk);
+
+ clock->freq = NULL;
+ clock->nfreqs = 0;
+ }
+
+ ispif->nclocks_for_reset = 0;
+ while (res->clock_for_reset[ispif->nclocks_for_reset])
+ ispif->nclocks_for_reset++;
+
+ ispif->clock_for_reset = devm_kcalloc(dev,
+ ispif->nclocks_for_reset,
+ sizeof(*ispif->clock_for_reset),
+ GFP_KERNEL);
+ if (!ispif->clock_for_reset)
+ return -ENOMEM;
+
+ for (i = 0; i < ispif->nclocks_for_reset; i++) {
+ struct camss_clock *clock = &ispif->clock_for_reset[i];
+
+ clock->clk = devm_clk_get(dev, res->clock_for_reset[i]);
+ if (IS_ERR(clock->clk))
+ return PTR_ERR(clock->clk);
+
+ clock->freq = NULL;
+ clock->nfreqs = 0;
+ }
+
+ mutex_init(&ispif->power_lock);
+ ispif->power_count = 0;
+
+ mutex_init(&ispif->config_lock);
+
+ init_completion(&ispif->reset_complete);
+
+ return 0;
+}
+
+/*
+ * ispif_get_intf - Get the ISPIF interface to use for a given VFE line id
+ * @line_id: VFE line id that the ISPIF line is connected to
+ *
+ * Return ISPIF interface to use
+ */
+static enum ispif_intf ispif_get_intf(enum vfe_line_id line_id)
+{
+ switch (line_id) {
+ case (VFE_LINE_RDI0):
+ return RDI0;
+ case (VFE_LINE_RDI1):
+ return RDI1;
+ case (VFE_LINE_RDI2):
+ return RDI2;
+ case (VFE_LINE_PIX):
+ return PIX0;
+ default:
+ return RDI0;
+ }
+}
+
+/*
+ * ispif_link_setup - Setup ISPIF connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad
+ * @remote: Pointer to remote pad
+ * @flags: Link flags
+ *
+ * Return 0 on success
+ */
+static int ispif_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (media_entity_remote_pad(local))
+ return -EBUSY;
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ struct v4l2_subdev *sd;
+ struct ispif_line *line;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ line = v4l2_get_subdevdata(sd);
+
+ msm_csid_get_csid_id(remote->entity, &line->csid_id);
+ } else { /* MEDIA_PAD_FL_SOURCE */
+ struct v4l2_subdev *sd;
+ struct ispif_line *line;
+ enum vfe_line_id id;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ line = v4l2_get_subdevdata(sd);
+
+ msm_vfe_get_vfe_id(remote->entity, &line->vfe_id);
+ msm_vfe_get_vfe_line_id(remote->entity, &id);
+ line->interface = ispif_get_intf(id);
+ }
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops ispif_core_ops = {
+ .s_power = ispif_set_power,
+};
+
+static const struct v4l2_subdev_video_ops ispif_video_ops = {
+ .s_stream = ispif_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ispif_pad_ops = {
+ .enum_mbus_code = ispif_enum_mbus_code,
+ .enum_frame_size = ispif_enum_frame_size,
+ .get_fmt = ispif_get_format,
+ .set_fmt = ispif_set_format,
+};
+
+static const struct v4l2_subdev_ops ispif_v4l2_ops = {
+ .core = &ispif_core_ops,
+ .video = &ispif_video_ops,
+ .pad = &ispif_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ispif_v4l2_internal_ops = {
+ .open = ispif_init_formats,
+};
+
+static const struct media_entity_operations ispif_media_ops = {
+ .link_setup = ispif_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * msm_ispif_register_entities - Register subdev node for ISPIF module
+ * @ispif: ISPIF device
+ * @v4l2_dev: V4L2 device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_ispif_register_entities(struct ispif_device *ispif,
+ struct v4l2_device *v4l2_dev)
+{
+ struct device *dev = to_device(ispif);
+ int ret;
+ int i;
+
+ for (i = 0; i < ispif->line_num; i++) {
+ struct v4l2_subdev *sd = &ispif->line[i].subdev;
+ struct media_pad *pads = ispif->line[i].pads;
+
+ v4l2_subdev_init(sd, &ispif_v4l2_ops);
+ sd->internal_ops = &ispif_v4l2_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
+ MSM_ISPIF_NAME, i);
+ v4l2_set_subdevdata(sd, &ispif->line[i]);
+
+ ret = ispif_init_formats(sd, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init format: %d\n", ret);
+ goto error;
+ }
+
+ pads[MSM_ISPIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[MSM_ISPIF_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_IO_V4L;
+ sd->entity.ops = &ispif_media_ops;
+ ret = media_entity_pads_init(&sd->entity, MSM_ISPIF_PADS_NUM,
+ pads);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init media entity: %d\n", ret);
+ goto error;
+ }
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+ media_entity_cleanup(&sd->entity);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (i--; i >= 0; i--) {
+ struct v4l2_subdev *sd = &ispif->line[i].subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ }
+
+ return ret;
+}
+
+/*
+ * msm_ispif_unregister_entities - Unregister ISPIF module subdev node
+ * @ispif: ISPIF device
+ */
+void msm_ispif_unregister_entities(struct ispif_device *ispif)
+{
+ int i;
+
+ mutex_destroy(&ispif->power_lock);
+ mutex_destroy(&ispif->config_lock);
+
+ for (i = 0; i < ispif->line_num; i++) {
+ struct v4l2_subdev *sd = &ispif->line[i].subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ }
+}
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.h b/drivers/media/platform/qcom/camss/camss-ispif.h
new file mode 100644
index 000000000..1a5ba2425
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-ispif.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-ispif.h
+ *
+ * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_ISPIF_H
+#define QC_MSM_CAMSS_ISPIF_H
+
+#include <linux/clk.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#define MSM_ISPIF_PAD_SINK 0
+#define MSM_ISPIF_PAD_SRC 1
+#define MSM_ISPIF_PADS_NUM 2
+
+#define MSM_ISPIF_VFE_NUM 2
+
+enum ispif_intf {
+ PIX0,
+ RDI0,
+ PIX1,
+ RDI1,
+ RDI2
+};
+
+struct ispif_intf_cmd_reg {
+ u32 cmd_0;
+ u32 cmd_1;
+};
+
+struct ispif_line {
+ struct ispif_device *ispif;
+ u8 id;
+ u8 csid_id;
+ u8 vfe_id;
+ enum ispif_intf interface;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_ISPIF_PADS_NUM];
+ struct v4l2_mbus_framefmt fmt[MSM_ISPIF_PADS_NUM];
+ const u32 *formats;
+ unsigned int nformats;
+};
+
+struct ispif_device {
+ void __iomem *base;
+ void __iomem *base_clk_mux;
+ u32 irq;
+ char irq_name[30];
+ struct camss_clock *clock;
+ int nclocks;
+ struct camss_clock *clock_for_reset;
+ int nclocks_for_reset;
+ struct completion reset_complete;
+ int power_count;
+ struct mutex power_lock;
+ struct ispif_intf_cmd_reg intf_cmd[MSM_ISPIF_VFE_NUM];
+ struct mutex config_lock;
+ unsigned int line_num;
+ struct ispif_line *line;
+};
+
+struct resources_ispif;
+
+int msm_ispif_subdev_init(struct ispif_device *ispif,
+ const struct resources_ispif *res);
+
+int msm_ispif_register_entities(struct ispif_device *ispif,
+ struct v4l2_device *v4l2_dev);
+
+void msm_ispif_unregister_entities(struct ispif_device *ispif);
+
+#endif /* QC_MSM_CAMSS_ISPIF_H */
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
new file mode 100644
index 000000000..174a36be6
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-4-1.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.1
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss-vfe.h"
+
+#define VFE_0_HW_VERSION 0x000
+
+#define VFE_0_GLOBAL_RESET_CMD 0x00c
+#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
+#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
+#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
+#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
+#define VFE_0_GLOBAL_RESET_CMD_TIMER BIT(5)
+#define VFE_0_GLOBAL_RESET_CMD_PM BIT(6)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(7)
+#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(8)
+
+#define VFE_0_MODULE_CFG 0x018
+#define VFE_0_MODULE_CFG_DEMUX BIT(2)
+#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE BIT(3)
+#define VFE_0_MODULE_CFG_SCALE_ENC BIT(23)
+#define VFE_0_MODULE_CFG_CROP_ENC BIT(27)
+
+#define VFE_0_CORE_CFG 0x01c
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
+
+#define VFE_0_IRQ_CMD 0x024
+#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+
+#define VFE_0_IRQ_MASK_0 0x028
+#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
+#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_MASK_1 0x02c
+#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
+#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
+#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_CLEAR_0 0x030
+#define VFE_0_IRQ_CLEAR_1 0x034
+
+#define VFE_0_IRQ_STATUS_0 0x038
+#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_STATUS_1 0x03c
+#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
+#define VFE_0_VIOLATION_STATUS 0x48
+
+#define VFE_0_BUS_CMD 0x4c
+#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
+
+#define VFE_0_BUS_CFG 0x050
+
+#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(1)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
+ (0x088 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
+ (0x08c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
+
+#define VFE_0_BUS_PING_PONG_STATUS 0x268
+
+#define VFE_0_BUS_BDG_CMD 0x2c0
+#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
+#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
+#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
+
+#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
+#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
+#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
+#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) BIT(16 + (r))
+
+#define VFE_0_CAMIF_CMD 0x2f4
+#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
+#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
+#define VFE_0_CAMIF_CMD_NO_CHANGE 3
+#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
+#define VFE_0_CAMIF_CFG 0x2f8
+#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
+#define VFE_0_CAMIF_FRAME_CFG 0x300
+#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
+#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
+#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
+#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
+#define VFE_0_CAMIF_STATUS 0x31c
+#define VFE_0_CAMIF_STATUS_HALT BIT(31)
+
+#define VFE_0_REG_UPDATE 0x378
+#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
+#define VFE_0_REG_UPDATE_line_n(n) \
+ ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
+
+#define VFE_0_DEMUX_CFG 0x424
+#define VFE_0_DEMUX_CFG_PERIOD 0x3
+#define VFE_0_DEMUX_GAIN_0 0x428
+#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
+#define VFE_0_DEMUX_GAIN_1 0x42c
+#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
+#define VFE_0_DEMUX_EVEN_CFG 0x438
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
+#define VFE_0_DEMUX_ODD_CFG 0x43c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
+
+#define VFE_0_SCALE_ENC_Y_CFG 0x75c
+#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
+#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
+#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
+#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
+#define VFE_0_SCALE_ENC_CBCR_CFG 0x778
+#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
+#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
+#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
+#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
+
+#define VFE_0_CROP_ENC_Y_WIDTH 0x854
+#define VFE_0_CROP_ENC_Y_HEIGHT 0x858
+#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
+#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
+
+#define VFE_0_CLAMP_ENC_MAX_CFG 0x874
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
+#define VFE_0_CLAMP_ENC_MIN_CFG 0x878
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
+
+#define VFE_0_CGC_OVERRIDE_1 0x974
+#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) BIT(x)
+
+#define CAMIF_TIMEOUT_SLEEP_US 1000
+#define CAMIF_TIMEOUT_ALL_US 1000000
+
+#define MSM_VFE_VFE0_UB_SIZE 1023
+#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
+
+static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
+
+ dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+}
+
+static u16 vfe_get_ub_size(u8 vfe_id)
+{
+ if (vfe_id == 0)
+ return MSM_VFE_VFE0_UB_SIZE_RDI;
+
+ return 0;
+}
+
+static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits & ~clr_bits, vfe->base + reg);
+}
+
+static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits | set_bits, vfe->base + reg);
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
+ VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
+ VFE_0_GLOBAL_RESET_CMD_PM |
+ VFE_0_GLOBAL_RESET_CMD_TIMER |
+ VFE_0_GLOBAL_RESET_CMD_REGISTER |
+ VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
+ VFE_0_GLOBAL_RESET_CMD_BUS |
+ VFE_0_GLOBAL_RESET_CMD_CAMIF |
+ VFE_0_GLOBAL_RESET_CMD_CORE;
+
+ writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
+}
+
+static void vfe_halt_request(struct vfe_device *vfe)
+{
+ writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
+ vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_halt_clear(struct vfe_device *vfe)
+{
+ writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+}
+
+static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
+}
+
+#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
+
+static int vfe_word_per_line(u32 format, u32 pixel_per_line)
+{
+ int val = 0;
+
+ switch (format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ val = CALC_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CALC_WORD(pixel_per_line, 2, 8);
+ break;
+ }
+
+ return val;
+}
+
+static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
+ u16 *width, u16 *height, u16 *bytesperline)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ if (plane == 1)
+ *height /= 2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ break;
+ }
+}
+
+static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable)
+{
+ u32 reg;
+
+ if (enable) {
+ u16 width = 0, height = 0, bytesperline = 0, wpl;
+
+ vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
+
+ wpl = vfe_word_per_line(pix->pixelformat, width);
+
+ reg = height - 1;
+ reg |= ((wpl + 1) / 2 - 1) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+
+ wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
+
+ reg = 0x3;
+ reg |= (height - 1) << 4;
+ reg |= wpl << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ } else {
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ }
+}
+
+static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+
+ reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
+
+ reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
+ & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
+
+ writel_relaxed(reg,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+}
+
+static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
+ u32 pattern)
+{
+ writel_relaxed(pattern,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
+}
+
+static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
+ u16 offset, u16 depth)
+{
+ u32 reg;
+
+ reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
+ depth;
+ writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
+}
+
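+/*
+ * Request a reload of the programmed buffer addresses for write master wm.
+ * The barriers keep the command ordered with the surrounding relaxed writes.
+ */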
+static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
+{
+ wmb();
+ writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
+ wmb();
+}
+
+static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
+}
+
+static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
+}
+
+static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
+
+ return (reg >> wm) & 0x1;
+}
+
+static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
+{
+ if (enable)
+ writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
+ else
+ writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
+}
+
+static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
+ VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
+{
+ writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
+ vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
+}
+
+static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
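+/*
+ * Route the PIX output through the bus crossbar: the first write master
+ * carries luma, the second carries the interleaved chroma pair stream.
+ */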
+static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable)
+{
+ struct vfe_line *line = container_of(output, struct vfe_line, output);
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ unsigned int i;
+
+ for (i = 0; i < output->wm_num; i++) {
+ if (i == 0) {
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ } else if (i == 1) {
+ reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ } else {
+ /* On current devices output->wm_num is always <= 2 */
+ break;
+ }
+
+ if (output->wm_idx[i] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
+ reg);
+ }
+}
+
+static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable)
+{
+ /* empty */
+}
+
+static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
+{
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
+ VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
+
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
+ cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
+ wmb();
+ writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
+ wmb();
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+}
+
+static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
+ VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
+ VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ }
+}
+
+static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable)
+{
+ struct vfe_output *output = &vfe->line[line_id].output;
+ unsigned int i;
+ u32 irq_en0;
+ u32 irq_en1;
+ u32 comp_mask = 0;
+
+ irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
+ irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
+ for (i = 0; i < output->wm_num; i++) {
+ irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
+ output->wm_idx[i]);
+ comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
+ }
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ }
+}
+
+static void vfe_enable_irq_common(struct vfe_device *vfe)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
+ VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
+
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+}
+
+static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val, even_cfg, odd_cfg;
+
+ writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
+
+ val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
+
+ val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
+ break;
+ }
+
+ writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
+ writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
+}
+
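+/* Select the scaler interpolation resolution from the downscale ratio */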
+static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
+{
+ if (input / output >= 16)
+ return 0;
+
+ if (input / output >= 8)
+ return 1;
+
+ if (input / output >= 4)
+ return 2;
+
+ return 3;
+}
+
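+/*
+ * Program the encoder scaler: image sizes and phase multipliers for the
+ * Y and CbCr paths. Chroma is scaled to half width, and to half height
+ * for the 4:2:0 formats.
+ */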
+static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 input, output;
+ u8 interp_reso;
+ u32 phase_mult;
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width;
+ output = line->compose.width;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height;
+ output = line->compose.height;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width;
+ output = line->compose.width / 2;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height;
+ output = line->compose.height;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
+ output = line->compose.height / 2;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
+}
+
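+/*
+ * Program the encoder crop window as first/last pixel and line for the
+ * Y and CbCr planes; 4:2:0 formats crop half the chroma height.
+ */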
+static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 first, last;
+
+ first = line->crop.left;
+ last = line->crop.left + line->crop.width - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
+
+ first = line->crop.left / 2;
+ last = line->crop.left / 2 + line->crop.width / 2 - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
+ first = line->crop.top / 2;
+ last = line->crop.top / 2 + line->crop.height / 2 - 1;
+ }
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
+}
+
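+/* Clamp every output channel to the full 8-bit range (0..255) */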
+static void vfe_set_clamp_cfg(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
+
+ val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
+}
+
+static void vfe_set_qos(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
+ u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
+ writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static void vfe_set_ds(struct vfe_device *vfe)
+{
+ /* empty */
+}
+
+static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
+
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
+ else
+ vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
+
+ wmb();
+}
+
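+/*
+ * Program CAMIF with the sink pad pixel pattern and frame geometry.
+ * Widths are written as pixel width * 2 since the 2X8 YUV bus carries
+ * two samples per pixel.
+ */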
+static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val;
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
+ break;
+ }
+
+ writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
+ val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
+
+ val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
+
+ val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
+}
+
+static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
+{
+ u32 cmd;
+
+ cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+ wmb();
+
+ if (enable)
+ cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
+ else
+ cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
+
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+}
+
+static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
+{
+ u32 val = VFE_0_MODULE_CFG_DEMUX |
+ VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
+ VFE_0_MODULE_CFG_SCALE_ENC |
+ VFE_0_MODULE_CFG_CROP_ENC;
+
+ if (enable)
+ writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
+ else
+ writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
+}
+
+static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
+ val,
+ (val & VFE_0_CAMIF_STATUS_HALT),
+ CAMIF_TIMEOUT_SLEEP_US,
+ CAMIF_TIMEOUT_ALL_US);
+ if (ret < 0)
+ dev_err(dev, "%s: camif stop timeout\n", __func__);
+
+ return ret;
+}
+
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
+{
+ *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
+ *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
+
+ writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
+ writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
+ wmb();
+ writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
+}
+
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
+
+ pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 value0, value1;
+ int i, j;
+
+ vfe->ops->isr_read(vfe, &value0, &value1);
+
+ trace_printk("VFE: status0 = 0x%08x, status1 = 0x%08x\n",
+ value0, value1);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
+ vfe->isr_ops.reset_ack(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
+ vfe->ops->violation_read(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
+ vfe->isr_ops.halt_ack(vfe);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
+ vfe->isr_ops.reg_update(vfe, i);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
+ vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
+ vfe->isr_ops.sof(vfe, i);
+
+ for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
+ vfe->isr_ops.comp_done(vfe, i);
+ for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
+ if (vfe->wm_output_map[j] == VFE_LINE_PIX)
+ value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
+ }
+
+ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
+ vfe->isr_ops.wm_done(vfe, i);
+
+ return IRQ_HANDLED;
+}
+
+const struct vfe_hw_ops vfe_ops_4_1 = {
+ .hw_version_read = vfe_hw_version_read,
+ .get_ub_size = vfe_get_ub_size,
+ .global_reset = vfe_global_reset,
+ .halt_request = vfe_halt_request,
+ .halt_clear = vfe_halt_clear,
+ .wm_enable = vfe_wm_enable,
+ .wm_frame_based = vfe_wm_frame_based,
+ .wm_line_based = vfe_wm_line_based,
+ .wm_set_framedrop_period = vfe_wm_set_framedrop_period,
+ .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
+ .wm_set_ub_cfg = vfe_wm_set_ub_cfg,
+ .bus_reload_wm = vfe_bus_reload_wm,
+ .wm_set_ping_addr = vfe_wm_set_ping_addr,
+ .wm_set_pong_addr = vfe_wm_set_pong_addr,
+ .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
+ .bus_enable_wr_if = vfe_bus_enable_wr_if,
+ .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
+ .wm_set_subsample = vfe_wm_set_subsample,
+ .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
+ .set_xbar_cfg = vfe_set_xbar_cfg,
+ .set_realign_cfg = vfe_set_realign_cfg,
+ .set_rdi_cid = vfe_set_rdi_cid,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .enable_irq_wm_line = vfe_enable_irq_wm_line,
+ .enable_irq_pix_line = vfe_enable_irq_pix_line,
+ .enable_irq_common = vfe_enable_irq_common,
+ .set_demux_cfg = vfe_set_demux_cfg,
+ .set_scale_cfg = vfe_set_scale_cfg,
+ .set_crop_cfg = vfe_set_crop_cfg,
+ .set_clamp_cfg = vfe_set_clamp_cfg,
+ .set_qos = vfe_set_qos,
+ .set_ds = vfe_set_ds,
+ .set_cgc_override = vfe_set_cgc_override,
+ .set_camif_cfg = vfe_set_camif_cfg,
+ .set_camif_cmd = vfe_set_camif_cmd,
+ .set_module_cfg = vfe_set_module_cfg,
+ .camif_wait_for_stop = vfe_camif_wait_for_stop,
+ .isr_read = vfe_isr_read,
+ .violation_read = vfe_violation_read,
+ .isr = vfe_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
new file mode 100644
index 000000000..0dca8bf92
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -0,0 +1,1141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-4-7.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.7
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss-vfe.h"
+
+#define VFE_0_HW_VERSION 0x000
+
+#define VFE_0_GLOBAL_RESET_CMD 0x018
+#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
+#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
+#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
+#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
+#define VFE_0_GLOBAL_RESET_CMD_PM BIT(5)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(6)
+#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(7)
+#define VFE_0_GLOBAL_RESET_CMD_DSP BIT(8)
+#define VFE_0_GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
+
+#define VFE_0_MODULE_LENS_EN 0x040
+#define VFE_0_MODULE_LENS_EN_DEMUX BIT(2)
+#define VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE BIT(3)
+
+#define VFE_0_MODULE_ZOOM_EN 0x04c
+#define VFE_0_MODULE_ZOOM_EN_SCALE_ENC BIT(1)
+#define VFE_0_MODULE_ZOOM_EN_CROP_ENC BIT(2)
+#define VFE_0_MODULE_ZOOM_EN_REALIGN_BUF BIT(9)
+
+#define VFE_0_CORE_CFG 0x050
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
+#define VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN BIT(4)
+
+#define VFE_0_IRQ_CMD 0x058
+#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+
+#define VFE_0_IRQ_MASK_0 0x05c
+#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
+#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_MASK_1 0x060
+#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
+#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
+#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_CLEAR_0 0x064
+#define VFE_0_IRQ_CLEAR_1 0x068
+
+#define VFE_0_IRQ_STATUS_0 0x06c
+#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_STATUS_1 0x070
+#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_COMPOSITE_MASK_0 0x074
+#define VFE_0_VIOLATION_STATUS 0x07c
+
+#define VFE_0_BUS_CMD 0x80
+#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
+
+#define VFE_0_BUS_CFG 0x084
+
+#define VFE_0_BUS_XBAR_CFG_x(x) (0x90 + 0x4 * ((x) / 2))
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(2)
+#define VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN BIT(3)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTRA (0x1 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER (0x2 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0x0
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 0xc
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 0xd
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 0xe
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT 1
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
+ (0x0c4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
+ (0x0c8 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
+
+#define VFE_0_BUS_PING_PONG_STATUS 0x338
+
+#define VFE_0_BUS_BDG_CMD 0x400
+#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x404
+#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa9aaa9
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x408
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x40c
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x410
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x414
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x418
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x41c
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x420
+#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa9
+
+#define VFE_0_BUS_BDG_DS_CFG_0 0x424
+#define VFE_0_BUS_BDG_DS_CFG_0_CFG 0xcccc0011
+#define VFE_0_BUS_BDG_DS_CFG_1 0x428
+#define VFE_0_BUS_BDG_DS_CFG_2 0x42c
+#define VFE_0_BUS_BDG_DS_CFG_3 0x430
+#define VFE_0_BUS_BDG_DS_CFG_4 0x434
+#define VFE_0_BUS_BDG_DS_CFG_5 0x438
+#define VFE_0_BUS_BDG_DS_CFG_6 0x43c
+#define VFE_0_BUS_BDG_DS_CFG_7 0x440
+#define VFE_0_BUS_BDG_DS_CFG_8 0x444
+#define VFE_0_BUS_BDG_DS_CFG_9 0x448
+#define VFE_0_BUS_BDG_DS_CFG_10 0x44c
+#define VFE_0_BUS_BDG_DS_CFG_11 0x450
+#define VFE_0_BUS_BDG_DS_CFG_12 0x454
+#define VFE_0_BUS_BDG_DS_CFG_13 0x458
+#define VFE_0_BUS_BDG_DS_CFG_14 0x45c
+#define VFE_0_BUS_BDG_DS_CFG_15 0x460
+#define VFE_0_BUS_BDG_DS_CFG_16 0x464
+#define VFE_0_BUS_BDG_DS_CFG_16_CFG 0x40000103
+
+#define VFE_0_RDI_CFG_x(x) (0x46c + (0x4 * (x)))
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
+#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
+#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
+
+#define VFE_0_CAMIF_CMD 0x478
+#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
+#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
+#define VFE_0_CAMIF_CMD_NO_CHANGE 3
+#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
+#define VFE_0_CAMIF_CFG 0x47c
+#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
+#define VFE_0_CAMIF_FRAME_CFG 0x484
+#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x488
+#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x48c
+#define VFE_0_CAMIF_SUBSAMPLE_CFG 0x490
+#define VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN 0x498
+#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x49c
+#define VFE_0_CAMIF_STATUS 0x4a4
+#define VFE_0_CAMIF_STATUS_HALT BIT(31)
+
+#define VFE_0_REG_UPDATE 0x4ac
+#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
+#define VFE_0_REG_UPDATE_line_n(n) \
+ ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
+
+#define VFE_0_DEMUX_CFG 0x560
+#define VFE_0_DEMUX_CFG_PERIOD 0x3
+#define VFE_0_DEMUX_GAIN_0 0x564
+#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
+#define VFE_0_DEMUX_GAIN_1 0x568
+#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
+#define VFE_0_DEMUX_EVEN_CFG 0x574
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
+#define VFE_0_DEMUX_ODD_CFG 0x578
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
+
+#define VFE_0_SCALE_ENC_Y_CFG 0x91c
+#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x920
+#define VFE_0_SCALE_ENC_Y_H_PHASE 0x924
+#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x934
+#define VFE_0_SCALE_ENC_Y_V_PHASE 0x938
+#define VFE_0_SCALE_ENC_CBCR_CFG 0x948
+#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x94c
+#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x950
+#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x960
+#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x964
+
+#define VFE_0_CROP_ENC_Y_WIDTH 0x974
+#define VFE_0_CROP_ENC_Y_HEIGHT 0x978
+#define VFE_0_CROP_ENC_CBCR_WIDTH 0x97c
+#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x980
+
+#define VFE_0_CLAMP_ENC_MAX_CFG 0x984
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
+#define VFE_0_CLAMP_ENC_MIN_CFG 0x988
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
+
+#define VFE_0_REALIGN_BUF_CFG 0xaac
+#define VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL BIT(2)
+#define VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL BIT(3)
+#define VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE BIT(4)
+
+#define CAMIF_TIMEOUT_SLEEP_US 1000
+#define CAMIF_TIMEOUT_ALL_US 1000000
+
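+/* Per-VFE write master UB space, split equally between the three RDI outputs */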
+#define MSM_VFE_VFE0_UB_SIZE 2047
+#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
+#define MSM_VFE_VFE1_UB_SIZE 1535
+#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
+
+static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
+
+ dev_err(dev, "VFE HW Version = 0x%08x\n", hw_version);
+}
+
+static u16 vfe_get_ub_size(u8 vfe_id)
+{
+ if (vfe_id == 0)
+ return MSM_VFE_VFE0_UB_SIZE_RDI;
+ else if (vfe_id == 1)
+ return MSM_VFE_VFE1_UB_SIZE_RDI;
+
+ return 0;
+}
+
+static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits & ~clr_bits, vfe->base + reg);
+}
+
+static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits | set_bits, vfe->base + reg);
+}
+
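+/*
+ * The reset-ack interrupt (bit 31 of IRQ mask 0) is enabled before the
+ * reset command is issued so that completion of the reset can be observed.
+ */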
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_IDLE_CGC |
+ VFE_0_GLOBAL_RESET_CMD_DSP |
+ VFE_0_GLOBAL_RESET_CMD_TESTGEN |
+ VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
+ VFE_0_GLOBAL_RESET_CMD_PM |
+ VFE_0_GLOBAL_RESET_CMD_REGISTER |
+ VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
+ VFE_0_GLOBAL_RESET_CMD_BUS |
+ VFE_0_GLOBAL_RESET_CMD_CAMIF |
+ VFE_0_GLOBAL_RESET_CMD_CORE;
+
+ writel_relaxed(BIT(31), vfe->base + VFE_0_IRQ_MASK_0);
+ wmb();
+ writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
+}
+
+static void vfe_halt_request(struct vfe_device *vfe)
+{
+ writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
+ vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_halt_clear(struct vfe_device *vfe)
+{
+ writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+}
+
+static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
+}
+
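+/*
+ * Round up to whole units; the helpers below use it to convert a line
+ * length, in pixels or in bytes, to 8-byte bus words.
+ */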
+#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
+
+static int vfe_word_per_line_by_pixel(u32 format, u32 pixel_per_line)
+{
+ int val = 0;
+
+ switch (format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ val = CALC_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CALC_WORD(pixel_per_line, 2, 8);
+ break;
+ }
+
+ return val;
+}
+
+static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
+{
+ return CALC_WORD(bytes_per_line, 1, 8);
+}
+
+static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
+ u16 *width, u16 *height, u16 *bytesperline)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ if (plane == 1)
+ *height /= 2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_UYVY:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[plane].bytesperline;
+ break;
+ }
+}
+
+static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable)
+{
+ u32 reg;
+
+ if (enable) {
+ u16 width = 0, height = 0, bytesperline = 0, wpl;
+
+ vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
+
+ wpl = vfe_word_per_line_by_pixel(pix->pixelformat, width);
+
+ reg = height - 1;
+ reg |= ((wpl + 3) / 4 - 1) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+
+ wpl = vfe_word_per_line_by_bytes(bytesperline);
+
+ reg = 0x3;
+ reg |= (height - 1) << 2;
+ reg |= ((wpl + 1) / 2) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ } else {
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ }
+}
+
+static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+
+ reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
+
+ reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
+ & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
+
+ writel_relaxed(reg,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+}
+
+static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
+ u32 pattern)
+{
+ writel_relaxed(pattern,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
+}
+
+static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
+ u16 offset, u16 depth)
+{
+ u32 reg;
+
+ reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
+ depth;
+ writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
+}
+
+static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
+{
+ wmb();
+ writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
+ wmb();
+}
+
+static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
+}
+
+static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
+}
+
+static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
+
+ return (reg >> wm) & 0x1;
+}
+
+static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
+{
+ if (enable)
+ writel_relaxed(0x101, vfe->base + VFE_0_BUS_CFG);
+ else
+ writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
+}
+
+static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
+ VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
+{
+ writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
+ vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
+}
+
+static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
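+/*
+ * Configure the bus crossbar for the PIX output: planar NV formats use a
+ * luma write master plus a chroma pair-stream master, while packed YUV
+ * formats pair streams through the realign buffer on a single master.
+ */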
+static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable)
+{
+ struct vfe_line *line = container_of(output, struct vfe_line, output);
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+
+ switch (p) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+
+ if (output->wm_idx[0] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+
+ reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+
+ if (output->wm_idx[1] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
+ reg);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_UYVY:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN;
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+
+ if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+
+ if (output->wm_idx[0] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ break;
+ default:
+ break;
+ }
+}
+
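+/*
+ * The realign buffer is used only for packed YUV output: enable horizontal
+ * subsampling and place Cb or Cr on the odd pixel as the format requires.
+ */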
+static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 val = VFE_0_MODULE_ZOOM_EN_REALIGN_BUF;
+
+ if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU &&
+ p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY)
+ return;
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val);
+ return;
+ }
+
+ val = VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE;
+
+ if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV)
+ val |= VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL;
+ else
+ val |= VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL;
+
+ writel_relaxed(val, vfe->base + VFE_0_REALIGN_BUF_CFG);
+}
+
+static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
+{
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
+ VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
+
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
+ cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
+ wmb();
+ writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
+ wmb();
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+}
+
+static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
+ VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
+ VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ }
+}
+
+static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable)
+{
+ struct vfe_output *output = &vfe->line[line_id].output;
+ unsigned int i;
+ u32 irq_en0;
+ u32 irq_en1;
+ u32 comp_mask = 0;
+
+ irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
+ irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
+ for (i = 0; i < output->wm_num; i++) {
+ irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
+ output->wm_idx[i]);
+ comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
+ }
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ }
+}
+
+static void vfe_enable_irq_common(struct vfe_device *vfe)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
+ VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
+
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+}
+
+static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val, even_cfg, odd_cfg;
+
+ writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
+
+ val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
+
+ val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
+ break;
+ }
+
+ writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
+ writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
+}
+
+static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
+{
+ if (input / output >= 16)
+ return 0;
+
+ if (input / output >= 8)
+ return 1;
+
+ if (input / output >= 4)
+ return 2;
+
+ return 3;
+}
+
+static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 input, output;
+ u8 interp_reso;
+ u32 phase_mult;
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
+ output = line->compose.width - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ output = line->compose.height - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
+ output = line->compose.width / 2 - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ output = line->compose.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
+ output = line->compose.height / 2 - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
+}
+
+static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 first, last;
+
+ first = line->crop.left;
+ last = line->crop.left + line->crop.width - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
+
+ first = line->crop.left / 2;
+ last = line->crop.left / 2 + line->crop.width / 2 - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
+ first = line->crop.top / 2;
+ last = line->crop.top / 2 + line->crop.height / 2 - 1;
+ }
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
+}
+
+static void vfe_set_clamp_cfg(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
+
+ val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
+}
+
+static void vfe_set_qos(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
+ u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
+ writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static void vfe_set_ds(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_DS_CFG_0_CFG;
+ u32 val16 = VFE_0_BUS_BDG_DS_CFG_16_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_6);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_7);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_8);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_9);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_10);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_11);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_12);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_13);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_14);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_15);
+ writel_relaxed(val16, vfe->base + VFE_0_BUS_BDG_DS_CFG_16);
+}
+
+static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ /* empty */
+}
+
+static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val;
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
+ break;
+ }
+
+ val |= VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ val |= (line->fmt[MSM_VFE_PAD_SINK].height - 1) << 16;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
+
+ val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
+
+ val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
+}
+
+static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
+{
+ u32 cmd;
+
+ cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+ wmb();
+
+ if (enable)
+ cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
+ else
+ cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
+
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+}
+
+static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
+{
+ u32 val_lens = VFE_0_MODULE_LENS_EN_DEMUX |
+ VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE;
+ u32 val_zoom = VFE_0_MODULE_ZOOM_EN_SCALE_ENC |
+ VFE_0_MODULE_ZOOM_EN_CROP_ENC;
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_MODULE_LENS_EN, val_lens);
+ vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_MODULE_LENS_EN, val_lens);
+ vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
+ }
+}
+
+static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
+ val,
+ (val & VFE_0_CAMIF_STATUS_HALT),
+ CAMIF_TIMEOUT_SLEEP_US,
+ CAMIF_TIMEOUT_ALL_US);
+ if (ret < 0)
+ dev_err(dev, "%s: camif stop timeout\n", __func__);
+
+ return ret;
+}
+
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
+{
+ *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
+ *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
+
+ writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
+ writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
+ wmb();
+ writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
+}
+
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
+
+ pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 value0, value1;
+ int i, j;
+
+ vfe->ops->isr_read(vfe, &value0, &value1);
+
+ trace_printk("VFE: status0 = 0x%08x, status1 = 0x%08x\n",
+ value0, value1);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
+ vfe->isr_ops.reset_ack(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
+ vfe->ops->violation_read(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
+ vfe->isr_ops.halt_ack(vfe);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
+ vfe->isr_ops.reg_update(vfe, i);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
+ vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
+ vfe->isr_ops.sof(vfe, i);
+
+ for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
+ vfe->isr_ops.comp_done(vfe, i);
+ for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
+ if (vfe->wm_output_map[j] == VFE_LINE_PIX)
+ value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
+ }
+
+ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
+ vfe->isr_ops.wm_done(vfe, i);
+
+ return IRQ_HANDLED;
+}
+
+const struct vfe_hw_ops vfe_ops_4_7 = {
+ .hw_version_read = vfe_hw_version_read,
+ .get_ub_size = vfe_get_ub_size,
+ .global_reset = vfe_global_reset,
+ .halt_request = vfe_halt_request,
+ .halt_clear = vfe_halt_clear,
+ .wm_enable = vfe_wm_enable,
+ .wm_frame_based = vfe_wm_frame_based,
+ .wm_line_based = vfe_wm_line_based,
+ .wm_set_framedrop_period = vfe_wm_set_framedrop_period,
+ .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
+ .wm_set_ub_cfg = vfe_wm_set_ub_cfg,
+ .bus_reload_wm = vfe_bus_reload_wm,
+ .wm_set_ping_addr = vfe_wm_set_ping_addr,
+ .wm_set_pong_addr = vfe_wm_set_pong_addr,
+ .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
+ .bus_enable_wr_if = vfe_bus_enable_wr_if,
+ .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
+ .wm_set_subsample = vfe_wm_set_subsample,
+ .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
+ .set_xbar_cfg = vfe_set_xbar_cfg,
+ .set_realign_cfg = vfe_set_realign_cfg,
+ .set_rdi_cid = vfe_set_rdi_cid,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .enable_irq_wm_line = vfe_enable_irq_wm_line,
+ .enable_irq_pix_line = vfe_enable_irq_pix_line,
+ .enable_irq_common = vfe_enable_irq_common,
+ .set_demux_cfg = vfe_set_demux_cfg,
+ .set_scale_cfg = vfe_set_scale_cfg,
+ .set_crop_cfg = vfe_set_crop_cfg,
+ .set_clamp_cfg = vfe_set_clamp_cfg,
+ .set_qos = vfe_set_qos,
+ .set_ds = vfe_set_ds,
+ .set_cgc_override = vfe_set_cgc_override,
+ .set_camif_cfg = vfe_set_camif_cfg,
+ .set_camif_cmd = vfe_set_camif_cmd,
+ .set_module_cfg = vfe_set_module_cfg,
+ .camif_wait_for_stop = vfe_camif_wait_for_stop,
+ .isr_read = vfe_isr_read,
+ .violation_read = vfe_violation_read,
+ .isr = vfe_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
new file mode 100644
index 000000000..ed6a557de
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -0,0 +1,2342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-vfe.h"
+#include "camss.h"
+
+#define MSM_VFE_NAME "msm_vfe"
+
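+/*
+ * A struct vfe_line is an element of the line[] array embedded in
+ * struct vfe_device. Step back by the line id to reach the array base
+ * and from there the containing vfe_device.
+ */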
+#define vfe_line_array(ptr_line) \
+ ((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)]))
+
+#define to_vfe(ptr_line) \
+ container_of(vfe_line_array(ptr_line), struct vfe_device, line)
+
+/* VFE reset timeout */
+#define VFE_RESET_TIMEOUT_MS 50
+/* VFE halt timeout */
+#define VFE_HALT_TIMEOUT_MS 100
+/* Max number of frame drop updates per frame */
+#define VFE_FRAME_DROP_UPDATES 5
+/* Frame drop value. NOTE: VAL + UPDATES should not exceed 31 */
+#define VFE_FRAME_DROP_VAL 20
+
+#define VFE_NEXT_SOF_MS 500
+
+#define SCALER_RATIO_MAX 16
+
+struct vfe_format {
+ u32 code;
+ u8 bpp;
+};
+
+static const struct vfe_format formats_rdi_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+};
+
+static const struct vfe_format formats_pix_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+};
+
+static const struct vfe_format formats_rdi_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+ { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 },
+};
+
+static const struct vfe_format formats_pix_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+};
+
+/*
+ * vfe_get_bpp - map media bus format to bits per pixel
+ * @formats: supported media bus formats array
+ * @nformats: size of @formats array
+ * @code: media bus format code
+ *
+ * Return number of bits per pixel
+ */
+static u8 vfe_get_bpp(const struct vfe_format *formats,
+ unsigned int nformats, u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < nformats; i++)
+ if (code == formats[i].code)
+ return formats[i].bpp;
+
+ WARN(1, "Unknown format\n");
+
+ return formats[0].bpp;
+}
+
+static u32 vfe_find_code(u32 *code, unsigned int n_code,
+ unsigned int index, u32 req_code)
+{
+ int i;
+
+ if (!req_code && (index >= n_code))
+ return 0;
+
+ for (i = 0; i < n_code; i++)
+ if (req_code) {
+ if (req_code == code[i])
+ return req_code;
+ } else {
+ if (i == index)
+ return code[i];
+ }
+
+ return code[0];
+}
+
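+/*
+ * vfe_src_pad_code - Pick media bus code for the source pad
+ * @line: VFE line
+ * @sink_code: code configured on the sink pad
+ * @index: index into the set of allowed source codes
+ * @src_req_code: requested source code, 0 to enumerate by @index
+ *
+ * Return the selected source pad code or 0 when none is available
+ */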
+static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
+ unsigned int index, u32 src_req_code)
+{
+ struct vfe_device *vfe = to_vfe(line);
+
+ if (vfe->camss->version == CAMSS_8x16)
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ }
+ else if (vfe->camss->version == CAMSS_8x96)
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ }
+ else
+ return 0;
+}
+
+/*
+ * vfe_reset - Trigger reset on VFE module and wait to complete
+ * @vfe: VFE device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_reset(struct vfe_device *vfe)
+{
+ unsigned long time;
+
+ reinit_completion(&vfe->reset_complete);
+
+ vfe->ops->global_reset(vfe);
+
+ time = wait_for_completion_timeout(&vfe->reset_complete,
+ msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(vfe->camss->dev, "VFE reset timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_halt - Trigger halt on VFE module and wait to complete
+ * @vfe: VFE device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_halt(struct vfe_device *vfe)
+{
+ unsigned long time;
+
+ reinit_completion(&vfe->halt_complete);
+
+ vfe->ops->halt_request(vfe);
+
+ time = wait_for_completion_timeout(&vfe->halt_complete,
+ msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
+ if (!time) {
+ dev_err(vfe->camss->dev, "VFE halt timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void vfe_init_outputs(struct vfe_device *vfe)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
+ struct vfe_output *output = &vfe->line[i].output;
+
+ output->state = VFE_OUTPUT_OFF;
+ output->buf[0] = NULL;
+ output->buf[1] = NULL;
+ INIT_LIST_HEAD(&output->pending_bufs);
+ }
+}
+
+static void vfe_reset_output_maps(struct vfe_device *vfe)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
+ vfe->wm_output_map[i] = VFE_LINE_NONE;
+}
+
+static void vfe_output_init_addrs(struct vfe_device *vfe,
+ struct vfe_output *output, u8 sync)
+{
+ u32 ping_addr;
+ u32 pong_addr;
+ unsigned int i;
+
+ output->active_buf = 0;
+
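+ /*
+ * Program ping/pong addresses for every write master. With a single
+ * queued buffer the pong address falls back to the ping address;
+ * with no buffers both are left at 0.
+ */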
+ for (i = 0; i < output->wm_num; i++) {
+ if (output->buf[0])
+ ping_addr = output->buf[0]->addr[i];
+ else
+ ping_addr = 0;
+
+ if (output->buf[1])
+ pong_addr = output->buf[1]->addr[i];
+ else
+ pong_addr = ping_addr;
+
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
+ if (sync)
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
+ }
+}
+
+static void vfe_output_update_ping_addr(struct vfe_device *vfe,
+ struct vfe_output *output, u8 sync)
+{
+ u32 addr;
+ unsigned int i;
+
+ for (i = 0; i < output->wm_num; i++) {
+ if (output->buf[0])
+ addr = output->buf[0]->addr[i];
+ else
+ addr = 0;
+
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], addr);
+ if (sync)
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
+ }
+}
+
+static void vfe_output_update_pong_addr(struct vfe_device *vfe,
+ struct vfe_output *output, u8 sync)
+{
+ u32 addr;
+ unsigned int i;
+
+ for (i = 0; i < output->wm_num; i++) {
+ if (output->buf[1])
+ addr = output->buf[1]->addr[i];
+ else
+ addr = 0;
+
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], addr);
+ if (sync)
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
+ }
+}
+
+static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ int ret = -EBUSY;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) {
+ if (vfe->wm_output_map[i] == VFE_LINE_NONE) {
+ vfe->wm_output_map[i] = line_id;
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int vfe_release_wm(struct vfe_device *vfe, u8 wm)
+{
+ if (wm >= ARRAY_SIZE(vfe->wm_output_map))
+ return -EINVAL;
+
+ vfe->wm_output_map[wm] = VFE_LINE_NONE;
+
+ return 0;
+}
+
+static void vfe_output_frame_drop(struct vfe_device *vfe,
+ struct vfe_output *output,
+ u32 drop_pattern)
+{
+ u8 drop_period;
+ unsigned int i;
+
+ /* Toggle the update period so the new pattern takes effect on the next frame */
+ output->drop_update_idx++;
+ output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
+ drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
+
+ for (i = 0; i < output->wm_num; i++) {
+ vfe->ops->wm_set_framedrop_period(vfe, output->wm_idx[i],
+ drop_period);
+ vfe->ops->wm_set_framedrop_pattern(vfe, output->wm_idx[i],
+ drop_pattern);
+ }
+ vfe->ops->reg_update(vfe,
+ container_of(output, struct vfe_line, output)->id);
+}
+
+static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
+{
+ struct camss_buffer *buffer = NULL;
+
+ if (!list_empty(&output->pending_bufs)) {
+ buffer = list_first_entry(&output->pending_bufs,
+ struct camss_buffer,
+ queue);
+ list_del(&buffer->queue);
+ }
+
+ return buffer;
+}
+
+/*
+ * vfe_buf_add_pending - Add output buffer to list of pending
+ * @output: VFE output
+ * @buffer: Video buffer
+ */
+static void vfe_buf_add_pending(struct vfe_output *output,
+ struct camss_buffer *buffer)
+{
+ INIT_LIST_HEAD(&buffer->queue);
+ list_add_tail(&buffer->queue, &output->pending_bufs);
+}
+
+/*
+ * vfe_buf_flush_pending - Flush all pending buffers.
+ * @output: VFE output
+ * @state: vb2 buffer state
+ */
+static void vfe_buf_flush_pending(struct vfe_output *output,
+ enum vb2_buffer_state state)
+{
+ struct camss_buffer *buf;
+ struct camss_buffer *t;
+
+ list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+}
+
+static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
+ struct vfe_output *output)
+{
+ switch (output->state) {
+ case VFE_OUTPUT_CONTINUOUS:
+ vfe_output_frame_drop(vfe, output, 3);
+ break;
+ case VFE_OUTPUT_SINGLE:
+ default:
+ dev_err_ratelimited(vfe->camss->dev,
+ "Next buf in wrong state! %d\n",
+ output->state);
+ break;
+ }
+}
+
+static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
+ struct vfe_output *output)
+{
+ switch (output->state) {
+ case VFE_OUTPUT_CONTINUOUS:
+ output->state = VFE_OUTPUT_SINGLE;
+ vfe_output_frame_drop(vfe, output, 1);
+ break;
+ case VFE_OUTPUT_SINGLE:
+ output->state = VFE_OUTPUT_STOPPING;
+ vfe_output_frame_drop(vfe, output, 0);
+ break;
+ default:
+ dev_err_ratelimited(vfe->camss->dev,
+ "Last buff in wrong state! %d\n",
+ output->state);
+ break;
+ }
+}
+
+static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
+ struct vfe_output *output,
+ struct camss_buffer *new_buf)
+{
+ int inactive_idx;
+
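+ /*
+ * In SINGLE state the new buffer goes into the inactive ping/pong
+ * slot (if free), in IDLE state it becomes the first buffer;
+ * otherwise it is added to the pending queue.
+ */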
+ switch (output->state) {
+ case VFE_OUTPUT_SINGLE:
+ inactive_idx = !output->active_buf;
+
+ if (!output->buf[inactive_idx]) {
+ output->buf[inactive_idx] = new_buf;
+
+ if (inactive_idx)
+ vfe_output_update_pong_addr(vfe, output, 0);
+ else
+ vfe_output_update_ping_addr(vfe, output, 0);
+
+ vfe_output_frame_drop(vfe, output, 3);
+ output->state = VFE_OUTPUT_CONTINUOUS;
+ } else {
+ vfe_buf_add_pending(output, new_buf);
+ dev_err_ratelimited(vfe->camss->dev,
+ "Inactive buffer is busy\n");
+ }
+ break;
+
+ case VFE_OUTPUT_IDLE:
+ if (!output->buf[0]) {
+ output->buf[0] = new_buf;
+
+ vfe_output_init_addrs(vfe, output, 1);
+
+ vfe_output_frame_drop(vfe, output, 1);
+ output->state = VFE_OUTPUT_SINGLE;
+ } else {
+ vfe_buf_add_pending(output, new_buf);
+ dev_err_ratelimited(vfe->camss->dev,
+ "Output idle with buffer set!\n");
+ }
+ break;
+
+ case VFE_OUTPUT_CONTINUOUS:
+ default:
+ vfe_buf_add_pending(output, new_buf);
+ break;
+ }
+}
+
+static int vfe_get_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ struct v4l2_format *f = &line->video_out.active_fmt;
+ unsigned long flags;
+ int i;
+ int wm_idx;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ output = &line->output;
+ if (output->state != VFE_OUTPUT_OFF) {
+ dev_err(vfe->camss->dev, "Output is running\n");
+ goto error;
+ }
+ output->state = VFE_OUTPUT_RESERVED;
+
+ output->active_buf = 0;
+
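+ /* Two-plane formats need one write master per plane */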
+ switch (f->fmt.pix_mp.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ output->wm_num = 2;
+ break;
+ default:
+ output->wm_num = 1;
+ break;
+ }
+
+ for (i = 0; i < output->wm_num; i++) {
+ wm_idx = vfe_reserve_wm(vfe, line->id);
+ if (wm_idx < 0) {
+ dev_err(vfe->camss->dev, "Can not reserve wm\n");
+ goto error_get_wm;
+ }
+ output->wm_idx[i] = wm_idx;
+ }
+
+ output->drop_update_idx = 0;
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+
+error_get_wm:
+ for (i--; i >= 0; i--)
+ vfe_release_wm(vfe, output->wm_idx[i]);
+ output->state = VFE_OUTPUT_OFF;
+error:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return -EINVAL;
+}
+
+static int vfe_put_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ for (i = 0; i < output->wm_num; i++)
+ vfe_release_wm(vfe, output->wm_idx[i]);
+
+ output->state = VFE_OUTPUT_OFF;
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ return 0;
+}
+
+static int vfe_enable_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->ops;
+ unsigned long flags;
+ unsigned int i;
+ u16 ub_size;
+
+ ub_size = ops->get_ub_size(vfe->id);
+ if (!ub_size)
+ return -EINVAL;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ ops->reg_update_clear(vfe, line->id);
+
+ if (output->state != VFE_OUTPUT_RESERVED) {
+ dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
+ output->state);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ return -EINVAL;
+ }
+ output->state = VFE_OUTPUT_IDLE;
+
+ output->buf[0] = vfe_buf_get_pending(output);
+ output->buf[1] = vfe_buf_get_pending(output);
+
+ if (!output->buf[0] && output->buf[1]) {
+ output->buf[0] = output->buf[1];
+ output->buf[1] = NULL;
+ }
+
+ if (output->buf[0])
+ output->state = VFE_OUTPUT_SINGLE;
+
+ if (output->buf[1])
+ output->state = VFE_OUTPUT_CONTINUOUS;
+
+ switch (output->state) {
+ case VFE_OUTPUT_SINGLE:
+ vfe_output_frame_drop(vfe, output, 1);
+ break;
+ case VFE_OUTPUT_CONTINUOUS:
+ vfe_output_frame_drop(vfe, output, 3);
+ break;
+ default:
+ vfe_output_frame_drop(vfe, output, 0);
+ break;
+ }
+
+ output->sequence = 0;
+ output->wait_sof = 0;
+ output->wait_reg_update = 0;
+ reinit_completion(&output->sof);
+ reinit_completion(&output->reg_update);
+
+ vfe_output_init_addrs(vfe, output, 0);
+
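+ /*
+ * RDI lines use a single frame-based write master. The PIX line
+ * configures the camif/demux/scaler/crop pipeline, splits the UB
+ * evenly between its write masters and runs them line based.
+ */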
+ if (line->id != VFE_LINE_PIX) {
+ ops->set_cgc_override(vfe, output->wm_idx[0], 1);
+ ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
+ ops->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
+ ops->wm_set_subsample(vfe, output->wm_idx[0]);
+ ops->set_rdi_cid(vfe, line->id, 0);
+ ops->wm_set_ub_cfg(vfe, output->wm_idx[0],
+ (ub_size + 1) * output->wm_idx[0], ub_size);
+ ops->wm_frame_based(vfe, output->wm_idx[0], 1);
+ ops->wm_enable(vfe, output->wm_idx[0], 1);
+ ops->bus_reload_wm(vfe, output->wm_idx[0]);
+ } else {
+ ub_size /= output->wm_num;
+ for (i = 0; i < output->wm_num; i++) {
+ ops->set_cgc_override(vfe, output->wm_idx[i], 1);
+ ops->wm_set_subsample(vfe, output->wm_idx[i]);
+ ops->wm_set_ub_cfg(vfe, output->wm_idx[i],
+ (ub_size + 1) * output->wm_idx[i],
+ ub_size);
+ ops->wm_line_based(vfe, output->wm_idx[i],
+ &line->video_out.active_fmt.fmt.pix_mp,
+ i, 1);
+ ops->wm_enable(vfe, output->wm_idx[i], 1);
+ ops->bus_reload_wm(vfe, output->wm_idx[i]);
+ }
+ ops->enable_irq_pix_line(vfe, 0, line->id, 1);
+ ops->set_module_cfg(vfe, 1);
+ ops->set_camif_cfg(vfe, line);
+ ops->set_realign_cfg(vfe, line, 1);
+ ops->set_xbar_cfg(vfe, output, 1);
+ ops->set_demux_cfg(vfe, line);
+ ops->set_scale_cfg(vfe, line);
+ ops->set_crop_cfg(vfe, line);
+ ops->set_clamp_cfg(vfe);
+ ops->set_camif_cmd(vfe, 1);
+ }
+
+ ops->reg_update(vfe, line->id);
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+static int vfe_disable_output(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->ops;
+ unsigned long flags;
+ unsigned long time;
+ unsigned int i;
+
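+ /*
+ * Wait for the next SOF so the write masters are disabled on a frame
+ * boundary, then wait for the reg update ack before tearing down the
+ * rest of the line configuration.
+ */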
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ output->wait_sof = 1;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ time = wait_for_completion_timeout(&output->sof,
+ msecs_to_jiffies(VFE_NEXT_SOF_MS));
+ if (!time)
+ dev_err(vfe->camss->dev, "VFE sof timeout\n");
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ ops->wm_enable(vfe, output->wm_idx[i], 0);
+
+ ops->reg_update(vfe, line->id);
+ output->wait_reg_update = 1;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ time = wait_for_completion_timeout(&output->reg_update,
+ msecs_to_jiffies(VFE_NEXT_SOF_MS));
+ if (!time)
+ dev_err(vfe->camss->dev, "VFE reg update timeout\n");
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (line->id != VFE_LINE_PIX) {
+ ops->wm_frame_based(vfe, output->wm_idx[0], 0);
+ ops->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0],
+ line->id);
+ ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
+ ops->set_cgc_override(vfe, output->wm_idx[0], 0);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ } else {
+ for (i = 0; i < output->wm_num; i++) {
+ ops->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
+ ops->set_cgc_override(vfe, output->wm_idx[i], 0);
+ }
+
+ ops->enable_irq_pix_line(vfe, 0, line->id, 0);
+ ops->set_module_cfg(vfe, 0);
+ ops->set_realign_cfg(vfe, line, 0);
+ ops->set_xbar_cfg(vfe, output, 0);
+
+ ops->set_camif_cmd(vfe, 0);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ ops->camif_wait_for_stop(vfe, vfe->camss->dev);
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_enable - Enable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_enable(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ int ret;
+
+ mutex_lock(&vfe->stream_lock);
+
+ if (!vfe->stream_count) {
+ vfe->ops->enable_irq_common(vfe);
+
+ vfe->ops->bus_enable_wr_if(vfe, 1);
+
+ vfe->ops->set_qos(vfe);
+
+ vfe->ops->set_ds(vfe);
+ }
+
+ vfe->stream_count++;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ ret = vfe_get_output(line);
+ if (ret < 0)
+ goto error_get_output;
+
+ ret = vfe_enable_output(line);
+ if (ret < 0)
+ goto error_enable_output;
+
+ vfe->was_streaming = 1;
+
+ return 0;
+
+error_enable_output:
+ vfe_put_output(line);
+
+error_get_output:
+ mutex_lock(&vfe->stream_lock);
+
+ if (vfe->stream_count == 1)
+ vfe->ops->bus_enable_wr_if(vfe, 0);
+
+ vfe->stream_count--;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ return ret;
+}
+
+/*
+ * vfe_disable - Disable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_disable(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+
+ vfe_disable_output(line);
+
+ vfe_put_output(line);
+
+ mutex_lock(&vfe->stream_lock);
+
+ if (vfe->stream_count == 1)
+ vfe->ops->bus_enable_wr_if(vfe, 0);
+
+ vfe->stream_count--;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ return 0;
+}
+
+/*
+ * vfe_isr_sof - Process start of frame interrupt
+ * @vfe: VFE Device
+ * @line_id: VFE line
+ */
+static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ struct vfe_output *output;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ output = &vfe->line[line_id].output;
+ if (output->wait_sof) {
+ output->wait_sof = 0;
+ complete(&output->sof);
+ }
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+/*
+ * vfe_isr_reg_update - Process reg update interrupt
+ * @vfe: VFE Device
+ * @line_id: VFE line
+ */
+static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ struct vfe_output *output;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ vfe->ops->reg_update_clear(vfe, line_id);
+
+ output = &vfe->line[line_id].output;
+
+ if (output->wait_reg_update) {
+ output->wait_reg_update = 0;
+ complete(&output->reg_update);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ return;
+ }
+
+ if (output->state == VFE_OUTPUT_STOPPING) {
+ /* Release last buffer when hw is idle */
+ if (output->last_buffer) {
+ vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
+ VB2_BUF_STATE_DONE);
+ output->last_buffer = NULL;
+ }
+ output->state = VFE_OUTPUT_IDLE;
+
+ /*
+ * Buffers received in stopping state are queued in the dma
+ * pending queue, start the next capture here
+ */
+
+ output->buf[0] = vfe_buf_get_pending(output);
+ output->buf[1] = vfe_buf_get_pending(output);
+
+ if (!output->buf[0] && output->buf[1]) {
+ output->buf[0] = output->buf[1];
+ output->buf[1] = NULL;
+ }
+
+ if (output->buf[0])
+ output->state = VFE_OUTPUT_SINGLE;
+
+ if (output->buf[1])
+ output->state = VFE_OUTPUT_CONTINUOUS;
+
+ switch (output->state) {
+ case VFE_OUTPUT_SINGLE:
+ vfe_output_frame_drop(vfe, output, 2);
+ break;
+ case VFE_OUTPUT_CONTINUOUS:
+ vfe_output_frame_drop(vfe, output, 3);
+ break;
+ default:
+ vfe_output_frame_drop(vfe, output, 0);
+ break;
+ }
+
+ vfe_output_init_addrs(vfe, output, 1);
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+/*
+ * vfe_isr_wm_done - Process write master done interrupt
+ * @vfe: VFE Device
+ * @wm: Write master id
+ */
+static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
+{
+ struct camss_buffer *ready_buf;
+ struct vfe_output *output;
+ dma_addr_t *new_addr;
+ unsigned long flags;
+ u32 active_index;
+ u64 ts = ktime_get_ns();
+ unsigned int i;
+
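+ /*
+ * The ping/pong status reports which buffer the hardware writes to
+ * next; the other one has just been completed and can be returned.
+ */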
+ active_index = vfe->ops->wm_get_ping_pong_status(vfe, wm);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Received wm done for unmapped index\n");
+ goto out_unlock;
+ }
+ output = &vfe->line[vfe->wm_output_map[wm]].output;
+
+ if (output->active_buf == active_index) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Active buffer mismatch!\n");
+ goto out_unlock;
+ }
+ output->active_buf = active_index;
+
+ ready_buf = output->buf[!active_index];
+ if (!ready_buf) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Missing ready buf %d %d!\n",
+ !active_index, output->state);
+ goto out_unlock;
+ }
+
+ ready_buf->vb.vb2_buf.timestamp = ts;
+ ready_buf->vb.sequence = output->sequence++;
+
+ /* Get next buffer */
+ output->buf[!active_index] = vfe_buf_get_pending(output);
+ if (!output->buf[!active_index]) {
+ /* No next buffer - set same address */
+ new_addr = ready_buf->addr;
+ vfe_buf_update_wm_on_last(vfe, output);
+ } else {
+ new_addr = output->buf[!active_index]->addr;
+ vfe_buf_update_wm_on_next(vfe, output);
+ }
+
+ if (active_index)
+ for (i = 0; i < output->wm_num; i++)
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i],
+ new_addr[i]);
+ else
+ for (i = 0; i < output->wm_num; i++)
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i],
+ new_addr[i]);
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ if (output->state == VFE_OUTPUT_STOPPING)
+ output->last_buffer = ready_buf;
+ else
+ vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+/*
+ * vfe_isr_comp_done - Process composite image done interrupt
+ * @vfe: VFE Device
+ * @comp: Composite image id
+ */
+static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
+ if (vfe->wm_output_map[i] == VFE_LINE_PIX) {
+ vfe_isr_wm_done(vfe, i);
+ break;
+ }
+}
+
+static inline void vfe_isr_reset_ack(struct vfe_device *vfe)
+{
+ complete(&vfe->reset_complete);
+}
+
+static inline void vfe_isr_halt_ack(struct vfe_device *vfe)
+{
+ complete(&vfe->halt_complete);
+ vfe->ops->halt_clear(vfe);
+}
+
+/*
+ * vfe_set_clock_rates - Calculate and set clock rates on VFE module
+ * @vfe: VFE device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_set_clock_rates(struct vfe_device *vfe)
+{
+ struct device *dev = vfe->camss->dev;
+ u32 pixel_clock[MSM_VFE_LINE_NUM];
+ int i, j;
+ int ret;
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
+ ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
+ &pixel_clock[i]);
+ if (ret)
+ pixel_clock[i] = 0;
+ }
+
+ for (i = 0; i < vfe->nclocks; i++) {
+ struct camss_clock *clock = &vfe->clock[i];
+
+ if (!strcmp(clock->name, "vfe0") ||
+ !strcmp(clock->name, "vfe1")) {
+ u64 min_rate = 0;
+ long rate;
+
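+ /*
+ * Compute the minimum clock rate needed over all lines: the
+ * PIX line needs the full pixel clock, RDI lines need
+ * pixel_clock * bpp / 64.
+ */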
+ for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
+ u32 tmp;
+ u8 bpp;
+
+ if (j == VFE_LINE_PIX) {
+ tmp = pixel_clock[j];
+ } else {
+ struct vfe_line *l = &vfe->line[j];
+
+ bpp = vfe_get_bpp(l->formats,
+ l->nformats,
+ l->fmt[MSM_VFE_PAD_SINK].code);
+ tmp = pixel_clock[j] * bpp / 64;
+ }
+
+ if (min_rate < tmp)
+ min_rate = tmp;
+ }
+
+ camss_add_clock_margin(&min_rate);
+
+ for (j = 0; j < clock->nfreqs; j++)
+ if (min_rate < clock->freq[j])
+ break;
+
+ if (j == clock->nfreqs) {
+ dev_err(dev,
+ "Pixel clock is too high for VFE");
+ return -EINVAL;
+ }
+
+ /*
+ * If the sensor pixel clock is not available, set the
+ * highest possible VFE clock rate
+ */
+ if (min_rate == 0)
+ j = clock->nfreqs - 1;
+
+ rate = clk_round_rate(clock->clk, clock->freq[j]);
+ if (rate < 0) {
+ dev_err(dev, "clk round rate failed: %ld\n",
+ rate);
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(clock->clk, rate);
+ if (ret < 0) {
+ dev_err(dev, "clk set rate failed: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_check_clock_rates - Check current clock rates on VFE module
+ * @vfe: VFE device
+ *
+ * Return 0 if current clock rates are suitable for a new pipeline
+ * or a negative error code otherwise
+ */
+static int vfe_check_clock_rates(struct vfe_device *vfe)
+{
+ u32 pixel_clock[MSM_VFE_LINE_NUM];
+ int i, j;
+ int ret;
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
+ ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
+ &pixel_clock[i]);
+ if (ret)
+ pixel_clock[i] = 0;
+ }
+
+ for (i = 0; i < vfe->nclocks; i++) {
+ struct camss_clock *clock = &vfe->clock[i];
+
+ if (!strcmp(clock->name, "vfe0") ||
+ !strcmp(clock->name, "vfe1")) {
+ u64 min_rate = 0;
+ unsigned long rate;
+
+ for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
+ u32 tmp;
+ u8 bpp;
+
+ if (j == VFE_LINE_PIX) {
+ tmp = pixel_clock[j];
+ } else {
+ struct vfe_line *l = &vfe->line[j];
+
+ bpp = vfe_get_bpp(l->formats,
+ l->nformats,
+ l->fmt[MSM_VFE_PAD_SINK].code);
+ tmp = pixel_clock[j] * bpp / 64;
+ }
+
+ if (min_rate < tmp)
+ min_rate = tmp;
+ }
+
+ camss_add_clock_margin(&min_rate);
+
+ rate = clk_get_rate(clock->clk);
+ if (rate < min_rate)
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_get - Power up and reset VFE module
+ * @vfe: VFE Device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_get(struct vfe_device *vfe)
+{
+ int ret;
+
+ mutex_lock(&vfe->power_lock);
+
+ if (vfe->power_count == 0) {
+ ret = camss_pm_domain_on(vfe->camss, vfe->id);
+ if (ret < 0)
+ goto error_pm_domain;
+
+ ret = pm_runtime_get_sync(vfe->camss->dev);
+ if (ret < 0)
+ goto error_pm_runtime_get;
+
+ ret = vfe_set_clock_rates(vfe);
+ if (ret < 0)
+ goto error_clocks;
+
+ ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
+ vfe->camss->dev);
+ if (ret < 0)
+ goto error_clocks;
+
+ ret = vfe_reset(vfe);
+ if (ret < 0)
+ goto error_reset;
+
+ vfe_reset_output_maps(vfe);
+
+ vfe_init_outputs(vfe);
+ } else {
+ ret = vfe_check_clock_rates(vfe);
+ if (ret < 0)
+ goto error_clocks;
+ }
+ vfe->power_count++;
+
+ mutex_unlock(&vfe->power_lock);
+
+ return 0;
+
+error_reset:
+ camss_disable_clocks(vfe->nclocks, vfe->clock);
+
+error_clocks:
+ pm_runtime_put_sync(vfe->camss->dev);
+
+error_pm_runtime_get:
+ camss_pm_domain_off(vfe->camss, vfe->id);
+
+error_pm_domain:
+ mutex_unlock(&vfe->power_lock);
+
+ return ret;
+}
+
+/*
+ * vfe_put - Power down VFE module
+ * @vfe: VFE Device
+ */
+static void vfe_put(struct vfe_device *vfe)
+{
+ mutex_lock(&vfe->power_lock);
+
+ if (vfe->power_count == 0) {
+ dev_err(vfe->camss->dev, "vfe power off on power_count == 0\n");
+ goto exit;
+ } else if (vfe->power_count == 1) {
+ if (vfe->was_streaming) {
+ vfe->was_streaming = 0;
+ vfe_halt(vfe);
+ }
+ camss_disable_clocks(vfe->nclocks, vfe->clock);
+ pm_runtime_put_sync(vfe->camss->dev);
+ camss_pm_domain_off(vfe->camss, vfe->id);
+ }
+
+ vfe->power_count--;
+
+exit:
+ mutex_unlock(&vfe->power_lock);
+}
+
+/*
+ * vfe_queue_buffer - Add empty buffer
+ * @vid: Video device structure
+ * @buf: Buffer to be enqueued
+ *
+ * Add an empty buffer - depending on the current number of buffers it will be
+ * put in the pending buffer queue or directly given to the hardware to be filled.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_queue_buffer(struct camss_video *vid,
+ struct camss_buffer *buf)
+{
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ unsigned long flags;
+
+ output = &line->output;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ vfe_buf_update_wm_on_new(vfe, output, buf);
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+/*
+ * vfe_flush_buffers - Return all vb2 buffers
+ * @vid: Video device structure
+ * @state: vb2 buffer state of the returned buffers
+ *
+ * Return all buffers to vb2. This includes queued pending buffers (still
+ * unused) and any buffers given to the hardware but not yet used.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_flush_buffers(struct camss_video *vid,
+ enum vb2_buffer_state state)
+{
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ unsigned long flags;
+
+ output = &line->output;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ vfe_buf_flush_pending(output, state);
+
+ if (output->buf[0])
+ vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state);
+
+ if (output->buf[1])
+ vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state);
+
+ if (output->last_buffer) {
+ vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state);
+ output->last_buffer = NULL;
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+/*
+ * vfe_set_power - Power on/off VFE module
+ * @sd: VFE V4L2 subdevice
+ * @on: Requested power state
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+ struct vfe_device *vfe = to_vfe(line);
+ int ret;
+
+ if (on) {
+ ret = vfe_get(vfe);
+ if (ret < 0)
+ return ret;
+
+ vfe->ops->hw_version_read(vfe, vfe->camss->dev);
+ } else {
+ vfe_put(vfe);
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_set_stream - Enable/disable streaming on VFE module
+ * @sd: VFE V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Main configuration of VFE module is triggered here.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+ struct vfe_device *vfe = to_vfe(line);
+ int ret;
+
+ if (enable) {
+ ret = vfe_enable(line);
+ if (ret < 0)
+ dev_err(vfe->camss->dev,
+ "Failed to enable vfe outputs\n");
+ } else {
+ ret = vfe_disable(line);
+ if (ret < 0)
+ dev_err(vfe->camss->dev,
+ "Failed to disable vfe outputs\n");
+ }
+
+ return ret;
+}
+
+/*
+ * __vfe_get_format - Get pointer to format structure
+ * @line: VFE line
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad from which format is requested
+ * @which: TRY or ACTIVE format
+ *
+ * Return pointer to TRY or ACTIVE format structure
+ */
+static struct v4l2_mbus_framefmt *
+__vfe_get_format(struct vfe_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
+
+ return &line->fmt[pad];
+}
+
+/*
+ * __vfe_get_compose - Get pointer to compose selection structure
+ * @line: VFE line
+ * @cfg: V4L2 subdev pad configuration
+ * @which: TRY or ACTIVE format
+ *
+ * Return pointer to TRY or ACTIVE compose rectangle structure
+ */
+static struct v4l2_rect *
+__vfe_get_compose(struct vfe_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_compose(&line->subdev, cfg,
+ MSM_VFE_PAD_SINK);
+
+ return &line->compose;
+}
+
+/*
+ * __vfe_get_crop - Get pointer to crop selection structure
+ * @line: VFE line
+ * @cfg: V4L2 subdev pad configuration
+ * @which: TRY or ACTIVE format
+ *
+ * Return pointer to TRY or ACTIVE crop rectangle structure
+ */
+static struct v4l2_rect *
+__vfe_get_crop(struct vfe_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&line->subdev, cfg,
+ MSM_VFE_PAD_SRC);
+
+ return &line->crop;
+}
+
+/*
+ * vfe_try_format - Handle try format by pad subdev method
+ * @line: VFE line
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad on which format is requested
+ * @fmt: pointer to v4l2 format structure
+ * @which: wanted subdev format
+ */
+static void vfe_try_format(struct vfe_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int i;
+ u32 code;
+
+ switch (pad) {
+ case MSM_VFE_PAD_SINK:
+ /* Set format on sink pad */
+
+ for (i = 0; i < line->nformats; i++)
+ if (fmt->code == line->formats[i].code)
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= line->nformats)
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ break;
+
+ case MSM_VFE_PAD_SRC:
+ /* Set and return a format same as sink pad */
+ code = fmt->code;
+
+ *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+
+ fmt->code = vfe_src_pad_code(line, fmt->code, 0, code);
+
+ if (line->id == VFE_LINE_PIX) {
+ struct v4l2_rect *rect;
+
+ rect = __vfe_get_crop(line, cfg, which);
+
+ fmt->width = rect->width;
+ fmt->height = rect->height;
+ }
+
+ break;
+ }
+
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/*
+ * vfe_try_compose - Handle try compose selection by pad subdev method
+ * @line: VFE line
+ * @cfg: V4L2 subdev pad configuration
+ * @rect: pointer to v4l2 rect structure
+ * @which: wanted subdev format
+ */
+static void vfe_try_compose(struct vfe_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_rect *rect,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+
+ if (rect->width > fmt->width)
+ rect->width = fmt->width;
+
+ if (rect->height > fmt->height)
+ rect->height = fmt->height;
+
+ if (fmt->width > rect->width * SCALER_RATIO_MAX)
+ rect->width = (fmt->width + SCALER_RATIO_MAX - 1) /
+ SCALER_RATIO_MAX;
+
+ rect->width &= ~0x1;
+
+ if (fmt->height > rect->height * SCALER_RATIO_MAX)
+ rect->height = (fmt->height + SCALER_RATIO_MAX - 1) /
+ SCALER_RATIO_MAX;
+
+ if (rect->width < 16)
+ rect->width = 16;
+
+ if (rect->height < 4)
+ rect->height = 4;
+}
+
+/*
+ * vfe_try_crop - Handle try crop selection by pad subdev method
+ * @line: VFE line
+ * @cfg: V4L2 subdev pad configuration
+ * @rect: pointer to v4l2 rect structure
+ * @which: wanted subdev format
+ */
+static void vfe_try_crop(struct vfe_line *line,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_rect *rect,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_rect *compose;
+
+ compose = __vfe_get_compose(line, cfg, which);
+
+ if (rect->width > compose->width)
+ rect->width = compose->width;
+
+ if (rect->width + rect->left > compose->width)
+ rect->left = compose->width - rect->width;
+
+ if (rect->height > compose->height)
+ rect->height = compose->height;
+
+ if (rect->height + rect->top > compose->height)
+ rect->top = compose->height - rect->height;
+
+ /* wm in line based mode writes multiple of 16 horizontally */
+ rect->left += (rect->width & 0xf) >> 1;
+ rect->width &= ~0xf;
+
+ if (rect->width < 16) {
+ rect->left = 0;
+ rect->width = 16;
+ }
+
+ if (rect->height < 4) {
+ rect->top = 0;
+ rect->height = 4;
+ }
+}
+
+/*
+ * vfe_enum_mbus_code - Handle pixel format enumeration
+ * @sd: VFE V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+
+ if (code->pad == MSM_VFE_PAD_SINK) {
+ if (code->index >= line->nformats)
+ return -EINVAL;
+
+ code->code = line->formats[code->index].code;
+ } else {
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ sink_fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
+ code->which);
+
+ code->code = vfe_src_pad_code(line, sink_fmt->code,
+ code->index, 0);
+ if (!code->code)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_enum_frame_size - Handle frame size enumeration
+ * @sd: VFE V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int vfe_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ vfe_try_format(line, cfg, fse->pad, &format, fse->which);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ vfe_try_format(line, cfg, fse->pad, &format, fse->which);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * vfe_get_format - Handle get format by pads subdev method
+ * @sd: VFE V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int vfe_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int vfe_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel);
+
+/*
+ * vfe_set_format - Handle set format by pads subdev method
+ * @sd: VFE V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int vfe_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->pad == MSM_VFE_PAD_SINK) {
+ struct v4l2_subdev_selection sel = { 0 };
+ int ret;
+
+ /* Propagate the format from sink to source */
+ format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
+ fmt->which);
+
+ *format = fmt->format;
+ vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
+ fmt->which);
+
+ if (line->id != VFE_LINE_PIX)
+ return 0;
+
+ /* Reset sink pad compose selection */
+ sel.which = fmt->which;
+ sel.pad = MSM_VFE_PAD_SINK;
+ sel.target = V4L2_SEL_TGT_COMPOSE;
+ sel.r.width = fmt->format.width;
+ sel.r.height = fmt->format.height;
+ ret = vfe_set_selection(sd, cfg, &sel);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_get_selection - Handle get selection by pads subdev method
+ * @sd: VFE V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: pointer to v4l2 subdev selection structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int vfe_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_format fmt = { 0 };
+ struct v4l2_rect *rect;
+ int ret;
+
+ if (line->id != VFE_LINE_PIX)
+ return -EINVAL;
+
+ if (sel->pad == MSM_VFE_PAD_SINK)
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ fmt.pad = sel->pad;
+ fmt.which = sel->which;
+ ret = vfe_get_format(sd, cfg, &fmt);
+ if (ret < 0)
+ return ret;
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = fmt.format.width;
+ sel->r.height = fmt.format.height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ rect = __vfe_get_compose(line, cfg, sel->which);
+ if (rect == NULL)
+ return -EINVAL;
+
+ sel->r = *rect;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else if (sel->pad == MSM_VFE_PAD_SRC)
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ rect = __vfe_get_compose(line, cfg, sel->which);
+ if (rect == NULL)
+ return -EINVAL;
+
+ sel->r.left = rect->left;
+ sel->r.top = rect->top;
+ sel->r.width = rect->width;
+ sel->r.height = rect->height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ rect = __vfe_get_crop(line, cfg, sel->which);
+ if (rect == NULL)
+ return -EINVAL;
+
+ sel->r = *rect;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * vfe_set_selection - Handle set selection by pads subdev method
+ * @sd: VFE V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @sel: pointer to v4l2 subdev selection structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int vfe_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vfe_line *line = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *rect;
+ int ret;
+
+ if (line->id != VFE_LINE_PIX)
+ return -EINVAL;
+
+ if (sel->target == V4L2_SEL_TGT_COMPOSE &&
+ sel->pad == MSM_VFE_PAD_SINK) {
+ struct v4l2_subdev_selection crop = { 0 };
+
+ rect = __vfe_get_compose(line, cfg, sel->which);
+ if (rect == NULL)
+ return -EINVAL;
+
+ vfe_try_compose(line, cfg, &sel->r, sel->which);
+ *rect = sel->r;
+
+ /* Reset source crop selection */
+ crop.which = sel->which;
+ crop.pad = MSM_VFE_PAD_SRC;
+ crop.target = V4L2_SEL_TGT_CROP;
+ crop.r = *rect;
+ ret = vfe_set_selection(sd, cfg, &crop);
+ } else if (sel->target == V4L2_SEL_TGT_CROP &&
+ sel->pad == MSM_VFE_PAD_SRC) {
+ struct v4l2_subdev_format fmt = { 0 };
+
+ rect = __vfe_get_crop(line, cfg, sel->which);
+ if (rect == NULL)
+ return -EINVAL;
+
+ vfe_try_crop(line, cfg, &sel->r, sel->which);
+ *rect = sel->r;
+
+ /* Reset source pad format width and height */
+ fmt.which = sel->which;
+ fmt.pad = MSM_VFE_PAD_SRC;
+ ret = vfe_get_format(sd, cfg, &fmt);
+ if (ret < 0)
+ return ret;
+
+ fmt.format.width = rect->width;
+ fmt.format.height = rect->height;
+ ret = vfe_set_format(sd, cfg, &fmt);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * vfe_init_formats - Initialize formats on all pads
+ * @sd: VFE V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format = {
+ .pad = MSM_VFE_PAD_SINK,
+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .width = 1920,
+ .height = 1080
+ }
+ };
+
+ return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
+}
+
+/*
+ * msm_vfe_subdev_init - Initialize VFE device structure and resources
+ * @camss: Camera sub-system structure
+ * @vfe: VFE device
+ * @res: VFE module resources table
+ * @id: VFE hardware module id
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
+ const struct resources *res, u8 id)
+{
+ struct device *dev = camss->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *r;
+ int i, j;
+ int ret;
+
+ vfe->isr_ops.reset_ack = vfe_isr_reset_ack;
+ vfe->isr_ops.halt_ack = vfe_isr_halt_ack;
+ vfe->isr_ops.reg_update = vfe_isr_reg_update;
+ vfe->isr_ops.sof = vfe_isr_sof;
+ vfe->isr_ops.comp_done = vfe_isr_comp_done;
+ vfe->isr_ops.wm_done = vfe_isr_wm_done;
+
+ if (camss->version == CAMSS_8x16)
+ vfe->ops = &vfe_ops_4_1;
+ else if (camss->version == CAMSS_8x96)
+ vfe->ops = &vfe_ops_4_7;
+ else
+ return -EINVAL;
+
+ /* Memory */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
+ vfe->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(vfe->base)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(vfe->base);
+ }
+
+ /* Interrupt */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ res->interrupt[0]);
+ if (!r) {
+ dev_err(dev, "missing IRQ\n");
+ return -EINVAL;
+ }
+
+ vfe->irq = r->start;
+ snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
+ dev_name(dev), MSM_VFE_NAME, vfe->id);
+ ret = devm_request_irq(dev, vfe->irq, vfe->ops->isr,
+ IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
+ if (ret < 0) {
+ dev_err(dev, "request_irq failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Clocks */
+
+ vfe->nclocks = 0;
+ while (res->clock[vfe->nclocks])
+ vfe->nclocks++;
+
+ vfe->clock = devm_kcalloc(dev, vfe->nclocks, sizeof(*vfe->clock),
+ GFP_KERNEL);
+ if (!vfe->clock)
+ return -ENOMEM;
+
+ for (i = 0; i < vfe->nclocks; i++) {
+ struct camss_clock *clock = &vfe->clock[i];
+
+ clock->clk = devm_clk_get(dev, res->clock[i]);
+ if (IS_ERR(clock->clk))
+ return PTR_ERR(clock->clk);
+
+ clock->name = res->clock[i];
+
+ clock->nfreqs = 0;
+ while (res->clock_rate[i][clock->nfreqs])
+ clock->nfreqs++;
+
+ if (!clock->nfreqs) {
+ clock->freq = NULL;
+ continue;
+ }
+
+ clock->freq = devm_kcalloc(dev,
+ clock->nfreqs,
+ sizeof(*clock->freq),
+ GFP_KERNEL);
+ if (!clock->freq)
+ return -ENOMEM;
+
+ for (j = 0; j < clock->nfreqs; j++)
+ clock->freq[j] = res->clock_rate[i][j];
+ }
+
+ mutex_init(&vfe->power_lock);
+ vfe->power_count = 0;
+
+ mutex_init(&vfe->stream_lock);
+ vfe->stream_count = 0;
+
+ spin_lock_init(&vfe->output_lock);
+
+ vfe->camss = camss;
+ vfe->id = id;
+ vfe->reg_update = 0;
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
+ struct vfe_line *l = &vfe->line[i];
+
+ l->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ l->video_out.camss = camss;
+ l->id = i;
+ init_completion(&l->output.sof);
+ init_completion(&l->output.reg_update);
+
+ if (camss->version == CAMSS_8x16) {
+ if (i == VFE_LINE_PIX) {
+ l->formats = formats_pix_8x16;
+ l->nformats = ARRAY_SIZE(formats_pix_8x16);
+ } else {
+ l->formats = formats_rdi_8x16;
+ l->nformats = ARRAY_SIZE(formats_rdi_8x16);
+ }
+ } else if (camss->version == CAMSS_8x96) {
+ if (i == VFE_LINE_PIX) {
+ l->formats = formats_pix_8x96;
+ l->nformats = ARRAY_SIZE(formats_pix_8x96);
+ } else {
+ l->formats = formats_rdi_8x96;
+ l->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ init_completion(&vfe->reset_complete);
+ init_completion(&vfe->halt_complete);
+
+ return 0;
+}
+
+/*
+ * msm_vfe_get_vfe_id - Get VFE HW module id
+ * @entity: Pointer to VFE media entity structure
+ * @id: Return VFE HW module id here
+ */
+void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id)
+{
+ struct v4l2_subdev *sd;
+ struct vfe_line *line;
+ struct vfe_device *vfe;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ line = v4l2_get_subdevdata(sd);
+ vfe = to_vfe(line);
+
+ *id = vfe->id;
+}
+
+/*
+ * msm_vfe_get_vfe_line_id - Get VFE line id by media entity
+ * @entity: Pointer to VFE media entity structure
+ * @id: Return VFE line id here
+ */
+void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id)
+{
+ struct v4l2_subdev *sd;
+ struct vfe_line *line;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ line = v4l2_get_subdevdata(sd);
+
+ *id = line->id;
+}
+
+/*
+ * vfe_link_setup - Setup VFE connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad
+ * @remote: Pointer to remote pad
+ * @flags: Link flags
+ *
+ * Return 0 on success
+ */
+static int vfe_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ if (media_entity_remote_pad(local))
+ return -EBUSY;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops vfe_core_ops = {
+ .s_power = vfe_set_power,
+};
+
+static const struct v4l2_subdev_video_ops vfe_video_ops = {
+ .s_stream = vfe_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops vfe_pad_ops = {
+ .enum_mbus_code = vfe_enum_mbus_code,
+ .enum_frame_size = vfe_enum_frame_size,
+ .get_fmt = vfe_get_format,
+ .set_fmt = vfe_set_format,
+ .get_selection = vfe_get_selection,
+ .set_selection = vfe_set_selection,
+};
+
+static const struct v4l2_subdev_ops vfe_v4l2_ops = {
+ .core = &vfe_core_ops,
+ .video = &vfe_video_ops,
+ .pad = &vfe_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = {
+ .open = vfe_init_formats,
+};
+
+static const struct media_entity_operations vfe_media_ops = {
+ .link_setup = vfe_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct camss_video_ops camss_vfe_video_ops = {
+ .queue_buffer = vfe_queue_buffer,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+void msm_vfe_stop_streaming(struct vfe_device *vfe)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vfe->line); i++)
+ msm_video_stop_streaming(&vfe->line[i].video_out);
+}
+
+/*
+ * msm_vfe_register_entities - Register subdev node for VFE module
+ * @vfe: VFE device
+ * @v4l2_dev: V4L2 device
+ *
+ * Initialize and register a subdev node for the VFE module. Then
+ * call msm_video_register() to register the video device node which
+ * will be connected to this subdev node. Then actually create the
+ * media link between them.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_vfe_register_entities(struct vfe_device *vfe,
+ struct v4l2_device *v4l2_dev)
+{
+ struct device *dev = vfe->camss->dev;
+ struct v4l2_subdev *sd;
+ struct media_pad *pads;
+ struct camss_video *video_out;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
+ char name[32];
+
+ sd = &vfe->line[i].subdev;
+ pads = vfe->line[i].pads;
+ video_out = &vfe->line[i].video_out;
+
+ v4l2_subdev_init(sd, &vfe_v4l2_ops);
+ sd->internal_ops = &vfe_v4l2_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ if (i == VFE_LINE_PIX)
+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
+ MSM_VFE_NAME, vfe->id, "pix");
+ else
+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d",
+ MSM_VFE_NAME, vfe->id, "rdi", i);
+
+ v4l2_set_subdevdata(sd, &vfe->line[i]);
+
+ ret = vfe_init_formats(sd, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init format: %d\n", ret);
+ goto error_init;
+ }
+
+ pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ sd->entity.ops = &vfe_media_ops;
+ ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM,
+ pads);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init media entity: %d\n", ret);
+ goto error_init;
+ }
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+ goto error_reg_subdev;
+ }
+
+ video_out->ops = &camss_vfe_video_ops;
+ video_out->bpl_alignment = 8;
+ video_out->line_based = 0;
+ if (i == VFE_LINE_PIX) {
+ video_out->bpl_alignment = 16;
+ video_out->line_based = 1;
+ }
+ snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d",
+ MSM_VFE_NAME, vfe->id, "video", i);
+ ret = msm_video_register(video_out, v4l2_dev, name,
+ i == VFE_LINE_PIX ? 1 : 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register video node: %d\n",
+ ret);
+ goto error_reg_video;
+ }
+
+ ret = media_create_pad_link(
+ &sd->entity, MSM_VFE_PAD_SRC,
+ &video_out->vdev.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret < 0) {
+ dev_err(dev, "Failed to link %s->%s entities: %d\n",
+ sd->entity.name, video_out->vdev.entity.name,
+ ret);
+ goto error_link;
+ }
+ }
+
+ return 0;
+
+error_link:
+ msm_video_unregister(video_out);
+
+error_reg_video:
+ v4l2_device_unregister_subdev(sd);
+
+error_reg_subdev:
+ media_entity_cleanup(&sd->entity);
+
+error_init:
+ for (i--; i >= 0; i--) {
+ sd = &vfe->line[i].subdev;
+ video_out = &vfe->line[i].video_out;
+
+ msm_video_unregister(video_out);
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ }
+
+ return ret;
+}
+
+/*
+ * msm_vfe_unregister_entities - Unregister VFE module subdev node
+ * @vfe: VFE device
+ */
+void msm_vfe_unregister_entities(struct vfe_device *vfe)
+{
+ int i;
+
+ mutex_destroy(&vfe->power_lock);
+ mutex_destroy(&vfe->stream_lock);
+
+ for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
+ struct v4l2_subdev *sd = &vfe->line[i].subdev;
+ struct camss_video *video_out = &vfe->line[i].video_out;
+
+ msm_video_unregister(video_out);
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ }
+}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
new file mode 100644
index 000000000..0d10071ae
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-vfe.h
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_VFE_H
+#define QC_MSM_CAMSS_VFE_H
+
+#include <linux/clk.h>
+#include <linux/spinlock_types.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-video.h"
+
+#define MSM_VFE_PAD_SINK 0
+#define MSM_VFE_PAD_SRC 1
+#define MSM_VFE_PADS_NUM 2
+
+#define MSM_VFE_LINE_NUM 4
+#define MSM_VFE_IMAGE_MASTERS_NUM 7
+#define MSM_VFE_COMPOSITE_IRQ_NUM 4
+
+enum vfe_output_state {
+ VFE_OUTPUT_OFF,
+ VFE_OUTPUT_RESERVED,
+ VFE_OUTPUT_SINGLE,
+ VFE_OUTPUT_CONTINUOUS,
+ VFE_OUTPUT_IDLE,
+ VFE_OUTPUT_STOPPING
+};
+
+enum vfe_line_id {
+ VFE_LINE_NONE = -1,
+ VFE_LINE_RDI0 = 0,
+ VFE_LINE_RDI1 = 1,
+ VFE_LINE_RDI2 = 2,
+ VFE_LINE_PIX = 3
+};
+
+struct vfe_output {
+ u8 wm_num;
+ u8 wm_idx[3];
+
+ int active_buf;
+ struct camss_buffer *buf[2];
+ struct camss_buffer *last_buffer;
+ struct list_head pending_bufs;
+
+ unsigned int drop_update_idx;
+
+ enum vfe_output_state state;
+ unsigned int sequence;
+ int wait_sof;
+ int wait_reg_update;
+ struct completion sof;
+ struct completion reg_update;
+};
+
+struct vfe_line {
+ enum vfe_line_id id;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_VFE_PADS_NUM];
+ struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM];
+ struct v4l2_rect compose;
+ struct v4l2_rect crop;
+ struct camss_video video_out;
+ struct vfe_output output;
+ const struct vfe_format *formats;
+ unsigned int nformats;
+};
+
+struct vfe_device;
+
+struct vfe_hw_ops {
+ void (*hw_version_read)(struct vfe_device *vfe, struct device *dev);
+ u16 (*get_ub_size)(u8 vfe_id);
+ void (*global_reset)(struct vfe_device *vfe);
+ void (*halt_request)(struct vfe_device *vfe);
+ void (*halt_clear)(struct vfe_device *vfe);
+ void (*wm_enable)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*wm_frame_based)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*wm_line_based)(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable);
+ void (*wm_set_framedrop_period)(struct vfe_device *vfe, u8 wm, u8 per);
+ void (*wm_set_framedrop_pattern)(struct vfe_device *vfe, u8 wm,
+ u32 pattern);
+ void (*wm_set_ub_cfg)(struct vfe_device *vfe, u8 wm, u16 offset,
+ u16 depth);
+ void (*bus_reload_wm)(struct vfe_device *vfe, u8 wm);
+ void (*wm_set_ping_addr)(struct vfe_device *vfe, u8 wm, u32 addr);
+ void (*wm_set_pong_addr)(struct vfe_device *vfe, u8 wm, u32 addr);
+ int (*wm_get_ping_pong_status)(struct vfe_device *vfe, u8 wm);
+ void (*bus_enable_wr_if)(struct vfe_device *vfe, u8 enable);
+ void (*bus_connect_wm_to_rdi)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id);
+ void (*wm_set_subsample)(struct vfe_device *vfe, u8 wm);
+ void (*bus_disconnect_wm_from_rdi)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id);
+ void (*set_xbar_cfg)(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable);
+ void (*set_rdi_cid)(struct vfe_device *vfe, enum vfe_line_id id,
+ u8 cid);
+ void (*set_realign_cfg)(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable);
+ void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*reg_update_clear)(struct vfe_device *vfe,
+ enum vfe_line_id line_id);
+ void (*enable_irq_wm_line)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable);
+ void (*enable_irq_pix_line)(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable);
+ void (*enable_irq_common)(struct vfe_device *vfe);
+ void (*set_demux_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_scale_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_crop_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_clamp_cfg)(struct vfe_device *vfe);
+ void (*set_qos)(struct vfe_device *vfe);
+ void (*set_ds)(struct vfe_device *vfe);
+ void (*set_cgc_override)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*set_camif_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_camif_cmd)(struct vfe_device *vfe, u8 enable);
+ void (*set_module_cfg)(struct vfe_device *vfe, u8 enable);
+ int (*camif_wait_for_stop)(struct vfe_device *vfe, struct device *dev);
+ void (*isr_read)(struct vfe_device *vfe, u32 *value0, u32 *value1);
+ void (*violation_read)(struct vfe_device *vfe);
+ irqreturn_t (*isr)(int irq, void *dev);
+};
+
+struct vfe_isr_ops {
+ void (*reset_ack)(struct vfe_device *vfe);
+ void (*halt_ack)(struct vfe_device *vfe);
+ void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*sof)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*comp_done)(struct vfe_device *vfe, u8 comp);
+ void (*wm_done)(struct vfe_device *vfe, u8 wm);
+};
+
+struct vfe_device {
+ struct camss *camss;
+ u8 id;
+ void __iomem *base;
+ u32 irq;
+ char irq_name[30];
+ struct camss_clock *clock;
+ int nclocks;
+ struct completion reset_complete;
+ struct completion halt_complete;
+ struct mutex power_lock;
+ int power_count;
+ struct mutex stream_lock;
+ int stream_count;
+ spinlock_t output_lock;
+ enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM];
+ struct vfe_line line[MSM_VFE_LINE_NUM];
+ u32 reg_update;
+ u8 was_streaming;
+ const struct vfe_hw_ops *ops;
+ struct vfe_isr_ops isr_ops;
+};
+
+struct resources;
+
+int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
+ const struct resources *res, u8 id);
+
+int msm_vfe_register_entities(struct vfe_device *vfe,
+ struct v4l2_device *v4l2_dev);
+
+void msm_vfe_unregister_entities(struct vfe_device *vfe);
+
+void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id);
+void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id);
+
+void msm_vfe_stop_streaming(struct vfe_device *vfe);
+
+extern const struct vfe_hw_ops vfe_ops_4_1;
+extern const struct vfe_hw_ops vfe_ops_4_7;
+
+#endif /* QC_MSM_CAMSS_VFE_H */
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
new file mode 100644
index 000000000..e81ebeb05
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-video.c
+ *
+ * Qualcomm MSM Camera Subsystem - V4L2 device node
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#include <linux/slab.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "camss-video.h"
+#include "camss.h"
+
+struct fract {
+ u8 numerator;
+ u8 denominator;
+};
+
+/*
+ * struct camss_format_info - ISP media bus format information
+ * @code: V4L2 media bus format code
+ * @pixelformat: V4L2 pixel format FourCC identifier
+ * @planes: Number of planes
+ * @hsub: Horizontal subsampling (for each plane)
+ * @vsub: Vertical subsampling (for each plane)
+ * @bpp: Bits per pixel when stored in memory (for each plane)
+ */
+struct camss_format_info {
+ u32 code;
+ u32 pixelformat;
+ u8 planes;
+ struct fract hsub[3];
+ struct fract vsub[3];
+ unsigned int bpp[3];
+};
+
+static const struct camss_format_info formats_rdi_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+};
+
+static const struct camss_format_info formats_rdi_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_PIX_FMT_SBGGR10, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, V4L2_PIX_FMT_SBGGR14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, V4L2_PIX_FMT_SGBRG14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, V4L2_PIX_FMT_SGRBG14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+};
+
+static const struct camss_format_info formats_pix_8x16[] = {
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+};
+
+static const struct camss_format_info formats_pix_8x96[] = {
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+};
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static int video_find_format(u32 code, u32 pixelformat,
+ const struct camss_format_info *formats,
+ unsigned int nformats)
+{
+ int i;
+
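+ /*
+  * Prefer an exact (code, pixelformat) match; fall back to the first
+  * entry with a matching media bus code.
+  */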
+ for (i = 0; i < nformats; i++) {
+ if (formats[i].code == code &&
+ formats[i].pixelformat == pixelformat)
+ return i;
+ }
+
+ for (i = 0; i < nformats; i++)
+ if (formats[i].code == code)
+ return i;
+
+ WARN_ON(1);
+
+ return -EINVAL;
+}
+
+/*
+ * video_mbus_to_pix_mp - Convert v4l2_mbus_framefmt to v4l2_pix_format_mplane
+ * @mbus: v4l2_mbus_framefmt format (input)
+ * @pix: v4l2_pix_format_mplane format (output)
+ * @f: a pointer to formats array element to be used for the conversion
+ * @alignment: bytesperline alignment value
+ *
+ * Fill the output pix structure with information from the input mbus format.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int video_mbus_to_pix_mp(const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format_mplane *pix,
+ const struct camss_format_info *f,
+ unsigned int alignment)
+{
+ unsigned int i;
+ u32 bytesperline;
+
+ memset(pix, 0, sizeof(*pix));
+ v4l2_fill_pix_format_mplane(pix, mbus);
+ pix->pixelformat = f->pixelformat;
+ pix->num_planes = f->planes;
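+ /*
+  * Derive each plane's bytesperline and sizeimage from the
+  * subsampling factors and the bits per pixel.
+  */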
+ for (i = 0; i < pix->num_planes; i++) {
+ bytesperline = pix->width / f->hsub[i].numerator *
+ f->hsub[i].denominator * f->bpp[i] / 8;
+ bytesperline = ALIGN(bytesperline, alignment);
+ pix->plane_fmt[i].bytesperline = bytesperline;
+ pix->plane_fmt[i].sizeimage = pix->height /
+ f->vsub[i].numerator * f->vsub[i].denominator *
+ bytesperline;
+ }
+
+ return 0;
+}
+
+static struct v4l2_subdev *video_remote_subdev(struct camss_video *video,
+ u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(&video->pad);
+
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int video_get_subdev_format(struct camss_video *video,
+ struct v4l2_format *format)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ subdev = video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EPIPE;
+
+ fmt.pad = pad;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ ret = video_find_format(fmt.format.code,
+ format->fmt.pix_mp.pixelformat,
+ video->formats, video->nformats);
+ if (ret < 0)
+ return ret;
+
+ format->type = video->type;
+
+ return video_mbus_to_pix_mp(&fmt.format, &format->fmt.pix_mp,
+ &video->formats[ret], video->bpl_alignment);
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static int video_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct camss_video *video = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format_mplane *format =
+ &video->active_fmt.fmt.pix_mp;
+ unsigned int i;
+
+ if (*num_planes) {
+ if (*num_planes != format->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = format->num_planes;
+
+ for (i = 0; i < *num_planes; i++)
+ sizes[i] = format->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int video_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer,
+ vb);
+ const struct v4l2_pix_format_mplane *format =
+ &video->active_fmt.fmt.pix_mp;
+ struct sg_table *sgt;
+ unsigned int i;
+
+ for (i = 0; i < format->num_planes; i++) {
+ sgt = vb2_dma_sg_plane_desc(vb, i);
+ if (!sgt)
+ return -EFAULT;
+
+ buffer->addr[i] = sg_dma_address(sgt->sgl);
+ }
+
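+ /*
+  * The semi-planar NV formats use a single allocation: the chroma
+  * plane directly follows the luma plane in memory.
+  */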
+ if (format->pixelformat == V4L2_PIX_FMT_NV12 ||
+ format->pixelformat == V4L2_PIX_FMT_NV21 ||
+ format->pixelformat == V4L2_PIX_FMT_NV16 ||
+ format->pixelformat == V4L2_PIX_FMT_NV61)
+ buffer->addr[1] = buffer->addr[0] +
+ format->plane_fmt[0].bytesperline *
+ format->height;
+
+ return 0;
+}
+
+static int video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ const struct v4l2_pix_format_mplane *format =
+ &video->active_fmt.fmt.pix_mp;
+ unsigned int i;
+
+ for (i = 0; i < format->num_planes; i++) {
+ if (format->plane_fmt[i].sizeimage > vb2_plane_size(vb, i))
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, i, format->plane_fmt[i].sizeimage);
+ }
+
+ vbuf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static void video_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer,
+ vb);
+
+ video->ops->queue_buffer(video, buffer);
+}
+
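+/*
+ * Verify that the video node format matches the active format on the
+ * connected subdev source pad.
+ */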
+static int video_check_format(struct camss_video *video)
+{
+ struct v4l2_pix_format_mplane *pix = &video->active_fmt.fmt.pix_mp;
+ struct v4l2_format format;
+ struct v4l2_pix_format_mplane *sd_pix = &format.fmt.pix_mp;
+ int ret;
+
+ sd_pix->pixelformat = pix->pixelformat;
+ ret = video_get_subdev_format(video, &format);
+ if (ret < 0)
+ return ret;
+
+ if (pix->pixelformat != sd_pix->pixelformat ||
+ pix->height != sd_pix->height ||
+ pix->width != sd_pix->width ||
+ pix->num_planes != sd_pix->num_planes ||
+ pix->field != format.fmt.pix_mp.field)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct camss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ ret = media_pipeline_start(&vdev->entity, &video->pipe);
+ if (ret < 0)
+ return ret;
+
+ ret = video_check_format(video);
+ if (ret < 0)
+ goto error;
+
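+ /* Walk the pipeline upstream from the video node and start streaming on each subdev */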
+ entity = &vdev->entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_pipeline_stop(&vdev->entity);
+
+ video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void video_stop_streaming(struct vb2_queue *q)
+{
+ struct camss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+
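+ /* Walk the pipeline upstream and stop streaming on each subdev before flushing the buffers */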
+ entity = &vdev->entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ v4l2_subdev_call(subdev, video, s_stream, 0);
+ }
+
+ media_pipeline_stop(&vdev->entity);
+
+ video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops msm_video_vb2_q_ops = {
+ .queue_setup = video_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_init = video_buf_init,
+ .buf_prepare = video_buf_prepare,
+ .buf_queue = video_buf_queue,
+ .start_streaming = video_start_streaming,
+ .stop_streaming = video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ strlcpy(cap->driver, "qcom-camss", sizeof(cap->driver));
+ strlcpy(cap->card, "Qualcomm Camera Subsystem", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(video->camss->dev));
+
+ return 0;
+}
+
+static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct camss_video *video = video_drvdata(file);
+ int i, j, k;
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ if (f->index >= video->nformats)
+ return -EINVAL;
+
+ /* find index "i" of "k"th unique pixelformat in formats array */
+ k = -1;
+ for (i = 0; i < video->nformats; i++) {
+ for (j = 0; j < i; j++) {
+ if (video->formats[i].pixelformat ==
+ video->formats[j].pixelformat)
+ break;
+ }
+
+ if (j == i)
+ k++;
+
+ if (k == f->index)
+ break;
+ }
+
+ if (k < f->index)
+ return -EINVAL;
+
+ f->pixelformat = video->formats[i].pixelformat;
+
+ return 0;
+}
+
+static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ *f = video->active_fmt;
+
+ return 0;
+}
+
+static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp;
+ const struct camss_format_info *fi;
+ struct v4l2_plane_pix_format *p;
+ u32 bytesperline[3] = { 0 };
+ u32 sizeimage[3] = { 0 };
+ u32 width, height;
+ u32 bpl, lines;
+ int i, j;
+
+ pix_mp = &f->fmt.pix_mp;
+
+ if (video->line_based)
+ for (i = 0; i < pix_mp->num_planes && i < 3; i++) {
+ p = &pix_mp->plane_fmt[i];
+ bytesperline[i] = clamp_t(u32, p->bytesperline,
+ 1, 65528);
+ sizeimage[i] = clamp_t(u32, p->sizeimage,
+ bytesperline[i],
+ bytesperline[i] * 4096);
+ }
+
+ for (j = 0; j < video->nformats; j++)
+ if (pix_mp->pixelformat == video->formats[j].pixelformat)
+ break;
+
+ if (j == video->nformats)
+ j = 0; /* default format */
+
+ fi = &video->formats[j];
+ width = pix_mp->width;
+ height = pix_mp->height;
+
+ memset(pix_mp, 0, sizeof(*pix_mp));
+
+ pix_mp->pixelformat = fi->pixelformat;
+ pix_mp->width = clamp_t(u32, width, 1, 8191);
+ pix_mp->height = clamp_t(u32, height, 1, 8191);
+ pix_mp->num_planes = fi->planes;
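+ /*
+  * bytesperline is width * bpp / 8, adjusted for per-plane horizontal
+  * subsampling and aligned to the hardware bpl requirement.
+  */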
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ bpl = pix_mp->width / fi->hsub[i].numerator *
+ fi->hsub[i].denominator * fi->bpp[i] / 8;
+ bpl = ALIGN(bpl, video->bpl_alignment);
+ pix_mp->plane_fmt[i].bytesperline = bpl;
+ pix_mp->plane_fmt[i].sizeimage = pix_mp->height /
+ fi->vsub[i].numerator * fi->vsub[i].denominator * bpl;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_mp->flags = 0;
+ pix_mp->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix_mp->colorspace);
+ pix_mp->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ pix_mp->colorspace, pix_mp->ycbcr_enc);
+ pix_mp->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix_mp->colorspace);
+
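+ /*
+  * For line-based (PIX) output honour the user-provided bytesperline
+  * and sizeimage, but never allow them below the computed minimum.
+  */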
+ if (video->line_based)
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ p = &pix_mp->plane_fmt[i];
+ p->bytesperline = clamp_t(u32, p->bytesperline,
+ 1, 65528);
+ p->sizeimage = clamp_t(u32, p->sizeimage,
+ p->bytesperline,
+ p->bytesperline * 4096);
+ lines = p->sizeimage / p->bytesperline;
+
+ if (p->bytesperline < bytesperline[i])
+ p->bytesperline = ALIGN(bytesperline[i], 8);
+
+ if (p->sizeimage < p->bytesperline * lines)
+ p->sizeimage = p->bytesperline * lines;
+
+ if (p->sizeimage < sizeimage[i])
+ p->sizeimage = sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ return __video_try_fmt(video, f);
+}
+
+static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct camss_video *video = video_drvdata(file);
+ int ret;
+
+ if (vb2_is_busy(&video->vb2_q))
+ return -EBUSY;
+
+ ret = __video_try_fmt(video, f);
+ if (ret < 0)
+ return ret;
+
+ video->active_fmt = *f;
+
+ return 0;
+}
+
+static int video_enum_input(struct file *file, void *fh,
+ struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+
+ strlcpy(input->name, "camera", sizeof(input->name));
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int video_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ *input = 0;
+
+ return 0;
+}
+
+static int video_s_input(struct file *file, void *fh, unsigned int input)
+{
+ return input == 0 ? 0 : -EINVAL;
+}
+
+static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = {
+ .vidioc_querycap = video_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = video_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = video_g_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = video_s_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = video_try_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_input = video_enum_input,
+ .vidioc_g_input = video_g_input,
+ .vidioc_s_input = video_s_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static int video_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct camss_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh;
+ int ret;
+
+ mutex_lock(&video->lock);
+
+ vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
+ if (vfh == NULL) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+
+ v4l2_fh_init(vfh, vdev);
+ v4l2_fh_add(vfh);
+
+ file->private_data = vfh;
+
+ ret = v4l2_pipeline_pm_use(&vdev->entity, 1);
+ if (ret < 0) {
+ dev_err(video->camss->dev, "Failed to power up pipeline: %d\n",
+ ret);
+ goto error_pm_use;
+ }
+
+ mutex_unlock(&video->lock);
+
+ return 0;
+
+error_pm_use:
+ v4l2_fh_release(file);
+
+error_alloc:
+ mutex_unlock(&video->lock);
+
+ return ret;
+}
+
+static int video_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ vb2_fop_release(file);
+
+ v4l2_pipeline_pm_use(&vdev->entity, 0);
+
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct v4l2_file_operations msm_vid_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = video_open,
+ .release = video_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+/* -----------------------------------------------------------------------------
+ * CAMSS video core
+ */
+
+static void msm_video_release(struct video_device *vdev)
+{
+ struct camss_video *video = video_get_drvdata(vdev);
+
+ media_entity_cleanup(&vdev->entity);
+
+ mutex_destroy(&video->q_lock);
+ mutex_destroy(&video->lock);
+
+ if (atomic_dec_and_test(&video->camss->ref_count))
+ camss_delete(video->camss);
+}
+
+/*
+ * msm_video_init_format - Helper function to initialize format
+ * @video: struct camss_video
+ *
+ * Initialize the video device active format with default values.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int msm_video_init_format(struct camss_video *video)
+{
+ int ret;
+ struct v4l2_format format = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ .fmt.pix_mp = {
+ .width = 1920,
+ .height = 1080,
+ .pixelformat = video->formats[0].pixelformat,
+ },
+ };
+
+ ret = __video_try_fmt(video, &format);
+ if (ret < 0)
+ return ret;
+
+ video->active_fmt = format;
+
+ return 0;
+}
+
+/*
+ * msm_video_register - Register a video device node
+ * @video: struct camss_video
+ * @v4l2_dev: V4L2 device
+ * @name: name to be used for the video device node
+ * @is_pix: non-zero to select the PIX format table instead of the RDI one
+ *
+ * Initialize and register a video device node to a V4L2 device. Also
+ * initialize the vb2 queue.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
+ const char *name, int is_pix)
+{
+ struct media_pad *pad = &video->pad;
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ int ret;
+
+ vdev = &video->vdev;
+
+ mutex_init(&video->q_lock);
+
+ q = &video->vb2_q;
+ q->drv_priv = video;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->ops = &msm_video_vb2_q_ops;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->buf_struct_size = sizeof(struct camss_buffer);
+ q->dev = video->camss->dev;
+ q->lock = &video->q_lock;
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(v4l2_dev->dev, "Failed to init vb2 queue: %d\n", ret);
+ goto error_vb2_init;
+ }
+
+ pad->flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, pad);
+ if (ret < 0) {
+ dev_err(v4l2_dev->dev, "Failed to init video entity: %d\n",
+ ret);
+ goto error_media_init;
+ }
+
+ mutex_init(&video->lock);
+
+ if (video->camss->version == CAMSS_8x16) {
+ if (is_pix) {
+ video->formats = formats_pix_8x16;
+ video->nformats = ARRAY_SIZE(formats_pix_8x16);
+ } else {
+ video->formats = formats_rdi_8x16;
+ video->nformats = ARRAY_SIZE(formats_rdi_8x16);
+ }
+ } else if (video->camss->version == CAMSS_8x96) {
+ if (is_pix) {
+ video->formats = formats_pix_8x96;
+ video->nformats = ARRAY_SIZE(formats_pix_8x96);
+ } else {
+ video->formats = formats_rdi_8x96;
+ video->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
+ ret = -EINVAL;
+ goto error_video_register;
+ }
+
+ ret = msm_video_init_format(video);
+ if (ret < 0) {
+ dev_err(v4l2_dev->dev, "Failed to init format: %d\n", ret);
+ goto error_video_register;
+ }
+
+ vdev->fops = &msm_vid_fops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ vdev->ioctl_ops = &msm_vid_ioctl_ops;
+ vdev->release = msm_video_release;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = &video->vb2_q;
+ vdev->lock = &video->lock;
+ strlcpy(vdev->name, name, sizeof(vdev->name));
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(v4l2_dev->dev, "Failed to register video device: %d\n",
+ ret);
+ goto error_video_register;
+ }
+
+ video_set_drvdata(vdev, video);
+ atomic_inc(&video->camss->ref_count);
+
+ return 0;
+
+error_video_register:
+ media_entity_cleanup(&vdev->entity);
+ mutex_destroy(&video->lock);
+error_media_init:
+ vb2_queue_release(&video->vb2_q);
+error_vb2_init:
+ mutex_destroy(&video->q_lock);
+
+ return ret;
+}
+
+void msm_video_stop_streaming(struct camss_video *video)
+{
+ if (vb2_is_streaming(&video->vb2_q))
+ vb2_queue_release(&video->vb2_q);
+}
+
+void msm_video_unregister(struct camss_video *video)
+{
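+ /*
+  * Take an extra reference so that msm_video_release(), if called while
+  * the video device is being unregistered, does not drop the last
+  * reference and delete the camss device from this context.
+  */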
+ atomic_inc(&video->camss->ref_count);
+ video_unregister_device(&video->vdev);
+ atomic_dec(&video->camss->ref_count);
+}
diff --git a/drivers/media/platform/qcom/camss/camss-video.h b/drivers/media/platform/qcom/camss/camss-video.h
new file mode 100644
index 000000000..aa35e8cc6
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-video.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-video.h
+ *
+ * Qualcomm MSM Camera Subsystem - V4L2 device node
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_VIDEO_H
+#define QC_MSM_CAMSS_VIDEO_H
+
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-v4l2.h>
+
+struct camss_buffer {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t addr[3];
+ struct list_head queue;
+};
+
+struct camss_video;
+
+struct camss_video_ops {
+ int (*queue_buffer)(struct camss_video *vid, struct camss_buffer *buf);
+ int (*flush_buffers)(struct camss_video *vid,
+ enum vb2_buffer_state state);
+};
+
+struct camss_format_info;
+
+struct camss_video {
+ struct camss *camss;
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct media_pad pad;
+ struct v4l2_format active_fmt;
+ enum v4l2_buf_type type;
+ struct media_pipeline pipe;
+ const struct camss_video_ops *ops;
+ struct mutex lock;
+ struct mutex q_lock;
+ unsigned int bpl_alignment;
+ unsigned int line_based;
+ const struct camss_format_info *formats;
+ unsigned int nformats;
+};
+
+void msm_video_stop_streaming(struct camss_video *video);
+
+int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
+ const char *name, int is_pix);
+
+void msm_video_unregister(struct camss_video *video);
+
+#endif /* QC_MSM_CAMSS_VIDEO_H */
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
new file mode 100644
index 000000000..669615fff
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss.c
+ *
+ * Qualcomm MSM Camera Subsystem - Core
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/media-bus-format.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-fwnode.h>
+
+#include "camss.h"
+
+#define CAMSS_CLOCK_MARGIN_NUMERATOR 105
+#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100
+
+static const struct resources csiphy_res_8x16[] = {
+ /* CSIPHY0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000 } },
+ .reg = { "csiphy0", "csiphy0_clk_mux" },
+ .interrupt = { "csiphy0" }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000 } },
+ .reg = { "csiphy1", "csiphy1_clk_mux" },
+ .interrupt = { "csiphy1" }
+ }
+};
+
+static const struct resources csid_res_8x16[] = {
+ /* CSID0 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
+ "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" }
+ },
+
+ /* CSID1 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
+ "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" }
+ },
+};
+
+static const struct resources_ispif ispif_res_8x16 = {
+ /* ISPIF */
+ .clock = { "top_ahb", "ahb", "ispif_ahb",
+ "csi0", "csi0_pix", "csi0_rdi",
+ "csi1", "csi1_pix", "csi1_rdi" },
+ .clock_for_reset = { "vfe0", "csi_vfe0" },
+ .reg = { "ispif", "csi_clk_mux" },
+ .interrupt = "ispif"
+
+};
+
+static const struct resources vfe_res_8x16[] = {
+ /* VFE0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "vfe0", "csi_vfe0",
+ "vfe_ahb", "vfe_axi", "ahb" },
+ .clock_rate = { { 0 },
+ { 50000000, 80000000, 100000000, 160000000,
+ 177780000, 200000000, 266670000, 320000000,
+ 400000000, 465000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" }
+ }
+};
+
+static const struct resources csiphy_res_8x96[] = {
+ /* CSIPHY0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy0", "csiphy0_clk_mux" },
+ .interrupt = { "csiphy0" }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy1", "csiphy1_clk_mux" },
+ .interrupt = { "csiphy1" }
+ },
+
+ /* CSIPHY2 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy2", "csiphy2_clk_mux" },
+ .interrupt = { "csiphy2" }
+ }
+};
+
+static const struct resources csid_res_8x96[] = {
+ /* CSID0 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
+ "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" }
+ },
+
+ /* CSID1 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
+ "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" }
+ },
+
+ /* CSID2 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb",
+ "csi2", "csi2_phy", "csi2_pix", "csi2_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" }
+ },
+
+ /* CSID3 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb",
+ "csi3", "csi3_phy", "csi3_pix", "csi3_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid3" },
+ .interrupt = { "csid3" }
+ }
+};
+
+static const struct resources_ispif ispif_res_8x96 = {
+ /* ISPIF */
+ .clock = { "top_ahb", "ahb", "ispif_ahb",
+ "csi0", "csi0_pix", "csi0_rdi",
+ "csi1", "csi1_pix", "csi1_rdi",
+ "csi2", "csi2_pix", "csi2_rdi",
+ "csi3", "csi3_pix", "csi3_rdi" },
+ .clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
+ .reg = { "ispif", "csi_clk_mux" },
+ .interrupt = "ispif"
+};
+
+static const struct resources vfe_res_8x96[] = {
+ /* VFE0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ahb", "vfe0", "csi_vfe0", "vfe_ahb",
+ "vfe0_ahb", "vfe_axi", "vfe0_stream"},
+ .clock_rate = { { 0 },
+ { 0 },
+ { 75000000, 100000000, 300000000,
+ 320000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" }
+ },
+
+ /* VFE1 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ahb", "vfe1", "csi_vfe1", "vfe_ahb",
+ "vfe1_ahb", "vfe_axi", "vfe1_stream"},
+ .clock_rate = { { 0 },
+ { 0 },
+ { 75000000, 100000000, 300000000,
+ 320000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" }
+ }
+};
+
+/*
+ * camss_add_clock_margin - Add margin to clock frequency rate
+ * @rate: Clock frequency rate
+ *
+ * When making calculations with physical clock frequency values,
+ * a safety margin must be added to account for clock inaccuracies.
+ * Scale the rate by 105/100, i.e. add a 5% margin.
+ */
+inline void camss_add_clock_margin(u64 *rate)
+{
+ *rate *= CAMSS_CLOCK_MARGIN_NUMERATOR;
+ *rate = div_u64(*rate, CAMSS_CLOCK_MARGIN_DENOMINATOR);
+}
+
+/*
+ * camss_enable_clocks - Enable multiple clocks
+ * @nclocks: Number of clocks in clock array
+ * @clock: Clock array
+ * @dev: Device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int camss_enable_clocks(int nclocks, struct camss_clock *clock,
+ struct device *dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < nclocks; i++) {
+ ret = clk_prepare_enable(clock[i].clk);
+ if (ret) {
+ dev_err(dev, "clock enable failed: %d\n", ret);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (i--; i >= 0; i--)
+ clk_disable_unprepare(clock[i].clk);
+
+ return ret;
+}
+
+/*
+ * camss_disable_clocks - Disable multiple clocks
+ * @nclocks: Number of clocks in clock array
+ * @clock: Clock array
+ */
+void camss_disable_clocks(int nclocks, struct camss_clock *clock)
+{
+ int i;
+
+ for (i = nclocks - 1; i >= 0; i--)
+ clk_disable_unprepare(clock[i].clk);
+}
+
+/*
+ * camss_find_sensor - Find a linked media entity which represents a sensor
+ * @entity: Media entity to start searching from
+ *
+ * Return a pointer to sensor media entity or NULL if not found
+ */
+static struct media_entity *camss_find_sensor(struct media_entity *entity)
+{
+ struct media_pad *pad;
+
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ return NULL;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ return NULL;
+
+ entity = pad->entity;
+
+ if (entity->function == MEDIA_ENT_F_CAM_SENSOR)
+ return entity;
+ }
+}
+
+/*
+ * camss_get_pixel_clock - Get pixel clock rate from sensor
+ * @entity: Media entity in the current pipeline
+ * @pixel_clock: Pointer to be filled with the sensor pixel clock rate
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock)
+{
+ struct media_entity *sensor;
+ struct v4l2_subdev *subdev;
+ struct v4l2_ctrl *ctrl;
+
+ sensor = camss_find_sensor(entity);
+ if (!sensor)
+ return -ENODEV;
+
+ subdev = media_entity_to_v4l2_subdev(sensor);
+
+ ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
+
+ if (!ctrl)
+ return -EINVAL;
+
+ *pixel_clock = v4l2_ctrl_g_ctrl_int64(ctrl);
+
+ return 0;
+}
+
+int camss_pm_domain_on(struct camss *camss, int id)
+{
+ if (camss->version == CAMSS_8x96) {
+ camss->genpd_link[id] = device_link_add(camss->dev,
+ camss->genpd[id], DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+
+ if (!camss->genpd_link[id])
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void camss_pm_domain_off(struct camss *camss, int id)
+{
+ if (camss->version == CAMSS_8x96)
+ device_link_del(camss->genpd_link[id]);
+}
+
+/*
+ * camss_of_parse_endpoint_node - Parse port endpoint node
+ * @dev: Device
+ * @node: Device node to be parsed
+ * @csd: Parsed data from port endpoint node
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int camss_of_parse_endpoint_node(struct device *dev,
+ struct device_node *node,
+ struct camss_async_subdev *csd)
+{
+ struct csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lane_cfg;
+ struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2;
+ struct v4l2_fwnode_endpoint vep = { { 0 } };
+ unsigned int i;
+
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+
+ csd->interface.csiphy_id = vep.base.port;
+
+ mipi_csi2 = &vep.bus.mipi_csi2;
+ lncfg->clk.pos = mipi_csi2->clock_lane;
+ lncfg->clk.pol = mipi_csi2->lane_polarities[0];
+ lncfg->num_data = mipi_csi2->num_data_lanes;
+
+ lncfg->data = devm_kcalloc(dev,
+ lncfg->num_data, sizeof(*lncfg->data),
+ GFP_KERNEL);
+ if (!lncfg->data)
+ return -ENOMEM;
+
+ for (i = 0; i < lncfg->num_data; i++) {
+ lncfg->data[i].pos = mipi_csi2->data_lanes[i];
+ lncfg->data[i].pol = mipi_csi2->lane_polarities[i + 1];
+ }
+
+ return 0;
+}
+
+/*
+ * camss_of_parse_ports - Parse ports node
+ * @dev: Device
+ * @notifier: v4l2_device notifier data
+ *
+ * Return the number of endpoints found or a negative error code on failure
+ */
+static int camss_of_parse_ports(struct device *dev,
+ struct v4l2_async_notifier *notifier)
+{
+ struct device_node *node = NULL;
+ struct device_node *remote = NULL;
+ unsigned int size, i;
+ int ret;
+
+ while ((node = of_graph_get_next_endpoint(dev->of_node, node)))
+ if (of_device_is_available(node))
+ notifier->num_subdevs++;
+
+ of_node_put(node);
+ size = sizeof(*notifier->subdevs) * notifier->num_subdevs;
+ notifier->subdevs = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!notifier->subdevs) {
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ i = 0;
+ while ((node = of_graph_get_next_endpoint(dev->of_node, node))) {
+ struct camss_async_subdev *csd;
+
+ if (!of_device_is_available(node))
+ continue;
+
+ csd = devm_kzalloc(dev, sizeof(*csd), GFP_KERNEL);
+ if (!csd) {
+ of_node_put(node);
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ notifier->subdevs[i++] = &csd->asd;
+
+ ret = camss_of_parse_endpoint_node(dev, node, csd);
+ if (ret < 0) {
+ of_node_put(node);
+ return ret;
+ }
+
+ remote = of_graph_get_remote_port_parent(node);
+ if (!remote) {
+ dev_err(dev, "Cannot get remote parent\n");
+ of_node_put(node);
+ return -EINVAL;
+ }
+
+ csd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ csd->asd.match.fwnode = of_fwnode_handle(remote);
+ }
+ of_node_put(node);
+
+ return notifier->num_subdevs;
+}
+
+/*
+ * camss_init_subdevices - Initialize subdev structures and resources
+ * @camss: CAMSS device
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int camss_init_subdevices(struct camss *camss)
+{
+ const struct resources *csiphy_res;
+ const struct resources *csid_res;
+ const struct resources_ispif *ispif_res;
+ const struct resources *vfe_res;
+ unsigned int i;
+ int ret;
+
+ if (camss->version == CAMSS_8x16) {
+ csiphy_res = csiphy_res_8x16;
+ csid_res = csid_res_8x16;
+ ispif_res = &ispif_res_8x16;
+ vfe_res = vfe_res_8x16;
+ } else if (camss->version == CAMSS_8x96) {
+ csiphy_res = csiphy_res_8x96;
+ csid_res = csid_res_8x96;
+ ispif_res = &ispif_res_8x96;
+ vfe_res = vfe_res_8x96;
+ } else {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
+ &csiphy_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to init csiphy%d sub-device: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < camss->csid_num; i++) {
+ ret = msm_csid_subdev_init(camss, &camss->csid[i],
+ &csid_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to init csid%d sub-device: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ ret = msm_ispif_subdev_init(&camss->ispif, ispif_res);
+ if (ret < 0) {
+ dev_err(camss->dev, "Failed to init ispif sub-device: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < camss->vfe_num; i++) {
+ ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
+ &vfe_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Fail to init vfe%d sub-device: %d\n", i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * camss_register_entities - Register subdev nodes and create links
+ * @camss: CAMSS device
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int camss_register_entities(struct camss *camss)
+{
+ int i, j, k;
+ int ret;
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ ret = msm_csiphy_register_entity(&camss->csiphy[i],
+ &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to register csiphy%d entity: %d\n",
+ i, ret);
+ goto err_reg_csiphy;
+ }
+ }
+
+ for (i = 0; i < camss->csid_num; i++) {
+ ret = msm_csid_register_entity(&camss->csid[i],
+ &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to register csid%d entity: %d\n",
+ i, ret);
+ goto err_reg_csid;
+ }
+ }
+
+ ret = msm_ispif_register_entities(&camss->ispif, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev, "Failed to register ispif entities: %d\n",
+ ret);
+ goto err_reg_ispif;
+ }
+
+ for (i = 0; i < camss->vfe_num; i++) {
+ ret = msm_vfe_register_entities(&camss->vfe[i],
+ &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to register vfe%d entities: %d\n",
+ i, ret);
+ goto err_reg_vfe;
+ }
+ }
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ for (j = 0; j < camss->csid_num; j++) {
+ ret = media_create_pad_link(
+ &camss->csiphy[i].subdev.entity,
+ MSM_CSIPHY_PAD_SRC,
+ &camss->csid[j].subdev.entity,
+ MSM_CSID_PAD_SINK,
+ 0);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ camss->csiphy[i].subdev.entity.name,
+ camss->csid[j].subdev.entity.name,
+ ret);
+ goto err_link;
+ }
+ }
+ }
+
+ for (i = 0; i < camss->csid_num; i++) {
+ for (j = 0; j < camss->ispif.line_num; j++) {
+ ret = media_create_pad_link(
+ &camss->csid[i].subdev.entity,
+ MSM_CSID_PAD_SRC,
+ &camss->ispif.line[j].subdev.entity,
+ MSM_ISPIF_PAD_SINK,
+ 0);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ camss->csid[i].subdev.entity.name,
+ camss->ispif.line[j].subdev.entity.name,
+ ret);
+ goto err_link;
+ }
+ }
+ }
+
+ for (i = 0; i < camss->ispif.line_num; i++)
+ for (k = 0; k < camss->vfe_num; k++)
+ for (j = 0; j < ARRAY_SIZE(camss->vfe[k].line); j++) {
+ ret = media_create_pad_link(
+ &camss->ispif.line[i].subdev.entity,
+ MSM_ISPIF_PAD_SRC,
+ &camss->vfe[k].line[j].subdev.entity,
+ MSM_VFE_PAD_SINK,
+ 0);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ camss->ispif.line[i].subdev.entity.name,
+ camss->vfe[k].line[j].subdev.entity.name,
+ ret);
+ goto err_link;
+ }
+ }
+
+ return 0;
+
+err_link:
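+ /* All VFEs were registered before link creation started, so unwind every one of them */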
+ i = camss->vfe_num;
+err_reg_vfe:
+ for (i--; i >= 0; i--)
+ msm_vfe_unregister_entities(&camss->vfe[i]);
+
+ msm_ispif_unregister_entities(&camss->ispif);
+err_reg_ispif:
+
+ i = camss->csid_num;
+err_reg_csid:
+ for (i--; i >= 0; i--)
+ msm_csid_unregister_entity(&camss->csid[i]);
+
+ i = camss->csiphy_num;
+err_reg_csiphy:
+ for (i--; i >= 0; i--)
+ msm_csiphy_unregister_entity(&camss->csiphy[i]);
+
+ return ret;
+}
+
+/*
+ * camss_unregister_entities - Unregister subdev and video device nodes
+ * @camss: CAMSS device
+ */
+static void camss_unregister_entities(struct camss *camss)
+{
+ unsigned int i;
+
+ for (i = 0; i < camss->csiphy_num; i++)
+ msm_csiphy_unregister_entity(&camss->csiphy[i]);
+
+ for (i = 0; i < camss->csid_num; i++)
+ msm_csid_unregister_entity(&camss->csid[i]);
+
+ msm_ispif_unregister_entities(&camss->ispif);
+
+ for (i = 0; i < camss->vfe_num; i++)
+ msm_vfe_unregister_entities(&camss->vfe[i]);
+}
+
+static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct camss *camss = container_of(async, struct camss, notifier);
+ struct camss_async_subdev *csd =
+ container_of(asd, struct camss_async_subdev, asd);
+ u8 id = csd->interface.csiphy_id;
+ struct csiphy_device *csiphy = &camss->csiphy[id];
+
+ csiphy->cfg.csi2 = &csd->interface.csi2;
+ subdev->host_priv = csiphy;
+
+ return 0;
+}
+
+static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
+{
+ struct camss *camss = container_of(async, struct camss, notifier);
+ struct v4l2_device *v4l2_dev = &camss->v4l2_dev;
+ struct v4l2_subdev *sd;
+ int ret;
+
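+ /* Link each bound sensor to the CSIPHY it was matched with in the bound callback */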
+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
+ if (sd->host_priv) {
+ struct media_entity *sensor = &sd->entity;
+ struct csiphy_device *csiphy =
+ (struct csiphy_device *) sd->host_priv;
+ struct media_entity *input = &csiphy->subdev.entity;
+ unsigned int i;
+
+ for (i = 0; i < sensor->num_pads; i++) {
+ if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == sensor->num_pads) {
+ dev_err(camss->dev,
+ "No source pad in external entity\n");
+ return -EINVAL;
+ }
+
+ ret = media_create_pad_link(sensor, i,
+ input, MSM_CSIPHY_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ sensor->name, input->name, ret);
+ return ret;
+ }
+ }
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ return media_device_register(&camss->media_dev);
+}
+
+static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = {
+ .bound = camss_subdev_notifier_bound,
+ .complete = camss_subdev_notifier_complete,
+};
+
+static const struct media_device_ops camss_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
+/*
+ * camss_probe - Probe CAMSS platform device
+ * @pdev: Pointer to CAMSS platform device
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int camss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct camss *camss;
+ int ret;
+
+ camss = kzalloc(sizeof(*camss), GFP_KERNEL);
+ if (!camss)
+ return -ENOMEM;
+
+ atomic_set(&camss->ref_count, 0);
+ camss->dev = dev;
+ platform_set_drvdata(pdev, camss);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,msm8916-camss")) {
+ camss->version = CAMSS_8x16;
+ camss->csiphy_num = 2;
+ camss->csid_num = 2;
+ camss->vfe_num = 1;
+ } else if (of_device_is_compatible(dev->of_node,
+ "qcom,msm8996-camss")) {
+ camss->version = CAMSS_8x96;
+ camss->csiphy_num = 3;
+ camss->csid_num = 4;
+ camss->vfe_num = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+ sizeof(*camss->csiphy), GFP_KERNEL);
+ if (!camss->csiphy)
+ return -ENOMEM;
+
+ camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+ GFP_KERNEL);
+ if (!camss->csid)
+ return -ENOMEM;
+
+ camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+ GFP_KERNEL);
+ if (!camss->vfe)
+ return -ENOMEM;
+
+ ret = camss_of_parse_ports(dev, &camss->notifier);
+ if (ret < 0)
+ return ret;
+
+ ret = camss_init_subdevices(camss);
+ if (ret < 0)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dev, 0xffffffff);
+ if (ret)
+ return ret;
+
+ camss->media_dev.dev = camss->dev;
+ strlcpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
+ sizeof(camss->media_dev.model));
+ camss->media_dev.ops = &camss_media_ops;
+ media_device_init(&camss->media_dev);
+
+ camss->v4l2_dev.mdev = &camss->media_dev;
+ ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+ return ret;
+ }
+
+ ret = camss_register_entities(camss);
+ if (ret < 0)
+ goto err_register_entities;
+
+ if (camss->notifier.num_subdevs) {
+ camss->notifier.ops = &camss_subdev_notifier_ops;
+
+ ret = v4l2_async_notifier_register(&camss->v4l2_dev,
+ &camss->notifier);
+ if (ret) {
+ dev_err(dev,
+ "Failed to register async subdev nodes: %d\n",
+ ret);
+ goto err_register_subdevs;
+ }
+ } else {
+ ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev nodes: %d\n",
+ ret);
+ goto err_register_subdevs;
+ }
+
+ ret = media_device_register(&camss->media_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register media device: %d\n",
+ ret);
+ goto err_register_subdevs;
+ }
+ }
+
+ if (camss->version == CAMSS_8x96) {
+ camss->genpd[PM_DOMAIN_VFE0] = dev_pm_domain_attach_by_id(
+ camss->dev, PM_DOMAIN_VFE0);
+ if (IS_ERR(camss->genpd[PM_DOMAIN_VFE0]))
+ return PTR_ERR(camss->genpd[PM_DOMAIN_VFE0]);
+
+ camss->genpd[PM_DOMAIN_VFE1] = dev_pm_domain_attach_by_id(
+ camss->dev, PM_DOMAIN_VFE1);
+ if (IS_ERR(camss->genpd[PM_DOMAIN_VFE1])) {
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE0],
+ true);
+ return PTR_ERR(camss->genpd[PM_DOMAIN_VFE1]);
+ }
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_register_subdevs:
+ camss_unregister_entities(camss);
+err_register_entities:
+ v4l2_device_unregister(&camss->v4l2_dev);
+
+ return ret;
+}
+
+void camss_delete(struct camss *camss)
+{
+ v4l2_device_unregister(&camss->v4l2_dev);
+ media_device_unregister(&camss->media_dev);
+ media_device_cleanup(&camss->media_dev);
+
+ pm_runtime_disable(camss->dev);
+
+ if (camss->version == CAMSS_8x96) {
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE0], true);
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE1], true);
+ }
+
+ kfree(camss);
+}
+
+/*
+ * camss_remove - Remove CAMSS platform device
+ * @pdev: Pointer to CAMSS platform device
+ *
+ * Always returns 0.
+ */
+static int camss_remove(struct platform_device *pdev)
+{
+ unsigned int i;
+
+ struct camss *camss = platform_get_drvdata(pdev);
+
+ for (i = 0; i < camss->vfe_num; i++)
+ msm_vfe_stop_streaming(&camss->vfe[i]);
+
+ v4l2_async_notifier_unregister(&camss->notifier);
+ camss_unregister_entities(camss);
+
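+	/* Free the camss structure only when no one else still holds a reference to it. */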
+ if (atomic_read(&camss->ref_count) == 0)
+ camss_delete(camss);
+
+ return 0;
+}
+
+static const struct of_device_id camss_dt_match[] = {
+ { .compatible = "qcom,msm8916-camss" },
+ { .compatible = "qcom,msm8996-camss" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, camss_dt_match);
+
+static int __maybe_unused camss_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused camss_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops camss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(camss_runtime_suspend, camss_runtime_resume, NULL)
+};
+
+static struct platform_driver qcom_camss_driver = {
+ .probe = camss_probe,
+ .remove = camss_remove,
+ .driver = {
+ .name = "qcom-camss",
+ .of_match_table = camss_dt_match,
+ .pm = &camss_pm_ops,
+ },
+};
+
+module_platform_driver(qcom_camss_driver);
+
+MODULE_ALIAS("platform:qcom-camss");
+MODULE_DESCRIPTION("Qualcomm Camera Subsystem driver");
+MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
new file mode 100644
index 000000000..418996d8d
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss.h
+ *
+ * Qualcomm MSM Camera Subsystem - Core
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_H
+#define QC_MSM_CAMSS_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+
+#include "camss-csid.h"
+#include "camss-csiphy.h"
+#include "camss-ispif.h"
+#include "camss-vfe.h"
+
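+/*
+ * The helpers below recover the containing struct camss (and its struct
+ * device) from a pointer to one of its sub-module members, or from a
+ * pointer to element 'index' of a sub-module array.
+ */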
+#define to_camss(ptr_module) \
+ container_of(ptr_module, struct camss, ptr_module)
+
+#define to_device(ptr_module) \
+ (to_camss(ptr_module)->dev)
+
+#define module_pointer(ptr_module, index) \
+ ((const struct ptr_module##_device (*)[]) &(ptr_module[-(index)]))
+
+#define to_camss_index(ptr_module, index) \
+ container_of(module_pointer(ptr_module, index), \
+ struct camss, ptr_module)
+
+#define to_device_index(ptr_module, index) \
+ (to_camss_index(ptr_module, index)->dev)
+
+#define CAMSS_RES_MAX 17
+
+struct resources {
+ char *regulator[CAMSS_RES_MAX];
+ char *clock[CAMSS_RES_MAX];
+ u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX];
+ char *reg[CAMSS_RES_MAX];
+ char *interrupt[CAMSS_RES_MAX];
+};
+
+struct resources_ispif {
+ char *clock[CAMSS_RES_MAX];
+ char *clock_for_reset[CAMSS_RES_MAX];
+ char *reg[CAMSS_RES_MAX];
+ char *interrupt;
+};
+
+enum pm_domain {
+ PM_DOMAIN_VFE0,
+ PM_DOMAIN_VFE1,
+ PM_DOMAIN_COUNT
+};
+
+enum camss_version {
+ CAMSS_8x16,
+ CAMSS_8x96,
+};
+
+struct camss {
+ enum camss_version version;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_async_notifier notifier;
+ struct media_device media_dev;
+ struct device *dev;
+ int csiphy_num;
+ struct csiphy_device *csiphy;
+ int csid_num;
+ struct csid_device *csid;
+ struct ispif_device ispif;
+ int vfe_num;
+ struct vfe_device *vfe;
+ atomic_t ref_count;
+ struct device *genpd[PM_DOMAIN_COUNT];
+ struct device_link *genpd_link[PM_DOMAIN_COUNT];
+};
+
+struct camss_camera_interface {
+ u8 csiphy_id;
+ struct csiphy_csi2_cfg csi2;
+};
+
+struct camss_async_subdev {
+ struct camss_camera_interface interface;
+ struct v4l2_async_subdev asd;
+};
+
+struct camss_clock {
+ struct clk *clk;
+ const char *name;
+ u32 *freq;
+ u32 nfreqs;
+};
+
+void camss_add_clock_margin(u64 *rate);
+int camss_enable_clocks(int nclocks, struct camss_clock *clock,
+ struct device *dev);
+void camss_disable_clocks(int nclocks, struct camss_clock *clock);
+int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock);
+int camss_pm_domain_on(struct camss *camss, int id);
+void camss_pm_domain_off(struct camss *camss, int id);
+void camss_delete(struct camss *camss);
+
+#endif /* QC_MSM_CAMSS_H */
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
new file mode 100644
index 000000000..b44b11b03
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Qualcomm Venus driver
+
+venus-core-objs += core.o helpers.o firmware.o \
+ hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \
+ hfi_parser.o
+
+venus-dec-objs += vdec.o vdec_ctrls.o
+venus-enc-objs += venc.o venc_ctrls.o
+
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-core.o
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-dec.o
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-enc.o
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
new file mode 100644
index 000000000..168f5af6a
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+
+#include "core.h"
+#include "vdec.h"
+#include "venc.h"
+#include "firmware.h"
+
+static void venus_event_notify(struct venus_core *core, u32 event)
+{
+ struct venus_inst *inst;
+
+ switch (event) {
+ case EVT_SYS_WATCHDOG_TIMEOUT:
+ case EVT_SYS_ERROR:
+ break;
+ default:
+ return;
+ }
+
+ mutex_lock(&core->lock);
+ core->sys_error = true;
+ list_for_each_entry(inst, &core->instances, list)
+ inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
+ mutex_unlock(&core->lock);
+
+ disable_irq_nosync(core->irq);
+
+ /*
+ * Delay recovery to ensure venus has completed any pending cache
+ * operations. Without this sleep, we see device reset when firmware is
+ * unloaded after a system error.
+ */
+ schedule_delayed_work(&core->work, msecs_to_jiffies(100));
+}
+
+static const struct hfi_core_ops venus_core_ops = {
+ .event_notify = venus_event_notify,
+};
+
+static void venus_sys_error_handler(struct work_struct *work)
+{
+ struct venus_core *core =
+ container_of(work, struct venus_core, work.work);
+ int ret = 0;
+
+ dev_warn(core->dev, "system error has occurred, starting recovery!\n");
+
+ pm_runtime_get_sync(core->dev);
+
+ hfi_core_deinit(core, true);
+ hfi_destroy(core);
+ mutex_lock(&core->lock);
+ venus_shutdown(core->dev);
+
+ pm_runtime_put_sync(core->dev);
+
+ ret |= hfi_create(core, &venus_core_ops);
+
+ pm_runtime_get_sync(core->dev);
+
+ ret |= venus_boot(core->dev, core->res->fwname);
+
+ ret |= hfi_core_resume(core, true);
+
+ enable_irq(core->irq);
+
+ mutex_unlock(&core->lock);
+
+ ret |= hfi_core_init(core);
+
+ pm_runtime_put_sync(core->dev);
+
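+	/* If any recovery step failed, keep the IRQ masked and retry shortly. */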
+ if (ret) {
+ disable_irq_nosync(core->irq);
+ dev_warn(core->dev, "recovery failed (%d)\n", ret);
+ schedule_delayed_work(&core->work, msecs_to_jiffies(10));
+ return;
+ }
+
+ mutex_lock(&core->lock);
+ core->sys_error = false;
+ mutex_unlock(&core->lock);
+}
+
+static int venus_clks_get(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ struct device *dev = core->dev;
+ unsigned int i;
+
+ for (i = 0; i < res->clks_num; i++) {
+ core->clks[i] = devm_clk_get(dev, res->clks[i]);
+ if (IS_ERR(core->clks[i]))
+ return PTR_ERR(core->clks[i]);
+ }
+
+ return 0;
+}
+
+static int venus_clks_enable(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < res->clks_num; i++) {
+ ret = clk_prepare_enable(core->clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ while (i--)
+ clk_disable_unprepare(core->clks[i]);
+
+ return ret;
+}
+
+static void venus_clks_disable(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i = res->clks_num;
+
+ while (i--)
+ clk_disable_unprepare(core->clks[i]);
+}
+
+static u32 to_v4l2_codec_type(u32 codec)
+{
+ switch (codec) {
+ case HFI_VIDEO_CODEC_H264:
+ return V4L2_PIX_FMT_H264;
+ case HFI_VIDEO_CODEC_H263:
+ return V4L2_PIX_FMT_H263;
+ case HFI_VIDEO_CODEC_MPEG1:
+ return V4L2_PIX_FMT_MPEG1;
+ case HFI_VIDEO_CODEC_MPEG2:
+ return V4L2_PIX_FMT_MPEG2;
+ case HFI_VIDEO_CODEC_MPEG4:
+ return V4L2_PIX_FMT_MPEG4;
+ case HFI_VIDEO_CODEC_VC1:
+ return V4L2_PIX_FMT_VC1_ANNEX_G;
+ case HFI_VIDEO_CODEC_VP8:
+ return V4L2_PIX_FMT_VP8;
+ case HFI_VIDEO_CODEC_VP9:
+ return V4L2_PIX_FMT_VP9;
+ case HFI_VIDEO_CODEC_DIVX:
+ case HFI_VIDEO_CODEC_DIVX_311:
+ return V4L2_PIX_FMT_XVID;
+ default:
+ return 0;
+ }
+}
+
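+/*
+ * On HFI 1.x cores a short-lived session is opened for every advertised
+ * codec so that per-codec capabilities can be gathered from the firmware.
+ */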
+static int venus_enumerate_codecs(struct venus_core *core, u32 type)
+{
+ const struct hfi_inst_ops dummy_ops = {};
+ struct venus_inst *inst;
+ u32 codec, codecs;
+ unsigned int i;
+ int ret;
+
+ if (core->res->hfi_version != HFI_VERSION_1XX)
+ return 0;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ mutex_init(&inst->lock);
+ inst->core = core;
+ inst->session_type = type;
+ if (type == VIDC_SESSION_TYPE_DEC)
+ codecs = core->dec_codecs;
+ else
+ codecs = core->enc_codecs;
+
+ ret = hfi_session_create(inst, &dummy_ops);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < MAX_CODEC_NUM; i++) {
+ codec = (1 << i) & codecs;
+ if (!codec)
+ continue;
+
+ ret = hfi_session_init(inst, to_v4l2_codec_type(codec));
+ if (ret)
+ goto done;
+
+ ret = hfi_session_deinit(inst);
+ if (ret)
+ goto done;
+ }
+
+done:
+ hfi_session_destroy(inst);
+err:
+ mutex_destroy(&inst->lock);
+ kfree(inst);
+
+ return ret;
+}
+
+static int venus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct venus_core *core;
+ struct resource *r;
+ int ret;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->dev = dev;
+ platform_set_drvdata(pdev, core);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ core->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(core->base))
+ return PTR_ERR(core->base);
+
+ core->irq = platform_get_irq(pdev, 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ core->res = of_device_get_match_data(dev);
+ if (!core->res)
+ return -ENODEV;
+
+ ret = venus_clks_get(core);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
+ if (ret)
+ return ret;
+
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ INIT_LIST_HEAD(&core->instances);
+ mutex_init(&core->lock);
+ INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
+
+ ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, hfi_isr_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "venus", core);
+ if (ret)
+ return ret;
+
+ ret = hfi_create(core, &venus_core_ops);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_runtime_disable;
+
+ ret = venus_boot(dev, core->res->fwname);
+ if (ret)
+ goto err_runtime_disable;
+
+ ret = hfi_core_resume(core, true);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = hfi_core_init(core);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = v4l2_device_register(dev, &core->v4l2_dev);
+ if (ret)
+ goto err_core_deinit;
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_dev_unregister;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret) {
+ pm_runtime_get_noresume(dev);
+ goto err_dev_unregister;
+ }
+
+ return 0;
+
+err_dev_unregister:
+ v4l2_device_unregister(&core->v4l2_dev);
+err_core_deinit:
+ hfi_core_deinit(core, false);
+err_venus_shutdown:
+ venus_shutdown(dev);
+err_runtime_disable:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+ hfi_destroy(core);
+ return ret;
+}
+
+static int venus_remove(struct platform_device *pdev)
+{
+ struct venus_core *core = platform_get_drvdata(pdev);
+ struct device *dev = core->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ WARN_ON(ret < 0);
+
+ ret = hfi_core_deinit(core, true);
+ WARN_ON(ret);
+
+ hfi_destroy(core);
+ venus_shutdown(dev);
+ of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ v4l2_device_unregister(&core->v4l2_dev);
+
+ return ret;
+}
+
+static __maybe_unused int venus_runtime_suspend(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ ret = hfi_core_suspend(core);
+
+ venus_clks_disable(core);
+
+ return ret;
+}
+
+static __maybe_unused int venus_runtime_resume(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ ret = venus_clks_enable(core);
+ if (ret)
+ return ret;
+
+ ret = hfi_core_resume(core, false);
+ if (ret)
+ goto err_clks_disable;
+
+ return 0;
+
+err_clks_disable:
+ venus_clks_disable(core);
+ return ret;
+}
+
+static const struct dev_pm_ops venus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(venus_runtime_suspend, venus_runtime_resume, NULL)
+};
+
+static const struct freq_tbl msm8916_freq_table[] = {
+ { 352800, 228570000 }, /* 1920x1088 @ 30 + 1280x720 @ 30 */
+ { 244800, 160000000 }, /* 1920x1088 @ 30 */
+ { 108000, 100000000 }, /* 1280x720 @ 30 */
+};
+
+static const struct reg_val msm8916_reg_preset[] = {
+ { 0xe0020, 0x05555556 },
+ { 0xe0024, 0x05555556 },
+ { 0x80124, 0x00000003 },
+};
+
+static const struct venus_resources msm8916_res = {
+ .freq_tbl = msm8916_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(msm8916_freq_table),
+ .reg_tbl = msm8916_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(msm8916_reg_preset),
+ .clks = { "core", "iface", "bus", },
+ .clks_num = 3,
+ .max_load = 352800, /* 720p@30 + 1080p@30 */
+ .hfi_version = HFI_VERSION_1XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xddc00000 - 1,
+ .fwname = "qcom/venus-1.8/venus.mdt",
+};
+
+static const struct freq_tbl msm8996_freq_table[] = {
+ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
+ { 972000, 520000000 }, /* 4k UHD @ 30 */
+ { 489600, 346666667 }, /* 1080p @ 60 */
+ { 244800, 150000000 }, /* 1080p @ 30 */
+ { 108000, 75000000 }, /* 720p @ 30 */
+};
+
+static const struct reg_val msm8996_reg_preset[] = {
+ { 0x80010, 0xffffffff },
+ { 0x80018, 0x00001556 },
+ { 0x8001C, 0x00001556 },
+};
+
+static const struct venus_resources msm8996_res = {
+ .freq_tbl = msm8996_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(msm8996_freq_table),
+ .reg_tbl = msm8996_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset),
+ .clks = {"core", "iface", "bus", "mbus" },
+ .clks_num = 4,
+ .max_load = 2563200,
+ .hfi_version = HFI_VERSION_3XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xddc00000 - 1,
+ .fwname = "qcom/venus-4.2/venus.mdt",
+};
+
+static const struct freq_tbl sdm845_freq_table[] = {
+ { 1944000, 380000000 }, /* 4k UHD @ 60 */
+ { 972000, 320000000 }, /* 4k UHD @ 30 */
+ { 489600, 200000000 }, /* 1080p @ 60 */
+ { 244800, 100000000 }, /* 1080p @ 30 */
+};
+
+static const struct venus_resources sdm845_res = {
+ .freq_tbl = sdm845_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .clks = {"core", "iface", "bus" },
+ .clks_num = 3,
+ .max_load = 2563200,
+ .hfi_version = HFI_VERSION_4XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/venus-5.2/venus.mdt",
+};
+
+static const struct of_device_id venus_dt_match[] = {
+ { .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
+ { .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
+ { .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, venus_dt_match);
+
+static struct platform_driver qcom_venus_driver = {
+ .probe = venus_probe,
+ .remove = venus_remove,
+ .driver = {
+ .name = "qcom-venus",
+ .of_match_table = venus_dt_match,
+ .pm = &venus_pm_ops,
+ },
+};
+module_platform_driver(qcom_venus_driver);
+
+MODULE_ALIAS("platform:qcom-venus");
+MODULE_DESCRIPTION("Qualcomm Venus video encoder and decoder driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
new file mode 100644
index 000000000..2f02365f4
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VENUS_CORE_H_
+#define __VENUS_CORE_H_
+
+#include <linux/list.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#include "hfi.h"
+
+#define VIDC_CLKS_NUM_MAX 4
+
+struct freq_tbl {
+ unsigned int load;
+ unsigned long freq;
+};
+
+struct reg_val {
+ u32 reg;
+ u32 value;
+};
+
+struct venus_resources {
+ u64 dma_mask;
+ const struct freq_tbl *freq_tbl;
+ unsigned int freq_tbl_size;
+ const struct reg_val *reg_tbl;
+ unsigned int reg_tbl_size;
+ const char * const clks[VIDC_CLKS_NUM_MAX];
+ unsigned int clks_num;
+ enum hfi_version hfi_version;
+ u32 max_load;
+ unsigned int vmem_id;
+ u32 vmem_size;
+ u32 vmem_addr;
+ const char *fwname;
+};
+
+struct venus_format {
+ u32 pixfmt;
+ unsigned int num_planes;
+ u32 type;
+};
+
+#define MAX_PLANES 4
+#define MAX_FMT_ENTRIES 32
+#define MAX_CAP_ENTRIES 32
+#define MAX_ALLOC_MODE_ENTRIES 16
+#define MAX_CODEC_NUM 32
+
+struct raw_formats {
+ u32 buftype;
+ u32 fmt;
+};
+
+struct venus_caps {
+ u32 codec;
+ u32 domain;
+ bool cap_bufs_mode_dynamic;
+ unsigned int num_caps;
+ struct hfi_capability caps[MAX_CAP_ENTRIES];
+ unsigned int num_pl;
+ struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
+ unsigned int num_fmts;
+ struct raw_formats fmts[MAX_FMT_ENTRIES];
+ bool valid; /* used only for Venus v1xx */
+};
+
+/**
+ * struct venus_core - holds core parameters valid for all instances
+ *
+ * @base: IO memory base address
+ * @irq: Venus irq
+ * @clks: an array of struct clk pointers
+ * @core0_clk: a struct clk pointer for core0
+ * @core1_clk: a struct clk pointer for core1
+ * @core0_bus_clk: a struct clk pointer for core0 bus clock
+ * @core1_bus_clk: a struct clk pointer for core1 bus clock
+ * @vdev_dec: a reference to video device structure for decoder instances
+ * @vdev_enc: a reference to video device structure for encoder instances
+ * @v4l2_dev: a holder for v4l2 device structure
+ * @res: a reference to venus resources structure
+ * @dev: convenience struct device pointer
+ * @dev_dec: convenience struct device pointer for decoder device
+ * @dev_enc: convenience struct device pointer for encoder device
+ * @lock: a lock for this structure
+ * @instances: a list_head of all instances
+ * @insts_count: num of instances
+ * @state: the state of the venus core
+ * @done: a completion for sync HFI operations
+ * @error: an error returned during the last HFI sync operation
+ * @sys_error: an error flag that signals a system error event
+ * @core_ops: the core operations
+ * @enc_codecs: encoders supported by this core
+ * @dec_codecs: decoders supported by this core
+ * @max_sessions_supported: holds the maximum number of sessions
+ * @core_caps: core capabilities
+ * @priv: a private field for HFI operations
+ * @ops: the core HFI operations
+ * @work: a delayed work for handling fatal system errors
+ */
+struct venus_core {
+ void __iomem *base;
+ int irq;
+ struct clk *clks[VIDC_CLKS_NUM_MAX];
+ struct clk *core0_clk;
+ struct clk *core1_clk;
+ struct clk *core0_bus_clk;
+ struct clk *core1_bus_clk;
+ struct video_device *vdev_dec;
+ struct video_device *vdev_enc;
+ struct v4l2_device v4l2_dev;
+ const struct venus_resources *res;
+ struct device *dev;
+ struct device *dev_dec;
+ struct device *dev_enc;
+ struct mutex lock;
+ struct list_head instances;
+ atomic_t insts_count;
+ unsigned int state;
+ struct completion done;
+ unsigned int error;
+ bool sys_error;
+ const struct hfi_core_ops *core_ops;
+ unsigned long enc_codecs;
+ unsigned long dec_codecs;
+ unsigned int max_sessions_supported;
+#define ENC_ROTATION_CAPABILITY 0x1
+#define ENC_SCALING_CAPABILITY 0x2
+#define ENC_DEINTERLACE_CAPABILITY 0x4
+#define DEC_MULTI_STREAM_CAPABILITY 0x8
+ unsigned int core_caps;
+ void *priv;
+ const struct hfi_ops *ops;
+ struct delayed_work work;
+ struct venus_caps caps[MAX_CODEC_NUM];
+ unsigned int codecs_count;
+};
+
+struct vdec_controls {
+ u32 post_loop_deb_mode;
+ u32 profile;
+ u32 level;
+};
+
+struct venc_controls {
+ u16 gop_size;
+ u32 num_p_frames;
+ u32 num_b_frames;
+ u32 bitrate_mode;
+ u32 bitrate;
+ u32 bitrate_peak;
+
+ u32 h264_i_period;
+ u32 h264_entropy_mode;
+ u32 h264_i_qp;
+ u32 h264_p_qp;
+ u32 h264_b_qp;
+ u32 h264_min_qp;
+ u32 h264_max_qp;
+ u32 h264_loop_filter_mode;
+ s32 h264_loop_filter_alpha;
+ s32 h264_loop_filter_beta;
+
+ u32 vp8_min_qp;
+ u32 vp8_max_qp;
+
+ u32 multi_slice_mode;
+ u32 multi_slice_max_bytes;
+ u32 multi_slice_max_mb;
+
+ u32 header_mode;
+
+ struct {
+ u32 mpeg4;
+ u32 h264;
+ u32 vpx;
+ u32 hevc;
+ } profile;
+ struct {
+ u32 mpeg4;
+ u32 h264;
+ u32 hevc;
+ } level;
+};
+
+struct venus_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ dma_addr_t dma_addr;
+ u32 size;
+ struct list_head reg_list;
+ u32 flags;
+ struct list_head ref_list;
+};
+
+#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
+
+/**
+ * struct venus_inst - holds per instance parameters
+ *
+ * @list: used to attach an instance to the core
+ * @lock: instance lock
+ * @core: a reference to the core struct
+ * @dpbbufs: a list of decoded picture buffers
+ * @internalbufs: a list of internal buffers
+ * @registeredbufs: a list of registered capture buffers
+ * @delayed_process: a list of delayed buffers
+ * @delayed_process_work: a work_struct for processing delayed buffers
+ * @ctrl_handler: v4l control handler
+ * @controls: a union of decoder and encoder control parameters
+ * @fh: a holder of v4l file handle structure
+ * @streamon_cap: stream on flag for capture queue
+ * @streamon_out: stream on flag for output queue
+ * @width: current capture width
+ * @height: current capture height
+ * @out_width: current output width
+ * @out_height: current output height
+ * @colorspace: current color space
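+ * @ycbcr_enc: current YCbCr encoding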
+ * @quantization: current quantization
+ * @xfer_func: current xfer function
+ * @fps: holds current FPS
+ * @timeperframe: holds current time per frame structure
+ * @fmt_out: a reference to output format structure
+ * @fmt_cap: a reference to capture format structure
+ * @num_input_bufs: holds number of input buffers
+ * @num_output_bufs: holds number of output buffers
+ * @input_buf_size: holds input buffer size
+ * @output_buf_size: holds output buffer size
+ * @output2_buf_size: holds secondary decoder output buffer size
+ * @dpb_buftype: decoded picture buffer type
+ * @dpb_fmt: decoded picture buffer raw format
+ * @opb_buftype: output picture buffer type
+ * @opb_fmt: output picture buffer raw format
+ * @reconfig: a flag raised by decoder when the stream resolution changed
+ * @reconfig_width: holds the new width
+ * @reconfig_height: holds the new height
+ * @hfi_codec: current codec for this instance in HFI space
+ * @sequence_cap: a sequence counter for capture queue
+ * @sequence_out: a sequence counter for output queue
+ * @m2m_dev: a reference to m2m device structure
+ * @m2m_ctx: a reference to m2m context structure
+ * @state: current state of the instance
+ * @done: a completion for sync HFI operation
+ * @error: an error returned during the last HFI sync operation
+ * @session_error: a flag raised by the HFI interface in case of a session error
+ * @ops: HFI operations
+ * @priv: a private field for HFI operations callbacks
+ * @session_type: the type of the session (decoder or encoder)
+ * @hprop: a union used to hold the result of a get property request
+ */
+struct venus_inst {
+ struct list_head list;
+ struct mutex lock;
+ struct venus_core *core;
+ struct list_head dpbbufs;
+ struct list_head internalbufs;
+ struct list_head registeredbufs;
+ struct list_head delayed_process;
+ struct work_struct delayed_process_work;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ union {
+ struct vdec_controls dec;
+ struct venc_controls enc;
+ } controls;
+ struct v4l2_fh fh;
+ unsigned int streamon_cap, streamon_out;
+ u32 width;
+ u32 height;
+ u32 out_width;
+ u32 out_height;
+ u32 colorspace;
+ u8 ycbcr_enc;
+ u8 quantization;
+ u8 xfer_func;
+ u64 fps;
+ struct v4l2_fract timeperframe;
+ const struct venus_format *fmt_out;
+ const struct venus_format *fmt_cap;
+ unsigned int num_input_bufs;
+ unsigned int num_output_bufs;
+ unsigned int input_buf_size;
+ unsigned int output_buf_size;
+ unsigned int output2_buf_size;
+ u32 dpb_buftype;
+ u32 dpb_fmt;
+ u32 opb_buftype;
+ u32 opb_fmt;
+ bool reconfig;
+ u32 reconfig_width;
+ u32 reconfig_height;
+ u32 hfi_codec;
+ u32 sequence_cap;
+ u32 sequence_out;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ unsigned int state;
+ struct completion done;
+ unsigned int error;
+ bool session_error;
+ const struct hfi_inst_ops *ops;
+ u32 session_type;
+ union hfi_get_property hprop;
+};
+
+#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
+#define IS_V3(core) ((core)->res->hfi_version == HFI_VERSION_3XX)
+#define IS_V4(core) ((core)->res->hfi_version == HFI_VERSION_4XX)
+
+#define ctrl_to_inst(ctrl) \
+ container_of((ctrl)->handler, struct venus_inst, ctrl_handler)
+
+static inline struct venus_inst *to_inst(struct file *filp)
+{
+ return container_of(filp->private_data, struct venus_inst, fh);
+}
+
+static inline void *to_hfi_priv(struct venus_core *core)
+{
+ return core->priv;
+}
+
+static inline struct venus_caps *
+venus_caps_by_codec(struct venus_core *core, u32 codec, u32 domain)
+{
+ unsigned int c;
+
+ for (c = 0; c < core->codecs_count; c++) {
+ if (core->caps[c].codec == codec &&
+ core->caps[c].domain == domain)
+ return &core->caps[c];
+ }
+
+ return NULL;
+}
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
new file mode 100644
index 000000000..c4a577848
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "firmware.h"
+
+#define VENUS_PAS_ID 9
+#define VENUS_FW_MEM_SIZE (6 * SZ_1M)
+
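+/*
+ * Load the firmware image into the reserved memory region described by the
+ * "memory-region" phandle and use the SCM peripheral authentication service
+ * to verify it and bring the Venus core out of reset.
+ */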
+int venus_boot(struct device *dev, const char *fwname)
+{
+ const struct firmware *mdt;
+ struct device_node *node;
+ phys_addr_t mem_phys;
+ struct resource r;
+ ssize_t fw_size;
+ size_t mem_size;
+ void *mem_va;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) || !qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
+ if (mem_size < VENUS_FW_MEM_SIZE)
+ return -EINVAL;
+
+ mem_va = memremap(r.start, mem_size, MEMREMAP_WC);
+ if (!mem_va) {
+ dev_err(dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, mem_size);
+ return -ENOMEM;
+ }
+
+ ret = request_firmware(&mdt, fwname, dev);
+ if (ret < 0)
+ goto err_unmap;
+
+ fw_size = qcom_mdt_get_size(mdt);
+ if (fw_size < 0) {
+ ret = fw_size;
+ release_firmware(mdt);
+ goto err_unmap;
+ }
+
+ ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys,
+ mem_size, NULL);
+
+ release_firmware(mdt);
+
+ if (ret)
+ goto err_unmap;
+
+ ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
+ if (ret)
+ goto err_unmap;
+
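+	/* The mapping is released on both the success and the error path. */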
+err_unmap:
+ memunmap(mem_va);
+ return ret;
+}
+
+int venus_shutdown(struct device *dev)
+{
+ return qcom_scm_pas_shutdown(VENUS_PAS_ID);
+}
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h
new file mode 100644
index 000000000..428efb56d
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/firmware.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_FIRMWARE_H__
+#define __VENUS_FIRMWARE_H__
+
+struct device;
+
+int venus_boot(struct device *dev, const char *fwname);
+int venus_shutdown(struct device *dev);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
new file mode 100644
index 000000000..cd3b96e6f
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -0,0 +1,1236 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-mem2mem.h>
+#include <asm/div64.h>
+
+#include "core.h"
+#include "helpers.h"
+#include "hfi_helper.h"
+#include "hfi_venus_io.h"
+
+struct intbuf {
+ struct list_head list;
+ u32 type;
+ size_t size;
+ void *va;
+ dma_addr_t da;
+ unsigned long attrs;
+};
+
+bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
+{
+ struct venus_core *core = inst->core;
+ u32 session_type = inst->session_type;
+ u32 codec;
+
+ switch (v4l2_pixfmt) {
+ case V4L2_PIX_FMT_H264:
+ codec = HFI_VIDEO_CODEC_H264;
+ break;
+ case V4L2_PIX_FMT_H263:
+ codec = HFI_VIDEO_CODEC_H263;
+ break;
+ case V4L2_PIX_FMT_MPEG1:
+ codec = HFI_VIDEO_CODEC_MPEG1;
+ break;
+ case V4L2_PIX_FMT_MPEG2:
+ codec = HFI_VIDEO_CODEC_MPEG2;
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ codec = HFI_VIDEO_CODEC_MPEG4;
+ break;
+ case V4L2_PIX_FMT_VC1_ANNEX_G:
+ case V4L2_PIX_FMT_VC1_ANNEX_L:
+ codec = HFI_VIDEO_CODEC_VC1;
+ break;
+ case V4L2_PIX_FMT_VP8:
+ codec = HFI_VIDEO_CODEC_VP8;
+ break;
+ case V4L2_PIX_FMT_VP9:
+ codec = HFI_VIDEO_CODEC_VP9;
+ break;
+ case V4L2_PIX_FMT_XVID:
+ codec = HFI_VIDEO_CODEC_DIVX;
+ break;
+ case V4L2_PIX_FMT_HEVC:
+ codec = HFI_VIDEO_CODEC_HEVC;
+ break;
+ default:
+ return false;
+ }
+
+ if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec)
+ return true;
+
+ if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(venus_helper_check_codec);
+
+static int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
+{
+ struct intbuf *buf;
+ int ret = 0;
+
+ list_for_each_entry(buf, &inst->dpbbufs, list) {
+ struct hfi_frame_data fdata;
+
+ memset(&fdata, 0, sizeof(fdata));
+ fdata.alloc_len = buf->size;
+ fdata.device_addr = buf->da;
+ fdata.buffer_type = buf->type;
+
+ ret = hfi_session_process_buf(inst, &fdata);
+ if (ret)
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+
+int venus_helper_free_dpb_bufs(struct venus_inst *inst)
+{
+ struct intbuf *buf, *n;
+
+ list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
+ list_del_init(&buf->list);
+ dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
+ buf->attrs);
+ kfree(buf);
+ }
+
+ INIT_LIST_HEAD(&inst->dpbbufs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);
+
+int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev;
+ enum hfi_version ver = core->res->hfi_version;
+ struct hfi_buffer_requirements bufreq;
+ u32 buftype = inst->dpb_buftype;
+ unsigned int dpb_size = 0;
+ struct intbuf *buf;
+ unsigned int i;
+ u32 count;
+ int ret;
+
+ /* no need to allocate dpb buffers */
+ if (!inst->dpb_fmt)
+ return 0;
+
+ if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
+ dpb_size = inst->output_buf_size;
+ else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
+ dpb_size = inst->output2_buf_size;
+
+ if (!dpb_size)
+ return 0;
+
+ ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
+ if (ret)
+ return ret;
+
+ count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+
+ for (i = 0; i < count; i++) {
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ buf->type = buftype;
+ buf->size = dpb_size;
+ buf->attrs = DMA_ATTR_WRITE_COMBINE |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
+ buf->attrs);
+ if (!buf->va) {
+ kfree(buf);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_add_tail(&buf->list, &inst->dpbbufs);
+ }
+
+ return 0;
+
+fail:
+ venus_helper_free_dpb_bufs(inst);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);
+
+static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev;
+ struct hfi_buffer_requirements bufreq;
+ struct hfi_buffer_desc bd;
+ struct intbuf *buf;
+ unsigned int i;
+ int ret;
+
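+	/*
+	 * If the firmware reports no requirements for this buffer type,
+	 * there is nothing to allocate for it.
+	 */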
+ ret = venus_helper_get_bufreq(inst, type, &bufreq);
+ if (ret)
+ return 0;
+
+ if (!bufreq.size)
+ return 0;
+
+ for (i = 0; i < bufreq.count_actual; i++) {
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ buf->type = bufreq.type;
+ buf->size = bufreq.size;
+ buf->attrs = DMA_ATTR_WRITE_COMBINE |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
+ buf->attrs);
+ if (!buf->va) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memset(&bd, 0, sizeof(bd));
+ bd.buffer_size = buf->size;
+ bd.buffer_type = buf->type;
+ bd.num_buffers = 1;
+ bd.device_addr = buf->da;
+
+ ret = hfi_session_set_buffers(inst, &bd);
+ if (ret) {
+ dev_err(dev, "set session buffers failed\n");
+ goto dma_free;
+ }
+
+ list_add_tail(&buf->list, &inst->internalbufs);
+ }
+
+ return 0;
+
+dma_free:
+ dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
+fail:
+ kfree(buf);
+ return ret;
+}
+
+static int intbufs_unset_buffers(struct venus_inst *inst)
+{
+ struct hfi_buffer_desc bd = {0};
+ struct intbuf *buf, *n;
+ int ret = 0;
+
+ list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
+ bd.buffer_size = buf->size;
+ bd.buffer_type = buf->type;
+ bd.num_buffers = 1;
+ bd.device_addr = buf->da;
+ bd.response_required = true;
+
+ ret = hfi_session_unset_buffers(inst, &bd);
+
+ list_del_init(&buf->list);
+ dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
+ buf->attrs);
+ kfree(buf);
+ }
+
+ return ret;
+}
+
+static const unsigned int intbuf_types_1xx[] = {
+ HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_PERSIST,
+ HFI_BUFFER_INTERNAL_PERSIST_1,
+};
+
+static const unsigned int intbuf_types_4xx[] = {
+ HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
+ HFI_BUFFER_INTERNAL_PERSIST,
+ HFI_BUFFER_INTERNAL_PERSIST_1,
+};
+
+static int intbufs_alloc(struct venus_inst *inst)
+{
+ const unsigned int *intbuf;
+ size_t arr_sz, i;
+ int ret;
+
+ if (IS_V4(inst->core)) {
+ arr_sz = ARRAY_SIZE(intbuf_types_4xx);
+ intbuf = intbuf_types_4xx;
+ } else {
+ arr_sz = ARRAY_SIZE(intbuf_types_1xx);
+ intbuf = intbuf_types_1xx;
+ }
+
+ for (i = 0; i < arr_sz; i++) {
+ ret = intbufs_set_buffer(inst, intbuf[i]);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ intbufs_unset_buffers(inst);
+ return ret;
+}
+
+static int intbufs_free(struct venus_inst *inst)
+{
+ return intbufs_unset_buffers(inst);
+}
+
+static u32 load_per_instance(struct venus_inst *inst)
+{
+ u32 mbs;
+
+ if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
+ return 0;
+
+ mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
+
+ return mbs * inst->fps;
+}
+
+static u32 load_per_type(struct venus_core *core, u32 session_type)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->session_type != session_type)
+ continue;
+
+ mbs_per_sec += load_per_instance(inst);
+ }
+ mutex_unlock(&core->lock);
+
+ return mbs_per_sec;
+}
+
+static int load_scale_clocks(struct venus_core *core)
+{
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ unsigned long freq = table[0].freq;
+ struct clk *clk = core->clks[0];
+ struct device *dev = core->dev;
+ u32 mbs_per_sec;
+ unsigned int i;
+ int ret;
+
+ mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
+ load_per_type(core, VIDC_SESSION_TYPE_DEC);
+
+ if (mbs_per_sec > core->res->max_load)
+ dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
+ mbs_per_sec, core->res->max_load);
+
+ if (!mbs_per_sec && num_rows > 1) {
+ freq = table[num_rows - 1].freq;
+ goto set_freq;
+ }
+
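+	/*
+	 * The frequency table is ordered from highest to lowest load; pick
+	 * the lowest frequency whose rated load still covers the current
+	 * aggregate macroblocks-per-second.
+	 */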
+ for (i = 0; i < num_rows; i++) {
+ if (mbs_per_sec > table[i].load)
+ break;
+ freq = table[i].freq;
+ }
+
+set_freq:
+
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ goto err;
+
+ ret = clk_set_rate(core->core0_clk, freq);
+ if (ret)
+ goto err;
+
+ ret = clk_set_rate(core->core1_clk, freq);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
+ return ret;
+}
+
+static void fill_buffer_desc(const struct venus_buffer *buf,
+ struct hfi_buffer_desc *bd, bool response)
+{
+ memset(bd, 0, sizeof(*bd));
+ bd->buffer_type = HFI_BUFFER_OUTPUT;
+ bd->buffer_size = buf->size;
+ bd->num_buffers = 1;
+ bd->device_addr = buf->dma_addr;
+ bd->response_required = response;
+}
+
+static void return_buf_error(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
+ else
+ v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+}
+
+static int
+session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ unsigned int type = vb->type;
+ struct hfi_frame_data fdata;
+ int ret;
+
+ memset(&fdata, 0, sizeof(fdata));
+ fdata.alloc_len = buf->size;
+ fdata.device_addr = buf->dma_addr;
+ fdata.timestamp = vb->timestamp;
+ do_div(fdata.timestamp, NSEC_PER_USEC);
+ fdata.flags = 0;
+ fdata.clnt_data = vbuf->vb2_buf.index;
+
+ if (!fdata.timestamp)
+ fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fdata.buffer_type = HFI_BUFFER_INPUT;
+ fdata.filled_len = vb2_get_plane_payload(vb, 0);
+ fdata.offset = vb->planes[0].data_offset;
+
+ if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
+ fdata.flags |= HFI_BUFFERFLAG_EOS;
+ } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ fdata.buffer_type = HFI_BUFFER_OUTPUT;
+ else
+ fdata.buffer_type = inst->opb_buftype;
+ fdata.filled_len = 0;
+ fdata.offset = 0;
+ }
+
+ ret = hfi_session_process_buf(inst, &fdata);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool is_dynamic_bufmode(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct venus_caps *caps;
+
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
+ return 0;
+
+ return caps->cap_bufs_mode_dynamic;
+}
+
+static int session_unregister_bufs(struct venus_inst *inst)
+{
+ struct venus_buffer *buf, *n;
+ struct hfi_buffer_desc bd;
+ int ret = 0;
+
+ if (is_dynamic_bufmode(inst))
+ return 0;
+
+ list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
+ fill_buffer_desc(buf, &bd, true);
+ ret = hfi_session_unset_buffers(inst, &bd);
+ list_del_init(&buf->reg_list);
+ }
+
+ return ret;
+}
+
+static int session_register_bufs(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev;
+ struct hfi_buffer_desc bd;
+ struct venus_buffer *buf;
+ int ret = 0;
+
+ if (is_dynamic_bufmode(inst))
+ return 0;
+
+ list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
+ fill_buffer_desc(buf, &bd, false);
+ ret = hfi_session_set_buffers(inst, &bd);
+ if (ret) {
+ dev_err(dev, "%s: set buffer failed\n", __func__);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
+{
+ switch (v4l2_fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return HFI_COLOR_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV21:
+ return HFI_COLOR_FORMAT_NV21;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
+ struct hfi_buffer_requirements *req)
+{
+ u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
+ union hfi_get_property hprop;
+ unsigned int i;
+ int ret;
+
+ if (req)
+ memset(req, 0, sizeof(*req));
+
+ ret = hfi_session_get_property(inst, ptype, &hprop);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
+ if (hprop.bufreq[i].type != type)
+ continue;
+
+ if (req)
+ memcpy(req, &hprop.bufreq[i], sizeof(*req));
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
+
+static u32 get_framesize_raw_nv12(u32 width, u32 height)
+{
+ u32 y_stride, uv_stride, y_plane;
+ u32 y_sclines, uv_sclines, uv_plane;
+ u32 size;
+
+ y_stride = ALIGN(width, 128);
+ uv_stride = ALIGN(width, 128);
+ y_sclines = ALIGN(height, 32);
+ uv_sclines = ALIGN(((height + 1) >> 1), 16);
+
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + SZ_4K;
+ size = y_plane + uv_plane + SZ_8K;
+
+ return ALIGN(size, SZ_4K);
+}
+
+static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
+{
+ u32 y_meta_stride, y_meta_plane;
+ u32 y_stride, y_plane;
+ u32 uv_meta_stride, uv_meta_plane;
+ u32 uv_stride, uv_plane;
+ u32 extradata = SZ_16K;
+
+ y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
+ y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
+ y_meta_plane = ALIGN(y_meta_plane, SZ_4K);
+
+ y_stride = ALIGN(width, 128);
+ y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);
+
+ uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
+ uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
+ uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);
+
+ uv_stride = ALIGN(width, 128);
+ uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);
+
+ return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
+ max(extradata, y_stride * 48), SZ_4K);
+}
+
+u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
+{
+ switch (hfi_fmt) {
+ case HFI_COLOR_FORMAT_NV12:
+ case HFI_COLOR_FORMAT_NV21:
+ return get_framesize_raw_nv12(width, height);
+ case HFI_COLOR_FORMAT_NV12_UBWC:
+ return get_framesize_raw_nv12_ubwc(width, height);
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);
+
+u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
+{
+ u32 hfi_fmt, sz;
+ bool compressed;
+
+ switch (v4l2_fmt) {
+ case V4L2_PIX_FMT_MPEG:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_H264_NO_SC:
+ case V4L2_PIX_FMT_H264_MVC:
+ case V4L2_PIX_FMT_H263:
+ case V4L2_PIX_FMT_MPEG1:
+ case V4L2_PIX_FMT_MPEG2:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_XVID:
+ case V4L2_PIX_FMT_VC1_ANNEX_G:
+ case V4L2_PIX_FMT_VC1_ANNEX_L:
+ case V4L2_PIX_FMT_VP8:
+ case V4L2_PIX_FMT_VP9:
+ case V4L2_PIX_FMT_HEVC:
+ compressed = true;
+ break;
+ default:
+ compressed = false;
+ break;
+ }
+
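+	/*
+	 * Compressed bitstream buffers are sized to half of a raw NV12 frame
+	 * (3/2 bytes per pixel), rounded up to a 4K page.
+	 */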
+ if (compressed) {
+ sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
+ return ALIGN(sz, SZ_4K);
+ }
+
+ hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
+ if (!hfi_fmt)
+ return 0;
+
+ return venus_helper_get_framesz_raw(hfi_fmt, width, height);
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_framesz);
+
+int venus_helper_set_input_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ struct hfi_framesize fs;
+
+ fs.buffer_type = HFI_BUFFER_INPUT;
+ fs.width = width;
+ fs.height = height;
+
+ return hfi_session_set_property(inst, ptype, &fs);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
+
+int venus_helper_set_output_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height,
+ u32 buftype)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ struct hfi_framesize fs;
+
+ fs.buffer_type = buftype;
+ fs.width = width;
+ fs.height = height;
+
+ return hfi_session_set_property(inst, ptype, &fs);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
+
+int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
+ struct hfi_video_work_mode wm;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ wm.video_work_mode = mode;
+
+ return hfi_session_set_property(inst, ptype, &wm);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
+
+int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+ struct hfi_videocores_usage_type cu;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ cu.video_core_enable_mask = usage;
+
+ return hfi_session_set_property(inst, ptype, &cu);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
+
+int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
+ unsigned int output_bufs,
+ unsigned int output2_bufs)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
+ struct hfi_buffer_count_actual buf_count;
+ int ret;
+
+ buf_count.type = HFI_BUFFER_INPUT;
+ buf_count.count_actual = input_bufs;
+
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ if (ret)
+ return ret;
+
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = output_bufs;
+
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ if (ret)
+ return ret;
+
+ if (output2_bufs) {
+ buf_count.type = HFI_BUFFER_OUTPUT2;
+ buf_count.count_actual = output2_bufs;
+
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
+
+int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
+ u32 buftype)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
+ struct hfi_uncompressed_format_select fmt;
+
+ fmt.buffer_type = buftype;
+ fmt.format = hfi_format;
+
+ return hfi_session_set_property(inst, ptype, &fmt);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);
+
+int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
+{
+ u32 hfi_format, buftype;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC)
+ buftype = HFI_BUFFER_OUTPUT;
+ else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ buftype = HFI_BUFFER_INPUT;
+ else
+ return -EINVAL;
+
+ hfi_format = to_hfi_raw_fmt(pixfmt);
+ if (!hfi_format)
+ return -EINVAL;
+
+ return venus_helper_set_raw_format(inst, hfi_format, buftype);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
+
+int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
+ bool out2_en)
+{
+ struct hfi_multi_stream multi = {0};
+ u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ int ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT;
+ multi.enable = out_en;
+
+ ret = hfi_session_set_property(inst, ptype, &multi);
+ if (ret)
+ return ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT2;
+ multi.enable = out2_en;
+
+ return hfi_session_set_property(inst, ptype, &multi);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_multistream);
+
+int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
+ struct hfi_buffer_alloc_mode mode;
+ int ret;
+
+ if (!is_dynamic_bufmode(inst))
+ return 0;
+
+ mode.type = HFI_BUFFER_OUTPUT;
+ mode.mode = HFI_BUFFER_MODE_DYNAMIC;
+
+ ret = hfi_session_set_property(inst, ptype, &mode);
+ if (ret)
+ return ret;
+
+ mode.type = HFI_BUFFER_OUTPUT2;
+
+ return hfi_session_set_property(inst, ptype, &mode);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);
+
+int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
+ struct hfi_buffer_size_actual bufsz;
+
+ bufsz.type = buftype;
+ bufsz.size = bufsize;
+
+ return hfi_session_set_property(inst, ptype, &bufsz);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);
+
+unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
+{
+ /* the encoder has only one output */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ return inst->output_buf_size;
+
+ if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
+ return inst->output_buf_size;
+ else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
+ return inst->output2_buf_size;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);
+
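+/*
+ * Requeue capture buffers that were held read-only by the firmware once
+ * their reference has been released.
+ */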
+static void delayed_process_buf_func(struct work_struct *work)
+{
+ struct venus_buffer *buf, *n;
+ struct venus_inst *inst;
+ int ret;
+
+ inst = container_of(work, struct venus_inst, delayed_process_work);
+
+ mutex_lock(&inst->lock);
+
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
+ if (buf->flags & HFI_BUFFERFLAG_READONLY)
+ continue;
+
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret)
+ return_buf_error(inst, &buf->vb);
+
+ list_del_init(&buf->ref_list);
+ }
+unlock:
+ mutex_unlock(&inst->lock);
+}
+
+void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
+{
+ struct venus_buffer *buf;
+
+ list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
+ if (buf->vb.vb2_buf.index == idx) {
+ buf->flags &= ~HFI_BUFFERFLAG_READONLY;
+ schedule_work(&inst->delayed_process_work);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);
+
+void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
+{
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+ buf->flags |= HFI_BUFFERFLAG_READONLY;
+}
+EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);
+
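+/*
+ * If the firmware still holds a read-only reference to the buffer, defer
+ * queueing it until the reference is released.
+ */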
+static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+ if (buf->flags & HFI_BUFFERFLAG_READONLY) {
+ list_add_tail(&buf->ref_list, &inst->delayed_process);
+ schedule_work(&inst->delayed_process_work);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct vb2_v4l2_buffer *
+venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
+ else
+ return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
+}
+EXPORT_SYMBOL_GPL(venus_helper_find_buf);
+
+int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+ struct sg_table *sgt;
+
+ sgt = vb2_dma_sg_plane_desc(vb, 0);
+ if (!sgt)
+ return -EFAULT;
+
+ buf->size = vb2_plane_size(vb, 0);
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ list_add_tail(&buf->reg_list, &inst->registeredbufs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
+
+int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int out_buf_size = venus_helper_get_opb_size(inst);
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vb2_plane_size(vb, 0) < out_buf_size)
+ return -EINVAL;
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ vb2_plane_size(vb, 0) < inst->input_buf_size)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);
+
+void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ ret = is_buf_refed(inst, vbuf);
+ if (ret)
+ goto unlock;
+
+ ret = session_process_buf(inst, vbuf);
+ if (ret)
+ return_buf_error(inst, vbuf);
+
+unlock:
+ mutex_unlock(&inst->lock);
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);
+
+void venus_helper_buffers_done(struct venus_inst *inst,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *buf;
+
+ while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+}
+EXPORT_SYMBOL_GPL(venus_helper_buffers_done);
+
+void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
+ int ret;
+
+ mutex_lock(&inst->lock);
+
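+	/* Tear down the session only once both queues had been streaming. */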
+ if (inst->streamon_out & inst->streamon_cap) {
+ ret = hfi_session_stop(inst);
+ ret |= hfi_session_unload_res(inst);
+ ret |= session_unregister_bufs(inst);
+ ret |= intbufs_free(inst);
+ ret |= hfi_session_deinit(inst);
+
+ if (inst->session_error || core->sys_error)
+ ret = -EIO;
+
+ if (ret)
+ hfi_session_abort(inst);
+
+ venus_helper_free_dpb_bufs(inst);
+
+ load_scale_clocks(core);
+ INIT_LIST_HEAD(&inst->registeredbufs);
+ }
+
+ venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 0;
+ else
+ inst->streamon_cap = 0;
+
+ mutex_unlock(&inst->lock);
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
+
+int venus_helper_vb2_start_streaming(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ int ret;
+
+ ret = intbufs_alloc(inst);
+ if (ret)
+ return ret;
+
+ ret = session_register_bufs(inst);
+ if (ret)
+ goto err_bufs_free;
+
+ load_scale_clocks(core);
+
+ ret = hfi_session_load_res(inst);
+ if (ret)
+ goto err_unreg_bufs;
+
+ ret = hfi_session_start(inst);
+ if (ret)
+ goto err_unload_res;
+
+ ret = venus_helper_queue_dpb_bufs(inst);
+ if (ret)
+ goto err_session_stop;
+
+ return 0;
+
+err_session_stop:
+ hfi_session_stop(inst);
+err_unload_res:
+ hfi_session_unload_res(inst);
+err_unreg_bufs:
+ session_unregister_bufs(inst);
+err_bufs_free:
+ intbufs_free(inst);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);
+
+void venus_helper_m2m_device_run(void *priv)
+{
+ struct venus_inst *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buf, *n;
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret)
+ return_buf_error(inst, &buf->vb);
+ }
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret)
+ return_buf_error(inst, &buf->vb);
+ }
+
+ mutex_unlock(&inst->lock);
+}
+EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);
+
+void venus_helper_m2m_job_abort(void *priv)
+{
+ struct venus_inst *inst = priv;
+
+ v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);
+
+void venus_helper_init_instance(struct venus_inst *inst)
+{
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ INIT_LIST_HEAD(&inst->delayed_process);
+ INIT_WORK(&inst->delayed_process_work,
+ delayed_process_buf_func);
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_init_instance);
+
+static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < caps->num_fmts; i++) {
+ if (caps->fmts[i].buftype == buftype &&
+ caps->fmts[i].fmt == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
+ u32 *out_fmt, u32 *out2_fmt, bool ubwc)
+{
+ struct venus_core *core = inst->core;
+ struct venus_caps *caps;
+ u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
+ bool found, found_ubwc;
+
+ *out_fmt = *out2_fmt = 0;
+
+ if (!fmt)
+ return -EINVAL;
+
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
+ return -EINVAL;
+
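+ /*
+ * Prefer the UBWC variant of the format on the OUTPUT buffer paired with
+ * the raw format on OUTPUT2; otherwise fall back to the raw format on
+ * whichever output buffer type the codec advertises.
+ */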
+ if (ubwc) {
+ ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
+ found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
+ ubwc_fmt);
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+
+ if (found_ubwc && found) {
+ *out_fmt = ubwc_fmt;
+ *out2_fmt = fmt;
+ return 0;
+ }
+ }
+
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
+ if (found) {
+ *out_fmt = fmt;
+ *out2_fmt = 0;
+ return 0;
+ }
+
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+ if (found) {
+ *out_fmt = 0;
+ *out2_fmt = fmt;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);
+
+int venus_helper_power_enable(struct venus_core *core, u32 session_type,
+ bool enable)
+{
+ void __iomem *ctrl, *stat;
+ u32 val;
+ int ret;
+
+ if (!IS_V3(core) && !IS_V4(core))
+ return 0;
+
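+ /*
+ * Writing 0 to the power control register enables codec power, writing 1
+ * power-collapses it; on v4 the MMCC power status bit is additionally
+ * polled to confirm the transition.
+ */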
+ if (IS_V3(core)) {
+ if (session_type == VIDC_SESSION_TYPE_DEC)
+ ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
+ else
+ ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
+ if (enable)
+ writel(0, ctrl);
+ else
+ writel(1, ctrl);
+
+ return 0;
+ }
+
+ if (session_type == VIDC_SESSION_TYPE_DEC) {
+ ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
+ } else {
+ ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
+ }
+
+ if (enable) {
+ writel(0, ctrl);
+
+ ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
+ if (ret)
+ return ret;
+ } else {
+ writel(1, ctrl);
+
+ ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_power_enable);
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
new file mode 100644
index 000000000..2475f284f
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HELPERS_H__
+#define __VENUS_HELPERS_H__
+
+#include <media/videobuf2-v4l2.h>
+
+struct venus_inst;
+
+bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt);
+struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst,
+ unsigned int type, u32 idx);
+void venus_helper_buffers_done(struct venus_inst *inst,
+ enum vb2_buffer_state state);
+int venus_helper_vb2_buf_init(struct vb2_buffer *vb);
+int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb);
+void venus_helper_vb2_buf_queue(struct vb2_buffer *vb);
+void venus_helper_vb2_stop_streaming(struct vb2_queue *q);
+int venus_helper_vb2_start_streaming(struct venus_inst *inst);
+void venus_helper_m2m_device_run(void *priv);
+void venus_helper_m2m_job_abort(void *priv);
+int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
+ struct hfi_buffer_requirements *req);
+u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height);
+u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height);
+int venus_helper_set_input_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height);
+int venus_helper_set_output_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height,
+ u32 buftype);
+int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
+int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
+int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
+ unsigned int output_bufs,
+ unsigned int output2_bufs);
+int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
+ u32 buftype);
+int venus_helper_set_color_format(struct venus_inst *inst, u32 fmt);
+int venus_helper_set_dyn_bufmode(struct venus_inst *inst);
+int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype);
+int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
+ bool out2_en);
+unsigned int venus_helper_get_opb_size(struct venus_inst *inst);
+void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf);
+void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx);
+void venus_helper_init_instance(struct venus_inst *inst);
+int venus_helper_get_out_fmts(struct venus_inst *inst, u32 fmt, u32 *out_fmt,
+ u32 *out2_fmt, bool ubwc);
+int venus_helper_alloc_dpb_bufs(struct venus_inst *inst);
+int venus_helper_free_dpb_bufs(struct venus_inst *inst);
+int venus_helper_power_enable(struct venus_core *core, u32 session_type,
+ bool enable);
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
new file mode 100644
index 000000000..8a99e2d82
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include "core.h"
+#include "hfi.h"
+#include "hfi_cmds.h"
+#include "hfi_venus.h"
+
+#define TIMEOUT msecs_to_jiffies(1000)
+
+static u32 to_codec_type(u32 pixfmt)
+{
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_H264_NO_SC:
+ return HFI_VIDEO_CODEC_H264;
+ case V4L2_PIX_FMT_H263:
+ return HFI_VIDEO_CODEC_H263;
+ case V4L2_PIX_FMT_MPEG1:
+ return HFI_VIDEO_CODEC_MPEG1;
+ case V4L2_PIX_FMT_MPEG2:
+ return HFI_VIDEO_CODEC_MPEG2;
+ case V4L2_PIX_FMT_MPEG4:
+ return HFI_VIDEO_CODEC_MPEG4;
+ case V4L2_PIX_FMT_VC1_ANNEX_G:
+ case V4L2_PIX_FMT_VC1_ANNEX_L:
+ return HFI_VIDEO_CODEC_VC1;
+ case V4L2_PIX_FMT_VP8:
+ return HFI_VIDEO_CODEC_VP8;
+ case V4L2_PIX_FMT_VP9:
+ return HFI_VIDEO_CODEC_VP9;
+ case V4L2_PIX_FMT_XVID:
+ return HFI_VIDEO_CODEC_DIVX;
+ case V4L2_PIX_FMT_HEVC:
+ return HFI_VIDEO_CODEC_HEVC;
+ default:
+ return 0;
+ }
+}
+
+int hfi_core_init(struct venus_core *core)
+{
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+
+ if (core->state >= CORE_INIT)
+ goto unlock;
+
+ reinit_completion(&core->done);
+
+ ret = core->ops->core_init(core);
+ if (ret)
+ goto unlock;
+
+ ret = wait_for_completion_timeout(&core->done, TIMEOUT);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ ret = 0;
+
+ if (core->error != HFI_ERR_NONE) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ core->state = CORE_INIT;
+unlock:
+ mutex_unlock(&core->lock);
+ return ret;
+}
+
+int hfi_core_deinit(struct venus_core *core, bool blocking)
+{
+ int ret = 0, empty;
+
+ mutex_lock(&core->lock);
+
+ if (core->state == CORE_UNINIT)
+ goto unlock;
+
+ empty = list_empty(&core->instances);
+
+ if (!empty && !blocking) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
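+ /*
+ * In blocking mode, drop the core lock and wait until the last instance
+ * is destroyed (insts_count reaches zero in hfi_session_destroy()) before
+ * deinitializing the core.
+ */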
+ if (!empty) {
+ mutex_unlock(&core->lock);
+ wait_var_event(&core->insts_count,
+ !atomic_read(&core->insts_count));
+ mutex_lock(&core->lock);
+ }
+
+ if (!core->ops)
+ goto unlock;
+
+ ret = core->ops->core_deinit(core);
+
+ if (!ret)
+ core->state = CORE_UNINIT;
+
+unlock:
+ mutex_unlock(&core->lock);
+ return ret;
+}
+
+int hfi_core_suspend(struct venus_core *core)
+{
+ if (core->state != CORE_INIT)
+ return 0;
+
+ return core->ops->suspend(core);
+}
+
+int hfi_core_resume(struct venus_core *core, bool force)
+{
+ if (!force && core->state != CORE_INIT)
+ return 0;
+
+ return core->ops->resume(core);
+}
+
+int hfi_core_trigger_ssr(struct venus_core *core, u32 type)
+{
+ return core->ops->core_trigger_ssr(core, type);
+}
+
+int hfi_core_ping(struct venus_core *core)
+{
+ int ret;
+
+ mutex_lock(&core->lock);
+
+ ret = core->ops->core_ping(core, 0xbeef);
+ if (ret)
+ goto unlock;
+
+ ret = wait_for_completion_timeout(&core->done, TIMEOUT);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+ ret = 0;
+ if (core->error != HFI_ERR_NONE)
+ ret = -ENODEV;
+unlock:
+ mutex_unlock(&core->lock);
+ return ret;
+}
+
+static int wait_session_msg(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&inst->done, TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (inst->error != HFI_ERR_NONE)
+ return -EIO;
+
+ return 0;
+}
+
+int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
+{
+ struct venus_core *core = inst->core;
+
+ if (!ops)
+ return -EINVAL;
+
+ inst->state = INST_UNINIT;
+ init_completion(&inst->done);
+ inst->ops = ops;
+
+ mutex_lock(&core->lock);
+ list_add_tail(&inst->list, &core->instances);
+ atomic_inc(&core->insts_count);
+ mutex_unlock(&core->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_create);
+
+int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
+{
+ struct venus_core *core = inst->core;
+ const struct hfi_ops *ops = core->ops;
+ int ret;
+
+ inst->hfi_codec = to_codec_type(pixfmt);
+ reinit_completion(&inst->done);
+
+ ret = ops->session_init(inst, inst->session_type, inst->hfi_codec);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_INIT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_init);
+
+void hfi_session_destroy(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+
+ mutex_lock(&core->lock);
+ list_del_init(&inst->list);
+ if (atomic_dec_and_test(&core->insts_count))
+ wake_up_var(&core->insts_count);
+ mutex_unlock(&core->lock);
+}
+EXPORT_SYMBOL_GPL(hfi_session_destroy);
+
+int hfi_session_deinit(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state == INST_UNINIT)
+ return 0;
+
+ if (inst->state < INST_INIT)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_end(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_UNINIT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_deinit);
+
+int hfi_session_start(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_LOAD_RESOURCES)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_start(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_START;
+
+ return 0;
+}
+
+int hfi_session_stop(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_START)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_stop(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_STOP;
+
+ return 0;
+}
+
+int hfi_session_continue(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+
+ if (core->res->hfi_version == HFI_VERSION_1XX)
+ return 0;
+
+ return core->ops->session_continue(inst);
+}
+EXPORT_SYMBOL_GPL(hfi_session_continue);
+
+int hfi_session_abort(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_abort(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int hfi_session_load_res(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_INIT)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_load_res(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_LOAD_RESOURCES;
+
+ return 0;
+}
+
+int hfi_session_unload_res(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_STOP)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_release_res(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_RELEASE_RESOURCES;
+
+ return 0;
+}
+
+int hfi_session_flush(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_flush(inst, HFI_FLUSH_ALL);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_flush);
+
+int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+
+ return ops->session_set_buffers(inst, bd);
+}
+
+int hfi_session_unset_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_unset_buffers(inst, bd);
+ if (ret)
+ return ret;
+
+ if (!bd->response_required)
+ return 0;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
+ union hfi_get_property *hprop)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state < INST_INIT || inst->state >= INST_STOP)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_get_property(inst, ptype);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ *hprop = inst->hprop;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_get_property);
+
+int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+
+ if (inst->state < INST_INIT || inst->state >= INST_STOP)
+ return -EINVAL;
+
+ return ops->session_set_property(inst, ptype, pdata);
+}
+EXPORT_SYMBOL_GPL(hfi_session_set_property);
+
+int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+
+ if (fd->buffer_type == HFI_BUFFER_INPUT)
+ return ops->session_etb(inst, fd);
+ else if (fd->buffer_type == HFI_BUFFER_OUTPUT ||
+ fd->buffer_type == HFI_BUFFER_OUTPUT2)
+ return ops->session_ftb(inst, fd);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(hfi_session_process_buf);
+
+irqreturn_t hfi_isr_thread(int irq, void *dev_id)
+{
+ struct venus_core *core = dev_id;
+
+ return core->ops->isr_thread(core);
+}
+
+irqreturn_t hfi_isr(int irq, void *dev)
+{
+ struct venus_core *core = dev;
+
+ return core->ops->isr(core);
+}
+
+int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops)
+{
+ int ret;
+
+ if (!ops)
+ return -EINVAL;
+
+ atomic_set(&core->insts_count, 0);
+ core->core_ops = ops;
+ core->state = CORE_UNINIT;
+ init_completion(&core->done);
+ pkt_set_version(core->res->hfi_version);
+ ret = venus_hfi_create(core);
+
+ return ret;
+}
+
+void hfi_destroy(struct venus_core *core)
+{
+ venus_hfi_destroy(core);
+}
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
new file mode 100644
index 000000000..6038d8e0a
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __HFI_H__
+#define __HFI_H__
+
+#include <linux/interrupt.h>
+
+#include "hfi_helper.h"
+
+#define VIDC_SESSION_TYPE_VPE 0
+#define VIDC_SESSION_TYPE_ENC 1
+#define VIDC_SESSION_TYPE_DEC 2
+
+#define VIDC_RESOURCE_NONE 0
+#define VIDC_RESOURCE_OCMEM 1
+#define VIDC_RESOURCE_VMEM 2
+
+struct hfi_buffer_desc {
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 num_buffers;
+ u32 device_addr;
+ u32 extradata_addr;
+ u32 extradata_size;
+ u32 response_required;
+};
+
+struct hfi_frame_data {
+ u32 buffer_type;
+ u32 device_addr;
+ u32 extradata_addr;
+ u64 timestamp;
+ u32 flags;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 mark_target;
+ u32 mark_data;
+ u32 clnt_data;
+ u32 extradata_size;
+};
+
+union hfi_get_property {
+ struct hfi_profile_level profile_level;
+ struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX];
+};
+
+/* HFI events */
+#define EVT_SYS_EVENT_CHANGE 1
+#define EVT_SYS_WATCHDOG_TIMEOUT 2
+#define EVT_SYS_ERROR 3
+#define EVT_SESSION_ERROR 4
+
+/* HFI event callback structure */
+struct hfi_event_data {
+ u32 error;
+ u32 height;
+ u32 width;
+ u32 event_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 tag;
+ u32 profile;
+ u32 level;
+ /* the following properties start appearing from v4 onwards */
+ u32 bit_depth;
+ u32 pic_struct;
+ u32 colour_space;
+ u32 entropy_mode;
+ u32 buf_count;
+ struct {
+ u32 left, top;
+ u32 width, height;
+ } input_crop;
+};
+
+/* define core states */
+#define CORE_UNINIT 0
+#define CORE_INIT 1
+
+/* define instance states */
+#define INST_UNINIT 2
+#define INST_INIT 3
+#define INST_LOAD_RESOURCES 4
+#define INST_START 5
+#define INST_STOP 6
+#define INST_RELEASE_RESOURCES 7
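+
+/*
+ * Typical instance state progression, as enforced by the state checks in
+ * hfi.c: INST_UNINIT -> INST_INIT -> INST_LOAD_RESOURCES -> INST_START ->
+ * INST_STOP -> INST_RELEASE_RESOURCES -> INST_UNINIT
+ */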
+
+struct venus_core;
+struct venus_inst;
+
+struct hfi_core_ops {
+ void (*event_notify)(struct venus_core *core, u32 event);
+};
+
+struct hfi_inst_ops {
+ void (*buf_done)(struct venus_inst *inst, unsigned int buf_type,
+ u32 tag, u32 bytesused, u32 data_offset, u32 flags,
+ u32 hfi_flags, u64 timestamp_us);
+ void (*event_notify)(struct venus_inst *inst, u32 event,
+ struct hfi_event_data *data);
+};
+
+struct hfi_ops {
+ int (*core_init)(struct venus_core *core);
+ int (*core_deinit)(struct venus_core *core);
+ int (*core_ping)(struct venus_core *core, u32 cookie);
+ int (*core_trigger_ssr)(struct venus_core *core, u32 trigger_type);
+
+ int (*session_init)(struct venus_inst *inst, u32 session_type,
+ u32 codec);
+ int (*session_end)(struct venus_inst *inst);
+ int (*session_abort)(struct venus_inst *inst);
+ int (*session_flush)(struct venus_inst *inst, u32 flush_mode);
+ int (*session_start)(struct venus_inst *inst);
+ int (*session_stop)(struct venus_inst *inst);
+ int (*session_continue)(struct venus_inst *inst);
+ int (*session_etb)(struct venus_inst *inst, struct hfi_frame_data *fd);
+ int (*session_ftb)(struct venus_inst *inst, struct hfi_frame_data *fd);
+ int (*session_set_buffers)(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+ int (*session_unset_buffers)(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+ int (*session_load_res)(struct venus_inst *inst);
+ int (*session_release_res)(struct venus_inst *inst);
+ int (*session_parse_seq_hdr)(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len);
+ int (*session_get_seq_hdr)(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len);
+ int (*session_set_property)(struct venus_inst *inst, u32 ptype,
+ void *pdata);
+ int (*session_get_property)(struct venus_inst *inst, u32 ptype);
+
+ int (*resume)(struct venus_core *core);
+ int (*suspend)(struct venus_core *core);
+
+ /* interrupt operations */
+ irqreturn_t (*isr)(struct venus_core *core);
+ irqreturn_t (*isr_thread)(struct venus_core *core);
+};
+
+int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops);
+void hfi_destroy(struct venus_core *core);
+
+int hfi_core_init(struct venus_core *core);
+int hfi_core_deinit(struct venus_core *core, bool blocking);
+int hfi_core_suspend(struct venus_core *core);
+int hfi_core_resume(struct venus_core *core, bool force);
+int hfi_core_trigger_ssr(struct venus_core *core, u32 type);
+int hfi_core_ping(struct venus_core *core);
+int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops);
+void hfi_session_destroy(struct venus_inst *inst);
+int hfi_session_init(struct venus_inst *inst, u32 pixfmt);
+int hfi_session_deinit(struct venus_inst *inst);
+int hfi_session_start(struct venus_inst *inst);
+int hfi_session_stop(struct venus_inst *inst);
+int hfi_session_continue(struct venus_inst *inst);
+int hfi_session_abort(struct venus_inst *inst);
+int hfi_session_load_res(struct venus_inst *inst);
+int hfi_session_unload_res(struct venus_inst *inst);
+int hfi_session_flush(struct venus_inst *inst);
+int hfi_session_set_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+int hfi_session_unset_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
+ union hfi_get_property *hprop);
+int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata);
+int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *f);
+irqreturn_t hfi_isr_thread(int irq, void *dev_id);
+irqreturn_t hfi_isr(int irq, void *dev);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
new file mode 100644
index 000000000..e8389d8d8
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -0,0 +1,1250 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/hash.h>
+
+#include "hfi_cmds.h"
+
+static enum hfi_version hfi_ver;
+
+void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_INIT;
+ pkt->arch_type = arch_type;
+}
+
+void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_PC_PREP;
+}
+
+void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable)
+{
+ struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
+
+ pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
+ hfi->enable = enable;
+}
+
+void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
+ u32 config)
+{
+ struct hfi_debug_config *hfi;
+
+ pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+ hfi = (struct hfi_debug_config *)&pkt->data[1];
+ hfi->config = config;
+ hfi->mode = mode;
+}
+
+void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode)
+{
+ pkt->hdr.size = sizeof(*pkt) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
+ pkt->data[1] = mode;
+}
+
+int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
+ u32 addr, void *cookie)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_RESOURCE;
+ pkt->resource_handle = hash32_ptr(cookie);
+
+ switch (id) {
+ case VIDC_RESOURCE_OCMEM:
+ case VIDC_RESOURCE_VMEM: {
+ struct hfi_resource_ocmem *res =
+ (struct hfi_resource_ocmem *)&pkt->resource_data[0];
+
+ res->size = size;
+ res->mem = addr;
+ pkt->resource_type = HFI_RESOURCE_OCMEM;
+ pkt->hdr.size += sizeof(*res) - sizeof(u32);
+ break;
+ }
+ case VIDC_RESOURCE_NONE:
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
+ u32 size, void *cookie)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_RELEASE_RESOURCE;
+ pkt->resource_handle = hash32_ptr(cookie);
+
+ switch (id) {
+ case VIDC_RESOURCE_OCMEM:
+ case VIDC_RESOURCE_VMEM:
+ pkt->resource_type = HFI_RESOURCE_OCMEM;
+ break;
+ case VIDC_RESOURCE_NONE:
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_PING;
+ pkt->client_data = cookie;
+}
+
+void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable)
+{
+ struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
+
+ pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+ hfi->enable = enable;
+}
+
+int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type)
+{
+ switch (trigger_type) {
+ case HFI_TEST_SSR_SW_ERR_FATAL:
+ case HFI_TEST_SSR_SW_DIV_BY_ZERO:
+ case HFI_TEST_SSR_HW_WDOG_IRQ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_TEST_SSR;
+ pkt->trigger_type = trigger_type;
+
+ return 0;
+}
+
+void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+}
+
+int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
+ u32 session_type, u32 codec)
+{
+ if (!pkt || !cookie || !codec)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->session_domain = session_type;
+ pkt->session_codec = codec;
+
+ return 0;
+}
+
+void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie)
+{
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = pkt_type;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+}
+
+int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd)
+{
+ unsigned int i;
+
+ if (!cookie || !pkt || !bd)
+ return -EINVAL;
+
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->buffer_size = bd->buffer_size;
+ pkt->min_buffer_size = bd->buffer_size;
+ pkt->num_buffers = bd->num_buffers;
+
+ if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
+ bd->buffer_type == HFI_BUFFER_OUTPUT2) {
+ struct hfi_buffer_info *bi;
+
+ pkt->extradata_size = bd->extradata_size;
+ pkt->shdr.hdr.size = sizeof(*pkt) - sizeof(u32) +
+ (bd->num_buffers * sizeof(*bi));
+ bi = (struct hfi_buffer_info *)pkt->buffer_info;
+ for (i = 0; i < pkt->num_buffers; i++) {
+ bi->buffer_addr = bd->device_addr;
+ bi->extradata_addr = bd->extradata_addr;
+ }
+ } else {
+ pkt->extradata_size = 0;
+ pkt->shdr.hdr.size = sizeof(*pkt) +
+ ((bd->num_buffers - 1) * sizeof(u32));
+ for (i = 0; i < pkt->num_buffers; i++)
+ pkt->buffer_info[i] = bd->device_addr;
+ }
+
+ pkt->buffer_type = bd->buffer_type;
+
+ return 0;
+}
+
+int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd)
+{
+ unsigned int i;
+
+ if (!cookie || !pkt || !bd)
+ return -EINVAL;
+
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->buffer_size = bd->buffer_size;
+ pkt->num_buffers = bd->num_buffers;
+
+ if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
+ bd->buffer_type == HFI_BUFFER_OUTPUT2) {
+ struct hfi_buffer_info *bi;
+
+ bi = (struct hfi_buffer_info *)pkt->buffer_info;
+ for (i = 0; i < pkt->num_buffers; i++) {
+ bi->buffer_addr = bd->device_addr;
+ bi->extradata_addr = bd->extradata_addr;
+ }
+ pkt->shdr.hdr.size =
+ sizeof(struct hfi_session_set_buffers_pkt) -
+ sizeof(u32) + (bd->num_buffers * sizeof(*bi));
+ } else {
+ for (i = 0; i < pkt->num_buffers; i++)
+ pkt->buffer_info[i] = bd->device_addr;
+
+ pkt->extradata_size = 0;
+ pkt->shdr.hdr.size =
+ sizeof(struct hfi_session_set_buffers_pkt) +
+ ((bd->num_buffers - 1) * sizeof(u32));
+ }
+
+ pkt->response_req = bd->response_required;
+ pkt->buffer_type = bd->buffer_type;
+
+ return 0;
+}
+
+int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
+ void *cookie, struct hfi_frame_data *in_frame)
+{
+ if (!cookie || !in_frame->device_addr)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
+ pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
+ pkt->flags = in_frame->flags;
+ pkt->mark_target = in_frame->mark_target;
+ pkt->mark_data = in_frame->mark_data;
+ pkt->offset = in_frame->offset;
+ pkt->alloc_len = in_frame->alloc_len;
+ pkt->filled_len = in_frame->filled_len;
+ pkt->input_tag = in_frame->clnt_data;
+ pkt->packet_buffer = in_frame->device_addr;
+
+ return 0;
+}
+
+int pkt_session_etb_encoder(
+ struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
+ void *cookie, struct hfi_frame_data *in_frame)
+{
+ if (!cookie || !in_frame->device_addr)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->view_id = 0;
+ pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
+ pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
+ pkt->flags = in_frame->flags;
+ pkt->mark_target = in_frame->mark_target;
+ pkt->mark_data = in_frame->mark_data;
+ pkt->offset = in_frame->offset;
+ pkt->alloc_len = in_frame->alloc_len;
+ pkt->filled_len = in_frame->filled_len;
+ pkt->input_tag = in_frame->clnt_data;
+ pkt->packet_buffer = in_frame->device_addr;
+ pkt->extradata_buffer = in_frame->extradata_addr;
+
+ return 0;
+}
+
+int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie,
+ struct hfi_frame_data *out_frame)
+{
+ if (!cookie || !out_frame || !out_frame->device_addr)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+
+ if (out_frame->buffer_type == HFI_BUFFER_OUTPUT)
+ pkt->stream_id = 0;
+ else if (out_frame->buffer_type == HFI_BUFFER_OUTPUT2)
+ pkt->stream_id = 1;
+
+ pkt->output_tag = out_frame->clnt_data;
+ pkt->packet_buffer = out_frame->device_addr;
+ pkt->extradata_buffer = out_frame->extradata_addr;
+ pkt->alloc_len = out_frame->alloc_len;
+ pkt->filled_len = out_frame->filled_len;
+ pkt->offset = out_frame->offset;
+ pkt->data[0] = out_frame->extradata_size;
+
+ return 0;
+}
+
+int pkt_session_parse_seq_header(
+ struct hfi_session_parse_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len)
+{
+ if (!cookie || !seq_hdr || !seq_hdr_len)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->header_len = seq_hdr_len;
+ pkt->packet_buffer = seq_hdr;
+
+ return 0;
+}
+
+int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len)
+{
+ if (!cookie || !seq_hdr || !seq_hdr_len)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->buffer_len = seq_hdr_len;
+ pkt->packet_buffer = seq_hdr;
+
+ return 0;
+}
+
+int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, u32 type)
+{
+ switch (type) {
+ case HFI_FLUSH_INPUT:
+ case HFI_FLUSH_OUTPUT:
+ case HFI_FLUSH_OUTPUT2:
+ case HFI_FLUSH_ALL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->flush_type = type;
+
+ return 0;
+}
+
+static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype)
+{
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+ return 0;
+}
+
+static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+ int ret = 0;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+ switch (ptype) {
+ case HFI_PROPERTY_CONFIG_FRAME_RATE: {
+ struct hfi_framerate *in = pdata, *frate = prop_data;
+
+ frate->buffer_type = in->buffer_type;
+ frate->framerate = in->framerate;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*frate);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT: {
+ struct hfi_uncompressed_format_select *in = pdata;
+ struct hfi_uncompressed_format_select *hfi = prop_data;
+
+ hfi->buffer_type = in->buffer_type;
+ hfi->format = in->format;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_FRAME_SIZE: {
+ struct hfi_framesize *in = pdata, *fsize = prop_data;
+
+ fsize->buffer_type = in->buffer_type;
+ fsize->height = in->height;
+ fsize->width = in->width;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fsize);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_REALTIME: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
+ struct hfi_buffer_count_actual *in = pdata, *count = prop_data;
+
+ count->count_actual = in->count_actual;
+ count->type = in->type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL: {
+ struct hfi_buffer_size_actual *in = pdata, *sz = prop_data;
+
+ sz->size = in->size;
+ sz->type = in->type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*sz);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL: {
+ struct hfi_buffer_display_hold_count_actual *in = pdata;
+ struct hfi_buffer_display_hold_count_actual *count = prop_data;
+
+ count->hold_count = in->hold_count;
+ count->type = in->type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: {
+ struct hfi_nal_stream_format_select *in = pdata;
+ struct hfi_nal_stream_format_select *fmt = prop_data;
+
+ fmt->format = in->format;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fmt);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_OUTPUT_ORDER_DECODE:
+ case HFI_OUTPUT_ORDER_DISPLAY:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE: {
+ struct hfi_enable_picture *in = pdata, *en = prop_data;
+
+ en->picture_type = in->picture_type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: {
+ struct hfi_enable *in = pdata;
+ struct hfi_enable *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
+ struct hfi_multi_stream *in = pdata, *multi = prop_data;
+
+ multi->buffer_type = in->buffer_type;
+ multi->enable = in->enable;
+ multi->width = in->width;
+ multi->height = in->height;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT: {
+ struct hfi_display_picture_buffer_count *in = pdata;
+ struct hfi_display_picture_buffer_count *count = prop_data;
+
+ count->count = in->count;
+ count->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_DIVX_FORMAT: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_DIVX_FORMAT_4:
+ case HFI_DIVX_FORMAT_5:
+ case HFI_DIVX_FORMAT_6:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME:
+ pkt->shdr.hdr.size += sizeof(u32);
+ break;
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER:
+ break;
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION:
+ break;
+ case HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE: {
+ struct hfi_bitrate *in = pdata, *brate = prop_data;
+
+ brate->bitrate = in->bitrate;
+ brate->layer_id = in->layer_id;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*brate);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE: {
+ struct hfi_bitrate *in = pdata, *hfi = prop_data;
+
+ hfi->bitrate = in->bitrate;
+ hfi->layer_id = in->layer_id;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: {
+ struct hfi_profile_level *in = pdata, *pl = prop_data;
+
+ pl->level = in->level;
+ pl->profile = in->profile;
+ if (pl->profile <= 0)
+ /* Profile not supported, falling back to high */
+ pl->profile = HFI_H264_PROFILE_HIGH;
+
+ if (!pl->level)
+ /* Level not supported, falling back to 1 */
+ pl->level = 1;
+
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*pl);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL: {
+ struct hfi_h264_entropy_control *in = pdata, *hfi = prop_data;
+
+ hfi->entropy_mode = in->entropy_mode;
+ if (hfi->entropy_mode == HFI_H264_ENTROPY_CABAC)
+ hfi->cabac_model = in->cabac_model;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_RATE_CONTROL: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_RATE_CONTROL_OFF:
+ case HFI_RATE_CONTROL_CBR_CFR:
+ case HFI_RATE_CONTROL_CBR_VFR:
+ case HFI_RATE_CONTROL_VBR_CFR:
+ case HFI_RATE_CONTROL_VBR_VFR:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION: {
+ struct hfi_mpeg4_time_resolution *in = pdata, *res = prop_data;
+
+ res->time_increment_resolution = in->time_increment_resolution;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*res);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION: {
+ struct hfi_mpeg4_header_extension *in = pdata, *ext = prop_data;
+
+ ext->header_extension = in->header_extension;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ext);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL: {
+ struct hfi_h264_db_control *in = pdata, *db = prop_data;
+
+ switch (in->mode) {
+ case HFI_H264_DB_MODE_DISABLE:
+ case HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
+ case HFI_H264_DB_MODE_ALL_BOUNDARY:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ db->mode = in->mode;
+ db->slice_alpha_offset = in->slice_alpha_offset;
+ db->slice_beta_offset = in->slice_beta_offset;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*db);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP: {
+ struct hfi_quantization *in = pdata, *quant = prop_data;
+
+ quant->qp_i = in->qp_i;
+ quant->qp_p = in->qp_p;
+ quant->qp_b = in->qp_b;
+ quant->layer_id = in->layer_id;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE: {
+ struct hfi_quantization_range *in = pdata, *range = prop_data;
+ u32 min_qp, max_qp;
+
+ min_qp = in->min_qp;
+ max_qp = in->max_qp;
+
+ /* We'll be packing in the qp, so make sure we
+ * won't be losing data when masking
+ */
+ if (min_qp > 0xff || max_qp > 0xff) {
+ ret = -ERANGE;
+ break;
+ }
+
+ /* When creating the packet, pack the qp value as
+ * 0xiippbb, where ii = qp range for I-frames,
+ * pp = qp range for P-frames, etc.
+ */
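+ /* For example, hypothetical values min_qp = 10 (0x0a) and max_qp = 51
+ * (0x33) become 0x000a0a0a and 0x00333333 respectively, i.e. the same
+ * 8-bit value replicated into the I-, P- and B-frame byte lanes.
+ */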
+ range->min_qp = min_qp | min_qp << 8 | min_qp << 16;
+ range->max_qp = max_qp | max_qp << 8 | max_qp << 16;
+ range->layer_id = in->layer_id;
+
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG: {
+ struct hfi_vc1e_perf_cfg_type *in = pdata, *perf = prop_data;
+
+ memcpy(perf->search_range_x_subsampled,
+ in->search_range_x_subsampled,
+ sizeof(perf->search_range_x_subsampled));
+ memcpy(perf->search_range_y_subsampled,
+ in->search_range_y_subsampled,
+ sizeof(perf->search_range_y_subsampled));
+
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*perf);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES: {
+ struct hfi_max_num_b_frames *bframes = prop_data;
+ u32 *in = pdata;
+
+ bframes->max_num_b_frames = *in;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*bframes);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD: {
+ struct hfi_intra_period *in = pdata, *intra = prop_data;
+
+ intra->pframes = in->pframes;
+ intra->bframes = in->bframes;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD: {
+ struct hfi_idr_period *in = pdata, *idr = prop_data;
+
+ idr->idr_period = in->idr_period;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*idr);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
+ struct hfi_conceal_color *color = prop_data;
+ u32 *in = pdata;
+
+ color->conceal_color = *in;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VPE_OPERATIONS: {
+ struct hfi_operations_type *in = pdata, *ops = prop_data;
+
+ switch (in->rotation) {
+ case HFI_ROTATE_NONE:
+ case HFI_ROTATE_90:
+ case HFI_ROTATE_180:
+ case HFI_ROTATE_270:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ switch (in->flip) {
+ case HFI_FLIP_NONE:
+ case HFI_FLIP_HORIZONTAL:
+ case HFI_FLIP_VERTICAL:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ ops->rotation = in->rotation;
+ ops->flip = in->flip;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ops);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
+ struct hfi_intra_refresh *in = pdata, *intra = prop_data;
+
+ switch (in->mode) {
+ case HFI_INTRA_REFRESH_NONE:
+ case HFI_INTRA_REFRESH_ADAPTIVE:
+ case HFI_INTRA_REFRESH_CYCLIC:
+ case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+ case HFI_INTRA_REFRESH_RANDOM:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ intra->mode = in->mode;
+ intra->air_mbs = in->air_mbs;
+ intra->air_ref = in->air_ref;
+ intra->cir_mbs = in->cir_mbs;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL: {
+ struct hfi_multi_slice_control *in = pdata, *multi = prop_data;
+
+ switch (in->multi_slice) {
+ case HFI_MULTI_SLICE_OFF:
+ case HFI_MULTI_SLICE_GOB:
+ case HFI_MULTI_SLICE_BY_MB_COUNT:
+ case HFI_MULTI_SLICE_BY_BYTE_COUNT:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ multi->multi_slice = in->multi_slice;
+ multi->slice_size = in->slice_size;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO: {
+ struct hfi_h264_vui_timing_info *in = pdata, *vui = prop_data;
+
+ vui->enable = in->enable;
+ vui->fixed_framerate = in->fixed_framerate;
+ vui->time_scale = in->time_scale;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*vui);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VPE_DEINTERLACE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE: {
+ struct hfi_buffer_alloc_mode *in = pdata, *mode = prop_data;
+
+ mode->type = in->type;
+ mode->mode = in->mode;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mode);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD: {
+ struct hfi_scs_threshold *thres = prop_data;
+ u32 *in = pdata;
+
+ thres->threshold_value = *in;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*thres);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT: {
+ struct hfi_mvc_buffer_layout_descp_type *in = pdata;
+ struct hfi_mvc_buffer_layout_descp_type *mvc = prop_data;
+
+ switch (in->layout_type) {
+ case HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM:
+ case HFI_MVC_BUFFER_LAYOUT_SEQ:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mvc->layout_type = in->layout_type;
+ mvc->bright_view_first = in->bright_view_first;
+ mvc->ngap = in->ngap;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mvc);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_LTRMODE: {
+ struct hfi_ltr_mode *in = pdata, *ltr = prop_data;
+
+ switch (in->ltr_mode) {
+ case HFI_LTR_MODE_DISABLE:
+ case HFI_LTR_MODE_MANUAL:
+ case HFI_LTR_MODE_PERIODIC:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ ltr->ltr_mode = in->ltr_mode;
+ ltr->ltr_count = in->ltr_count;
+ ltr->trust_mode = in->trust_mode;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_USELTRFRAME: {
+ struct hfi_ltr_use *in = pdata, *ltr_use = prop_data;
+
+ ltr_use->frames = in->frames;
+ ltr_use->ref_ltr = in->ref_ltr;
+ ltr_use->use_constrnt = in->use_constrnt;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_use);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME: {
+ struct hfi_ltr_mark *in = pdata, *ltr_mark = prop_data;
+
+ ltr_mark->mark_frame = in->mark_frame;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_mark);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER: {
+ u32 *in = pdata;
+
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER: {
+ u32 *in = pdata;
+
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INITIAL_QP: {
+ struct hfi_initial_quantization *in = pdata, *quant = prop_data;
+
+ quant->init_qp_enable = in->init_qp_enable;
+ quant->qp_i = in->qp_i;
+ quant->qp_p = in->qp_p;
+ quant->qp_b = in->qp_b;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION: {
+ struct hfi_vpe_color_space_conversion *in = pdata;
+ struct hfi_vpe_color_space_conversion *csc = prop_data;
+
+ memcpy(csc->csc_matrix, in->csc_matrix,
+ sizeof(csc->csc_matrix));
+ memcpy(csc->csc_bias, in->csc_bias, sizeof(csc->csc_bias));
+ memcpy(csc->csc_limit, in->csc_limit, sizeof(csc->csc_limit));
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*csc);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_PERF_MODE: {
+ u32 *in = pdata;
+
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER: {
+ u32 *in = pdata;
+
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE: {
+ struct hfi_hybrid_hierp *in = pdata, *hierp = prop_data;
+
+ hierp->layers = in->layers;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
+ break;
+ }
+
+ /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ case HFI_PROPERTY_CONFIG_PRIORITY:
+ case HFI_PROPERTY_CONFIG_BATCH_INFO:
+ case HFI_PROPERTY_SYS_IDLE_INDICATOR:
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+ case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED:
+ case HFI_PROPERTY_PARAM_CHROMA_SITE:
+ case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED:
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+ case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+ case HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT:
+ case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+ case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT:
+ case HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION:
+ case HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB:
+ case HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING:
+ case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO:
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype)
+{
+ int ret = 0;
+
+ if (!pkt || !cookie)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(struct hfi_session_get_property_pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+
+ switch (ptype) {
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
+ break;
+ default:
+ ret = pkt_session_get_property_1x(pkt, cookie, ptype);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+ int ret = 0;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+ /*
+ * Any session set property which is different in 3XX packetization
+ * should be added as a new case below. All unchanged session set
+ * properties will be handled in the default case.
+ */
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
+ struct hfi_multi_stream *in = pdata;
+ struct hfi_multi_stream_3x *multi = prop_data;
+
+ multi->buffer_type = in->buffer_type;
+ multi->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
+ struct hfi_intra_refresh *in = pdata;
+ struct hfi_intra_refresh_3x *intra = prop_data;
+
+ switch (in->mode) {
+ case HFI_INTRA_REFRESH_NONE:
+ case HFI_INTRA_REFRESH_ADAPTIVE:
+ case HFI_INTRA_REFRESH_CYCLIC:
+ case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+ case HFI_INTRA_REFRESH_RANDOM:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ intra->mode = in->mode;
+ intra->mbs = in->cir_mbs;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
+ /* for 3xx fw version session_continue is used */
+ break;
+ default:
+ ret = pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+	/*
+	 * Any session set property which is different in 4XX packetization
+	 * should be added as a new case below. All unchanged session set
+	 * properties will be handled in the default case.
+	 */
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
+ struct hfi_buffer_count_actual *in = pdata;
+ struct hfi_buffer_count_actual_4xx *count = prop_data;
+
+ count->count_actual = in->count_actual;
+ count->type = in->type;
+ count->count_min_host = in->count_actual;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_WORK_MODE: {
+ struct hfi_video_work_mode *in = pdata, *wm = prop_data;
+
+ wm->video_work_mode = in->video_work_mode;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE: {
+ struct hfi_videocores_usage_type *in = pdata, *cu = prop_data;
+
+ cu->video_core_enable_mask = in->video_core_enable_mask;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
+ /* not implemented on Venus 4xx */
+ break;
+ default:
+ return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+ }
+
+ return 0;
+}
+
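+/*
+ * Dispatch helpers: callers are expected to latch the firmware interface
+ * version once via pkt_set_version(), and the wrappers below then route
+ * every get/set property request to the matching 1xx/3xx/4xx packetizer.
+ */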
+int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype)
+{
+ if (hfi_ver == HFI_VERSION_1XX)
+ return pkt_session_get_property_1x(pkt, cookie, ptype);
+
+ return pkt_session_get_property_3xx(pkt, cookie, ptype);
+}
+
+int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ if (hfi_ver == HFI_VERSION_1XX)
+ return pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
+
+ if (hfi_ver == HFI_VERSION_3XX)
+ return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+
+ return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
+}
+
+void pkt_set_version(enum hfi_version version)
+{
+ hfi_ver = version;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
new file mode 100644
index 000000000..f7617cf59
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_CMDS_H__
+#define __VENUS_HFI_CMDS_H__
+
+#include "hfi.h"
+
+/* commands */
+#define HFI_CMD_SYS_INIT 0x10001
+#define HFI_CMD_SYS_PC_PREP 0x10002
+#define HFI_CMD_SYS_SET_RESOURCE 0x10003
+#define HFI_CMD_SYS_RELEASE_RESOURCE 0x10004
+#define HFI_CMD_SYS_SET_PROPERTY 0x10005
+#define HFI_CMD_SYS_GET_PROPERTY 0x10006
+#define HFI_CMD_SYS_SESSION_INIT 0x10007
+#define HFI_CMD_SYS_SESSION_END 0x10008
+#define HFI_CMD_SYS_SET_BUFFERS 0x10009
+#define HFI_CMD_SYS_TEST_SSR 0x10101
+
+#define HFI_CMD_SESSION_SET_PROPERTY 0x11001
+#define HFI_CMD_SESSION_SET_BUFFERS 0x11002
+#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER 0x11003
+
+#define HFI_CMD_SYS_SESSION_ABORT 0x210001
+#define HFI_CMD_SYS_PING 0x210002
+
+#define HFI_CMD_SESSION_LOAD_RESOURCES 0x211001
+#define HFI_CMD_SESSION_START 0x211002
+#define HFI_CMD_SESSION_STOP 0x211003
+#define HFI_CMD_SESSION_EMPTY_BUFFER 0x211004
+#define HFI_CMD_SESSION_FILL_BUFFER 0x211005
+#define HFI_CMD_SESSION_SUSPEND 0x211006
+#define HFI_CMD_SESSION_RESUME 0x211007
+#define HFI_CMD_SESSION_FLUSH 0x211008
+#define HFI_CMD_SESSION_GET_PROPERTY 0x211009
+#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER 0x21100a
+#define HFI_CMD_SESSION_RELEASE_BUFFERS 0x21100b
+#define HFI_CMD_SESSION_RELEASE_RESOURCES 0x21100c
+#define HFI_CMD_SESSION_CONTINUE 0x21100d
+#define HFI_CMD_SESSION_SYNC 0x21100e
+
+/* command packets */
+struct hfi_sys_init_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 arch_type;
+};
+
+struct hfi_sys_pc_prep_pkt {
+ struct hfi_pkt_hdr hdr;
+};
+
+struct hfi_sys_set_resource_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 resource_handle;
+ u32 resource_type;
+ u32 resource_data[1];
+};
+
+struct hfi_sys_release_resource_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 resource_type;
+ u32 resource_handle;
+};
+
+struct hfi_sys_set_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_sys_get_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_sys_set_buffers_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 num_buffers;
+ u32 buffer_addr[1];
+};
+
+struct hfi_sys_ping_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 client_data;
+};
+
+struct hfi_session_init_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 session_domain;
+ u32 session_codec;
+};
+
+struct hfi_session_end_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_abort_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_set_property_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[0];
+};
+
+struct hfi_session_set_buffers_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 min_buffer_size;
+ u32 num_buffers;
+ u32 buffer_info[1];
+};
+
+struct hfi_session_get_sequence_header_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_len;
+ u32 packet_buffer;
+};
+
+struct hfi_session_load_resources_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_start_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_stop_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_empty_buffer_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[1];
+};
+
+struct hfi_session_empty_buffer_uncompressed_plane0_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 view_id;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[1];
+};
+
+struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer2;
+ u32 data[1];
+};
+
+struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer3;
+ u32 data[1];
+};
+
+struct hfi_session_fill_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 output_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[1];
+};
+
+struct hfi_session_flush_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 flush_type;
+};
+
+struct hfi_session_suspend_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_resume_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_get_property_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_session_release_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 response_req;
+ u32 num_buffers;
+ u32 buffer_info[1];
+};
+
+struct hfi_session_release_resources_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_parse_sequence_header_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 header_len;
+ u32 packet_buffer;
+};
+
+struct hfi_sfr {
+ u32 buf_size;
+ u8 data[1];
+};
+
+struct hfi_sys_test_ssr_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 trigger_type;
+};
+
+void pkt_set_version(enum hfi_version version);
+
+void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type);
+void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt);
+void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable);
+void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable);
+int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
+ u32 addr, void *cookie);
+int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
+ u32 size, void *cookie);
+void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
+ u32 config);
+void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode);
+void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie);
+void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt);
+int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type);
+int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
+ u32 session_type, u32 codec);
+void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie);
+int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd);
+int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd);
+int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
+ void *cookie, struct hfi_frame_data *input_frame);
+int pkt_session_etb_encoder(
+ struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
+ void *cookie, struct hfi_frame_data *input_frame);
+int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt,
+ void *cookie, struct hfi_frame_data *output_frame);
+int pkt_session_parse_seq_header(
+ struct hfi_session_parse_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len);
+int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len);
+int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie,
+ u32 flush_mode);
+int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype);
+int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
new file mode 100644
index 000000000..15804ad7e
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -0,0 +1,1120 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_HELPER_H__
+#define __VENUS_HFI_HELPER_H__
+
+#define HFI_DOMAIN_BASE_COMMON 0
+
+#define HFI_DOMAIN_BASE_VDEC 0x1000000
+#define HFI_DOMAIN_BASE_VENC 0x2000000
+#define HFI_DOMAIN_BASE_VPE 0x3000000
+
+#define HFI_VIDEO_ARCH_OX 0x1
+
+#define HFI_ARCH_COMMON_OFFSET 0
+#define HFI_ARCH_OX_OFFSET 0x200000
+
+#define HFI_OX_BASE 0x1000000
+
+#define HFI_CMD_START_OFFSET 0x10000
+#define HFI_MSG_START_OFFSET 0x20000
+
+#define HFI_ERR_NONE 0x0
+#define HFI_ERR_SYS_FATAL 0x1
+#define HFI_ERR_SYS_INVALID_PARAMETER 0x2
+#define HFI_ERR_SYS_VERSION_MISMATCH 0x3
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES 0x4
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED 0x5
+#define HFI_ERR_SYS_UNSUPPORTED_CODEC 0x6
+#define HFI_ERR_SYS_SESSION_IN_USE 0x7
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE 0x8
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN 0x9
+
+#define HFI_ERR_SESSION_FATAL 0x1001
+#define HFI_ERR_SESSION_INVALID_PARAMETER 0x1002
+#define HFI_ERR_SESSION_BAD_POINTER 0x1003
+#define HFI_ERR_SESSION_INVALID_SESSION_ID 0x1004
+#define HFI_ERR_SESSION_INVALID_STREAM_ID 0x1005
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION 0x1006
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY 0x1007
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING 0x1008
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES 0x1009
+#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED 0x100a
+#define HFI_ERR_SESSION_STREAM_CORRUPT 0x100b
+#define HFI_ERR_SESSION_ENC_OVERFLOW 0x100c
+#define HFI_ERR_SESSION_UNSUPPORTED_STREAM 0x100d
+#define HFI_ERR_SESSION_CMDSIZE 0x100e
+#define HFI_ERR_SESSION_UNSUPPORT_CMD 0x100f
+#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE 0x1010
+#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL 0x1011
+#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR 0x1012
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED 0x1013
+
+#define HFI_EVENT_SYS_ERROR 0x1
+#define HFI_EVENT_SESSION_ERROR 0x2
+
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES 0x1000001
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES 0x1000002
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED 0x1000003
+#define HFI_EVENT_SESSION_PROPERTY_CHANGED 0x1000004
+#define HFI_EVENT_SESSION_LTRUSE_FAILED 0x1000005
+#define HFI_EVENT_RELEASE_BUFFER_REFERENCE 0x1000006
+
+#define HFI_BUFFERFLAG_EOS 0x00000001
+#define HFI_BUFFERFLAG_STARTTIME 0x00000002
+#define HFI_BUFFERFLAG_DECODEONLY 0x00000004
+#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010
+#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020
+#define HFI_BUFFERFLAG_EXTRADATA 0x00000040
+#define HFI_BUFFERFLAG_CODECCONFIG 0x00000080
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+#define HFI_BUFFERFLAG_READONLY 0x00000200
+#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
+#define HFI_BUFFERFLAG_EOSEQ 0x00200000
+#define HFI_BUFFERFLAG_MBAFF 0x08000000
+#define HFI_BUFFERFLAG_VPE_YUV_601_709_CSC_CLAMP 0x10000000
+#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000
+#define HFI_BUFFERFLAG_TEI 0x40000000
+#define HFI_BUFFERFLAG_DISCONTINUITY 0x80000000
+
+#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING 0x1001001
+#define HFI_ERR_SESSION_SAME_STATE_OPERATION 0x1001002
+#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED 0x1001003
+#define HFI_ERR_SESSION_START_CODE_NOT_FOUND 0x1001004
+
+#define HFI_FLUSH_INPUT 0x1000001
+#define HFI_FLUSH_OUTPUT 0x1000002
+#define HFI_FLUSH_OUTPUT2 0x1000003
+#define HFI_FLUSH_ALL 0x1000004
+
+#define HFI_EXTRADATA_NONE 0x00000000
+#define HFI_EXTRADATA_MB_QUANTIZATION 0x00000001
+#define HFI_EXTRADATA_INTERLACE_VIDEO 0x00000002
+#define HFI_EXTRADATA_VC1_FRAMEDISP 0x00000003
+#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004
+#define HFI_EXTRADATA_TIMESTAMP 0x00000005
+#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006
+#define HFI_EXTRADATA_FRAME_RATE 0x00000007
+#define HFI_EXTRADATA_PANSCAN_WINDOW 0x00000008
+#define HFI_EXTRADATA_RECOVERY_POINT_SEI 0x00000009
+#define HFI_EXTRADATA_MPEG2_SEQDISP 0x0000000d
+#define HFI_EXTRADATA_STREAM_USERDATA 0x0000000e
+#define HFI_EXTRADATA_FRAME_QP 0x0000000f
+#define HFI_EXTRADATA_FRAME_BITS_INFO 0x00000010
+#define HFI_EXTRADATA_MULTISLICE_INFO 0x7f100000
+#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7f100001
+#define HFI_EXTRADATA_INDEX 0x7f100002
+#define HFI_EXTRADATA_METADATA_LTR 0x7f100004
+#define HFI_EXTRADATA_METADATA_FILLER 0x7fe00002
+
+#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e
+#define HFI_INDEX_EXTRADATA_OUTPUT_CROP 0x0700000f
+#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010
+#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7f100003
+
+#define HFI_INTERLACE_FRAME_PROGRESSIVE 0x01
+#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST 0x02
+#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04
+#define HFI_INTERLACE_FRAME_TOPFIELDFIRST 0x08
+#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10
+
+/*
+ * HFI_PROPERTY_PARAM_OX_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000
+ */
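+/*
+ * Worked example: HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL below is
+ * HFI_DOMAIN_BASE_COMMON (0x0) + HFI_ARCH_OX_OFFSET (0x200000) + 0x1001 =
+ * 0x201001; the other property blocks in this header compose their ids from
+ * the domain base, the arch offset and a per-block offset in the same way.
+ */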
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL 0x201001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO 0x201002
+#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED 0x201003
+#define HFI_PROPERTY_PARAM_CHROMA_SITE 0x201004
+#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG 0x201005
+#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA 0x201006
+#define HFI_PROPERTY_PARAM_DIVX_FORMAT 0x201007
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE 0x201008
+#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA 0x201009
+#define HFI_PROPERTY_PARAM_ERR_DETECTION_CODE_EXTRADATA 0x20100a
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED 0x20100b
+#define HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL 0x20100c
+#define HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL 0x20100d
+
+/*
+ * HFI_PROPERTY_CONFIG_OX_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x2000
+ */
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS 0x202001
+#define HFI_PROPERTY_CONFIG_REALTIME 0x202002
+#define HFI_PROPERTY_CONFIG_PRIORITY 0x202003
+#define HFI_PROPERTY_CONFIG_BATCH_INFO 0x202004
+
+/*
+ * HFI_PROPERTY_PARAM_VDEC_OX_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000
+ */
+#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER 0x1203001
+#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT 0x1203002
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT 0x1203003
+#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE 0x1203004
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER 0x1203005
+#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION 0x1203006
+#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB 0x1203007
+#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING 0x1203008
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO 0x1203009
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA 0x120300a
+#define HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA 0x120300b
+#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA 0x120300c
+#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE 0x120300d
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY 0x120300e
+#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA 0x1203011
+#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA 0x1203012
+#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA 0x1203013
+#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA 0x1203014
+#define HFI_PROPERTY_PARAM_VDEC_AVC_SESSION_SELECT 0x1203015
+#define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA 0x1203016
+#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA 0x1203017
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA 0x1203018
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA 0x1203019
+#define HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD 0x120301a
+
+/*
+ * HFI_PROPERTY_CONFIG_VDEC_OX_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000
+ */
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER 0x1200001
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING 0x1200002
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP 0x1200003
+
+#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY 0x1204004
+
+/*
+ * HFI_PROPERTY_PARAM_VENC_OX_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000
+ */
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO 0x2205001
+#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL 0x2205002
+#define HFI_PROPERTY_PARAM_VENC_LTR_INFO 0x2205003
+#define HFI_PROPERTY_PARAM_VENC_MBI_DUMPING 0x2205005
+
+/*
+ * HFI_PROPERTY_CONFIG_VENC_OX_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000
+ */
+#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP 0x2206001
+
+/*
+ * HFI_PROPERTY_PARAM_VPE_OX_START
+ * HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000
+ */
+#define HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION 0x3207001
+
+#define HFI_PROPERTY_CONFIG_VPE_OX_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
+
+#define HFI_CHROMA_SITE_0 0x1000001
+#define HFI_CHROMA_SITE_1 0x1000002
+#define HFI_CHROMA_SITE_2 0x1000003
+#define HFI_CHROMA_SITE_3 0x1000004
+#define HFI_CHROMA_SITE_4 0x1000005
+#define HFI_CHROMA_SITE_5 0x1000006
+
+#define HFI_PRIORITY_LOW 10
+#define HFI_PRIOIRTY_MEDIUM 20
+#define HFI_PRIORITY_HIGH 30
+
+#define HFI_OUTPUT_ORDER_DISPLAY 0x1000001
+#define HFI_OUTPUT_ORDER_DECODE 0x1000002
+
+#define HFI_RATE_CONTROL_OFF 0x1000001
+#define HFI_RATE_CONTROL_VBR_VFR 0x1000002
+#define HFI_RATE_CONTROL_VBR_CFR 0x1000003
+#define HFI_RATE_CONTROL_CBR_VFR 0x1000004
+#define HFI_RATE_CONTROL_CBR_CFR 0x1000005
+
+#define HFI_VIDEO_CODEC_H264 0x00000002
+#define HFI_VIDEO_CODEC_H263 0x00000004
+#define HFI_VIDEO_CODEC_MPEG1 0x00000008
+#define HFI_VIDEO_CODEC_MPEG2 0x00000010
+#define HFI_VIDEO_CODEC_MPEG4 0x00000020
+#define HFI_VIDEO_CODEC_DIVX_311 0x00000040
+#define HFI_VIDEO_CODEC_DIVX 0x00000080
+#define HFI_VIDEO_CODEC_VC1 0x00000100
+#define HFI_VIDEO_CODEC_SPARK 0x00000200
+#define HFI_VIDEO_CODEC_VP8 0x00001000
+#define HFI_VIDEO_CODEC_HEVC 0x00002000
+#define HFI_VIDEO_CODEC_VP9 0x00004000
+#define HFI_VIDEO_CODEC_HEVC_HYBRID 0x80000000
+
+#define HFI_H264_PROFILE_BASELINE 0x00000001
+#define HFI_H264_PROFILE_MAIN 0x00000002
+#define HFI_H264_PROFILE_HIGH 0x00000004
+#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008
+#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010
+#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000020
+#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000040
+
+#define HFI_H264_LEVEL_1 0x00000001
+#define HFI_H264_LEVEL_1b 0x00000002
+#define HFI_H264_LEVEL_11 0x00000004
+#define HFI_H264_LEVEL_12 0x00000008
+#define HFI_H264_LEVEL_13 0x00000010
+#define HFI_H264_LEVEL_2 0x00000020
+#define HFI_H264_LEVEL_21 0x00000040
+#define HFI_H264_LEVEL_22 0x00000080
+#define HFI_H264_LEVEL_3 0x00000100
+#define HFI_H264_LEVEL_31 0x00000200
+#define HFI_H264_LEVEL_32 0x00000400
+#define HFI_H264_LEVEL_4 0x00000800
+#define HFI_H264_LEVEL_41 0x00001000
+#define HFI_H264_LEVEL_42 0x00002000
+#define HFI_H264_LEVEL_5 0x00004000
+#define HFI_H264_LEVEL_51 0x00008000
+#define HFI_H264_LEVEL_52 0x00010000
+
+#define HFI_H263_PROFILE_BASELINE 0x00000001
+
+#define HFI_H263_LEVEL_10 0x00000001
+#define HFI_H263_LEVEL_20 0x00000002
+#define HFI_H263_LEVEL_30 0x00000004
+#define HFI_H263_LEVEL_40 0x00000008
+#define HFI_H263_LEVEL_45 0x00000010
+#define HFI_H263_LEVEL_50 0x00000020
+#define HFI_H263_LEVEL_60 0x00000040
+#define HFI_H263_LEVEL_70 0x00000080
+
+#define HFI_MPEG2_PROFILE_SIMPLE 0x00000001
+#define HFI_MPEG2_PROFILE_MAIN 0x00000002
+#define HFI_MPEG2_PROFILE_422 0x00000004
+#define HFI_MPEG2_PROFILE_SNR 0x00000008
+#define HFI_MPEG2_PROFILE_SPATIAL 0x00000010
+#define HFI_MPEG2_PROFILE_HIGH 0x00000020
+
+#define HFI_MPEG2_LEVEL_LL 0x00000001
+#define HFI_MPEG2_LEVEL_ML 0x00000002
+#define HFI_MPEG2_LEVEL_H14 0x00000004
+#define HFI_MPEG2_LEVEL_HL 0x00000008
+
+#define HFI_MPEG4_PROFILE_SIMPLE 0x00000001
+#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE 0x00000002
+
+#define HFI_MPEG4_LEVEL_0 0x00000001
+#define HFI_MPEG4_LEVEL_0b 0x00000002
+#define HFI_MPEG4_LEVEL_1 0x00000004
+#define HFI_MPEG4_LEVEL_2 0x00000008
+#define HFI_MPEG4_LEVEL_3 0x00000010
+#define HFI_MPEG4_LEVEL_4 0x00000020
+#define HFI_MPEG4_LEVEL_4a 0x00000040
+#define HFI_MPEG4_LEVEL_5 0x00000080
+#define HFI_MPEG4_LEVEL_6 0x00000100
+#define HFI_MPEG4_LEVEL_7 0x00000200
+#define HFI_MPEG4_LEVEL_8 0x00000400
+#define HFI_MPEG4_LEVEL_9 0x00000800
+#define HFI_MPEG4_LEVEL_3b 0x00001000
+
+#define HFI_VC1_PROFILE_SIMPLE 0x00000001
+#define HFI_VC1_PROFILE_MAIN 0x00000002
+#define HFI_VC1_PROFILE_ADVANCED 0x00000004
+
+#define HFI_VC1_LEVEL_LOW 0x00000001
+#define HFI_VC1_LEVEL_MEDIUM 0x00000002
+#define HFI_VC1_LEVEL_HIGH 0x00000004
+#define HFI_VC1_LEVEL_0 0x00000008
+#define HFI_VC1_LEVEL_1 0x00000010
+#define HFI_VC1_LEVEL_2 0x00000020
+#define HFI_VC1_LEVEL_3 0x00000040
+#define HFI_VC1_LEVEL_4 0x00000080
+
+#define HFI_VPX_PROFILE_SIMPLE 0x00000001
+#define HFI_VPX_PROFILE_ADVANCED 0x00000002
+#define HFI_VPX_PROFILE_VERSION_0 0x00000004
+#define HFI_VPX_PROFILE_VERSION_1 0x00000008
+#define HFI_VPX_PROFILE_VERSION_2 0x00000010
+#define HFI_VPX_PROFILE_VERSION_3 0x00000020
+
+#define HFI_DIVX_FORMAT_4 0x1
+#define HFI_DIVX_FORMAT_5 0x2
+#define HFI_DIVX_FORMAT_6 0x3
+
+#define HFI_DIVX_PROFILE_QMOBILE 0x00000001
+#define HFI_DIVX_PROFILE_MOBILE 0x00000002
+#define HFI_DIVX_PROFILE_MT 0x00000004
+#define HFI_DIVX_PROFILE_HT 0x00000008
+#define HFI_DIVX_PROFILE_HD 0x00000010
+
+#define HFI_HEVC_PROFILE_MAIN 0x00000001
+#define HFI_HEVC_PROFILE_MAIN10 0x00000002
+#define HFI_HEVC_PROFILE_MAIN_STILL_PIC 0x00000004
+
+#define HFI_HEVC_LEVEL_1 0x00000001
+#define HFI_HEVC_LEVEL_2 0x00000002
+#define HFI_HEVC_LEVEL_21 0x00000004
+#define HFI_HEVC_LEVEL_3 0x00000008
+#define HFI_HEVC_LEVEL_31 0x00000010
+#define HFI_HEVC_LEVEL_4 0x00000020
+#define HFI_HEVC_LEVEL_41 0x00000040
+#define HFI_HEVC_LEVEL_5 0x00000080
+#define HFI_HEVC_LEVEL_51 0x00000100
+#define HFI_HEVC_LEVEL_52 0x00000200
+#define HFI_HEVC_LEVEL_6 0x00000400
+#define HFI_HEVC_LEVEL_61 0x00000800
+#define HFI_HEVC_LEVEL_62 0x00001000
+
+#define HFI_HEVC_TIER_MAIN 0x1
+#define HFI_HEVC_TIER_HIGH0 0x2
+
+#define HFI_BUFFER_INPUT 0x1
+#define HFI_BUFFER_OUTPUT 0x2
+#define HFI_BUFFER_OUTPUT2 0x3
+#define HFI_BUFFER_INTERNAL_PERSIST 0x4
+#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
+#define HFI_BUFFER_INTERNAL_SCRATCH(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x6 : 0x1000001)
+#define HFI_BUFFER_INTERNAL_SCRATCH_1(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x7 : 0x1000005)
+#define HFI_BUFFER_INTERNAL_SCRATCH_2(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x8 : 0x1000006)
+#define HFI_BUFFER_EXTRADATA_INPUT(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xc : 0x1000002)
+#define HFI_BUFFER_EXTRADATA_OUTPUT(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xa : 0x1000003)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xb : 0x1000004)
+#define HFI_BUFFER_TYPE_MAX 11
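+/*
+ * Example: HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX) evaluates to 0x6,
+ * while the same macro yields 0x1000001 for 1xx/3xx firmware; only the
+ * scratch and extradata buffer ids above depend on the interface version.
+ */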
+
+#define HFI_BUFFER_MODE_STATIC 0x1000001
+#define HFI_BUFFER_MODE_RING 0x1000002
+#define HFI_BUFFER_MODE_DYNAMIC 0x1000003
+
+#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1
+#define HFI_VENC_PERFMODE_POWER_SAVE 0x2
+
+/*
+ * HFI_PROPERTY_SYS_COMMON_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000
+ */
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG 0x1
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO 0x2
+#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ 0x3
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR 0x4
+#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5
+#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6
+#define HFI_PROPERTY_SYS_CONFIG_COVERAGE 0x7
+
+/*
+ * HFI_PROPERTY_PARAM_COMMON_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000
+ */
+#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO 0x1002
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED 0x1004
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED 0x1006
+#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED 0x1007
+#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED 0x1008
+#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED 0x1009
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED 0x100a
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT 0x100b
+#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT 0x100c
+#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE 0x100d
+#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED 0x100e
+#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f
+#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010
+#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
+
+/*
+ * HFI_PROPERTY_CONFIG_COMMON_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000
+ */
+#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001
+#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002
+
+/*
+ * HFI_PROPERTY_PARAM_VDEC_COMMON_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000
+ */
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
+#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR 0x1003002
+#define HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2 0x1003003
+#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH 0x1003007
+#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT 0x1003009
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE 0x100300a
+
+/*
+ * HFI_PROPERTY_CONFIG_VDEC_COMMON_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000
+ */
+
+/*
+ * HFI_PROPERTY_PARAM_VENC_COMMON_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000
+ */
+#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE 0x2005001
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL 0x2005002
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL 0x2005003
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL 0x2005004
+#define HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE 0x2005005
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP 0x2005006
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION 0x2005007
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE 0x2005008
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION 0x2005009
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER 0x200500a
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION 0x200500b
+#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP 0x200500c
+#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH 0x200500d
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL 0x200500e
+#define HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE 0x200500f
+#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED 0x2005010
+#define HFI_PROPERTY_PARAM_VENC_ADVANCED 0x2005012
+#define HFI_PROPERTY_PARAM_VENC_H264_SPS_ID 0x2005014
+#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID 0x2005015
+#define HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL 0x2005016
+#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO 0x2005017
+#define HFI_PROPERTY_PARAM_VENC_NUMREF 0x2005018
+#define HFI_PROPERTY_PARAM_VENC_MULTIREF_P 0x2005019
+#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT 0x200501b
+#define HFI_PROPERTY_PARAM_VENC_LTRMODE 0x200501c
+#define HFI_PROPERTY_PARAM_VENC_VIDEO_FULL_RANGE 0x200501d
+#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO 0x200501e
+#define HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG 0x200501f
+#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES 0x2005020
+#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC 0x2005021
+#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY 0x2005023
+#define HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER 0x2005026
+#define HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP 0x2005027
+#define HFI_PROPERTY_PARAM_VENC_INITIAL_QP 0x2005028
+#define HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE 0x2005029
+#define HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER 0x200502c
+#define HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE 0x200502f
+
+/*
+ * HFI_PROPERTY_CONFIG_VENC_COMMON_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000
+ */
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE 0x2006001
+#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD 0x2006002
+#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD 0x2006003
+#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME 0x2006004
+#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE 0x2006005
+#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE 0x2006007
+#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER 0x2006008
+#define HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME 0x2006009
+#define HFI_PROPERTY_CONFIG_VENC_USELTRFRAME 0x200600a
+#define HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER 0x200600b
+#define HFI_PROPERTY_CONFIG_VENC_LTRPERIOD 0x200600c
+#define HFI_PROPERTY_CONFIG_VENC_PERF_MODE 0x200600e
+
+/*
+ * HFI_PROPERTY_PARAM_VPE_COMMON_START
+ * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000
+ */
+
+/*
+ * HFI_PROPERTY_CONFIG_VPE_COMMON_START
+ * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000
+ */
+#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE 0x3008001
+#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS 0x3008002
+
+enum hfi_version {
+ HFI_VERSION_1XX,
+ HFI_VERSION_3XX,
+ HFI_VERSION_4XX
+};
+
+struct hfi_buffer_info {
+ u32 buffer_addr;
+ u32 extradata_addr;
+};
+
+struct hfi_bitrate {
+ u32 bitrate;
+ u32 layer_id;
+};
+
+#define HFI_CAPABILITY_FRAME_WIDTH 0x01
+#define HFI_CAPABILITY_FRAME_HEIGHT 0x02
+#define HFI_CAPABILITY_MBS_PER_FRAME 0x03
+#define HFI_CAPABILITY_MBS_PER_SECOND 0x04
+#define HFI_CAPABILITY_FRAMERATE 0x05
+#define HFI_CAPABILITY_SCALE_X 0x06
+#define HFI_CAPABILITY_SCALE_Y 0x07
+#define HFI_CAPABILITY_BITRATE 0x08
+#define HFI_CAPABILITY_BFRAME 0x09
+#define HFI_CAPABILITY_PEAKBITRATE 0x0a
+#define HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS 0x10
+#define HFI_CAPABILITY_ENC_LTR_COUNT 0x11
+#define HFI_CAPABILITY_CP_OUTPUT2_THRESH 0x12
+#define HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS 0x13
+#define HFI_CAPABILITY_LCU_SIZE 0x14
+#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS 0x15
+#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE 0x16
+
+struct hfi_capability {
+ u32 capability_type;
+ u32 min;
+ u32 max;
+ u32 step_size;
+};
+
+struct hfi_capabilities {
+ u32 num_capabilities;
+ struct hfi_capability data[1];
+};
+
+#define HFI_DEBUG_MSG_LOW 0x01
+#define HFI_DEBUG_MSG_MEDIUM 0x02
+#define HFI_DEBUG_MSG_HIGH 0x04
+#define HFI_DEBUG_MSG_ERROR 0x08
+#define HFI_DEBUG_MSG_FATAL 0x10
+#define HFI_DEBUG_MSG_PERF 0x20
+
+#define HFI_DEBUG_MODE_QUEUE 0x01
+#define HFI_DEBUG_MODE_QDSS 0x02
+
+struct hfi_debug_config {
+ u32 config;
+ u32 mode;
+};
+
+struct hfi_enable {
+ u32 enable;
+};
+
+#define HFI_H264_DB_MODE_DISABLE 0x1
+#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY 0x2
+#define HFI_H264_DB_MODE_ALL_BOUNDARY 0x3
+
+struct hfi_h264_db_control {
+ u32 mode;
+ s32 slice_alpha_offset;
+ s32 slice_beta_offset;
+};
+
+#define HFI_H264_ENTROPY_CAVLC 0x1
+#define HFI_H264_ENTROPY_CABAC 0x2
+
+#define HFI_H264_CABAC_MODEL_0 0x1
+#define HFI_H264_CABAC_MODEL_1 0x2
+#define HFI_H264_CABAC_MODEL_2 0x3
+
+struct hfi_h264_entropy_control {
+ u32 entropy_mode;
+ u32 cabac_model;
+};
+
+struct hfi_framerate {
+ u32 buffer_type;
+ u32 framerate;
+};
+
+#define HFI_INTRA_REFRESH_NONE 0x1
+#define HFI_INTRA_REFRESH_CYCLIC 0x2
+#define HFI_INTRA_REFRESH_ADAPTIVE 0x3
+#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE 0x4
+#define HFI_INTRA_REFRESH_RANDOM 0x5
+
+struct hfi_intra_refresh {
+ u32 mode;
+ u32 air_mbs;
+ u32 air_ref;
+ u32 cir_mbs;
+};
+
+struct hfi_intra_refresh_3x {
+ u32 mode;
+ u32 mbs;
+};
+
+struct hfi_idr_period {
+ u32 idr_period;
+};
+
+struct hfi_operations_type {
+ u32 rotation;
+ u32 flip;
+};
+
+struct hfi_max_num_b_frames {
+ u32 max_num_b_frames;
+};
+
+struct hfi_vc1e_perf_cfg_type {
+ u32 search_range_x_subsampled[3];
+ u32 search_range_y_subsampled[3];
+};
+
+struct hfi_conceal_color {
+ u32 conceal_color;
+};
+
+struct hfi_intra_period {
+ u32 pframes;
+ u32 bframes;
+};
+
+struct hfi_mpeg4_header_extension {
+ u32 header_extension;
+};
+
+struct hfi_mpeg4_time_resolution {
+ u32 time_increment_resolution;
+};
+
+struct hfi_multi_stream {
+ u32 buffer_type;
+ u32 enable;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_multi_stream_3x {
+ u32 buffer_type;
+ u32 enable;
+};
+
+struct hfi_multi_view_format {
+ u32 views;
+ u32 view_order[1];
+};
+
+#define HFI_MULTI_SLICE_OFF 0x1
+#define HFI_MULTI_SLICE_BY_MB_COUNT 0x2
+#define HFI_MULTI_SLICE_BY_BYTE_COUNT 0x3
+#define HFI_MULTI_SLICE_GOB 0x4
+
+struct hfi_multi_slice_control {
+ u32 multi_slice;
+ u32 slice_size;
+};
+
+#define HFI_NAL_FORMAT_STARTCODES 0x01
+#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER 0x02
+#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH 0x04
+#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH 0x08
+#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH 0x10
+
+struct hfi_nal_stream_format {
+ u32 format;
+};
+
+struct hfi_nal_stream_format_select {
+ u32 format;
+};
+
+#define HFI_PICTURE_TYPE_I 0x01
+#define HFI_PICTURE_TYPE_P 0x02
+#define HFI_PICTURE_TYPE_B 0x04
+#define HFI_PICTURE_TYPE_IDR 0x08
+
+struct hfi_profile_level {
+ u32 profile;
+ u32 level;
+};
+
+#define HFI_MAX_PROFILE_COUNT 16
+
+struct hfi_profile_level_supported {
+ u32 profile_count;
+ struct hfi_profile_level profile_level[1];
+};
+
+struct hfi_quality_vs_speed {
+ u32 quality_vs_speed;
+};
+
+struct hfi_quantization {
+ u32 qp_i;
+ u32 qp_p;
+ u32 qp_b;
+ u32 layer_id;
+};
+
+struct hfi_initial_quantization {
+ u32 qp_i;
+ u32 qp_p;
+ u32 qp_b;
+ u32 init_qp_enable;
+};
+
+struct hfi_quantization_range {
+ u32 min_qp;
+ u32 max_qp;
+ u32 layer_id;
+};
+
+#define HFI_LTR_MODE_DISABLE 0x0
+#define HFI_LTR_MODE_MANUAL 0x1
+#define HFI_LTR_MODE_PERIODIC 0x2
+
+struct hfi_ltr_mode {
+ u32 ltr_mode;
+ u32 ltr_count;
+ u32 trust_mode;
+};
+
+struct hfi_ltr_use {
+ u32 ref_ltr;
+ u32 use_constrnt;
+ u32 frames;
+};
+
+struct hfi_ltr_mark {
+ u32 mark_frame;
+};
+
+struct hfi_framesize {
+ u32 buffer_type;
+ u32 width;
+ u32 height;
+};
+
+#define VIDC_CORE_ID_DEFAULT 0
+#define VIDC_CORE_ID_1 1
+#define VIDC_CORE_ID_2 2
+#define VIDC_CORE_ID_3 3
+
+struct hfi_videocores_usage_type {
+ u32 video_core_enable_mask;
+};
+
+#define VIDC_WORK_MODE_1 1
+#define VIDC_WORK_MODE_2 2
+
+struct hfi_video_work_mode {
+ u32 video_work_mode;
+};
+
+struct hfi_h264_vui_timing_info {
+ u32 enable;
+ u32 fixed_framerate;
+ u32 time_scale;
+};
+
+struct hfi_bit_depth {
+ u32 buffer_type;
+ u32 bit_depth;
+};
+
+struct hfi_picture_type {
+ u32 is_sync_frame;
+ u32 picture_type;
+};
+
+struct hfi_pic_struct {
+ u32 progressive_only;
+};
+
+struct hfi_colour_space {
+ u32 colour_space;
+};
+
+struct hfi_extradata_input_crop {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
+#define HFI_COLOR_FORMAT_MONOCHROME 0x01
+#define HFI_COLOR_FORMAT_NV12 0x02
+#define HFI_COLOR_FORMAT_NV21 0x03
+#define HFI_COLOR_FORMAT_NV12_4x4TILE 0x04
+#define HFI_COLOR_FORMAT_NV21_4x4TILE 0x05
+#define HFI_COLOR_FORMAT_YUYV 0x06
+#define HFI_COLOR_FORMAT_YVYU 0x07
+#define HFI_COLOR_FORMAT_UYVY 0x08
+#define HFI_COLOR_FORMAT_VYUY 0x09
+#define HFI_COLOR_FORMAT_RGB565 0x0a
+#define HFI_COLOR_FORMAT_BGR565 0x0b
+#define HFI_COLOR_FORMAT_RGB888 0x0c
+#define HFI_COLOR_FORMAT_BGR888 0x0d
+#define HFI_COLOR_FORMAT_YUV444 0x0e
+#define HFI_COLOR_FORMAT_RGBA8888 0x10
+
+#define HFI_COLOR_FORMAT_UBWC_BASE 0x8000
+#define HFI_COLOR_FORMAT_10_BIT_BASE 0x4000
+
+#define HFI_COLOR_FORMAT_YUV420_TP10 0x4002
+#define HFI_COLOR_FORMAT_NV12_UBWC 0x8002
+#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC 0xc002
+#define HFI_COLOR_FORMAT_RGBA8888_UBWC 0x8010
+
+struct hfi_uncompressed_format_select {
+ u32 buffer_type;
+ u32 format;
+};
+
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+ u32 format;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_constraints[1];
+};
+
+struct hfi_uncompressed_format_supported {
+ u32 buffer_type;
+ u32 format_entries;
+ struct hfi_uncompressed_plane_info plane_info[1];
+};
+
+struct hfi_uncompressed_plane_actual {
+ int actual_stride;
+ u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_actual plane_format[1];
+};
+
+struct hfi_uncompressed_plane_actual_constraints_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_format[1];
+};
+
+struct hfi_codec_supported {
+ u32 dec_codecs;
+ u32 enc_codecs;
+};
+
+struct hfi_properties_supported {
+ u32 num_properties;
+ u32 properties[1];
+};
+
+struct hfi_max_sessions_supported {
+ u32 max_sessions;
+};
+
+#define HFI_MAX_MATRIX_COEFFS 9
+#define HFI_MAX_BIAS_COEFFS 3
+#define HFI_MAX_LIMIT_COEFFS 6
+
+struct hfi_vpe_color_space_conversion {
+ u32 csc_matrix[HFI_MAX_MATRIX_COEFFS];
+ u32 csc_bias[HFI_MAX_BIAS_COEFFS];
+ u32 csc_limit[HFI_MAX_LIMIT_COEFFS];
+};
+
+#define HFI_ROTATE_NONE 0x1
+#define HFI_ROTATE_90 0x2
+#define HFI_ROTATE_180 0x3
+#define HFI_ROTATE_270 0x4
+
+#define HFI_FLIP_NONE 0x1
+#define HFI_FLIP_HORIZONTAL 0x2
+#define HFI_FLIP_VERTICAL 0x3
+
+struct hfi_operations {
+ u32 rotate;
+ u32 flip;
+};
+
+#define HFI_RESOURCE_OCMEM 0x1
+
+struct hfi_resource_ocmem {
+ u32 size;
+ u32 mem;
+};
+
+struct hfi_resource_ocmem_requirement {
+ u32 session_domain;
+ u32 width;
+ u32 height;
+ u32 size;
+};
+
+struct hfi_resource_ocmem_requirement_info {
+ u32 num_entries;
+ struct hfi_resource_ocmem_requirement requirements[1];
+};
+
+struct hfi_property_sys_image_version_info_type {
+ u32 string_size;
+ u8 str_image_version[1];
+};
+
+struct hfi_codec_mask_supported {
+ u32 codecs;
+ u32 video_domains;
+};
+
+struct hfi_seq_header_info {
+ u32 max_hader_len;
+};
+
+struct hfi_aspect_ratio {
+ u32 aspect_width;
+ u32 aspect_height;
+};
+
+#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM 0
+#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE 1
+#define HFI_MVC_BUFFER_LAYOUT_SEQ 2
+
+struct hfi_mvc_buffer_layout_descp_type {
+ u32 layout_type;
+ u32 bright_view_first;
+ u32 ngap;
+};
+
+struct hfi_scs_threshold {
+ u32 threshold_value;
+};
+
+#define HFI_TEST_SSR_SW_ERR_FATAL 0x1
+#define HFI_TEST_SSR_SW_DIV_BY_ZERO 0x2
+#define HFI_TEST_SSR_HW_WDOG_IRQ 0x3
+
+struct hfi_buffer_alloc_mode {
+ u32 type;
+ u32 mode;
+};
+
+struct hfi_index_extradata_config {
+ u32 enable;
+ u32 index_extra_data_id;
+};
+
+struct hfi_extradata_header {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 type;
+ u32 data_size;
+ u8 data[1];
+};
+
+struct hfi_batch_info {
+ u32 input_batch_count;
+ u32 output_batch_count;
+};
+
+struct hfi_buffer_count_actual {
+ u32 type;
+ u32 count_actual;
+};
+
+struct hfi_buffer_count_actual_4xx {
+ u32 type;
+ u32 count_actual;
+ u32 count_min_host;
+};
+
+struct hfi_buffer_size_actual {
+ u32 type;
+ u32 size;
+};
+
+struct hfi_buffer_display_hold_count_actual {
+ u32 type;
+ u32 hold_count;
+};
+
+/* HFI 4XX reorder the fields, use these macros */
+#define HFI_BUFREQ_HOLD_COUNT(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? 0 : (bufreq)->hold_count)
+#define HFI_BUFREQ_COUNT_MIN(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? (bufreq)->hold_count : (bufreq)->count_min)
+#define HFI_BUFREQ_COUNT_MIN_HOST(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? (bufreq)->count_min : 0)
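+/*
+ * In 4xx replies the field named hold_count actually carries count_min and
+ * the field named count_min carries count_min_host; the accessors above
+ * hide that reordering, and hold_count itself is simply reported as 0.
+ */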
+
+struct hfi_buffer_requirements {
+ u32 type;
+ u32 size;
+ u32 region_size;
+ u32 hold_count;
+ u32 count_min;
+ u32 count_actual;
+ u32 contiguous;
+ u32 alignment;
+};
+
+struct hfi_data_payload {
+ u32 size;
+ u8 data[1];
+};
+
+struct hfi_enable_picture {
+ u32 picture_type;
+};
+
+struct hfi_display_picture_buffer_count {
+ int enable;
+ u32 count;
+};
+
+struct hfi_extra_data_header_config {
+ u32 type;
+ u32 buffer_type;
+ u32 version;
+ u32 port_index;
+ u32 client_extra_data_id;
+};
+
+struct hfi_interlace_format_supported {
+ u32 buffer_type;
+ u32 format;
+};
+
+struct hfi_buffer_alloc_mode_supported {
+ u32 buffer_type;
+ u32 num_entries;
+ u32 data[1];
+};
+
+struct hfi_mb_error_map {
+ u32 error_map_size;
+ u8 error_map[1];
+};
+
+struct hfi_metadata_pass_through {
+ int enable;
+ u32 size;
+};
+
+struct hfi_multi_view_select {
+ u32 view_index;
+};
+
+struct hfi_hybrid_hierp {
+ u32 layers;
+};
+
+struct hfi_pkt_hdr {
+ u32 size;
+ u32 pkt_type;
+};
+
+struct hfi_session_hdr_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 session_id;
+};
+
+struct hfi_session_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
new file mode 100644
index 000000000..0ecdaa15c
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "core.h"
+#include "hfi.h"
+#include "hfi_helper.h"
+#include "hfi_msgs.h"
+#include "hfi_parser.h"
+
+static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ enum hfi_version ver = core->res->hfi_version;
+ struct hfi_event_data event = {0};
+ int num_properties_changed;
+ struct hfi_framesize *frame_sz;
+ struct hfi_profile_level *profile_level;
+ struct hfi_bit_depth *pixel_depth;
+ struct hfi_pic_struct *pic_struct;
+ struct hfi_colour_space *colour_info;
+ struct hfi_buffer_requirements *bufreq;
+ struct hfi_extradata_input_crop *crop;
+ u8 *data_ptr;
+ u32 ptype;
+
+ inst->error = HFI_ERR_NONE;
+
+ switch (pkt->event_data1) {
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
+ break;
+ default:
+ inst->error = HFI_ERR_SESSION_INVALID_PARAMETER;
+ goto done;
+ }
+
+ event.event_type = pkt->event_data1;
+
+ num_properties_changed = pkt->event_data2;
+ if (!num_properties_changed) {
+ inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+ goto done;
+ }
+
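+	/*
+	 * ext_event_data[] carries a packed list of (property id, payload)
+	 * pairs; walk it by skipping the u32 id and then the payload struct
+	 * for each of the num_properties_changed entries.
+	 */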
+ data_ptr = (u8 *)&pkt->ext_event_data[0];
+ do {
+ ptype = *((u32 *)data_ptr);
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_FRAME_SIZE:
+ data_ptr += sizeof(u32);
+ frame_sz = (struct hfi_framesize *)data_ptr;
+ event.width = frame_sz->width;
+ event.height = frame_sz->height;
+ data_ptr += sizeof(*frame_sz);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ data_ptr += sizeof(u32);
+ profile_level = (struct hfi_profile_level *)data_ptr;
+ event.profile = profile_level->profile;
+ event.level = profile_level->level;
+ data_ptr += sizeof(*profile_level);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
+ data_ptr += sizeof(u32);
+ pixel_depth = (struct hfi_bit_depth *)data_ptr;
+ event.bit_depth = pixel_depth->bit_depth;
+ data_ptr += sizeof(*pixel_depth);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
+ data_ptr += sizeof(u32);
+ pic_struct = (struct hfi_pic_struct *)data_ptr;
+ event.pic_struct = pic_struct->progressive_only;
+ data_ptr += sizeof(*pic_struct);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+ data_ptr += sizeof(u32);
+ colour_info = (struct hfi_colour_space *)data_ptr;
+ event.colour_space = colour_info->colour_space;
+ data_ptr += sizeof(*colour_info);
+ break;
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ data_ptr += sizeof(u32);
+ event.entropy_mode = *(u32 *)data_ptr;
+ data_ptr += sizeof(u32);
+ break;
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ data_ptr += sizeof(u32);
+ bufreq = (struct hfi_buffer_requirements *)data_ptr;
+ event.buf_count = HFI_BUFREQ_COUNT_MIN(bufreq, ver);
+ data_ptr += sizeof(*bufreq);
+ break;
+ case HFI_INDEX_EXTRADATA_INPUT_CROP:
+ data_ptr += sizeof(u32);
+ crop = (struct hfi_extradata_input_crop *)data_ptr;
+ event.input_crop.left = crop->left;
+ event.input_crop.top = crop->top;
+ event.input_crop.width = crop->width;
+ event.input_crop.height = crop->height;
+ data_ptr += sizeof(*crop);
+ break;
+ default:
+ break;
+ }
+ num_properties_changed--;
+ } while (num_properties_changed > 0);
+
+done:
+ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+}
+
+static void event_release_buffer_ref(struct venus_core *core,
+ struct venus_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct hfi_event_data event = {0};
+ struct hfi_msg_event_release_buffer_ref_pkt *data;
+
+ data = (struct hfi_msg_event_release_buffer_ref_pkt *)
+ pkt->ext_event_data;
+
+ event.event_type = HFI_EVENT_RELEASE_BUFFER_REFERENCE;
+ event.packet_buffer = data->packet_buffer;
+ event.extradata_buffer = data->extradata_buffer;
+ event.tag = data->output_tag;
+
+ inst->error = HFI_ERR_NONE;
+ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+}
+
+static void event_sys_error(struct venus_core *core, u32 event,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ if (pkt)
+ dev_dbg(core->dev,
+ "sys error (session id:%x, data1:%x, data2:%x)\n",
+ pkt->shdr.session_id, pkt->event_data1,
+ pkt->event_data2);
+
+ core->core_ops->event_notify(core, event);
+}
+
+static void
+event_session_error(struct venus_core *core, struct venus_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct device *dev = core->dev;
+
+ dev_dbg(dev, "session error: event id:%x, session id:%x\n",
+ pkt->event_data1, pkt->shdr.session_id);
+
+ if (!inst)
+ return;
+
+ switch (pkt->event_data1) {
+ /* non fatal session errors */
+ case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+ case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
+ case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+ case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+ inst->error = HFI_ERR_NONE;
+ break;
+ default:
+ dev_err(dev, "session error: event id:%x (%x), session id:%x\n",
+ pkt->event_data1, pkt->event_data2,
+ pkt->shdr.session_id);
+
+ inst->error = pkt->event_data1;
+ inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
+ break;
+ }
+}
+
+static void hfi_event_notify(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_event_notify_pkt *pkt = packet;
+
+ if (!packet)
+ return;
+
+ switch (pkt->event_id) {
+ case HFI_EVENT_SYS_ERROR:
+ event_sys_error(core, EVT_SYS_ERROR, pkt);
+ break;
+ case HFI_EVENT_SESSION_ERROR:
+ event_session_error(core, inst, pkt);
+ break;
+ case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
+ event_seq_changed(core, inst, pkt);
+ break;
+ case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
+ event_release_buffer_ref(core, inst, pkt);
+ break;
+ case HFI_EVENT_SESSION_PROPERTY_CHANGED:
+ break;
+ default:
+ break;
+ }
+}
+
+static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_sys_init_done_pkt *pkt = packet;
+ int rem_bytes;
+ u32 error;
+
+ error = pkt->error_type;
+ if (error != HFI_ERR_NONE)
+ goto done;
+
+ if (!pkt->num_properties) {
+ error = HFI_ERR_SYS_INVALID_PARAMETER;
+ goto done;
+ }
+
+ rem_bytes = pkt->hdr.size - sizeof(*pkt) + sizeof(u32);
+ if (rem_bytes <= 0) {
+ /* missing property data */
+ error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+ goto done;
+ }
+
+ error = hfi_parser(core, inst, pkt->data, rem_bytes);
+
+done:
+ core->error = error;
+ complete(&core->done);
+}
+
+static void
+sys_get_prop_image_version(struct device *dev,
+ struct hfi_msg_sys_property_info_pkt *pkt)
+{
+ int req_bytes;
+
+ req_bytes = pkt->hdr.size - sizeof(*pkt);
+
+ if (req_bytes < 128 || !pkt->data[1] || pkt->num_properties > 1)
+ /* bad packet */
+ return;
+
+ dev_dbg(dev, "F/W version: %s\n", (u8 *)&pkt->data[1]);
+}
+
+static void hfi_sys_property_info(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_sys_property_info_pkt *pkt = packet;
+ struct device *dev = core->dev;
+
+ if (!pkt->num_properties) {
+ dev_dbg(dev, "%s: no properties\n", __func__);
+ return;
+ }
+
+ switch (pkt->data[0]) {
+ case HFI_PROPERTY_SYS_IMAGE_VERSION:
+ sys_get_prop_image_version(dev, pkt);
+ break;
+ default:
+ dev_dbg(dev, "%s: unknown property data\n", __func__);
+ break;
+ }
+}
+
+static void hfi_sys_rel_resource_done(struct venus_core *core,
+ struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_sys_release_resource_done_pkt *pkt = packet;
+
+ core->error = pkt->error_type;
+ complete(&core->done);
+}
+
+static void hfi_sys_ping_done(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_sys_ping_ack_pkt *pkt = packet;
+
+ core->error = HFI_ERR_NONE;
+
+ if (pkt->client_data != 0xbeef)
+ core->error = HFI_ERR_SYS_FATAL;
+
+ complete(&core->done);
+}
+
+static void hfi_sys_idle_done(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ dev_dbg(core->dev, "sys idle\n");
+}
+
+static void hfi_sys_pc_prepare_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_sys_pc_prep_done_pkt *pkt = packet;
+
+ dev_dbg(core->dev, "pc prepare done (error %x)\n", pkt->error_type);
+}
+
+static unsigned int
+session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt,
+ struct hfi_profile_level *profile_level)
+{
+ struct hfi_profile_level *hfi;
+ u32 req_bytes;
+
+ req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
+
+ if (!req_bytes || req_bytes % sizeof(struct hfi_profile_level))
+ /* bad packet */
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ hfi = (struct hfi_profile_level *)&pkt->data[1];
+ profile_level->profile = hfi->profile;
+ profile_level->level = hfi->level;
+
+ return HFI_ERR_NONE;
+}
+
+static unsigned int
+session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
+ struct hfi_buffer_requirements *bufreq)
+{
+ struct hfi_buffer_requirements *buf_req;
+ u32 req_bytes;
+ unsigned int idx = 0;
+
+ req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
+
+ if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[1])
+ /* bad packet */
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ buf_req = (struct hfi_buffer_requirements *)&pkt->data[1];
+ if (!buf_req)
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
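+	/*
+	 * Copy each hfi_buffer_requirements entry from the reply payload into
+	 * the caller's array, bailing out once the index runs past
+	 * HFI_BUFFER_TYPE_MAX.
+	 */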
+ while (req_bytes) {
+ memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
+ idx++;
+
+ if (idx > HFI_BUFFER_TYPE_MAX)
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ req_bytes -= sizeof(struct hfi_buffer_requirements);
+ buf_req++;
+ }
+
+ return HFI_ERR_NONE;
+}
+
+static void hfi_session_prop_info(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_property_info_pkt *pkt = packet;
+ struct device *dev = core->dev;
+ union hfi_get_property *hprop = &inst->hprop;
+ unsigned int error = HFI_ERR_NONE;
+
+ if (!pkt->num_properties) {
+ error = HFI_ERR_SESSION_INVALID_PARAMETER;
+ dev_err(dev, "%s: no properties\n", __func__);
+ goto done;
+ }
+
+ switch (pkt->data[0]) {
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ memset(hprop->bufreq, 0, sizeof(hprop->bufreq));
+ error = session_get_prop_buf_req(pkt, hprop->bufreq);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ memset(&hprop->profile_level, 0, sizeof(hprop->profile_level));
+ error = session_get_prop_profile_level(pkt,
+ &hprop->profile_level);
+ break;
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ break;
+ default:
+ dev_dbg(dev, "%s: unknown property id:%x\n", __func__,
+ pkt->data[0]);
+ return;
+ }
+
+done:
+ inst->error = error;
+ complete(&inst->done);
+}
+
+static void hfi_session_init_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_init_done_pkt *pkt = packet;
+ int rem_bytes;
+ u32 error;
+
+ error = pkt->error_type;
+ if (error != HFI_ERR_NONE)
+ goto done;
+
+ if (!IS_V1(core))
+ goto done;
+
+ rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
+ if (rem_bytes <= 0) {
+ error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+ goto done;
+ }
+
+ error = hfi_parser(core, inst, pkt->data, rem_bytes);
+done:
+ inst->error = error;
+ complete(&inst->done);
+}
+
+static void hfi_session_load_res_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_load_resources_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_flush_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_flush_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_etb_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ inst->ops->buf_done(inst, HFI_BUFFER_INPUT, pkt->input_tag,
+ pkt->filled_len, pkt->offset, 0, 0, 0);
+}
+
+static void hfi_session_ftb_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ u32 session_type = inst->session_type;
+ u64 timestamp_us = 0;
+ u32 timestamp_hi = 0, timestamp_lo = 0;
+ unsigned int error;
+ u32 flags = 0, hfi_flags = 0, offset = 0, filled_len = 0;
+ u32 pic_type = 0, buffer_type = 0, output_tag = -1;
+
+ if (session_type == VIDC_SESSION_TYPE_ENC) {
+ struct hfi_msg_session_fbd_compressed_pkt *pkt = packet;
+
+ timestamp_hi = pkt->time_stamp_hi;
+ timestamp_lo = pkt->time_stamp_lo;
+ hfi_flags = pkt->flags;
+ offset = pkt->offset;
+ filled_len = pkt->filled_len;
+ pic_type = pkt->picture_type;
+ output_tag = pkt->output_tag;
+ buffer_type = HFI_BUFFER_OUTPUT;
+
+ error = pkt->error_type;
+ } else if (session_type == VIDC_SESSION_TYPE_DEC) {
+ struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt =
+ packet;
+
+ timestamp_hi = pkt->time_stamp_hi;
+ timestamp_lo = pkt->time_stamp_lo;
+ hfi_flags = pkt->flags;
+ offset = pkt->offset;
+ filled_len = pkt->filled_len;
+ pic_type = pkt->picture_type;
+ output_tag = pkt->output_tag;
+
+ if (pkt->stream_id == 0)
+ buffer_type = HFI_BUFFER_OUTPUT;
+ else if (pkt->stream_id == 1)
+ buffer_type = HFI_BUFFER_OUTPUT2;
+
+ error = pkt->error_type;
+ } else {
+ error = HFI_ERR_SESSION_INVALID_PARAMETER;
+ }
+
+ if (buffer_type != HFI_BUFFER_OUTPUT &&
+ buffer_type != HFI_BUFFER_OUTPUT2)
+ goto done;
+
+ if (hfi_flags & HFI_BUFFERFLAG_EOS)
+ flags |= V4L2_BUF_FLAG_LAST;
+
+ switch (pic_type) {
+ case HFI_PICTURE_IDR:
+ case HFI_PICTURE_I:
+ flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case HFI_PICTURE_P:
+ flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case HFI_PICTURE_B:
+ flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ case HFI_FRAME_NOTCODED:
+ case HFI_UNUSED_PICT:
+ case HFI_FRAME_YUV:
+ default:
+ break;
+ }
+
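+	/* reassemble the 64-bit timestamp from its two 32-bit halves */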
+ if (!(hfi_flags & HFI_BUFFERFLAG_TIMESTAMPINVALID) && filled_len) {
+ timestamp_us = timestamp_hi;
+ timestamp_us = (timestamp_us << 32) | timestamp_lo;
+ }
+
+done:
+ inst->error = error;
+ inst->ops->buf_done(inst, buffer_type, output_tag, filled_len,
+ offset, flags, hfi_flags, timestamp_us);
+}
+
+static void hfi_session_start_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_start_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_stop_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_stop_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_rel_res_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_release_resources_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_rel_buf_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_release_buffers_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_end_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_end_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_abort_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_sys_session_abort_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_get_seq_hdr_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_get_sequence_hdr_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
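+/*
+ * Maps an HFI message type to its expected packet size(s) and completion
+ * handler; pkt_sz2 is used for messages that can arrive in two layouts.
+ */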
+struct hfi_done_handler {
+ u32 pkt;
+ u32 pkt_sz;
+ u32 pkt_sz2;
+ void (*done)(struct venus_core *, struct venus_inst *, void *);
+ bool is_sys_pkt;
+};
+
+static const struct hfi_done_handler handlers[] = {
+ {.pkt = HFI_MSG_EVENT_NOTIFY,
+ .pkt_sz = sizeof(struct hfi_msg_event_notify_pkt),
+ .done = hfi_event_notify,
+ },
+ {.pkt = HFI_MSG_SYS_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_sys_init_done_pkt),
+ .done = hfi_sys_init_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_PROPERTY_INFO,
+ .pkt_sz = sizeof(struct hfi_msg_sys_property_info_pkt),
+ .done = hfi_sys_property_info,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_RELEASE_RESOURCE,
+ .pkt_sz = sizeof(struct hfi_msg_sys_release_resource_done_pkt),
+ .done = hfi_sys_rel_resource_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_PING_ACK,
+ .pkt_sz = sizeof(struct hfi_msg_sys_ping_ack_pkt),
+ .done = hfi_sys_ping_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_IDLE,
+ .pkt_sz = sizeof(struct hfi_msg_sys_idle_pkt),
+ .done = hfi_sys_idle_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_PC_PREP,
+ .pkt_sz = sizeof(struct hfi_msg_sys_pc_prep_done_pkt),
+ .done = hfi_sys_pc_prepare_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_SESSION_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_session_init_done_pkt),
+ .done = hfi_session_init_done,
+ },
+ {.pkt = HFI_MSG_SYS_SESSION_END,
+ .pkt_sz = sizeof(struct hfi_msg_session_end_done_pkt),
+ .done = hfi_session_end_done,
+ },
+ {.pkt = HFI_MSG_SESSION_LOAD_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_load_resources_done_pkt),
+ .done = hfi_session_load_res_done,
+ },
+ {.pkt = HFI_MSG_SESSION_START,
+ .pkt_sz = sizeof(struct hfi_msg_session_start_done_pkt),
+ .done = hfi_session_start_done,
+ },
+ {.pkt = HFI_MSG_SESSION_STOP,
+ .pkt_sz = sizeof(struct hfi_msg_session_stop_done_pkt),
+ .done = hfi_session_stop_done,
+ },
+ {.pkt = HFI_MSG_SYS_SESSION_ABORT,
+ .pkt_sz = sizeof(struct hfi_msg_sys_session_abort_done_pkt),
+ .done = hfi_session_abort_done,
+ },
+ {.pkt = HFI_MSG_SESSION_EMPTY_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_empty_buffer_done_pkt),
+ .done = hfi_session_etb_done,
+ },
+ {.pkt = HFI_MSG_SESSION_FILL_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt),
+ .pkt_sz2 = sizeof(struct hfi_msg_session_fbd_compressed_pkt),
+ .done = hfi_session_ftb_done,
+ },
+ {.pkt = HFI_MSG_SESSION_FLUSH,
+ .pkt_sz = sizeof(struct hfi_msg_session_flush_done_pkt),
+ .done = hfi_session_flush_done,
+ },
+ {.pkt = HFI_MSG_SESSION_PROPERTY_INFO,
+ .pkt_sz = sizeof(struct hfi_msg_session_property_info_pkt),
+ .done = hfi_session_prop_info,
+ },
+ {.pkt = HFI_MSG_SESSION_RELEASE_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_release_resources_done_pkt),
+ .done = hfi_session_rel_res_done,
+ },
+ {.pkt = HFI_MSG_SESSION_GET_SEQUENCE_HEADER,
+ .pkt_sz = sizeof(struct hfi_msg_session_get_sequence_hdr_done_pkt),
+ .done = hfi_session_get_seq_hdr_done,
+ },
+ {.pkt = HFI_MSG_SESSION_RELEASE_BUFFERS,
+ .pkt_sz = sizeof(struct hfi_msg_session_release_buffers_done_pkt),
+ .done = hfi_session_rel_buf_done,
+ },
+};
+
+void hfi_process_watchdog_timeout(struct venus_core *core)
+{
+ event_sys_error(core, EVT_SYS_WATCHDOG_TIMEOUT, NULL);
+}
+
+static struct venus_inst *to_instance(struct venus_core *core, u32 session_id)
+{
+ struct venus_inst *inst;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list)
+ if (hash32_ptr(inst) == session_id) {
+ mutex_unlock(&core->lock);
+ return inst;
+ }
+ mutex_unlock(&core->lock);
+
+ return NULL;
+}
+
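+/*
+ * Dispatch a received message packet to the handler registered for its
+ * packet type. The packet type is returned so the caller can post-process
+ * selected messages (e.g. SYS_ERROR, SYS_INIT) itself.
+ */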
+u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr)
+{
+ const struct hfi_done_handler *handler;
+ struct device *dev = core->dev;
+ struct venus_inst *inst;
+ bool found = false;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ handler = &handlers[i];
+ if (handler->pkt != hdr->pkt_type)
+ continue;
+ found = true;
+ break;
+ }
+
+ if (!found)
+ return hdr->pkt_type;
+
+ if (hdr->size && hdr->size < handler->pkt_sz &&
+ hdr->size < handler->pkt_sz2) {
+ dev_err(dev, "bad packet size (%d should be %d, pkt type:%x)\n",
+ hdr->size, handler->pkt_sz, hdr->pkt_type);
+
+ return hdr->pkt_type;
+ }
+
+ if (handler->is_sys_pkt) {
+ inst = NULL;
+ } else {
+ struct hfi_session_pkt *pkt;
+
+ pkt = (struct hfi_session_pkt *)hdr;
+ inst = to_instance(core, pkt->shdr.session_id);
+
+ if (!inst)
+ dev_warn(dev, "no valid instance(pkt session_id:%x, pkt:%x)\n",
+ pkt->shdr.session_id,
+ handler ? handler->pkt : 0);
+
+ /*
+ * Event of type HFI_EVENT_SYS_ERROR will not have any session
+ * associated with it
+ */
+ if (!inst && hdr->pkt_type != HFI_MSG_EVENT_NOTIFY) {
+ dev_err(dev, "got invalid session id:%x\n",
+ pkt->shdr.session_id);
+ goto invalid_session;
+ }
+ }
+
+ handler->done(core, inst, hdr);
+
+invalid_session:
+ return hdr->pkt_type;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.h b/drivers/media/platform/qcom/venus/hfi_msgs.h
new file mode 100644
index 000000000..14d9a3979
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_MSGS_H__
+#define __VENUS_HFI_MSGS_H__
+
+/* message calls */
+#define HFI_MSG_SYS_INIT 0x20001
+#define HFI_MSG_SYS_PC_PREP 0x20002
+#define HFI_MSG_SYS_RELEASE_RESOURCE 0x20003
+#define HFI_MSG_SYS_DEBUG 0x20004
+#define HFI_MSG_SYS_SESSION_INIT 0x20006
+#define HFI_MSG_SYS_SESSION_END 0x20007
+#define HFI_MSG_SYS_IDLE 0x20008
+#define HFI_MSG_SYS_COV 0x20009
+#define HFI_MSG_SYS_PROPERTY_INFO 0x2000a
+
+#define HFI_MSG_EVENT_NOTIFY 0x21001
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER 0x21002
+
+#define HFI_MSG_SYS_PING_ACK 0x220002
+#define HFI_MSG_SYS_SESSION_ABORT 0x220004
+
+#define HFI_MSG_SESSION_LOAD_RESOURCES 0x221001
+#define HFI_MSG_SESSION_START 0x221002
+#define HFI_MSG_SESSION_STOP 0x221003
+#define HFI_MSG_SESSION_SUSPEND 0x221004
+#define HFI_MSG_SESSION_RESUME 0x221005
+#define HFI_MSG_SESSION_FLUSH 0x221006
+#define HFI_MSG_SESSION_EMPTY_BUFFER 0x221007
+#define HFI_MSG_SESSION_FILL_BUFFER 0x221008
+#define HFI_MSG_SESSION_PROPERTY_INFO 0x221009
+#define HFI_MSG_SESSION_RELEASE_RESOURCES 0x22100a
+#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER 0x22100b
+#define HFI_MSG_SESSION_RELEASE_BUFFERS 0x22100c
+
+#define HFI_PICTURE_I 0x00000001
+#define HFI_PICTURE_P 0x00000002
+#define HFI_PICTURE_B 0x00000004
+#define HFI_PICTURE_IDR 0x00000008
+#define HFI_FRAME_NOTCODED 0x7f002000
+#define HFI_FRAME_YUV 0x7f004000
+#define HFI_UNUSED_PICT 0x10000000
+
+/* message packets */
+struct hfi_msg_event_notify_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 event_id;
+ u32 event_data1;
+ u32 event_data2;
+ u32 ext_event_data[1];
+};
+
+struct hfi_msg_event_release_buffer_ref_pkt {
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 output_tag;
+};
+
+struct hfi_msg_sys_init_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_sys_pc_prep_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 error_type;
+};
+
+struct hfi_msg_sys_release_resource_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 resource_handle;
+ u32 error_type;
+};
+
+struct hfi_msg_session_init_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_end_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_get_sequence_hdr_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 header_len;
+ u32 sequence_header;
+};
+
+struct hfi_msg_sys_session_abort_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_sys_idle_pkt {
+ struct hfi_pkt_hdr hdr;
+};
+
+struct hfi_msg_sys_ping_ack_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 client_data;
+};
+
+struct hfi_msg_sys_property_info_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_load_resources_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_start_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_stop_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_suspend_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_resume_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_flush_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 flush_type;
+};
+
+struct hfi_msg_session_empty_buffer_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 offset;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 error_type;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 view_id;
+ u32 error_type;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 frame_width;
+ u32 frame_height;
+ u32 start_x_coord;
+ u32 start_y_coord;
+ u32 input_tag;
+ u32 input_tag2;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane1_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer2;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane2_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer3;
+ u32 data[0];
+};
+
+struct hfi_msg_session_parse_sequence_header_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_property_info_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_release_resources_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_release_buffers_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 num_buffers;
+ u32 buffer_info[1];
+};
+
+struct hfi_msg_sys_debug_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_type;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[1];
+};
+
+struct hfi_msg_sys_coverage_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[1];
+};
+
+struct venus_core;
+struct hfi_pkt_hdr;
+
+void hfi_process_watchdog_timeout(struct venus_core *core);
+u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
new file mode 100644
index 000000000..7f515a4b9
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Linaro Ltd.
+ *
+ * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org>
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "hfi_helper.h"
+#include "hfi_parser.h"
+
+typedef void (*func)(struct venus_caps *cap, const void *data,
+ unsigned int size);
+
+static void init_codecs(struct venus_core *core)
+{
+ struct venus_caps *caps = core->caps, *cap;
+ unsigned long bit;
+
+ for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+ cap->domain = VIDC_SESSION_TYPE_DEC;
+ cap->valid = false;
+ }
+
+ for_each_set_bit(bit, &core->enc_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+ cap->domain = VIDC_SESSION_TYPE_ENC;
+ cap->valid = false;
+ }
+}
+
+static void for_each_codec(struct venus_caps *caps, unsigned int caps_num,
+ u32 codecs, u32 domain, func cb, void *data,
+ unsigned int size)
+{
+ struct venus_caps *cap;
+ unsigned int i;
+
+ for (i = 0; i < caps_num; i++) {
+ cap = &caps[i];
+ if (cap->valid && cap->domain == domain)
+ continue;
+ if (cap->codec & codecs && cap->domain == domain)
+ cb(cap, data, size);
+ }
+}
+
+static void
+fill_buf_mode(struct venus_caps *cap, const void *data, unsigned int num)
+{
+ const u32 *type = data;
+
+ if (*type == HFI_BUFFER_MODE_DYNAMIC)
+ cap->cap_bufs_mode_dynamic = true;
+}
+
+static void
+parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_buffer_alloc_mode_supported *mode = data;
+ u32 num_entries = mode->num_entries;
+ u32 *type;
+
+ if (num_entries > MAX_ALLOC_MODE_ENTRIES)
+ return;
+
+ type = mode->data;
+
+ while (num_entries--) {
+ if (mode->buffer_type == HFI_BUFFER_OUTPUT ||
+ mode->buffer_type == HFI_BUFFER_OUTPUT2)
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps),
+ codecs, domain, fill_buf_mode, type, 1);
+
+ type++;
+ }
+}
+
+static void fill_profile_level(struct venus_caps *cap, const void *data,
+ unsigned int num)
+{
+ const struct hfi_profile_level *pl = data;
+
+ memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ cap->num_pl += num;
+}
+
+static void
+parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_profile_level_supported *pl = data;
+ struct hfi_profile_level *proflevel = pl->profile_level;
+ struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {};
+
+ if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
+ return;
+
+ memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_profile_level, pl_arr, pl->profile_count);
+}
+
+static void
+fill_caps(struct venus_caps *cap, const void *data, unsigned int num)
+{
+ const struct hfi_capability *caps = data;
+
+ memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ cap->num_caps += num;
+}
+
+static void
+parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_capabilities *caps = data;
+ struct hfi_capability *cap = caps->data;
+ u32 num_caps = caps->num_capabilities;
+ struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {};
+
+ if (num_caps > MAX_CAP_ENTRIES)
+ return;
+
+ memcpy(caps_arr, cap, num_caps * sizeof(*cap));
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_caps, caps_arr, num_caps);
+}
+
+static void fill_raw_fmts(struct venus_caps *cap, const void *fmts,
+ unsigned int num_fmts)
+{
+ const struct raw_formats *formats = fmts;
+
+ memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ cap->num_fmts += num_fmts;
+}
+
+static void
+parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_uncompressed_format_supported *fmt = data;
+ struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info;
+ struct hfi_uncompressed_plane_constraints *constr;
+ struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
+ u32 entries = fmt->format_entries;
+ unsigned int i = 0;
+ u32 num_planes;
+
+ while (entries) {
+ num_planes = pinfo->num_planes;
+
+ rawfmts[i].fmt = pinfo->format;
+ rawfmts[i].buftype = fmt->buffer_type;
+ i++;
+
+ if (pinfo->num_planes > MAX_PLANES)
+ break;
+
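+		/* advance to the next plane_info entry (two u32 fields plus one constraints struct per plane) */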
+ pinfo = (void *)pinfo + sizeof(*constr) * num_planes +
+ 2 * sizeof(u32);
+ entries--;
+ }
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_raw_fmts, rawfmts, i);
+}
+
+static void parse_codecs(struct venus_core *core, void *data)
+{
+ struct hfi_codec_supported *codecs = data;
+
+ core->dec_codecs = codecs->dec_codecs;
+ core->enc_codecs = codecs->enc_codecs;
+
+ if (IS_V1(core)) {
+ core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
+ core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
+ core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
+ }
+}
+
+static void parse_max_sessions(struct venus_core *core, const void *data)
+{
+ const struct hfi_max_sessions_supported *sessions = data;
+
+ core->max_sessions_supported = sessions->max_sessions;
+}
+
+static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
+{
+ struct hfi_codec_mask_supported *mask = data;
+
+ *codecs = mask->codecs;
+ *domain = mask->video_domains;
+}
+
+static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
+{
+ if (!inst || !IS_V1(inst->core))
+ return;
+
+ *codecs = inst->hfi_codec;
+ *domain = inst->session_type;
+}
+
+static void parser_fini(struct venus_inst *inst, u32 codecs, u32 domain)
+{
+ struct venus_caps *caps, *cap;
+ unsigned int i;
+ u32 dom;
+
+ if (!inst || !IS_V1(inst->core))
+ return;
+
+ caps = inst->core->caps;
+ dom = inst->session_type;
+
+ for (i = 0; i < MAX_CODEC_NUM; i++) {
+ cap = &caps[i];
+ if (cap->codec & codecs && cap->domain == dom)
+ cap->valid = true;
+ }
+}
+
+u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
+ u32 size)
+{
+ unsigned int words_count = size >> 2;
+ u32 *word = buf, *data, codecs = 0, domain = 0;
+
+ if (size % 4)
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ parser_init(inst, &codecs, &domain);
+
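+	/* walk the property stream one word at a time, matching known property ids */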
+ while (words_count) {
+ data = word + 1;
+
+ switch (*word) {
+ case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
+ parse_codecs(core, data);
+ init_codecs(core);
+ break;
+ case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED:
+ parse_max_sessions(core, data);
+ break;
+ case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
+ parse_codecs_mask(&codecs, &domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+ parse_raw_formats(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+ parse_caps(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+ parse_profile_level(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
+ parse_alloc_mode(core, codecs, domain, data);
+ break;
+ default:
+ break;
+ }
+
+ word++;
+ words_count--;
+ }
+
+ parser_fini(inst, codecs, domain);
+
+ return HFI_ERR_NONE;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.h b/drivers/media/platform/qcom/venus/hfi_parser.h
new file mode 100644
index 000000000..3e931c747
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_parser.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Linaro Ltd. */
+#ifndef __VENUS_HFI_PARSER_H__
+#define __VENUS_HFI_PARSER_H__
+
+#include "core.h"
+
+u32 hfi_parser(struct venus_core *core, struct venus_inst *inst,
+ void *buf, u32 size);
+
+#define WHICH_CAP_MIN 0
+#define WHICH_CAP_MAX 1
+#define WHICH_CAP_STEP 2
+
+static inline u32 get_cap(struct venus_inst *inst, u32 type, u32 which)
+{
+ struct venus_core *core = inst->core;
+ struct hfi_capability *cap = NULL;
+ struct venus_caps *caps;
+ unsigned int i;
+
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
+ return 0;
+
+ for (i = 0; i < caps->num_caps; i++) {
+ if (caps->caps[i].capability_type == type) {
+ cap = &caps->caps[i];
+ break;
+ }
+ }
+
+ if (!cap)
+ return 0;
+
+ switch (which) {
+ case WHICH_CAP_MIN:
+ return cap->min;
+ case WHICH_CAP_MAX:
+ return cap->max;
+ case WHICH_CAP_STEP:
+ return cap->step_size;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline u32 cap_min(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_MIN);
+}
+
+static inline u32 cap_max(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_MAX);
+}
+
+static inline u32 cap_step(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_STEP);
+}
+
+static inline u32 frame_width_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_width_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_width_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_height_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frame_height_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frame_height_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frate_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+static inline u32 frate_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+static inline u32 frate_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
new file mode 100644
index 000000000..fbcc67c10
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -0,0 +1,1628 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/qcom_scm.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "hfi_cmds.h"
+#include "hfi_msgs.h"
+#include "hfi_venus.h"
+#include "hfi_venus_io.h"
+
+#define HFI_MASK_QHDR_TX_TYPE 0xff000000
+#define HFI_MASK_QHDR_RX_TYPE 0x00ff0000
+#define HFI_MASK_QHDR_PRI_TYPE 0x0000ff00
+#define HFI_MASK_QHDR_ID_TYPE 0x000000ff
+
+#define HFI_HOST_TO_CTRL_CMD_Q 0
+#define HFI_CTRL_TO_HOST_MSG_Q 1
+#define HFI_CTRL_TO_HOST_DBG_Q 2
+#define HFI_MASK_QHDR_STATUS 0x000000ff
+
+#define IFACEQ_NUM 3
+#define IFACEQ_CMD_IDX 0
+#define IFACEQ_MSG_IDX 1
+#define IFACEQ_DBG_IDX 2
+#define IFACEQ_MAX_BUF_COUNT 50
+#define IFACEQ_MAX_PARALLEL_CLNTS 16
+#define IFACEQ_DFLT_QHDR 0x01010000
+
+#define POLL_INTERVAL_US 50
+
+#define IFACEQ_MAX_PKT_SIZE 1024
+#define IFACEQ_MED_PKT_SIZE 768
+#define IFACEQ_MIN_PKT_SIZE 8
+#define IFACEQ_VAR_SMALL_PKT_SIZE 100
+#define IFACEQ_VAR_LARGE_PKT_SIZE 512
+#define IFACEQ_VAR_HUGE_PKT_SIZE (1024 * 12)
+
+enum tzbsp_video_state {
+ TZBSP_VIDEO_STATE_SUSPEND = 0,
+ TZBSP_VIDEO_STATE_RESUME
+};
+
+struct hfi_queue_table_header {
+ u32 version;
+ u32 size;
+ u32 qhdr0_offset;
+ u32 qhdr_size;
+ u32 num_q;
+ u32 num_active_q;
+};
+
+struct hfi_queue_header {
+ u32 status;
+ u32 start_addr;
+ u32 type;
+ u32 q_size;
+ u32 pkt_size;
+ u32 pkt_drop_cnt;
+ u32 rx_wm;
+ u32 tx_wm;
+ u32 rx_req;
+ u32 tx_req;
+ u32 rx_irq_status;
+ u32 tx_irq_status;
+ u32 read_idx;
+ u32 write_idx;
+};
+
+#define IFACEQ_TABLE_SIZE \
+ (sizeof(struct hfi_queue_table_header) + \
+ sizeof(struct hfi_queue_header) * IFACEQ_NUM)
+
+#define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \
+ IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
+
+#define IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
+ (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) + \
+ ((i) * sizeof(struct hfi_queue_header)))
+
+#define QDSS_SIZE SZ_4K
+#define SFR_SIZE SZ_4K
+#define QUEUE_SIZE \
+ (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
+
+#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
+#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
+#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
+#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
+ ALIGNED_QDSS_SIZE, SZ_1M)
+
+struct mem_desc {
+ dma_addr_t da; /* device address */
+ void *kva; /* kernel virtual address */
+ u32 size;
+ unsigned long attrs;
+};
+
+struct iface_queue {
+ struct hfi_queue_header *qhdr;
+ struct mem_desc qmem;
+};
+
+enum venus_state {
+ VENUS_STATE_DEINIT = 1,
+ VENUS_STATE_INIT,
+};
+
+struct venus_hfi_device {
+ struct venus_core *core;
+ u32 irq_status;
+ u32 last_packet_type;
+ bool power_enabled;
+ bool suspended;
+ enum venus_state state;
+ /* serialize read / write to the shared memory */
+ struct mutex lock;
+ struct completion pwr_collapse_prep;
+ struct completion release_resource;
+ struct mem_desc ifaceq_table;
+ struct mem_desc sfr;
+ struct iface_queue queues[IFACEQ_NUM];
+ u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
+ u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
+};
+
+static bool venus_pkt_debug;
+static int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
+static bool venus_sys_idle_indicator;
+static bool venus_fw_low_power_mode = true;
+static int venus_hw_rsp_timeout = 1000;
+static bool venus_fw_coverage;
+
+static void venus_set_state(struct venus_hfi_device *hdev,
+ enum venus_state state)
+{
+ mutex_lock(&hdev->lock);
+ hdev->state = state;
+ mutex_unlock(&hdev->lock);
+}
+
+static bool venus_is_valid_state(struct venus_hfi_device *hdev)
+{
+ return hdev->state != VENUS_STATE_DEINIT;
+}
+
+static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
+{
+ size_t pkt_size = *(u32 *)packet;
+
+ if (!venus_pkt_debug)
+ return;
+
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
+ pkt_size, true);
+}
+
+static int venus_write_queue(struct venus_hfi_device *hdev,
+ struct iface_queue *queue,
+ void *packet, u32 *rx_req)
+{
+ struct hfi_queue_header *qhdr;
+ u32 dwords, new_wr_idx;
+ u32 empty_space, rd_idx, wr_idx, qsize;
+ u32 *wr_ptr;
+
+ if (!queue->qmem.kva)
+ return -EINVAL;
+
+ qhdr = queue->qhdr;
+ if (!qhdr)
+ return -EINVAL;
+
+ venus_dump_packet(hdev, packet);
+
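+	/* the first word of an HFI packet is its size in bytes; convert to dwords */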
+ dwords = (*(u32 *)packet) >> 2;
+ if (!dwords)
+ return -EINVAL;
+
+ rd_idx = qhdr->read_idx;
+ wr_idx = qhdr->write_idx;
+ qsize = qhdr->q_size;
+	/* ensure the rd/wr indices are read from memory */
+ rmb();
+
+ if (wr_idx >= rd_idx)
+ empty_space = qsize - (wr_idx - rd_idx);
+ else
+ empty_space = rd_idx - wr_idx;
+
+ if (empty_space <= dwords) {
+ qhdr->tx_req = 1;
+ /* ensure tx_req is updated in memory */
+ wmb();
+ return -ENOSPC;
+ }
+
+ qhdr->tx_req = 0;
+ /* ensure tx_req is updated in memory */
+ wmb();
+
+ new_wr_idx = wr_idx + dwords;
+ wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
+ if (new_wr_idx < qsize) {
+ memcpy(wr_ptr, packet, dwords << 2);
+ } else {
+ size_t len;
+
+ new_wr_idx -= qsize;
+ len = (dwords - new_wr_idx) << 2;
+ memcpy(wr_ptr, packet, len);
+ memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
+ }
+
+ /* make sure packet is written before updating the write index */
+ wmb();
+
+ qhdr->write_idx = new_wr_idx;
+ *rx_req = qhdr->rx_req ? 1 : 0;
+
+ /* make sure write index is updated before an interrupt is raised */
+ mb();
+
+ return 0;
+}
+
+static int venus_read_queue(struct venus_hfi_device *hdev,
+ struct iface_queue *queue, void *pkt, u32 *tx_req)
+{
+ struct hfi_queue_header *qhdr;
+ u32 dwords, new_rd_idx;
+ u32 rd_idx, wr_idx, type, qsize;
+ u32 *rd_ptr;
+ u32 recv_request = 0;
+ int ret = 0;
+
+ if (!queue->qmem.kva)
+ return -EINVAL;
+
+ qhdr = queue->qhdr;
+ if (!qhdr)
+ return -EINVAL;
+
+ type = qhdr->type;
+ rd_idx = qhdr->read_idx;
+ wr_idx = qhdr->write_idx;
+ qsize = qhdr->q_size;
+
+ /* make sure data is valid before using it */
+ rmb();
+
+	/*
+	 * Do not set the receive request for the debug queue: if it is set,
+	 * Venus raises an interrupt for debug messages even when no response
+	 * message is available. In general the debug queue will not become
+	 * full, as it is emptied on every interrupt from Venus, and Venus
+	 * raises an interrupt anyway if it does fill up.
+	 */
+ if (type & HFI_CTRL_TO_HOST_MSG_Q)
+ recv_request = 1;
+
+ if (rd_idx == wr_idx) {
+ qhdr->rx_req = recv_request;
+ *tx_req = 0;
+ /* update rx_req field in memory */
+ wmb();
+ return -ENODATA;
+ }
+
+ rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
+ dwords = *rd_ptr >> 2;
+ if (!dwords)
+ return -EINVAL;
+
+ new_rd_idx = rd_idx + dwords;
+ if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
+ if (new_rd_idx < qsize) {
+ memcpy(pkt, rd_ptr, dwords << 2);
+ } else {
+ size_t len;
+
+ new_rd_idx -= qsize;
+ len = (dwords - new_rd_idx) << 2;
+ memcpy(pkt, rd_ptr, len);
+ memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
+ }
+ } else {
+ /* bad packet received, dropping */
+ new_rd_idx = qhdr->write_idx;
+ ret = -EBADMSG;
+ }
+
+ /* ensure the packet is read before updating read index */
+ rmb();
+
+ qhdr->read_idx = new_rd_idx;
+	/* ensure the updated read index is written back to memory */
+ wmb();
+
+ rd_idx = qhdr->read_idx;
+ wr_idx = qhdr->write_idx;
+ /* ensure rd/wr indices are read from memory */
+ rmb();
+
+ if (rd_idx != wr_idx)
+ qhdr->rx_req = 0;
+ else
+ qhdr->rx_req = recv_request;
+
+ *tx_req = qhdr->tx_req ? 1 : 0;
+
+ /* ensure rx_req is stored to memory and tx_req is loaded from memory */
+ mb();
+
+ venus_dump_packet(hdev, pkt);
+
+ return ret;
+}
+
+static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
+ u32 size)
+{
+ struct device *dev = hdev->core->dev;
+
+ desc->attrs = DMA_ATTR_WRITE_COMBINE;
+ desc->size = ALIGN(size, SZ_4K);
+
+ desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
+ desc->attrs);
+ if (!desc->kva)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
+{
+ struct device *dev = hdev->core->dev;
+
+ dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
+}
+
+static void venus_writel(struct venus_hfi_device *hdev, u32 reg, u32 value)
+{
+ writel(value, hdev->core->base + reg);
+}
+
+static u32 venus_readl(struct venus_hfi_device *hdev, u32 reg)
+{
+ return readl(hdev->core->base + reg);
+}
+
+static void venus_set_registers(struct venus_hfi_device *hdev)
+{
+ const struct venus_resources *res = hdev->core->res;
+ const struct reg_val *tbl = res->reg_tbl;
+ unsigned int count = res->reg_tbl_size;
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ venus_writel(hdev, tbl[i].reg, tbl[i].value);
+}
+
+static void venus_soft_int(struct venus_hfi_device *hdev)
+{
+ venus_writel(hdev, CPU_IC_SOFTINT, BIT(CPU_IC_SOFTINT_H2A_SHIFT));
+}
+
+static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
+ void *pkt)
+{
+ struct device *dev = hdev->core->dev;
+ struct hfi_pkt_hdr *cmd_packet;
+ struct iface_queue *queue;
+ u32 rx_req;
+ int ret;
+
+ if (!venus_is_valid_state(hdev))
+ return -EINVAL;
+
+ cmd_packet = (struct hfi_pkt_hdr *)pkt;
+ hdev->last_packet_type = cmd_packet->pkt_type;
+
+ queue = &hdev->queues[IFACEQ_CMD_IDX];
+
+ ret = venus_write_queue(hdev, queue, pkt, &rx_req);
+ if (ret) {
+ dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
+ return ret;
+ }
+
+ if (rx_req)
+ venus_soft_int(hdev);
+
+ return 0;
+}
+
+static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt)
+{
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_iface_cmdq_write_nolock(hdev, pkt);
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
+ u32 size, u32 addr, void *cookie)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct hfi_sys_set_resource_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ if (id == VIDC_RESOURCE_NONE)
+ return 0;
+
+ pkt = (struct hfi_sys_set_resource_pkt *)packet;
+
+ ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_boot_core(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ static const unsigned int max_tries = 100;
+ u32 ctrl_status = 0;
+ unsigned int count = 0;
+ int ret = 0;
+
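+	/* kick off the boot sequence and poll the control status register */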
+ venus_writel(hdev, VIDC_CTRL_INIT, BIT(VIDC_CTRL_INIT_CTRL_SHIFT));
+ venus_writel(hdev, WRAPPER_INTR_MASK, WRAPPER_INTR_MASK_A2HVCODEC_MASK);
+ venus_writel(hdev, CPU_CS_SCIACMDARG3, 1);
+
+ while (!ctrl_status && count < max_tries) {
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
+ dev_err(dev, "invalid setting for UC_REGION\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ usleep_range(500, 1000);
+ count++;
+ }
+
+ if (count >= max_tries)
+ ret = -ETIMEDOUT;
+
+ return ret;
+}
+
+static u32 venus_hwversion(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ u32 ver = venus_readl(hdev, WRAPPER_HW_VERSION);
+ u32 major, minor, step;
+
+ major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
+ major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
+ minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
+ minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
+ step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
+
+ dev_dbg(dev, "venus hw version %x.%x.%x\n", major, minor, step);
+
+ return major;
+}
+
+static int venus_run(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ int ret;
+
+ /*
+ * Re-program all of the registers that get reset as a result of
+ * regulator_disable() and _enable()
+ */
+ venus_set_registers(hdev);
+
+ venus_writel(hdev, UC_REGION_ADDR, hdev->ifaceq_table.da);
+ venus_writel(hdev, UC_REGION_SIZE, SHARED_QSIZE);
+ venus_writel(hdev, CPU_CS_SCIACMDARG2, hdev->ifaceq_table.da);
+ venus_writel(hdev, CPU_CS_SCIACMDARG1, 0x01);
+ if (hdev->sfr.da)
+ venus_writel(hdev, SFR_ADDR, hdev->sfr.da);
+
+ ret = venus_boot_core(hdev);
+ if (ret) {
+ dev_err(dev, "failed to reset venus core\n");
+ return ret;
+ }
+
+ venus_hwversion(hdev);
+
+ return 0;
+}
+
+static int venus_halt_axi(struct venus_hfi_device *hdev)
+{
+ void __iomem *base = hdev->core->base;
+ struct device *dev = hdev->core->dev;
+ u32 val;
+ int ret;
+
+ if (IS_V4(hdev->core)) {
+ val = venus_readl(hdev, WRAPPER_CPU_AXI_HALT);
+ val |= WRAPPER_CPU_AXI_HALT_HALT;
+ venus_writel(hdev, WRAPPER_CPU_AXI_HALT, val);
+
+ ret = readl_poll_timeout(base + WRAPPER_CPU_AXI_HALT_STATUS,
+ val,
+ val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
+ POLL_INTERVAL_US,
+ VBIF_AXI_HALT_ACK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "AXI bus port halt timeout\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Halt AXI and AXI IMEM VBIF Access */
+ val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0);
+ val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
+ venus_writel(hdev, VBIF_AXI_HALT_CTRL0, val);
+
+ /* Request for AXI bus port halt */
+ ret = readl_poll_timeout(base + VBIF_AXI_HALT_CTRL1, val,
+ val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
+ POLL_INTERVAL_US,
+ VBIF_AXI_HALT_ACK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "AXI bus port halt timeout\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int venus_power_off(struct venus_hfi_device *hdev)
+{
+ int ret;
+
+ if (!hdev->power_enabled)
+ return 0;
+
+ ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
+ if (ret)
+ return ret;
+
+ ret = venus_halt_axi(hdev);
+ if (ret)
+ return ret;
+
+ hdev->power_enabled = false;
+
+ return 0;
+}
+
+static int venus_power_on(struct venus_hfi_device *hdev)
+{
+ int ret;
+
+ if (hdev->power_enabled)
+ return 0;
+
+ ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_RESUME, 0);
+ if (ret)
+ goto err;
+
+ ret = venus_run(hdev);
+ if (ret)
+ goto err_suspend;
+
+ hdev->power_enabled = true;
+
+ return 0;
+
+err_suspend:
+ qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
+err:
+ hdev->power_enabled = false;
+ return ret;
+}
+
+static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
+ void *pkt)
+{
+ struct iface_queue *queue;
+ u32 tx_req;
+ int ret;
+
+ if (!venus_is_valid_state(hdev))
+ return -EINVAL;
+
+ queue = &hdev->queues[IFACEQ_MSG_IDX];
+
+ ret = venus_read_queue(hdev, queue, pkt, &tx_req);
+ if (ret)
+ return ret;
+
+ if (tx_req)
+ venus_soft_int(hdev);
+
+ return 0;
+}
+
+static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
+{
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_iface_msgq_read_nolock(hdev, pkt);
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
+ void *pkt)
+{
+ struct iface_queue *queue;
+ u32 tx_req;
+ int ret;
+
+ ret = venus_is_valid_state(hdev);
+ if (!ret)
+ return -EINVAL;
+
+ queue = &hdev->queues[IFACEQ_DBG_IDX];
+
+ ret = venus_read_queue(hdev, queue, pkt, &tx_req);
+ if (ret)
+ return ret;
+
+ if (tx_req)
+ venus_soft_int(hdev);
+
+ return 0;
+}
+
+static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
+{
+ int ret;
+
+ if (!pkt)
+ return -EINVAL;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_iface_dbgq_read_nolock(hdev, pkt);
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
+{
+ qhdr->status = 1;
+ qhdr->type = IFACEQ_DFLT_QHDR;
+ qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
+ qhdr->pkt_size = 0;
+ qhdr->rx_wm = 1;
+ qhdr->tx_wm = 1;
+ qhdr->rx_req = 1;
+ qhdr->tx_req = 0;
+ qhdr->rx_irq_status = 0;
+ qhdr->tx_irq_status = 0;
+ qhdr->read_idx = 0;
+ qhdr->write_idx = 0;
+}
+
+static void venus_interface_queues_release(struct venus_hfi_device *hdev)
+{
+ mutex_lock(&hdev->lock);
+
+ venus_free(hdev, &hdev->ifaceq_table);
+ venus_free(hdev, &hdev->sfr);
+
+ memset(hdev->queues, 0, sizeof(hdev->queues));
+ memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
+ memset(&hdev->sfr, 0, sizeof(hdev->sfr));
+
+ mutex_unlock(&hdev->lock);
+}
+
+static int venus_interface_queues_init(struct venus_hfi_device *hdev)
+{
+ struct hfi_queue_table_header *tbl_hdr;
+ struct iface_queue *queue;
+ struct hfi_sfr *sfr;
+ struct mem_desc desc = {0};
+ unsigned int offset;
+ unsigned int i;
+ int ret;
+
+ ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
+ if (ret)
+ return ret;
+
+ hdev->ifaceq_table = desc;
+ offset = IFACEQ_TABLE_SIZE;
+
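+	/* the three queue ring buffers follow the table and queue headers */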
+ for (i = 0; i < IFACEQ_NUM; i++) {
+ queue = &hdev->queues[i];
+ queue->qmem.da = desc.da + offset;
+ queue->qmem.kva = desc.kva + offset;
+ queue->qmem.size = IFACEQ_QUEUE_SIZE;
+ offset += queue->qmem.size;
+ queue->qhdr =
+ IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
+
+ venus_set_qhdr_defaults(queue->qhdr);
+
+ queue->qhdr->start_addr = queue->qmem.da;
+
+ if (i == IFACEQ_CMD_IDX)
+ queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
+ else if (i == IFACEQ_MSG_IDX)
+ queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
+ else if (i == IFACEQ_DBG_IDX)
+ queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
+ }
+
+ tbl_hdr = hdev->ifaceq_table.kva;
+ tbl_hdr->version = 0;
+ tbl_hdr->size = IFACEQ_TABLE_SIZE;
+ tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
+ tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
+ tbl_hdr->num_q = IFACEQ_NUM;
+ tbl_hdr->num_active_q = IFACEQ_NUM;
+
+	/*
+	 * Set the receive request to zero on the debug queue, as there is
+	 * no need for an interrupt from the video hardware for debug messages
+	 */
+ queue = &hdev->queues[IFACEQ_DBG_IDX];
+ queue->qhdr->rx_req = 0;
+
+ ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
+ if (ret) {
+ hdev->sfr.da = 0;
+ } else {
+ hdev->sfr = desc;
+ sfr = hdev->sfr.kva;
+ sfr->buf_size = ALIGNED_SFR_SIZE;
+ }
+
+ /* ensure table and queue header structs are settled in memory */
+ wmb();
+
+ return 0;
+}
+
+static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_coverage_config(pkt, mode);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
+ bool enable)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ if (!enable)
+ return 0;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_idle_indicator(pkt, enable);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
+ bool enable)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_power_control(pkt, enable);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_get_queue_size(struct venus_hfi_device *hdev,
+ unsigned int index)
+{
+ struct hfi_queue_header *qhdr;
+
+ if (index >= IFACEQ_NUM)
+ return -EINVAL;
+
+ qhdr = hdev->queues[index].qhdr;
+ if (!qhdr)
+ return -EINVAL;
+
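+	/* non-zero when the read and write indices differ, i.e. packets are pending */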
+ return abs(qhdr->read_idx - qhdr->write_idx);
+}
+
+static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ int ret;
+
+ ret = venus_sys_set_debug(hdev, venus_fw_debug);
+ if (ret)
+ dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
+
+	/*
+	 * The idle indicator is disabled by default on some 4xx firmware
+	 * versions; enable it explicitly so that suspend can work by checking
+	 * the WFI (wait-for-interrupt) bit.
+	 */
+ if (IS_V4(hdev->core))
+ venus_sys_idle_indicator = true;
+
+ ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
+ if (ret)
+ dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
+
+ ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
+ if (ret)
+ dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
+ ret);
+
+ return ret;
+}
+
+static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_pkt pkt;
+
+ pkt_session_cmd(&pkt, pkt_type, inst);
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ void *packet = hdev->dbg_buf;
+
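+	/* drain the debug queue and print any firmware debug messages */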
+ while (!venus_iface_dbgq_read(hdev, packet)) {
+ struct hfi_msg_sys_coverage_pkt *pkt = packet;
+
+ if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
+ struct hfi_msg_sys_debug_pkt *pkt = packet;
+
+ dev_dbg(dev, "%s", pkt->msg_data);
+ }
+ }
+}
+
+static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
+ bool wait)
+{
+ unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
+ struct hfi_sys_pc_prep_pkt pkt;
+ int ret;
+
+ init_completion(&hdev->pwr_collapse_prep);
+
+ pkt_sys_pc_prep(&pkt);
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ if (ret)
+ return ret;
+
+ if (!wait)
+ return 0;
+
+ ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
+ if (!ret) {
+ venus_flush_debug_queue(hdev);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int venus_are_queues_empty(struct venus_hfi_device *hdev)
+{
+ int ret1, ret2;
+
+ ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
+ if (ret1 < 0)
+ return ret1;
+
+ ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
+ if (ret2 < 0)
+ return ret2;
+
+ if (!ret1 && !ret2)
+ return 1;
+
+ return 0;
+}
+
+static void venus_sfr_print(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ struct hfi_sfr *sfr = hdev->sfr.kva;
+ void *p;
+
+ if (!sfr)
+ return;
+
+ p = memchr(sfr->data, '\0', sfr->buf_size);
+ /*
+ * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
+ * that Venus is in the process of crashing.
+ */
+ if (!p)
+ sfr->data[sfr->buf_size - 1] = '\0';
+
+ dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
+}
+
+static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
+ void *packet)
+{
+ struct hfi_msg_event_notify_pkt *event_pkt = packet;
+
+ if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
+ return;
+
+ venus_set_state(hdev, VENUS_STATE_DEINIT);
+
+	/*
+	 * Once a SYS_ERROR is received from the hardware, it is safe to halt
+	 * the AXI: the Venus firmware may have crashed while the hardware is
+	 * still active and issuing unnecessary transactions, so all AXI
+	 * transactions from the Venus subsystem should be stopped.
+	 */
+ venus_halt_axi(hdev);
+ venus_sfr_print(hdev);
+}
+
+static irqreturn_t venus_isr_thread(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ const struct venus_resources *res;
+ void *pkt;
+ u32 msg_ret;
+
+ if (!hdev)
+ return IRQ_NONE;
+
+ res = hdev->core->res;
+ pkt = hdev->pkt_buf;
+
+ if (hdev->irq_status & WRAPPER_INTR_STATUS_A2HWD_MASK) {
+ venus_sfr_print(hdev);
+ hfi_process_watchdog_timeout(core);
+ }
+
+ while (!venus_iface_msgq_read(hdev, pkt)) {
+ msg_ret = hfi_process_msg_packet(core, pkt);
+ switch (msg_ret) {
+ case HFI_MSG_EVENT_NOTIFY:
+ venus_process_msg_sys_error(hdev, pkt);
+ break;
+ case HFI_MSG_SYS_INIT:
+ venus_hfi_core_set_resource(core, res->vmem_id,
+ res->vmem_size,
+ res->vmem_addr,
+ hdev);
+ break;
+ case HFI_MSG_SYS_RELEASE_RESOURCE:
+ complete(&hdev->release_resource);
+ break;
+ case HFI_MSG_SYS_PC_PREP:
+ complete(&hdev->pwr_collapse_prep);
+ break;
+ default:
+ break;
+ }
+ }
+
+ venus_flush_debug_queue(hdev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t venus_isr(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ u32 status;
+
+ if (!hdev)
+ return IRQ_NONE;
+
+ status = venus_readl(hdev, WRAPPER_INTR_STATUS);
+
+ if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
+ status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
+ status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
+ hdev->irq_status = status;
+
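+	/* ack the A2H soft interrupt and clear the wrapper interrupt status */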
+ venus_writel(hdev, CPU_CS_A2HSOFTINTCLR, 1);
+ venus_writel(hdev, WRAPPER_INTR_CLEAR, status);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int venus_core_init(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct device *dev = core->dev;
+ struct hfi_sys_get_property_pkt version_pkt;
+ struct hfi_sys_init_pkt pkt;
+ int ret;
+
+ pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
+
+ venus_set_state(hdev, VENUS_STATE_INIT);
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ if (ret)
+ return ret;
+
+ pkt_sys_image_version(&version_pkt);
+
+ ret = venus_iface_cmdq_write(hdev, &version_pkt);
+ if (ret)
+ dev_warn(dev, "failed to send image version pkt to fw\n");
+
+ ret = venus_sys_set_default_properties(hdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_core_deinit(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+
+ venus_set_state(hdev, VENUS_STATE_DEINIT);
+ hdev->suspended = true;
+ hdev->power_enabled = false;
+
+ return 0;
+}
+
+static int venus_core_ping(struct venus_core *core, u32 cookie)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct hfi_sys_ping_pkt pkt;
+
+ pkt_sys_ping(&pkt, cookie);
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct hfi_sys_test_ssr_pkt pkt;
+ int ret;
+
+ ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_session_init(struct venus_inst *inst, u32 session_type,
+ u32 codec)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_init_pkt pkt;
+ int ret;
+
+ ret = pkt_session_init(&pkt, inst, session_type, codec);
+ if (ret)
+ goto err;
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ venus_flush_debug_queue(hdev);
+ return ret;
+}
+
+static int venus_session_end(struct venus_inst *inst)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct device *dev = hdev->core->dev;
+
+ if (venus_fw_coverage) {
+ if (venus_sys_set_coverage(hdev, venus_fw_coverage))
+ dev_warn(dev, "fw coverage msg ON failed\n");
+ }
+
+ return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END);
+}
+
+static int venus_session_abort(struct venus_inst *inst)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+
+ venus_flush_debug_queue(hdev);
+
+ return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT);
+}
+
+static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_flush_pkt pkt;
+ int ret;
+
+ ret = pkt_session_flush(&pkt, inst, flush_mode);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_session_start(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_START);
+}
+
+static int venus_session_stop(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_STOP);
+}
+
+static int venus_session_continue(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE);
+}
+
+static int venus_session_etb(struct venus_inst *inst,
+ struct hfi_frame_data *in_frame)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ u32 session_type = inst->session_type;
+ int ret;
+
+ if (session_type == VIDC_SESSION_TYPE_DEC) {
+ struct hfi_session_empty_buffer_compressed_pkt pkt;
+
+ ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ } else if (session_type == VIDC_SESSION_TYPE_ENC) {
+ struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
+
+ ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int venus_session_ftb(struct venus_inst *inst,
+ struct hfi_frame_data *out_frame)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_fill_buffer_pkt pkt;
+ int ret;
+
+ ret = pkt_session_ftb(&pkt, inst, out_frame);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_session_set_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_set_buffers_pkt *pkt;
+ u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
+ int ret;
+
+ if (bd->buffer_type == HFI_BUFFER_INPUT)
+ return 0;
+
+ pkt = (struct hfi_session_set_buffers_pkt *)packet;
+
+ ret = pkt_session_set_buffers(pkt, inst, bd);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_unset_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_release_buffer_pkt *pkt;
+ u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
+ int ret;
+
+ if (bd->buffer_type == HFI_BUFFER_INPUT)
+ return 0;
+
+ pkt = (struct hfi_session_release_buffer_pkt *)packet;
+
+ ret = pkt_session_unset_buffers(pkt, inst, bd);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_load_res(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES);
+}
+
+static int venus_session_release_res(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES);
+}
+
+static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_parse_sequence_header_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
+
+ ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_get_sequence_header_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
+
+ ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
+ void *pdata)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_session_set_property_pkt *)packet;
+
+ ret = pkt_session_set_property(pkt, inst, ptype, pdata);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_get_property_pkt pkt;
+ int ret;
+
+ ret = pkt_session_get_property(&pkt, inst, ptype);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_resume(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ int ret = 0;
+
+ mutex_lock(&hdev->lock);
+
+ if (!hdev->suspended)
+ goto unlock;
+
+ ret = venus_power_on(hdev);
+
+unlock:
+ if (!ret)
+ hdev->suspended = false;
+
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static int venus_suspend_1xx(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct device *dev = core->dev;
+ u32 ctrl_status;
+ int ret;
+
+ if (!hdev->power_enabled || hdev->suspended)
+ return 0;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_is_valid_state(hdev);
+ mutex_unlock(&hdev->lock);
+
+ if (!ret) {
+ dev_err(dev, "bad state, cannot suspend\n");
+ return -EINVAL;
+ }
+
+ ret = venus_prepare_power_collapse(hdev, true);
+ if (ret) {
+ dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&hdev->lock);
+
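+	/* HFI_CMD_SYS_PC_PREP must be the last packet written before collapsing power */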
+ if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
+ mutex_unlock(&hdev->lock);
+ return -EINVAL;
+ }
+
+ ret = venus_are_queues_empty(hdev);
+ if (ret < 0 || !ret) {
+ mutex_unlock(&hdev->lock);
+ return -EINVAL;
+ }
+
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
+ mutex_unlock(&hdev->lock);
+ return -EINVAL;
+ }
+
+ ret = venus_power_off(hdev);
+ if (ret) {
+ mutex_unlock(&hdev->lock);
+ return ret;
+ }
+
+ hdev->suspended = true;
+
+ mutex_unlock(&hdev->lock);
+
+ return 0;
+}
+
+static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
+{
+ u32 ctrl_status, cpu_status;
+
+ cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+
+ if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
+ ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
+ return true;
+
+ return false;
+}
+
+static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
+{
+ u32 ctrl_status, cpu_status;
+
+ cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+
+ if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
+ ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ return true;
+
+ return false;
+}
+
+static int venus_suspend_3xx(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct device *dev = core->dev;
+ u32 ctrl_status;
+ bool val;
+ int ret;
+
+ if (!hdev->power_enabled || hdev->suspended)
+ return 0;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_is_valid_state(hdev);
+ mutex_unlock(&hdev->lock);
+
+ if (!ret) {
+ dev_err(dev, "bad state, cannot suspend\n");
+ return -EINVAL;
+ }
+
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ goto power_off;
+
+ /*
+ * Power collapse sequence for Venus 3xx and 4xx versions:
+	 * 1. Check that the ARM9 and the video core are idle: the WFI bit
+	 *    (bit 0) must be set in the CPU status register and the Idle bit
+	 *    (bit 30) must be set in the video core control status register.
+ * 2. Send a command to prepare for power collapse.
+ * 3. Check for WFI and PC_READY bits.
+ */
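+	/* wait up to 150 ms (polling every 1.5 ms) for the CPU and video core to go idle */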
+ ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
+ 1500, 100 * 1500);
+ if (ret)
+ return ret;
+
+ ret = venus_prepare_power_collapse(hdev, false);
+ if (ret) {
+ dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
+ return ret;
+ }
+
+ ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
+ 1500, 100 * 1500);
+ if (ret)
+ return ret;
+
+power_off:
+ mutex_lock(&hdev->lock);
+
+ ret = venus_power_off(hdev);
+ if (ret) {
+ dev_err(dev, "venus_power_off (%d)\n", ret);
+ mutex_unlock(&hdev->lock);
+ return ret;
+ }
+
+ hdev->suspended = true;
+
+ mutex_unlock(&hdev->lock);
+
+ return 0;
+}
+
+static int venus_suspend(struct venus_core *core)
+{
+ if (IS_V3(core) || IS_V4(core))
+ return venus_suspend_3xx(core);
+
+ return venus_suspend_1xx(core);
+}
+
+static const struct hfi_ops venus_hfi_ops = {
+ .core_init = venus_core_init,
+ .core_deinit = venus_core_deinit,
+ .core_ping = venus_core_ping,
+ .core_trigger_ssr = venus_core_trigger_ssr,
+
+ .session_init = venus_session_init,
+ .session_end = venus_session_end,
+ .session_abort = venus_session_abort,
+ .session_flush = venus_session_flush,
+ .session_start = venus_session_start,
+ .session_stop = venus_session_stop,
+ .session_continue = venus_session_continue,
+ .session_etb = venus_session_etb,
+ .session_ftb = venus_session_ftb,
+ .session_set_buffers = venus_session_set_buffers,
+ .session_unset_buffers = venus_session_unset_buffers,
+ .session_load_res = venus_session_load_res,
+ .session_release_res = venus_session_release_res,
+ .session_parse_seq_hdr = venus_session_parse_seq_hdr,
+ .session_get_seq_hdr = venus_session_get_seq_hdr,
+ .session_set_property = venus_session_set_property,
+ .session_get_property = venus_session_get_property,
+
+ .resume = venus_resume,
+ .suspend = venus_suspend,
+
+ .isr = venus_isr,
+ .isr_thread = venus_isr_thread,
+};
+
+void venus_hfi_destroy(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+
+ venus_interface_queues_release(hdev);
+ mutex_destroy(&hdev->lock);
+ kfree(hdev);
+ core->priv = NULL;
+ core->ops = NULL;
+}
+
+int venus_hfi_create(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev;
+ int ret;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ mutex_init(&hdev->lock);
+
+ hdev->core = core;
+ hdev->suspended = true;
+ core->priv = hdev;
+ core->ops = &venus_hfi_ops;
+ core->core_caps = ENC_ROTATION_CAPABILITY | ENC_SCALING_CAPABILITY |
+ ENC_DEINTERLACE_CAPABILITY |
+ DEC_MULTI_STREAM_CAPABILITY;
+
+ ret = venus_interface_queues_init(hdev);
+ if (ret)
+ goto err_kfree;
+
+ return 0;
+
+err_kfree:
+ kfree(hdev);
+ core->priv = NULL;
+ core->ops = NULL;
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.h b/drivers/media/platform/qcom/venus/hfi_venus.h
new file mode 100644
index 000000000..885923354
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_venus.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_VENUS_H__
+#define __VENUS_HFI_VENUS_H__
+
+struct venus_core;
+
+void venus_hfi_destroy(struct venus_core *core);
+int venus_hfi_create(struct venus_core *core);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
new file mode 100644
index 000000000..def0926a6
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_VENUS_IO_H__
+#define __VENUS_HFI_VENUS_IO_H__
+
+#define VBIF_BASE 0x80000
+
+#define VBIF_AXI_HALT_CTRL0 (VBIF_BASE + 0x208)
+#define VBIF_AXI_HALT_CTRL1 (VBIF_BASE + 0x20c)
+
+#define VBIF_AXI_HALT_CTRL0_HALT_REQ BIT(0)
+#define VBIF_AXI_HALT_CTRL1_HALT_ACK BIT(0)
+#define VBIF_AXI_HALT_ACK_TIMEOUT_US 500000
+
+#define CPU_BASE 0xc0000
+#define CPU_CS_BASE (CPU_BASE + 0x12000)
+#define CPU_IC_BASE (CPU_BASE + 0x1f000)
+
+#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE + 0x1c)
+
+#define VIDC_CTRL_INIT (CPU_CS_BASE + 0x48)
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1_MASK 0xfffffffe
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1_SHIFT 1
+#define VIDC_CTRL_INIT_CTRL_MASK 0x1
+#define VIDC_CTRL_INIT_CTRL_SHIFT 0
+
+/* HFI control status */
+#define CPU_CS_SCIACMDARG0 (CPU_CS_BASE + 0x4c)
+#define CPU_CS_SCIACMDARG0_MASK 0xff
+#define CPU_CS_SCIACMDARG0_SHIFT 0x0
+#define CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK 0xfe
+#define CPU_CS_SCIACMDARG0_ERROR_STATUS_SHIFT 0x1
+#define CPU_CS_SCIACMDARG0_INIT_STATUS_MASK 0x1
+#define CPU_CS_SCIACMDARG0_INIT_STATUS_SHIFT 0x0
+#define CPU_CS_SCIACMDARG0_PC_READY BIT(8)
+#define CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK BIT(30)
+
+/* HFI queue table info */
+#define CPU_CS_SCIACMDARG1 (CPU_CS_BASE + 0x50)
+
+/* HFI queue table address */
+#define CPU_CS_SCIACMDARG2 (CPU_CS_BASE + 0x54)
+
+/* Venus cpu */
+#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE + 0x58)
+
+#define SFR_ADDR (CPU_CS_BASE + 0x5c)
+#define MMAP_ADDR (CPU_CS_BASE + 0x60)
+#define UC_REGION_ADDR (CPU_CS_BASE + 0x64)
+#define UC_REGION_SIZE (CPU_CS_BASE + 0x68)
+
+#define CPU_IC_SOFTINT (CPU_IC_BASE + 0x18)
+#define CPU_IC_SOFTINT_H2A_MASK 0x8000
+#define CPU_IC_SOFTINT_H2A_SHIFT 0xf
+
+/* Venus wrapper */
+#define WRAPPER_BASE 0x000e0000
+
+#define WRAPPER_HW_VERSION (WRAPPER_BASE + 0x00)
+#define WRAPPER_HW_VERSION_MAJOR_VERSION_MASK 0x78000000
+#define WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28
+#define WRAPPER_HW_VERSION_MINOR_VERSION_MASK 0xfff0000
+#define WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16
+#define WRAPPER_HW_VERSION_STEP_VERSION_MASK 0xffff
+
+#define WRAPPER_CLOCK_CONFIG (WRAPPER_BASE + 0x04)
+
+#define WRAPPER_INTR_STATUS (WRAPPER_BASE + 0x0c)
+#define WRAPPER_INTR_STATUS_A2HWD_MASK 0x10
+#define WRAPPER_INTR_STATUS_A2HWD_SHIFT 0x4
+#define WRAPPER_INTR_STATUS_A2H_MASK 0x4
+#define WRAPPER_INTR_STATUS_A2H_SHIFT 0x2
+
+#define WRAPPER_INTR_MASK (WRAPPER_BASE + 0x10)
+#define WRAPPER_INTR_MASK_A2HWD_BASK 0x10
+#define WRAPPER_INTR_MASK_A2HWD_SHIFT 0x4
+#define WRAPPER_INTR_MASK_A2HVCODEC_MASK 0x8
+#define WRAPPER_INTR_MASK_A2HVCODEC_SHIFT 0x3
+#define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4
+#define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2
+
+#define WRAPPER_INTR_CLEAR (WRAPPER_BASE + 0x14)
+#define WRAPPER_INTR_CLEAR_A2HWD_MASK 0x10
+#define WRAPPER_INTR_CLEAR_A2HWD_SHIFT 0x4
+#define WRAPPER_INTR_CLEAR_A2H_MASK 0x4
+#define WRAPPER_INTR_CLEAR_A2H_SHIFT 0x2
+
+#define WRAPPER_POWER_STATUS (WRAPPER_BASE + 0x44)
+#define WRAPPER_VDEC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x48)
+#define WRAPPER_VENC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x4c)
+#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET (WRAPPER_BASE + 0x64)
+
+#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE + 0x2000)
+#define WRAPPER_CPU_AXI_HALT (WRAPPER_BASE + 0x2008)
+#define WRAPPER_CPU_AXI_HALT_HALT BIT(16)
+#define WRAPPER_CPU_AXI_HALT_STATUS (WRAPPER_BASE + 0x200c)
+#define WRAPPER_CPU_AXI_HALT_STATUS_IDLE BIT(24)
+
+#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE + 0x2010)
+#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014)
+#define WRAPPER_CPU_STATUS_WFI BIT(0)
+#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000)
+
+/* Venus 4xx */
+#define WRAPPER_VCODEC0_MMCC_POWER_STATUS (WRAPPER_BASE + 0x90)
+#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x94)
+
+#define WRAPPER_VCODEC1_MMCC_POWER_STATUS (WRAPPER_BASE + 0x110)
+#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x114)
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
new file mode 100644
index 000000000..e3972dbf4
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -0,0 +1,1263 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "hfi_venus_io.h"
+#include "hfi_parser.h"
+#include "core.h"
+#include "helpers.h"
+#include "vdec.h"
+
+/*
+ * Three reasons to keep the MPLANE formats (even though the number of
+ * planes is currently one):
+ * - the MPLANE formats allow only one plane to be used
+ * - the downstream driver uses MPLANE formats too
+ * - future firmware versions could add support for >1 planes
+ */
+static const struct venus_format vdec_formats[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_NV12,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG4,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H263,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_XVID,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ },
+};
+
+static const struct venus_format *
+find_format(struct venus_inst *inst, u32 pixfmt, u32 type)
+{
+ const struct venus_format *fmt = vdec_formats;
+ unsigned int size = ARRAY_SIZE(vdec_formats);
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].pixfmt == pixfmt)
+ break;
+ }
+
+ if (i == size || fmt[i].type != type)
+ return NULL;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ !venus_helper_check_codec(inst, fmt[i].pixfmt))
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct venus_format *
+find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type)
+{
+ const struct venus_format *fmt = vdec_formats;
+ unsigned int size = ARRAY_SIZE(vdec_formats);
+ unsigned int i, k = 0;
+
+ if (index > size)
+ return NULL;
+
+ for (i = 0; i < size; i++) {
+ bool valid;
+
+ if (fmt[i].type != type)
+ continue;
+ valid = type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+ venus_helper_check_codec(inst, fmt[i].pixfmt);
+ if (k == index && valid)
+ break;
+ if (valid)
+ k++;
+ }
+
+ if (i == size)
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct venus_format *
+vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
+ const struct venus_format *fmt;
+
+ memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+
+ fmt = find_format(inst, pixmp->pixelformat, f->type);
+ if (!fmt) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_NV12;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_H264;
+ else
+ return NULL;
+ fmt = find_format(inst, pixmp->pixelformat, f->type);
+ }
+
+ pixmp->width = clamp(pixmp->width, frame_width_min(inst),
+ frame_width_max(inst));
+ pixmp->height = clamp(pixmp->height, frame_height_min(inst),
+ frame_height_max(inst));
+
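+	/* decoded capture buffers use a height aligned to 32 lines */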
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->height = ALIGN(pixmp->height, 32);
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+ pixmp->num_planes = fmt->num_planes;
+ pixmp->flags = 0;
+
+ pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height);
+
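+	/* only raw capture planes carry a stride, aligned to 128 bytes */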
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
+ else
+ pfmt[0].bytesperline = 0;
+
+ return fmt;
+}
+
+static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ vdec_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt = NULL;
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fmt = inst->fmt_cap;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt = inst->fmt_out;
+
+ if (inst->reconfig) {
+ struct v4l2_format format = {};
+
+ inst->out_width = inst->reconfig_width;
+ inst->out_height = inst->reconfig_height;
+ inst->reconfig = false;
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = inst->fmt_cap->pixfmt;
+ format.fmt.pix_mp.width = inst->out_width;
+ format.fmt.pix_mp.height = inst->out_height;
+
+ vdec_try_fmt_common(inst, &format);
+
+ inst->width = format.fmt.pix_mp.width;
+ inst->height = format.fmt.pix_mp.height;
+ }
+
+ pixmp->pixelformat = fmt->pixfmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixmp->width = inst->width;
+ pixmp->height = inst->height;
+ pixmp->colorspace = inst->colorspace;
+ pixmp->ycbcr_enc = inst->ycbcr_enc;
+ pixmp->quantization = inst->quantization;
+ pixmp->xfer_func = inst->xfer_func;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = inst->out_width;
+ pixmp->height = inst->out_height;
+ }
+
+ vdec_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_pix_format_mplane orig_pixmp;
+ const struct venus_format *fmt;
+ struct v4l2_format format;
+ u32 pixfmt_out = 0, pixfmt_cap = 0;
+
+ orig_pixmp = *pixmp;
+
+ fmt = vdec_try_fmt_common(inst, f);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixfmt_out = pixmp->pixelformat;
+ pixfmt_cap = inst->fmt_cap->pixfmt;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixfmt_cap = pixmp->pixelformat;
+ pixfmt_out = inst->fmt_out->pixfmt;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_out;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(inst, &format);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ inst->out_width = format.fmt.pix_mp.width;
+ inst->out_height = format.fmt.pix_mp.height;
+ inst->colorspace = pixmp->colorspace;
+ inst->ycbcr_enc = pixmp->ycbcr_enc;
+ inst->quantization = pixmp->quantization;
+ inst->xfer_func = pixmp->xfer_func;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_cap;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(inst, &format);
+
+ inst->width = format.fmt.pix_mp.width;
+ inst->height = format.fmt.pix_mp.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->fmt_out = fmt;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ inst->fmt_cap = fmt;
+
+ return 0;
+}
+
+static int
+vdec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ s->r.width = inst->out_width;
+ s->r.height = inst->out_height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ s->r.width = inst->width;
+ s->r.height = inst->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ s->r.width = inst->out_width;
+ s->r.height = inst->out_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ s->r.top = 0;
+ s->r.left = 0;
+
+ return 0;
+}
+
+static int
+vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "qcom-venus", sizeof(cap->driver));
+ strlcpy(cap->card, "Qualcomm Venus video decoder", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ fmt = find_format_by_index(inst, f->index, f->type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
+
+ return 0;
+}
+
+static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_captureparm *cap = &a->parm.capture;
+ struct v4l2_fract *timeperframe = &cap->timeperframe;
+ u64 us_per_frame, fps;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+ if (!timeperframe->denominator)
+ timeperframe->denominator = inst->timeperframe.denominator;
+ if (!timeperframe->numerator)
+ timeperframe->numerator = inst->timeperframe.numerator;
+ cap->readbuffers = 0;
+ cap->extendedmode = 0;
+ cap->capability = V4L2_CAP_TIMEPERFRAME;
+ us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ do_div(us_per_frame, timeperframe->denominator);
+
+ if (!us_per_frame)
+ return -EINVAL;
+
+ fps = (u64)USEC_PER_SEC;
+ do_div(fps, us_per_frame);
+
+ inst->fps = fps;
+ inst->timeperframe = *timeperframe;
+
+ return 0;
+}
+
+static int vdec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ fmt = find_format(inst, fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt) {
+ fmt = find_format(inst, fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+
+ fsize->stepwise.min_width = frame_width_min(inst);
+ fsize->stepwise.max_width = frame_width_max(inst);
+ fsize->stepwise.step_width = frame_width_step(inst);
+ fsize->stepwise.min_height = frame_height_min(inst);
+ fsize->stepwise.max_height = frame_height_max(inst);
+ fsize->stepwise.step_height = frame_height_step(inst);
+
+ return 0;
+}
+
+static int vdec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct hfi_frame_data fdata = {0};
+ int ret;
+
+ ret = vdec_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ mutex_lock(&inst->lock);
+
+ /*
+	 * Implement V4L2_DEC_CMD_STOP by enqueuing an empty buffer on the decoder
+ * input to signal EOS.
+ */
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ fdata.buffer_type = HFI_BUFFER_INPUT;
+ fdata.flags |= HFI_BUFFERFLAG_EOS;
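+	/* dummy address, the buffer carries no payload, only the EOS flag */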
+ fdata.device_addr = 0xdeadbeef;
+
+ ret = hfi_session_process_buf(inst, &fdata);
+
+unlock:
+ mutex_unlock(&inst->lock);
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
+ .vidioc_querycap = vdec_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_out_mplane = vdec_enum_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
+ .vidioc_g_selection = vdec_g_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_s_parm = vdec_s_parm,
+ .vidioc_enum_framesizes = vdec_enum_framesizes,
+ .vidioc_subscribe_event = vdec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_try_decoder_cmd = vdec_try_decoder_cmd,
+ .vidioc_decoder_cmd = vdec_decoder_cmd,
+};
+
+static int vdec_set_properties(struct venus_inst *inst)
+{
+ struct vdec_controls *ctr = &inst->controls.dec;
+ struct hfi_enable en = { .enable = 1 };
+ u32 ptype;
+ int ret;
+
+ if (ctr->post_loop_deb_mode) {
+ ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
+
+static int vdec_output_conf(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct hfi_enable en = { .enable = 1 };
+ u32 width = inst->out_width;
+ u32 height = inst->out_height;
+ u32 out_fmt, out2_fmt;
+ bool ubwc = false;
+ u32 ptype;
+ int ret;
+
+ ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_1);
+ if (ret)
+ return ret;
+
+ if (core->res->hfi_version == HFI_VERSION_1XX) {
+ ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
+	/* Force searching UBWC formats for resolutions bigger than HD */
+ if (width > 1920 && height > ALIGN(1080, 32))
+ ubwc = true;
+
+	/* For Venus v4 the UBWC format is mandatory */
+ if (IS_V4(core))
+ ubwc = true;
+
+ ret = venus_helper_get_out_fmts(inst, inst->fmt_cap->pixfmt, &out_fmt,
+ &out2_fmt, ubwc);
+ if (ret)
+ return ret;
+
+ inst->output_buf_size =
+ venus_helper_get_framesz_raw(out_fmt, width, height);
+ inst->output2_buf_size =
+ venus_helper_get_framesz_raw(out2_fmt, width, height);
+
+ if (is_ubwc_fmt(out_fmt)) {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT2;
+ inst->opb_fmt = out2_fmt;
+ inst->dpb_buftype = HFI_BUFFER_OUTPUT;
+ inst->dpb_fmt = out_fmt;
+ } else if (is_ubwc_fmt(out2_fmt)) {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT;
+ inst->opb_fmt = out_fmt;
+ inst->dpb_buftype = HFI_BUFFER_OUTPUT2;
+ inst->dpb_fmt = out2_fmt;
+ } else {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT;
+ inst->opb_fmt = out_fmt;
+ inst->dpb_buftype = 0;
+ inst->dpb_fmt = 0;
+ }
+
+ ret = venus_helper_set_raw_format(inst, inst->opb_fmt,
+ inst->opb_buftype);
+ if (ret)
+ return ret;
+
+ if (inst->dpb_fmt) {
+ ret = venus_helper_set_multistream(inst, false, true);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_raw_format(inst, inst->dpb_fmt,
+ inst->dpb_buftype);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_output_resolution(inst, width, height,
+ HFI_BUFFER_OUTPUT2);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_V3(core) || IS_V4(core)) {
+ if (inst->output2_buf_size) {
+ ret = venus_helper_set_bufsize(inst,
+ inst->output2_buf_size,
+ HFI_BUFFER_OUTPUT2);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->output_buf_size) {
+ ret = venus_helper_set_bufsize(inst,
+ inst->output_buf_size,
+ HFI_BUFFER_OUTPUT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = venus_helper_set_dyn_bufmode(inst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int vdec_init_session(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = hfi_session_init(inst, inst->fmt_out->pixfmt);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_input_resolution(inst, inst->out_width,
+ inst->out_height);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_color_format(inst, inst->fmt_cap->pixfmt);
+ if (ret)
+ goto deinit;
+
+ return 0;
+deinit:
+ hfi_session_deinit(inst);
+ return ret;
+}
+
+static int vdec_num_buffers(struct venus_inst *inst, unsigned int *in_num,
+ unsigned int *out_num)
+{
+ enum hfi_version ver = inst->core->res->hfi_version;
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ *in_num = *out_num = 0;
+
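+	/* open a temporary session only to query the firmware buffer requirements */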
+ ret = vdec_init_session(inst);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+ if (ret)
+ goto deinit;
+
+ *in_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ goto deinit;
+
+ *out_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+
+deinit:
+ hfi_session_deinit(inst);
+
+ return ret;
+}
+
+static int vdec_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ unsigned int in_num, out_num;
+ int ret = 0;
+
+ if (*num_planes) {
+ unsigned int output_buf_size = venus_helper_get_opb_size(inst);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ *num_planes != inst->fmt_out->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ *num_planes != inst->fmt_cap->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ sizes[0] < inst->input_buf_size)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ sizes[0] < output_buf_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ ret = vdec_num_buffers(inst, &in_num, &out_num);
+ if (ret)
+ return ret;
+
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ *num_planes = inst->fmt_out->num_planes;
+ sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
+ inst->out_width,
+ inst->out_height);
+ inst->input_buf_size = sizes[0];
+ *num_buffers = max(*num_buffers, in_num);
+ inst->num_input_bufs = *num_buffers;
+ inst->num_output_bufs = out_num;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ *num_planes = inst->fmt_cap->num_planes;
+ sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
+ inst->width,
+ inst->height);
+ inst->output_buf_size = sizes[0];
+ *num_buffers = max(*num_buffers, out_num);
+ inst->num_output_bufs = *num_buffers;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int vdec_verify_conf(struct venus_inst *inst)
+{
+ enum hfi_version ver = inst->core->res->hfi_version;
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ if (!inst->num_input_bufs || !inst->num_output_bufs)
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_output_bufs < bufreq.count_actual ||
+ inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 1;
+ else
+ inst->streamon_cap = 1;
+
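+	/* postpone the session setup until both queues have started streaming */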
+ if (!(inst->streamon_out & inst->streamon_cap)) {
+ mutex_unlock(&inst->lock);
+ return 0;
+ }
+
+ venus_helper_init_instance(inst);
+
+ inst->reconfig = false;
+ inst->sequence_cap = 0;
+ inst->sequence_out = 0;
+
+ ret = vdec_init_session(inst);
+ if (ret)
+ goto bufs_done;
+
+ ret = vdec_set_properties(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = vdec_output_conf(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = vdec_verify_conf(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
+ VB2_MAX_FRAME, VB2_MAX_FRAME);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_alloc_dpb_bufs(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_vb2_start_streaming(inst);
+ if (ret)
+ goto deinit_sess;
+
+ mutex_unlock(&inst->lock);
+
+ return 0;
+
+deinit_sess:
+ hfi_session_deinit(inst);
+bufs_done:
+ venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 0;
+ else
+ inst->streamon_cap = 0;
+ mutex_unlock(&inst->lock);
+ return ret;
+}
+
+static const struct vb2_ops vdec_vb2_ops = {
+ .queue_setup = vdec_queue_setup,
+ .buf_init = venus_helper_vb2_buf_init,
+ .buf_prepare = venus_helper_vb2_buf_prepare,
+ .start_streaming = vdec_start_streaming,
+ .stop_streaming = venus_helper_vb2_stop_streaming,
+ .buf_queue = venus_helper_vb2_buf_queue,
+};
+
+static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
+ u32 tag, u32 bytesused, u32 data_offset, u32 flags,
+ u32 hfi_flags, u64 timestamp_us)
+{
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ unsigned int type;
+
+ if (buf_type == HFI_BUFFER_INPUT)
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+ vbuf = venus_helper_find_buf(inst, type, tag);
+ if (!vbuf)
+ return;
+
+ vbuf->flags = flags;
+ vbuf->field = V4L2_FIELD_NONE;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ unsigned int opb_sz = venus_helper_get_opb_size(inst);
+
+ vb = &vbuf->vb2_buf;
+ vb2_set_plane_payload(vb, 0, bytesused ? : opb_sz);
+ vb->planes[0].data_offset = data_offset;
+ vb->timestamp = timestamp_us * NSEC_PER_USEC;
+ vbuf->sequence = inst->sequence_cap++;
+
+ if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
+ const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
+
+ v4l2_event_queue_fh(&inst->fh, &ev);
+ }
+ } else {
+ vbuf->sequence = inst->sequence_out++;
+ }
+
+ if (hfi_flags & HFI_BUFFERFLAG_READONLY)
+ venus_helper_acquire_buf_ref(vbuf);
+
+ if (hfi_flags & HFI_BUFFERFLAG_DATACORRUPT)
+ state = VB2_BUF_STATE_ERROR;
+
+ v4l2_m2m_buf_done(vbuf, state);
+}
+
+static void vdec_event_notify(struct venus_inst *inst, u32 event,
+ struct hfi_event_data *data)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_dec;
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
+
+ switch (event) {
+ case EVT_SESSION_ERROR:
+ inst->session_error = true;
+ dev_err(dev, "dec: event session error %x\n", inst->error);
+ break;
+ case EVT_SYS_EVENT_CHANGE:
+ switch (data->event_type) {
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
+ hfi_session_continue(inst);
+ dev_dbg(dev, "event sufficient resources\n");
+ break;
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
+ inst->reconfig_height = data->height;
+ inst->reconfig_width = data->width;
+ inst->reconfig = true;
+
+ v4l2_event_queue_fh(&inst->fh, &ev);
+
+ dev_dbg(dev, "event not sufficient resources (%ux%u)\n",
+ data->width, data->height);
+ break;
+ case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
+ venus_helper_release_buf_ref(inst, data->tag);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct hfi_inst_ops vdec_hfi_ops = {
+ .buf_done = vdec_buf_done,
+ .event_notify = vdec_event_notify,
+};
+
+static void vdec_inst_init(struct venus_inst *inst)
+{
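+	/* default to an H264 bitstream on the OUTPUT queue and NV12 on CAPTURE */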
+ inst->fmt_out = &vdec_formats[6];
+ inst->fmt_cap = &vdec_formats[0];
+ inst->width = 1280;
+ inst->height = ALIGN(720, 32);
+ inst->out_width = 1280;
+ inst->out_height = 720;
+ inst->fps = 30;
+ inst->timeperframe.numerator = 1;
+ inst->timeperframe.denominator = 30;
+ inst->hfi_codec = HFI_VIDEO_CODEC_H264;
+}
+
+static const struct v4l2_m2m_ops vdec_m2m_ops = {
+ .device_run = venus_helper_m2m_device_run,
+ .job_abort = venus_helper_m2m_job_abort,
+};
+
+static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct venus_inst *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->drv_priv = inst;
+ src_vq->buf_struct_size = sizeof(struct venus_buffer);
+ src_vq->allow_zero_bytesused = 1;
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = inst->core->dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->drv_priv = inst;
+ dst_vq->buf_struct_size = sizeof(struct venus_buffer);
+ dst_vq->allow_zero_bytesused = 1;
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = inst->core->dev;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_open(struct file *file)
+{
+ struct venus_core *core = video_drvdata(file);
+ struct venus_inst *inst;
+ int ret;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&inst->dpbbufs);
+ INIT_LIST_HEAD(&inst->registeredbufs);
+ INIT_LIST_HEAD(&inst->internalbufs);
+ INIT_LIST_HEAD(&inst->list);
+ mutex_init(&inst->lock);
+
+ inst->core = core;
+ inst->session_type = VIDC_SESSION_TYPE_DEC;
+ inst->num_output_bufs = 1;
+
+ venus_helper_init_instance(inst);
+
+ ret = pm_runtime_get_sync(core->dev_dec);
+ if (ret < 0)
+ goto err_free_inst;
+
+ ret = vdec_ctrl_init(inst);
+ if (ret)
+ goto err_put_sync;
+
+ ret = hfi_session_create(inst, &vdec_hfi_ops);
+ if (ret)
+ goto err_ctrl_deinit;
+
+ vdec_inst_init(inst);
+
+ /*
+	 * Create an m2m device for every instance; the m2m context scheduling
+	 * is done by the firmware side, so the driver does not need to care
+	 * about it.
+ */
+ inst->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
+ if (IS_ERR(inst->m2m_dev)) {
+ ret = PTR_ERR(inst->m2m_dev);
+ goto err_session_destroy;
+ }
+
+ inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
+ if (IS_ERR(inst->m2m_ctx)) {
+ ret = PTR_ERR(inst->m2m_ctx);
+ goto err_m2m_release;
+ }
+
+ v4l2_fh_init(&inst->fh, core->vdev_dec);
+
+ inst->fh.ctrl_handler = &inst->ctrl_handler;
+ v4l2_fh_add(&inst->fh);
+ inst->fh.m2m_ctx = inst->m2m_ctx;
+ file->private_data = &inst->fh;
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(inst->m2m_dev);
+err_session_destroy:
+ hfi_session_destroy(inst);
+err_ctrl_deinit:
+ vdec_ctrl_deinit(inst);
+err_put_sync:
+ pm_runtime_put_sync(core->dev_dec);
+err_free_inst:
+ kfree(inst);
+ return ret;
+}
+
+static int vdec_close(struct file *file)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+ v4l2_m2m_release(inst->m2m_dev);
+ vdec_ctrl_deinit(inst);
+ hfi_session_destroy(inst);
+ mutex_destroy(&inst->lock);
+ v4l2_fh_del(&inst->fh);
+ v4l2_fh_exit(&inst->fh);
+
+ pm_runtime_put_sync(inst->core->dev_dec);
+
+ kfree(inst);
+ return 0;
+}
+
+static const struct v4l2_file_operations vdec_fops = {
+ .owner = THIS_MODULE,
+ .open = vdec_open,
+ .release = vdec_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int vdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct video_device *vdev;
+ struct venus_core *core;
+ int ret;
+
+ if (!dev->parent)
+ return -EPROBE_DEFER;
+
+ core = dev_get_drvdata(dev->parent);
+ if (!core)
+ return -EPROBE_DEFER;
+
+ if (IS_V3(core) || IS_V4(core)) {
+ core->core0_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(core->core0_clk))
+ return PTR_ERR(core->core0_clk);
+ }
+
+ if (IS_V4(core)) {
+ core->core0_bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(core->core0_bus_clk))
+ return PTR_ERR(core->core0_bus_clk);
+ }
+
+ platform_set_drvdata(pdev, core);
+
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ strlcpy(vdev->name, "qcom-venus-decoder", sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vdec_fops;
+ vdev->ioctl_ops = &vdec_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vdev_release;
+
+ core->vdev_dec = vdev;
+ core->dev_dec = dev;
+
+ video_set_drvdata(vdev, core);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+ return ret;
+}
+
+static int vdec_remove(struct platform_device *pdev)
+{
+ struct venus_core *core = dev_get_drvdata(pdev->dev.parent);
+
+ video_unregister_device(core->vdev_dec);
+ pm_runtime_disable(core->dev_dec);
+
+ return 0;
+}
+
+static __maybe_unused int vdec_runtime_suspend(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (IS_V1(core))
+ return 0;
+
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
+ if (ret)
+ return ret;
+
+ if (IS_V4(core))
+ clk_disable_unprepare(core->core0_bus_clk);
+
+ clk_disable_unprepare(core->core0_clk);
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
+}
+
+static __maybe_unused int vdec_runtime_resume(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (IS_V1(core))
+ return 0;
+
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(core->core0_clk);
+ if (ret)
+ goto err_power_disable;
+
+ if (IS_V4(core))
+ ret = clk_prepare_enable(core->core0_bus_clk);
+
+ if (ret)
+ goto err_unprepare_core0;
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
+
+err_unprepare_core0:
+ clk_disable_unprepare(core->core0_clk);
+err_power_disable:
+ venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
+ return ret;
+}
+
+static const struct dev_pm_ops vdec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(vdec_runtime_suspend, vdec_runtime_resume, NULL)
+};
+
+static const struct of_device_id vdec_dt_match[] = {
+ { .compatible = "venus-decoder" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, vdec_dt_match);
+
+static struct platform_driver qcom_venus_dec_driver = {
+ .probe = vdec_probe,
+ .remove = vdec_remove,
+ .driver = {
+ .name = "qcom-venus-decoder",
+ .of_match_table = vdec_dt_match,
+ .pm = &vdec_pm_ops,
+ },
+};
+module_platform_driver(qcom_venus_dec_driver);
+
+MODULE_ALIAS("platform:qcom-venus-decoder");
+MODULE_DESCRIPTION("Qualcomm Venus video decoder driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/qcom/venus/vdec.h b/drivers/media/platform/qcom/venus/vdec.h
new file mode 100644
index 000000000..84b672c54
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/vdec.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_VDEC_H__
+#define __VENUS_VDEC_H__
+
+struct venus_inst;
+
+int vdec_ctrl_init(struct venus_inst *inst);
+void vdec_ctrl_deinit(struct venus_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
new file mode 100644
index 000000000..f4604b0cd
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+
+#include "core.h"
+#include "vdec.h"
+
+static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct venus_inst *inst = ctrl_to_inst(ctrl);
+ struct vdec_controls *ctr = &inst->controls.dec;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ ctr->post_loop_deb_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ ctr->profile = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ctr->level = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct venus_inst *inst = ctrl_to_inst(ctrl);
+ struct vdec_controls *ctr = &inst->controls.dec;
+ union hfi_get_property hprop;
+ u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ ret = hfi_session_get_property(inst, ptype, &hprop);
+ if (!ret)
+ ctr->profile = hprop.profile_level.profile;
+ ctrl->val = ctr->profile;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ret = hfi_session_get_property(inst, ptype, &hprop);
+ if (!ret)
+ ctr->level = hprop.profile_level.level;
+ ctrl->val = ctr->level;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ ctrl->val = ctr->post_loop_deb_mode;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ ctrl->val = inst->num_output_bufs;
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vdec_ctrl_ops = {
+ .s_ctrl = vdec_op_s_ctrl,
+ .g_volatile_ctrl = vdec_op_g_volatile_ctrl,
+};
+
+int vdec_ctrl_init(struct venus_inst *inst)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 7);
+ if (ret)
+ return ret;
+
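+	/* profile and level are volatile so they can be read back from the firmware */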
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY,
+ ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) |
+ (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)),
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, 0, 1, 1, 0);
+
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 1);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ret = inst->ctrl_handler.error;
+ if (ret) {
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ return ret;
+ }
+
+ return 0;
+}
+
+void vdec_ctrl_deinit(struct venus_inst *inst)
+{
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+}
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
new file mode 100644
index 000000000..4197b311c
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -0,0 +1,1370 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+
+#include "hfi_venus_io.h"
+#include "hfi_parser.h"
+#include "core.h"
+#include "helpers.h"
+#include "venc.h"
+
+#define NUM_B_FRAMES_MAX 4
+
+/*
+ * Three reasons to keep the MPLANE formats (even though the number of
+ * planes is currently one):
+ * - the MPLANE formats allow only one plane to be used
+ * - the downstream driver uses MPLANE formats too
+ * - future firmware versions could add support for >1 planes
+ */
+static const struct venus_format venc_formats[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_NV12,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG4,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H263,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+};
+
+static const struct venus_format *
+find_format(struct venus_inst *inst, u32 pixfmt, u32 type)
+{
+ const struct venus_format *fmt = venc_formats;
+ unsigned int size = ARRAY_SIZE(venc_formats);
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].pixfmt == pixfmt)
+ break;
+ }
+
+ if (i == size || fmt[i].type != type)
+ return NULL;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ !venus_helper_check_codec(inst, fmt[i].pixfmt))
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct venus_format *
+find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type)
+{
+ const struct venus_format *fmt = venc_formats;
+ unsigned int size = ARRAY_SIZE(venc_formats);
+ unsigned int i, k = 0;
+
+ if (index > size)
+ return NULL;
+
+ for (i = 0; i < size; i++) {
+ bool valid;
+
+ if (fmt[i].type != type)
+ continue;
+ valid = type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ venus_helper_check_codec(inst, fmt[i].pixfmt);
+ if (k == index && valid)
+ break;
+ if (valid)
+ k++;
+ }
+
+ if (i == size)
+ return NULL;
+
+ return &fmt[i];
+}
+
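+/* map V4L2 codec control values onto their HFI counterparts */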
+static int venc_v4l2_to_hfi(int id, int value)
+{
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0:
+ default:
+ return HFI_MPEG4_LEVEL_0;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B:
+ return HFI_MPEG4_LEVEL_0b;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1:
+ return HFI_MPEG4_LEVEL_1;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2:
+ return HFI_MPEG4_LEVEL_2;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3:
+ return HFI_MPEG4_LEVEL_3;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4:
+ return HFI_MPEG4_LEVEL_4;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5:
+ return HFI_MPEG4_LEVEL_5;
+ }
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+ default:
+ return HFI_MPEG4_PROFILE_SIMPLE;
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+ return HFI_MPEG4_PROFILE_ADVANCEDSIMPLE;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return HFI_H264_PROFILE_BASELINE;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ return HFI_H264_PROFILE_CONSTRAINED_BASE;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return HFI_H264_PROFILE_MAIN;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ default:
+ return HFI_H264_PROFILE_HIGH;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return HFI_H264_LEVEL_1;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return HFI_H264_LEVEL_1b;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return HFI_H264_LEVEL_11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return HFI_H264_LEVEL_12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return HFI_H264_LEVEL_13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return HFI_H264_LEVEL_2;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return HFI_H264_LEVEL_21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return HFI_H264_LEVEL_22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return HFI_H264_LEVEL_3;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return HFI_H264_LEVEL_31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return HFI_H264_LEVEL_32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return HFI_H264_LEVEL_4;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return HFI_H264_LEVEL_41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return HFI_H264_LEVEL_42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ default:
+ return HFI_H264_LEVEL_5;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return HFI_H264_LEVEL_51;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
+ default:
+ return HFI_H264_ENTROPY_CAVLC;
+ case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
+ return HFI_H264_ENTROPY_CABAC;
+ }
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ switch (value) {
+ case 0:
+ default:
+ return HFI_VPX_PROFILE_VERSION_0;
+ case 1:
+ return HFI_VPX_PROFILE_VERSION_1;
+ case 2:
+ return HFI_VPX_PROFILE_VERSION_2;
+ case 3:
+ return HFI_VPX_PROFILE_VERSION_3;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
+ default:
+ return HFI_H264_DB_MODE_ALL_BOUNDARY;
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
+ return HFI_H264_DB_MODE_DISABLE;
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
+ return HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
+ }
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ default:
+ return HFI_HEVC_PROFILE_MAIN;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return HFI_HEVC_PROFILE_MAIN_STILL_PIC;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return HFI_HEVC_PROFILE_MAIN10;
+ }
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ default:
+ return HFI_HEVC_LEVEL_1;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return HFI_HEVC_LEVEL_2;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return HFI_HEVC_LEVEL_21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return HFI_HEVC_LEVEL_3;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return HFI_HEVC_LEVEL_31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return HFI_HEVC_LEVEL_4;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return HFI_HEVC_LEVEL_41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return HFI_HEVC_LEVEL_5;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return HFI_HEVC_LEVEL_51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return HFI_HEVC_LEVEL_52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return HFI_HEVC_LEVEL_6;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return HFI_HEVC_LEVEL_61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return HFI_HEVC_LEVEL_62;
+ }
+ }
+
+ return 0;
+}
+
+static int
+venc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "qcom-venus", sizeof(cap->driver));
+ strlcpy(cap->card, "Qualcomm Venus video encoder", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int venc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ fmt = find_format_by_index(inst, f->index, f->type);
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
+
+ return 0;
+}
+
+static const struct venus_format *
+venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
+ const struct venus_format *fmt;
+
+ memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+
+ fmt = find_format(inst, pixmp->pixelformat, f->type);
+ if (!fmt) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_H264;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_NV12;
+ else
+ return NULL;
+ fmt = find_format(inst, pixmp->pixelformat, f->type);
+ if (!fmt)
+ return NULL;
+ }
+
+ pixmp->width = clamp(pixmp->width, frame_width_min(inst),
+ frame_width_max(inst));
+ pixmp->height = clamp(pixmp->height, frame_height_min(inst),
+ frame_height_max(inst));
+
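+	/*
+	 * The raw OUTPUT buffer height is rounded up to a multiple of 32
+	 * lines (presumably a hardware alignment requirement); width and
+	 * height are then rounded up to even values.
+	 */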
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->height = ALIGN(pixmp->height, 32);
+
+ pixmp->width = ALIGN(pixmp->width, 2);
+ pixmp->height = ALIGN(pixmp->height, 2);
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+ pixmp->num_planes = fmt->num_planes;
+ pixmp->flags = 0;
+
+ pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
+ else
+ pfmt[0].bytesperline = 0;
+
+ return fmt;
+}
+
+static int venc_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ venc_try_fmt_common(inst, f);
+
+ return 0;
+}
+
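+/*
+ * S_FMT keeps the OUTPUT (raw) and CAPTURE (coded bitstream) formats in
+ * sync: whichever queue is targeted, both formats are re-run through
+ * venc_try_fmt_common() with the requested dimensions so that
+ * inst->out_width/out_height and inst->width/height stay consistent.
+ */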
+static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_pix_format_mplane orig_pixmp;
+ const struct venus_format *fmt;
+ struct v4l2_format format;
+ u32 pixfmt_out = 0, pixfmt_cap = 0;
+
+ orig_pixmp = *pixmp;
+
+ fmt = venc_try_fmt_common(inst, f);
+ if (!fmt)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixfmt_out = pixmp->pixelformat;
+ pixfmt_cap = inst->fmt_cap->pixfmt;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixfmt_cap = pixmp->pixelformat;
+ pixfmt_out = inst->fmt_out->pixfmt;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_out;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ venc_try_fmt_common(inst, &format);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ inst->out_width = format.fmt.pix_mp.width;
+ inst->out_height = format.fmt.pix_mp.height;
+ inst->colorspace = pixmp->colorspace;
+ inst->ycbcr_enc = pixmp->ycbcr_enc;
+ inst->quantization = pixmp->quantization;
+ inst->xfer_func = pixmp->xfer_func;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_cap;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ venc_try_fmt_common(inst, &format);
+
+ inst->width = format.fmt.pix_mp.width;
+ inst->height = format.fmt.pix_mp.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->fmt_out = fmt;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ inst->fmt_cap = fmt;
+
+ return 0;
+}
+
+static int venc_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fmt = inst->fmt_cap;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt = inst->fmt_out;
+ else
+ return -EINVAL;
+
+ pixmp->pixelformat = fmt->pixfmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixmp->width = inst->width;
+ pixmp->height = inst->height;
+ pixmp->colorspace = inst->colorspace;
+ pixmp->ycbcr_enc = inst->ycbcr_enc;
+ pixmp->quantization = inst->quantization;
+ pixmp->xfer_func = inst->xfer_func;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = inst->out_width;
+ pixmp->height = inst->out_height;
+ }
+
+ venc_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int
+venc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.width = inst->width;
+ s->r.height = inst->height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.width = inst->out_width;
+ s->r.height = inst->out_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ s->r.top = 0;
+ s->r.left = 0;
+
+ return 0;
+}
+
+static int
+venc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (s->r.width != inst->out_width ||
+ s->r.height != inst->out_height ||
+ s->r.top != 0 || s->r.left != 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_outputparm *out = &a->parm.output;
+ struct v4l2_fract *timeperframe = &out->timeperframe;
+ u64 us_per_frame, fps;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ memset(out->reserved, 0, sizeof(out->reserved));
+
+ if (!timeperframe->denominator)
+ timeperframe->denominator = inst->timeperframe.denominator;
+ if (!timeperframe->numerator)
+ timeperframe->numerator = inst->timeperframe.numerator;
+
+ out->capability = V4L2_CAP_TIMEPERFRAME;
+
+ us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ do_div(us_per_frame, timeperframe->denominator);
+
+ if (!us_per_frame)
+ return -EINVAL;
+
+ fps = (u64)USEC_PER_SEC;
+ do_div(fps, us_per_frame);
+
+ inst->timeperframe = *timeperframe;
+ inst->fps = fps;
+
+ return 0;
+}
+
+static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.capability |= V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe = inst->timeperframe;
+
+ return 0;
+}
+
+static int venc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+
+ fmt = find_format(inst, fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt) {
+ fmt = find_format(inst, fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->stepwise.min_width = frame_width_min(inst);
+ fsize->stepwise.max_width = frame_width_max(inst);
+ fsize->stepwise.step_width = frame_width_step(inst);
+ fsize->stepwise.min_height = frame_height_min(inst);
+ fsize->stepwise.max_height = frame_height_max(inst);
+ fsize->stepwise.step_height = frame_height_step(inst);
+
+ return 0;
+}
+
+static int venc_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+
+ fmt = find_format(inst, fival->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt) {
+ fmt = find_format(inst, fival->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ if (fival->index)
+ return -EINVAL;
+
+ if (!fival->width || !fival->height)
+ return -EINVAL;
+
+ if (fival->width > frame_width_max(inst) ||
+ fival->width < frame_width_min(inst) ||
+ fival->height > frame_height_max(inst) ||
+ fival->height < frame_height_min(inst))
+ return -EINVAL;
+
+ fival->stepwise.min.numerator = 1;
+ fival->stepwise.min.denominator = frate_max(inst);
+ fival->stepwise.max.numerator = 1;
+ fival->stepwise.max.denominator = frate_min(inst);
+ fival->stepwise.step.numerator = 1;
+ fival->stepwise.step.denominator = frate_max(inst);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops venc_ioctl_ops = {
+ .vidioc_querycap = venc_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = venc_enum_fmt,
+ .vidioc_enum_fmt_vid_out_mplane = venc_enum_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = venc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = venc_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = venc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = venc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = venc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = venc_try_fmt,
+ .vidioc_g_selection = venc_g_selection,
+ .vidioc_s_selection = venc_s_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_s_parm = venc_s_parm,
+ .vidioc_g_parm = venc_g_parm,
+ .vidioc_enum_framesizes = venc_enum_framesizes,
+ .vidioc_enum_frameintervals = venc_enum_frameintervals,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int venc_set_properties(struct venus_inst *inst)
+{
+ struct venc_controls *ctr = &inst->controls.enc;
+ struct hfi_intra_period intra_period;
+ struct hfi_profile_level pl;
+ struct hfi_framerate frate;
+ struct hfi_bitrate brate;
+ struct hfi_idr_period idrp;
+ u32 ptype, rate_control, bitrate, profile = 0, level = 0;
+ int ret;
+
+ ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_2);
+ if (ret)
+ return ret;
+
+ ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
+ frate.buffer_type = HFI_BUFFER_OUTPUT;
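+	/* The firmware takes the frame rate in Q16 fixed-point format, hence the << 16. */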
+ frate.framerate = inst->fps * (1 << 16);
+
+ ret = hfi_session_set_property(inst, ptype, &frate);
+ if (ret)
+ return ret;
+
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
+ struct hfi_h264_vui_timing_info info;
+ struct hfi_h264_entropy_control entropy;
+ struct hfi_h264_db_control deblock;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+ info.enable = 1;
+ info.fixed_framerate = 1;
+ info.time_scale = NSEC_PER_SEC;
+
+ ret = hfi_session_set_property(inst, ptype, &info);
+ if (ret)
+ return ret;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
+ entropy.entropy_mode = venc_v4l2_to_hfi(
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ ctr->h264_entropy_mode);
+ entropy.cabac_model = HFI_H264_CABAC_MODEL_0;
+
+ ret = hfi_session_set_property(inst, ptype, &entropy);
+ if (ret)
+ return ret;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
+ deblock.mode = venc_v4l2_to_hfi(
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ ctr->h264_loop_filter_mode);
+ deblock.slice_alpha_offset = ctr->h264_loop_filter_alpha;
+ deblock.slice_beta_offset = ctr->h264_loop_filter_beta;
+
+ ret = hfi_session_set_property(inst, ptype, &deblock);
+ if (ret)
+ return ret;
+ }
+
+	/* IDR periodicity, n:
+	 * n = 0 - only the first I-frame is an IDR frame
+	 * n = 1 - all I-frames will be IDR frames
+	 * n > 1 - every n-th I-frame will be an IDR frame
+	 */
+ ptype = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
+ idrp.idr_period = 0;
+ ret = hfi_session_set_property(inst, ptype, &idrp);
+ if (ret)
+ return ret;
+
+ if (ctr->num_b_frames) {
+ u32 max_num_b_frames = NUM_B_FRAMES_MAX;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
+ ret = hfi_session_set_property(inst, ptype, &max_num_b_frames);
+ if (ret)
+ return ret;
+ }
+
+ ptype = HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
+ intra_period.pframes = ctr->num_p_frames;
+ intra_period.bframes = ctr->num_b_frames;
+
+ ret = hfi_session_set_property(inst, ptype, &intra_period);
+ if (ret)
+ return ret;
+
+ if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ rate_control = HFI_RATE_CONTROL_VBR_CFR;
+ else
+ rate_control = HFI_RATE_CONTROL_CBR_CFR;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
+ ret = hfi_session_set_property(inst, ptype, &rate_control);
+ if (ret)
+ return ret;
+
+ if (!ctr->bitrate)
+ bitrate = 64000;
+ else
+ bitrate = ctr->bitrate;
+
+ ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
+ brate.bitrate = bitrate;
+ brate.layer_id = 0;
+
+ ret = hfi_session_set_property(inst, ptype, &brate);
+ if (ret)
+ return ret;
+
+ if (!ctr->bitrate_peak)
+ bitrate *= 2;
+ else
+ bitrate = ctr->bitrate_peak;
+
+ ptype = HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE;
+ brate.bitrate = bitrate;
+ brate.layer_id = 0;
+
+ ret = hfi_session_set_property(inst, ptype, &brate);
+ if (ret)
+ return ret;
+
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ ctr->profile.h264);
+ level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ ctr->level.h264);
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ ctr->profile.vpx);
+ level = 0;
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_MPEG4) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ ctr->profile.mpeg4);
+ level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ ctr->level.mpeg4);
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H263) {
+ profile = 0;
+ level = 0;
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ ctr->profile.hevc);
+ level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ ctr->level.hevc);
+ }
+
+ ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ pl.profile = profile;
+ pl.level = level;
+
+ ret = hfi_session_set_property(inst, ptype, &pl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venc_init_session(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = hfi_session_init(inst, inst->fmt_cap->pixfmt);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_input_resolution(inst, inst->width,
+ inst->height);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_output_resolution(inst, inst->width,
+ inst->height,
+ HFI_BUFFER_OUTPUT);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_color_format(inst, inst->fmt_out->pixfmt);
+ if (ret)
+ goto deinit;
+
+ ret = venc_set_properties(inst);
+ if (ret)
+ goto deinit;
+
+ return 0;
+deinit:
+ hfi_session_deinit(inst);
+ return ret;
+}
+
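+/*
+ * Querying the firmware buffer requirements needs a live HFI session, so
+ * bring one up temporarily, read the INPUT buffer count and tear the
+ * session down again. The session used for streaming is created later in
+ * venc_start_streaming().
+ */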
+static int venc_out_num_buffers(struct venus_inst *inst, unsigned int *num)
+{
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ ret = venc_init_session(inst);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+
+ *num = bufreq.count_actual;
+
+ hfi_session_deinit(inst);
+
+ return ret;
+}
+
+static int venc_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ unsigned int num, min = 4;
+ int ret = 0;
+
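+	/*
+	 * A non-zero *num_planes means this is a VIDIOC_CREATE_BUFS request;
+	 * only validate the plane count and buffer sizes passed in.
+	 */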
+ if (*num_planes) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ *num_planes != inst->fmt_out->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ *num_planes != inst->fmt_cap->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ sizes[0] < inst->input_buf_size)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ sizes[0] < inst->output_buf_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ *num_planes = inst->fmt_out->num_planes;
+
+ ret = venc_out_num_buffers(inst, &num);
+ if (ret)
+ break;
+
+ num = max(num, min);
+ *num_buffers = max(*num_buffers, num);
+ inst->num_input_bufs = *num_buffers;
+
+ sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
+ inst->width,
+ inst->height);
+ inst->input_buf_size = sizes[0];
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ *num_planes = inst->fmt_cap->num_planes;
+ *num_buffers = max(*num_buffers, min);
+ inst->num_output_bufs = *num_buffers;
+ sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
+ inst->width,
+ inst->height);
+ inst->output_buf_size = sizes[0];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int venc_verify_conf(struct venus_inst *inst)
+{
+ enum hfi_version ver = inst->core->res->hfi_version;
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ if (!inst->num_input_bufs || !inst->num_output_bufs)
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_output_bufs < bufreq.count_actual ||
+ inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_input_bufs < bufreq.count_actual ||
+ inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 1;
+ else
+ inst->streamon_cap = 1;
+
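+	/* Nothing to do until both the OUTPUT and CAPTURE queues are streaming. */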
+ if (!(inst->streamon_out & inst->streamon_cap)) {
+ mutex_unlock(&inst->lock);
+ return 0;
+ }
+
+ venus_helper_init_instance(inst);
+
+ inst->sequence_cap = 0;
+ inst->sequence_out = 0;
+
+ ret = venc_init_session(inst);
+ if (ret)
+ goto bufs_done;
+
+ ret = venc_set_properties(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venc_verify_conf(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
+ inst->num_output_bufs, 0);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_vb2_start_streaming(inst);
+ if (ret)
+ goto deinit_sess;
+
+ mutex_unlock(&inst->lock);
+
+ return 0;
+
+deinit_sess:
+ hfi_session_deinit(inst);
+bufs_done:
+ venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 0;
+ else
+ inst->streamon_cap = 0;
+ mutex_unlock(&inst->lock);
+ return ret;
+}
+
+static const struct vb2_ops venc_vb2_ops = {
+ .queue_setup = venc_queue_setup,
+ .buf_init = venus_helper_vb2_buf_init,
+ .buf_prepare = venus_helper_vb2_buf_prepare,
+ .start_streaming = venc_start_streaming,
+ .stop_streaming = venus_helper_vb2_stop_streaming,
+ .buf_queue = venus_helper_vb2_buf_queue,
+};
+
+static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
+ u32 tag, u32 bytesused, u32 data_offset, u32 flags,
+ u32 hfi_flags, u64 timestamp_us)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ unsigned int type;
+
+ if (buf_type == HFI_BUFFER_INPUT)
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+ vbuf = venus_helper_find_buf(inst, type, tag);
+ if (!vbuf)
+ return;
+
+ vbuf->flags = flags;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ vb = &vbuf->vb2_buf;
+ vb2_set_plane_payload(vb, 0, bytesused + data_offset);
+ vb->planes[0].data_offset = data_offset;
+ vb->timestamp = timestamp_us * NSEC_PER_USEC;
+ vbuf->sequence = inst->sequence_cap++;
+ } else {
+ vbuf->sequence = inst->sequence_out++;
+ }
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+}
+
+static void venc_event_notify(struct venus_inst *inst, u32 event,
+ struct hfi_event_data *data)
+{
+ struct device *dev = inst->core->dev_enc;
+
+ if (event == EVT_SESSION_ERROR) {
+ inst->session_error = true;
+ dev_err(dev, "enc: event session error %x\n", inst->error);
+ }
+}
+
+static const struct hfi_inst_ops venc_hfi_ops = {
+ .buf_done = venc_buf_done,
+ .event_notify = venc_event_notify,
+};
+
+static const struct v4l2_m2m_ops venc_m2m_ops = {
+ .device_run = venus_helper_m2m_device_run,
+ .job_abort = venus_helper_m2m_job_abort,
+};
+
+static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct venus_inst *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &venc_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->drv_priv = inst;
+ src_vq->buf_struct_size = sizeof(struct venus_buffer);
+ src_vq->allow_zero_bytesused = 1;
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = inst->core->dev;
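+	/*
+	 * On Venus 1xx firmware the input buffers are understood to also be
+	 * written by the hardware, so map them bidirectionally there.
+	 */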
+ if (inst->core->res->hfi_version == HFI_VERSION_1XX)
+ src_vq->bidirectional = 1;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &venc_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->drv_priv = inst;
+ dst_vq->buf_struct_size = sizeof(struct venus_buffer);
+ dst_vq->allow_zero_bytesused = 1;
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = inst->core->dev;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void venc_inst_init(struct venus_inst *inst)
+{
+ inst->fmt_cap = &venc_formats[2];
+ inst->fmt_out = &venc_formats[0];
+ inst->width = 1280;
+ inst->height = ALIGN(720, 32);
+ inst->out_width = 1280;
+ inst->out_height = 720;
+ inst->fps = 15;
+ inst->timeperframe.numerator = 1;
+ inst->timeperframe.denominator = 15;
+ inst->hfi_codec = HFI_VIDEO_CODEC_H264;
+}
+
+static int venc_open(struct file *file)
+{
+ struct venus_core *core = video_drvdata(file);
+ struct venus_inst *inst;
+ int ret;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&inst->dpbbufs);
+ INIT_LIST_HEAD(&inst->registeredbufs);
+ INIT_LIST_HEAD(&inst->internalbufs);
+ INIT_LIST_HEAD(&inst->list);
+ mutex_init(&inst->lock);
+
+ inst->core = core;
+ inst->session_type = VIDC_SESSION_TYPE_ENC;
+
+ venus_helper_init_instance(inst);
+
+ ret = pm_runtime_get_sync(core->dev_enc);
+ if (ret < 0)
+ goto err_free_inst;
+
+ ret = venc_ctrl_init(inst);
+ if (ret)
+ goto err_put_sync;
+
+ ret = hfi_session_create(inst, &venc_hfi_ops);
+ if (ret)
+ goto err_ctrl_deinit;
+
+ venc_inst_init(inst);
+
+	/*
+	 * Create an m2m device for every instance; the m2m context
+	 * scheduling is done by the firmware side, so we do not need to
+	 * care about it here.
+	 */
+ inst->m2m_dev = v4l2_m2m_init(&venc_m2m_ops);
+ if (IS_ERR(inst->m2m_dev)) {
+ ret = PTR_ERR(inst->m2m_dev);
+ goto err_session_destroy;
+ }
+
+ inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
+ if (IS_ERR(inst->m2m_ctx)) {
+ ret = PTR_ERR(inst->m2m_ctx);
+ goto err_m2m_release;
+ }
+
+ v4l2_fh_init(&inst->fh, core->vdev_enc);
+
+ inst->fh.ctrl_handler = &inst->ctrl_handler;
+ v4l2_fh_add(&inst->fh);
+ inst->fh.m2m_ctx = inst->m2m_ctx;
+ file->private_data = &inst->fh;
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(inst->m2m_dev);
+err_session_destroy:
+ hfi_session_destroy(inst);
+err_ctrl_deinit:
+ venc_ctrl_deinit(inst);
+err_put_sync:
+ pm_runtime_put_sync(core->dev_enc);
+err_free_inst:
+ kfree(inst);
+ return ret;
+}
+
+static int venc_close(struct file *file)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+ v4l2_m2m_release(inst->m2m_dev);
+ venc_ctrl_deinit(inst);
+ hfi_session_destroy(inst);
+ mutex_destroy(&inst->lock);
+ v4l2_fh_del(&inst->fh);
+ v4l2_fh_exit(&inst->fh);
+
+ pm_runtime_put_sync(inst->core->dev_enc);
+
+ kfree(inst);
+ return 0;
+}
+
+static const struct v4l2_file_operations venc_fops = {
+ .owner = THIS_MODULE,
+ .open = venc_open,
+ .release = venc_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int venc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct video_device *vdev;
+ struct venus_core *core;
+ int ret;
+
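+	/*
+	 * The encoder is instantiated as a child of the Venus core device;
+	 * defer probing until the core driver has bound and set its drvdata.
+	 */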
+ if (!dev->parent)
+ return -EPROBE_DEFER;
+
+ core = dev_get_drvdata(dev->parent);
+ if (!core)
+ return -EPROBE_DEFER;
+
+ if (IS_V3(core) || IS_V4(core)) {
+ core->core1_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(core->core1_clk))
+ return PTR_ERR(core->core1_clk);
+ }
+
+ if (IS_V4(core)) {
+ core->core1_bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(core->core1_bus_clk))
+ return PTR_ERR(core->core1_bus_clk);
+ }
+
+ platform_set_drvdata(pdev, core);
+
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ strlcpy(vdev->name, "qcom-venus-encoder", sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &venc_fops;
+ vdev->ioctl_ops = &venc_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vdev_release;
+
+ core->vdev_enc = vdev;
+ core->dev_enc = dev;
+
+ video_set_drvdata(vdev, core);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+ return ret;
+}
+
+static int venc_remove(struct platform_device *pdev)
+{
+ struct venus_core *core = dev_get_drvdata(pdev->dev.parent);
+
+ video_unregister_device(core->vdev_enc);
+ pm_runtime_disable(core->dev_enc);
+
+ return 0;
+}
+
+static __maybe_unused int venc_runtime_suspend(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (IS_V1(core))
+ return 0;
+
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
+ if (ret)
+ return ret;
+
+ if (IS_V4(core))
+ clk_disable_unprepare(core->core1_bus_clk);
+
+ clk_disable_unprepare(core->core1_clk);
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
+}
+
+static __maybe_unused int venc_runtime_resume(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (IS_V1(core))
+ return 0;
+
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(core->core1_clk);
+ if (ret)
+ goto err_power_disable;
+
+ if (IS_V4(core))
+ ret = clk_prepare_enable(core->core1_bus_clk);
+
+ if (ret)
+ goto err_unprepare_core1;
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
+
+err_unprepare_core1:
+ clk_disable_unprepare(core->core1_clk);
+err_power_disable:
+ venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
+ return ret;
+}
+
+static const struct dev_pm_ops venc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL)
+};
+
+static const struct of_device_id venc_dt_match[] = {
+ { .compatible = "venus-encoder" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, venc_dt_match);
+
+static struct platform_driver qcom_venus_enc_driver = {
+ .probe = venc_probe,
+ .remove = venc_remove,
+ .driver = {
+ .name = "qcom-venus-encoder",
+ .of_match_table = venc_dt_match,
+ .pm = &venc_pm_ops,
+ },
+};
+module_platform_driver(qcom_venus_enc_driver);
+
+MODULE_ALIAS("platform:qcom-venus-encoder");
+MODULE_DESCRIPTION("Qualcomm Venus video encoder driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/qcom/venus/venc.h b/drivers/media/platform/qcom/venus/venc.h
new file mode 100644
index 000000000..9daca669f
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/venc.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_VENC_H__
+#define __VENUS_VENC_H__
+
+struct venus_inst;
+
+int venc_ctrl_init(struct venus_inst *inst);
+void venc_ctrl_deinit(struct venus_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
new file mode 100644
index 000000000..459101728
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+
+#include "core.h"
+#include "venc.h"
+
+#define BITRATE_MIN 32000
+#define BITRATE_MAX 160000000
+#define BITRATE_DEFAULT 1000000
+#define BITRATE_DEFAULT_PEAK (BITRATE_DEFAULT * 2)
+#define BITRATE_STEP 100
+#define SLICE_BYTE_SIZE_MAX 1024
+#define SLICE_BYTE_SIZE_MIN 1024
+#define SLICE_MB_SIZE_MAX 300
+#define INTRA_REFRESH_MBS_MAX 300
+#define AT_SLICE_BOUNDARY \
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
+
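+/*
+ * Split a GOP of gop_size frames into bf B-frames and pf P-frames such
+ * that 1 (the I-frame) + bf + pf == gop_size and bf/pf equals the
+ * requested number of consecutive B-frames. For example (values chosen
+ * purely for illustration): gop_size = 13 and conseq_b = 3 yields
+ * bf = 9, pf = 3, i.e. an IBBBPBBBPBBBP pattern. If no such split
+ * exists the request is rejected with -EINVAL.
+ */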
+static int venc_calc_bpframes(u32 gop_size, u32 conseq_b, u32 *bf, u32 *pf)
+{
+ u32 half = (gop_size - 1) >> 1;
+ u32 b, p, ratio;
+ bool found = false;
+
+ if (!gop_size)
+ return -EINVAL;
+
+ *bf = *pf = 0;
+
+ if (!conseq_b) {
+ *pf = gop_size - 1;
+ return 0;
+ }
+
+ b = p = half;
+
+ for (; b <= gop_size - 1; b++, p--) {
+ if (b % p)
+ continue;
+
+ ratio = b / p;
+
+ if (ratio == conseq_b) {
+ found = true;
+ break;
+ }
+
+ if (ratio > conseq_b)
+ break;
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ if (b + p + 1 != gop_size)
+ return -EINVAL;
+
+ *bf = b;
+ *pf = p;
+
+ return 0;
+}
+
+static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct venus_inst *inst = ctrl_to_inst(ctrl);
+ struct venc_controls *ctr = &inst->controls.enc;
+ u32 bframes;
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ ctr->bitrate_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctr->bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ ctr->bitrate_peak = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ ctr->h264_entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ ctr->profile.mpeg4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctr->profile.h264 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ ctr->profile.vpx = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ctr->level.mpeg4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctr->level.h264 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ ctr->h264_i_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ ctr->h264_p_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ ctr->h264_b_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ ctr->h264_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ ctr->h264_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ ctr->multi_slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ ctr->multi_slice_max_bytes = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ ctr->multi_slice_max_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ ctr->h264_loop_filter_alpha = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ ctr->h264_loop_filter_beta = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ ctr->h264_loop_filter_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ ctr->header_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ret = venc_calc_bpframes(ctrl->val, ctr->num_b_frames, &bframes,
+ &ctr->num_p_frames);
+ if (ret)
+ return ret;
+
+ ctr->gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ ctr->h264_i_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
+ ctr->vp8_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
+ ctr->vp8_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ ret = venc_calc_bpframes(ctr->gop_size, ctrl->val, &bframes,
+ &ctr->num_p_frames);
+ if (ret)
+ return ret;
+
+ ctr->num_b_frames = bframes;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops venc_ctrl_ops = {
+ .s_ctrl = venc_op_s_ctrl,
+};
+
+int venc_ctrl_init(struct venus_inst *inst)
+{
+ int ret;
+
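+	/* 27 is only a hint for the size of the internal control hash table. */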
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 27);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ ~((1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)),
+ V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ 0, V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY,
+ ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) |
+ (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)),
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ AT_SLICE_BOUNDARY,
+ 0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ 1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ 0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX,
+ BITRATE_STEP, BITRATE_DEFAULT);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, BITRATE_MIN, BITRATE_MAX,
+ BITRATE_STEP, BITRATE_DEFAULT_PEAK);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 1, 51, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 1, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, SLICE_BYTE_SIZE_MIN,
+ SLICE_BYTE_SIZE_MAX, 1, SLICE_BYTE_SIZE_MIN);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1,
+ SLICE_MB_SIZE_MAX, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+ 0, INTRA_REFRESH_MBS_MAX, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 12);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, 1, 128, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VPX_MAX_QP, 1, 128, 1, 128);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, 0, (1 << 16) - 1, 1, 0);
+
+ ret = inst->ctrl_handler.error;
+ if (ret)
+ goto err;
+
+ ret = v4l2_ctrl_handler_setup(&inst->ctrl_handler);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ return ret;
+}
+
+void venc_ctrl_deinit(struct venus_inst *inst)
+{
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+}
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
new file mode 100644
index 000000000..05c712e00
--- /dev/null
+++ b/drivers/media/platform/rcar-fcp.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rcar-fcp.c -- R-Car Frame Compression Processor Driver
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/rcar-fcp.h>
+
+struct rcar_fcp_device {
+ struct list_head list;
+ struct device *dev;
+ struct device_dma_parameters dma_parms;
+};
+
+static LIST_HEAD(fcp_devices);
+static DEFINE_MUTEX(fcp_lock);
+
+/* -----------------------------------------------------------------------------
+ * Public API
+ */
+
+/**
+ * rcar_fcp_get - Find and acquire a reference to an FCP instance
+ * @np: Device node of the FCP instance
+ *
+ * Search the list of registered FCP instances for the instance corresponding to
+ * the given device node.
+ *
+ * Return a pointer to the FCP instance, or an ERR_PTR if the instance can't be
+ * found.
+ */
+struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
+{
+ struct rcar_fcp_device *fcp;
+
+ mutex_lock(&fcp_lock);
+
+ list_for_each_entry(fcp, &fcp_devices, list) {
+ if (fcp->dev->of_node != np)
+ continue;
+
+ get_device(fcp->dev);
+ goto done;
+ }
+
+ fcp = ERR_PTR(-EPROBE_DEFER);
+
+done:
+ mutex_unlock(&fcp_lock);
+ return fcp;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_get);
+
+/**
+ * rcar_fcp_put - Release a reference to an FCP instance
+ * @fcp: The FCP instance
+ *
+ * Release the FCP instance acquired by a call to rcar_fcp_get().
+ */
+void rcar_fcp_put(struct rcar_fcp_device *fcp)
+{
+ if (fcp)
+ put_device(fcp->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_put);
+
+struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp)
+{
+ return fcp->dev;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_get_device);
+
+/**
+ * rcar_fcp_enable - Enable an FCP
+ * @fcp: The FCP instance
+ *
+ * Before any memory access through an FCP is performed by a module, the FCP
+ * must be enabled by a call to this function. The enable calls are reference
+ * counted; each successful call must be followed by one rcar_fcp_disable()
+ * call when no more memory transfers can occur through the FCP.
+ *
+ * Return 0 on success or a negative error code if an error occurs. The enable
+ * reference count isn't increased when this function returns an error.
+ */
+int rcar_fcp_enable(struct rcar_fcp_device *fcp)
+{
+ int ret;
+
+ if (!fcp)
+ return 0;
+
+ ret = pm_runtime_get_sync(fcp->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(fcp->dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_enable);
+
+/**
+ * rcar_fcp_disable - Disable an FCP
+ * @fcp: The FCP instance
+ *
+ * This function is the counterpart of rcar_fcp_enable(). As enable calls are
+ * reference counted, a disable call may not disable the FCP synchronously.
+ */
+void rcar_fcp_disable(struct rcar_fcp_device *fcp)
+{
+ if (fcp)
+ pm_runtime_put(fcp->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_disable);
+
+/* -----------------------------------------------------------------------------
+ * Platform Driver
+ */
+
+static int rcar_fcp_probe(struct platform_device *pdev)
+{
+ struct rcar_fcp_device *fcp;
+
+ fcp = devm_kzalloc(&pdev->dev, sizeof(*fcp), GFP_KERNEL);
+ if (fcp == NULL)
+ return -ENOMEM;
+
+ fcp->dev = &pdev->dev;
+
+ fcp->dev->dma_parms = &fcp->dma_parms;
+ dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32));
+
+ pm_runtime_enable(&pdev->dev);
+
+ mutex_lock(&fcp_lock);
+ list_add_tail(&fcp->list, &fcp_devices);
+ mutex_unlock(&fcp_lock);
+
+ platform_set_drvdata(pdev, fcp);
+
+ return 0;
+}
+
+static int rcar_fcp_remove(struct platform_device *pdev)
+{
+ struct rcar_fcp_device *fcp = platform_get_drvdata(pdev);
+
+ mutex_lock(&fcp_lock);
+ list_del(&fcp->list);
+ mutex_unlock(&fcp_lock);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_fcp_of_match[] = {
+ { .compatible = "renesas,fcpf" },
+ { .compatible = "renesas,fcpv" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rcar_fcp_of_match);
+
+static struct platform_driver rcar_fcp_platform_driver = {
+ .probe = rcar_fcp_probe,
+ .remove = rcar_fcp_remove,
+ .driver = {
+ .name = "rcar-fcp",
+ .of_match_table = rcar_fcp_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(rcar_fcp_platform_driver);
+
+MODULE_ALIAS("rcar-fcp");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas FCP Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
new file mode 100644
index 000000000..e3eb8fee2
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_RCAR_CSI2
+ tristate "R-Car MIPI CSI-2 Receiver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select V4L2_FWNODE
+ help
+ Support for Renesas R-Car MIPI CSI-2 receiver.
+ Supports R-Car Gen3 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-csi2.
+
+config VIDEO_RCAR_VIN
+ tristate "R-Car Video Input (VIN) Driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && MEDIA_CONTROLLER
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ ---help---
+ Support for Renesas R-Car Video Input (VIN) driver.
+ Supports R-Car Gen2 and Gen3 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-vin.
diff --git a/drivers/media/platform/rcar-vin/Makefile b/drivers/media/platform/rcar-vin/Makefile
new file mode 100644
index 000000000..00d809f5d
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+rcar-vin-objs = rcar-core.o rcar-dma.o rcar-v4l2.o
+
+obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
+obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin.o
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
new file mode 100644
index 000000000..c389ba9ba
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+
+#include "rcar-vin.h"
+
+/*
+ * The companion CSI-2 receiver driver (rcar-csi2) is known to have
+ * one source pad (pad 0) and four sink pads (pads 1-4). So to
+ * translate a pad on the remote CSI-2 receiver to/from the VIN
+ * internal channel number, simply subtract/add one from the
+ * pad/channel number.
+ */
+#define rvin_group_csi_pad_to_channel(pad) ((pad) - 1)
+#define rvin_group_csi_channel_to_pad(channel) ((channel) + 1)
+
+/*
+ * Not all VINs are created equal; master VINs control the
+ * routing for other VINs. We can figure out which VIN is a
+ * master by looking at its id.
+ */
+#define rvin_group_id_to_master(vin) ((vin) < 4 ? 0 : 4)
+
+#define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev)
+
+/* -----------------------------------------------------------------------------
+ * Media Controller link notification
+ */
+
+/* group lock should be held when calling this function. */
+static int rvin_group_entity_to_csi_id(struct rvin_group *group,
+ struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+ unsigned int i;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++)
+ if (group->csi[i].subdev == sd)
+ return i;
+
+ return -ENODEV;
+}
+
+static unsigned int rvin_group_get_mask(struct rvin_dev *vin,
+ enum rvin_csi_id csi_id,
+ unsigned char channel)
+{
+ const struct rvin_group_route *route;
+ unsigned int mask = 0;
+
+ for (route = vin->info->routes; route->mask; route++) {
+ if (route->vin == vin->id &&
+ route->csi == csi_id &&
+ route->channel == channel) {
+ vin_dbg(vin,
+ "Adding route: vin: %d csi: %d channel: %d\n",
+ route->vin, route->csi, route->channel);
+ mask |= route->mask;
+ }
+ }
+
+ return mask;
+}
+
+/*
+ * Link setup for the links between a VIN and a CSI-2 receiver is a bit
+ * complex. The reason for this is that the register controlling routing
+ * is not present in each VIN instance. There are special VINs which
+ * control routing for themselves and other VINs. There are not many
+ * different possible link combinations that can be enabled at the same
+ * time, therefore all already enabled links which are controlled by a
+ * master VIN need to be taken into account when deciding whether
+ * a new link can be enabled or not.
+ *
+ * 1. Find out which VIN the link the user tries to enable is connected to.
+ * 2. Lookup which master VIN controls the links for this VIN.
+ * 3. Start with a bitmask with all bits set.
+ * 4. For each previously enabled link from the master VIN bitwise AND its
+ * route mask (see documentation for mask in struct rvin_group_route)
+ * with the bitmask.
+ * 5. Bitwise AND the mask for the link the user tries to enable with the bitmask.
+ * 6. If the bitmask is not empty at this point the new link can be enabled
+ * while keeping all previous links enabled. Update the CHSEL value of the
+ * master VIN and inform the user that the link could be enabled.
+ *
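+ * A purely illustrative example (the real masks come from the per-SoC
+ * route tables): if the master VIN already has one enabled link with
+ * route mask 0x0f and the new link's mask is 0x05, the resulting
+ * bitmask is ~0 & 0x0f & 0x05 = 0x05, which is not empty, so the link
+ * is accepted and CHSEL is set to __ffs(0x05) = 0.
+ *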
+ * Please note that no link can be enabled if any VIN in the group is
+ * currently open.
+ */
+static int rvin_group_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct rvin_group *group = container_of(link->graph_obj.mdev,
+ struct rvin_group, mdev);
+ unsigned int master_id, channel, mask_new, i;
+ unsigned int mask = ~0;
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct media_pad *csi_pad;
+ struct rvin_dev *vin = NULL;
+ int csi_id, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
+ if (ret)
+ return ret;
+
+ /* Only care about link enablement for VIN nodes. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED) ||
+ !is_media_entity_v4l2_video_device(link->sink->entity))
+ return 0;
+
+	/*
+	 * Don't allow link changes if any entity in the graph is
+	 * streaming, as modifying the CHSEL register fields can disrupt
+	 * running streams.
+	 */
+ media_device_for_each_entity(entity, &group->mdev)
+ if (entity->stream_count)
+ return -EBUSY;
+
+ mutex_lock(&group->lock);
+
+ /* Find the master VIN that controls the routes. */
+ vdev = media_entity_to_video_device(link->sink->entity);
+ vin = container_of(vdev, struct rvin_dev, vdev);
+ master_id = rvin_group_id_to_master(vin->id);
+
+ if (WARN_ON(!group->vin[master_id])) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Build a mask for already enabled links. */
+ for (i = master_id; i < master_id + 4; i++) {
+ if (!group->vin[i])
+ continue;
+
+ /* Get remote CSI-2, if any. */
+ csi_pad = media_entity_remote_pad(
+ &group->vin[i]->vdev.entity.pads[0]);
+ if (!csi_pad)
+ continue;
+
+ csi_id = rvin_group_entity_to_csi_id(group, csi_pad->entity);
+ channel = rvin_group_csi_pad_to_channel(csi_pad->index);
+
+ mask &= rvin_group_get_mask(group->vin[i], csi_id, channel);
+ }
+
+ /* Add the new link to the existing mask and check if it works. */
+ csi_id = rvin_group_entity_to_csi_id(group, link->source->entity);
+
+ if (csi_id == -ENODEV) {
+ struct v4l2_subdev *sd;
+
+ /*
+ * Make sure the source entity subdevice is registered as
+ * a parallel input of one of the enabled VINs if it is not
+ * one of the CSI-2 subdevices.
+ *
+		 * No hardware configuration is required for parallel inputs,
+		 * so we can return here.
+ */
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i] && group->vin[i]->parallel &&
+ group->vin[i]->parallel->subdev == sd) {
+ group->vin[i]->is_csi = false;
+ ret = 0;
+ goto out;
+ }
+ }
+
+ vin_err(vin, "Subdevice %s not registered to any VIN\n",
+ link->source->entity->name);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ channel = rvin_group_csi_pad_to_channel(link->source->index);
+ mask_new = mask & rvin_group_get_mask(vin, csi_id, channel);
+ vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
+
+ if (!mask_new) {
+ ret = -EMLINK;
+ goto out;
+ }
+
+ /* New valid CHSEL found, set the new value. */
+ ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
+ if (ret)
+ goto out;
+
+ vin->is_csi = true;
+
+out:
+ mutex_unlock(&group->lock);
+
+ return ret;
+}
+
+static const struct media_device_ops rvin_media_ops = {
+ .link_notify = rvin_group_link_notify,
+};
+
+/* -----------------------------------------------------------------------------
+ * Gen3 CSI2 Group Allocator
+ */
+
+/* FIXME: If we ever find a system that supports more than one
+ * group for the whole system, this should be replaced with a
+ * linked list of groups. Eventually all of this should be
+ * replaced with a global device allocator API.
+ *
+ * But for now this works, as on all supported systems there will
+ * be only one group for all instances.
+ */
+
+static DEFINE_MUTEX(rvin_group_lock);
+static struct rvin_group *rvin_group_data;
+
+static void rvin_group_cleanup(struct rvin_group *group)
+{
+ media_device_unregister(&group->mdev);
+ media_device_cleanup(&group->mdev);
+ mutex_destroy(&group->lock);
+}
+
+static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
+{
+ struct media_device *mdev = &group->mdev;
+ const struct of_device_id *match;
+ struct device_node *np;
+ int ret;
+
+ mutex_init(&group->lock);
+
+ /* Count number of VINs in the system */
+ group->count = 0;
+ for_each_matching_node(np, vin->dev->driver->of_match_table)
+ if (of_device_is_available(np))
+ group->count++;
+
+	vin_dbg(vin, "found %u enabled VINs in DT", group->count);
+
+ mdev->dev = vin->dev;
+ mdev->ops = &rvin_media_ops;
+
+ match = of_match_node(vin->dev->driver->of_match_table,
+ vin->dev->of_node);
+
+ strlcpy(mdev->driver_name, KBUILD_MODNAME, sizeof(mdev->driver_name));
+ strlcpy(mdev->model, match->compatible, sizeof(mdev->model));
+ snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
+ dev_name(mdev->dev));
+
+ media_device_init(mdev);
+
+ ret = media_device_register(&group->mdev);
+ if (ret)
+ rvin_group_cleanup(group);
+
+ return ret;
+}
+
+static void rvin_group_release(struct kref *kref)
+{
+ struct rvin_group *group =
+ container_of(kref, struct rvin_group, refcount);
+
+ mutex_lock(&rvin_group_lock);
+
+ rvin_group_data = NULL;
+
+ rvin_group_cleanup(group);
+
+ kfree(group);
+
+ mutex_unlock(&rvin_group_lock);
+}
+
+static int rvin_group_get(struct rvin_dev *vin)
+{
+ struct rvin_group *group;
+ u32 id;
+ int ret;
+
+ /* Make sure VIN id is present and sane */
+ ret = of_property_read_u32(vin->dev->of_node, "renesas,id", &id);
+ if (ret) {
+ vin_err(vin, "%pOF: No renesas,id property found\n",
+ vin->dev->of_node);
+ return -EINVAL;
+ }
+
+ if (id >= RCAR_VIN_NUM) {
+ vin_err(vin, "%pOF: Invalid renesas,id '%u'\n",
+ vin->dev->of_node, id);
+ return -EINVAL;
+ }
+
+ /* Join or create a VIN group */
+ mutex_lock(&rvin_group_lock);
+ if (rvin_group_data) {
+ group = rvin_group_data;
+ kref_get(&group->refcount);
+ } else {
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
+ ret = -ENOMEM;
+ goto err_group;
+ }
+
+ ret = rvin_group_init(group, vin);
+ if (ret) {
+ kfree(group);
+ vin_err(vin, "Failed to initialize group\n");
+ goto err_group;
+ }
+
+ kref_init(&group->refcount);
+
+ rvin_group_data = group;
+ }
+ mutex_unlock(&rvin_group_lock);
+
+ /* Add VIN to group */
+ mutex_lock(&group->lock);
+
+ if (group->vin[id]) {
+ vin_err(vin, "Duplicate renesas,id property value %u\n", id);
+ mutex_unlock(&group->lock);
+ kref_put(&group->refcount, rvin_group_release);
+ return -EINVAL;
+ }
+
+ group->vin[id] = vin;
+
+ vin->id = id;
+ vin->group = group;
+ vin->v4l2_dev.mdev = &group->mdev;
+
+ mutex_unlock(&group->lock);
+
+ return 0;
+err_group:
+ mutex_unlock(&rvin_group_lock);
+ return ret;
+}
+
+static void rvin_group_put(struct rvin_dev *vin)
+{
+ struct rvin_group *group = vin->group;
+
+ mutex_lock(&group->lock);
+
+ vin->group = NULL;
+ vin->v4l2_dev.mdev = NULL;
+
+ if (WARN_ON(group->vin[vin->id] != vin))
+ goto out;
+
+ group->vin[vin->id] = NULL;
+out:
+ mutex_unlock(&group->lock);
+
+ kref_put(&group->refcount, rvin_group_release);
+}
+
+/* -----------------------------------------------------------------------------
+ * Async notifier
+ */
+
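+/*
+ * Return the first pad of @sd matching @direction, pad 0 if the entity
+ * has at most one pad, or -EINVAL if no pad matches.
+ */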
+static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
+{
+ unsigned int pad;
+
+ if (sd->entity.num_pads <= 1)
+ return 0;
+
+ for (pad = 0; pad < sd->entity.num_pads; pad++)
+ if (sd->entity.pads[pad].flags & direction)
+ return pad;
+
+ return -EINVAL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Parallel async notifier
+ */
+
+/* The vin lock should be held when calling the subdevice attach and detach */
+static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
+ struct v4l2_subdev *subdev)
+{
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ /* Find source and sink pad of remote subdevice */
+ ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
+ if (ret < 0)
+ return ret;
+ vin->parallel->source_pad = ret;
+
+ ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
+ vin->parallel->sink_pad = ret < 0 ? 0 : ret;
+
+ if (vin->info->use_mc) {
+ vin->parallel->subdev = subdev;
+ return 0;
+ }
+
+ /* Find a media bus format supported by both the subdevice and the VIN */
+ vin->mbus_code = 0;
+ code.index = 0;
+ code.pad = vin->parallel->source_pad;
+ while (!vin->mbus_code &&
+ !v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
+ code.index++;
+ switch (code.code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vin->mbus_code = code.code;
+ vin_dbg(vin, "Found media bus format for %s: %d\n",
+ subdev->name, vin->mbus_code);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!vin->mbus_code) {
+ vin_err(vin, "Unsupported media bus format for %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
+
+ /* Read tvnorms */
+ ret = v4l2_subdev_call(subdev, video, g_tvnorms, &vin->vdev.tvnorms);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ /* Read standard */
+ vin->std = V4L2_STD_UNKNOWN;
+ ret = v4l2_subdev_call(subdev, video, g_std, &vin->std);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* Add the controls */
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler,
+ NULL);
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ return ret;
+ }
+
+ vin->vdev.ctrl_handler = &vin->ctrl_handler;
+
+ vin->parallel->subdev = subdev;
+
+ return 0;
+}
+
+static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
+{
+ rvin_v4l2_unregister(vin);
+ vin->parallel->subdev = NULL;
+
+ if (!vin->info->use_mc) {
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->vdev.ctrl_handler = NULL;
+ }
+}
+
+static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ struct media_entity *source;
+ struct media_entity *sink;
+ int ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret < 0) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ if (!video_is_registered(&vin->vdev)) {
+ ret = rvin_v4l2_register(vin);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!vin->info->use_mc)
+ return 0;
+
+ /* If we're running with media-controller, link the subdevs. */
+ source = &vin->parallel->subdev->entity;
+ sink = &vin->vdev.entity;
+
+ ret = media_create_pad_link(source, vin->parallel->source_pad,
+ sink, vin->parallel->sink_pad, 0);
+ if (ret)
+ vin_err(vin, "Error adding link from %s to %s: %d\n",
+ source->name, sink->name, ret);
+
+ return ret;
+}
+
+static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+
+ vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
+
+ mutex_lock(&vin->lock);
+ rvin_parallel_subdevice_detach(vin);
+ mutex_unlock(&vin->lock);
+}
+
+static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ int ret;
+
+ mutex_lock(&vin->lock);
+ ret = rvin_parallel_subdevice_attach(vin, subdev);
+ mutex_unlock(&vin->lock);
+ if (ret)
+ return ret;
+
+ v4l2_set_subdev_hostdata(subdev, vin);
+
+ vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
+ subdev->name, vin->parallel->source_pad,
+ vin->parallel->sink_pad);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rvin_parallel_notify_ops = {
+ .bound = rvin_parallel_notify_bound,
+ .unbind = rvin_parallel_notify_unbind,
+ .complete = rvin_parallel_notify_complete,
+};
+
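+/*
+ * Called for each parallel endpoint found in DT. Only port 0 endpoint 0
+ * is accepted; all other endpoints are reported as not connected.
+ */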
+static int rvin_parallel_parse_v4l2(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = dev_get_drvdata(dev);
+ struct rvin_parallel_entity *rvpe =
+ container_of(asd, struct rvin_parallel_entity, asd);
+
+ if (vep->base.port || vep->base.id)
+ return -ENOTCONN;
+
+ vin->parallel = rvpe;
+ vin->parallel->mbus_type = vep->bus_type;
+
+ switch (vin->parallel->mbus_type) {
+ case V4L2_MBUS_PARALLEL:
+ vin_dbg(vin, "Found PARALLEL media bus\n");
+ vin->parallel->mbus_flags = vep->bus.parallel.flags;
+ break;
+ case V4L2_MBUS_BT656:
+ vin_dbg(vin, "Found BT656 media bus\n");
+ vin->parallel->mbus_flags = 0;
+ break;
+ default:
+ vin_err(vin, "Unknown media bus type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvin_parallel_init(struct rvin_dev *vin)
+{
+ int ret;
+
+ ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ vin->dev, &vin->notifier, sizeof(struct rvin_parallel_entity),
+ 0, rvin_parallel_parse_v4l2);
+ if (ret)
+ return ret;
+
+ /* If using mc, it's fine not to have any input registered. */
+ if (!vin->parallel)
+ return vin->info->use_mc ? 0 : -ENODEV;
+
+ vin_dbg(vin, "Found parallel subdevice %pOF\n",
+ to_of_node(vin->parallel->asd.match.fwnode));
+
+ vin->notifier.ops = &rvin_parallel_notify_ops;
+ ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_notifier_cleanup(&vin->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Group async notifier
+ */
+
+static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ const struct rvin_group_route *route;
+ unsigned int i;
+ int ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ /* Register all video nodes for the group. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i] &&
+ !video_is_registered(&vin->group->vin[i]->vdev)) {
+ ret = rvin_v4l2_register(vin->group->vin[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Create all media device links between VINs and CSI-2 receivers. */
+ mutex_lock(&vin->group->lock);
+ for (route = vin->info->routes; route->mask; route++) {
+ struct media_pad *source_pad, *sink_pad;
+ struct media_entity *source, *sink;
+ unsigned int source_idx;
+
+ /* Check that VIN is part of the group. */
+ if (!vin->group->vin[route->vin])
+ continue;
+
+ /* Check that the VIN's master is part of the group. */
+ if (!vin->group->vin[rvin_group_id_to_master(route->vin)])
+ continue;
+
+ /* Check that CSI-2 is part of the group. */
+ if (!vin->group->csi[route->csi].subdev)
+ continue;
+
+ source = &vin->group->csi[route->csi].subdev->entity;
+ source_idx = rvin_group_csi_channel_to_pad(route->channel);
+ source_pad = &source->pads[source_idx];
+
+ sink = &vin->group->vin[route->vin]->vdev.entity;
+ sink_pad = &sink->pads[0];
+
+ /* Skip if link already exists. */
+ if (media_entity_find_link(source_pad, sink_pad))
+ continue;
+
+ ret = media_create_pad_link(source, source_idx, sink, 0, 0);
+ if (ret) {
+ vin_err(vin, "Error adding link from %s to %s\n",
+ source->name, sink->name);
+ break;
+ }
+ }
+ mutex_unlock(&vin->group->lock);
+
+ return ret;
+}
+
+static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ for (i = 0; i < RCAR_VIN_NUM; i++)
+ if (vin->group->vin[i])
+ rvin_v4l2_unregister(vin->group->vin[i]);
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->csi[i].fwnode != asd->match.fwnode)
+ continue;
+ vin->group->csi[i].subdev = NULL;
+ vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+}
+
+static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->csi[i].fwnode != asd->match.fwnode)
+ continue;
+ vin->group->csi[i].subdev = subdev;
+ vin_dbg(vin, "Bound CSI-2 %s to slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
+ .bound = rvin_group_notify_bound,
+ .unbind = rvin_group_notify_unbind,
+ .complete = rvin_group_notify_complete,
+};
+
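+/*
+ * Called for each endpoint on the CSI-2 port (port 1). The remote CSI-2
+ * fwnode is recorded in the group slot given by the endpoint id, skipping
+ * nodes that are disabled or already handled by another VIN.
+ */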
+static int rvin_mc_parse_of_endpoint(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = dev_get_drvdata(dev);
+
+ if (vep->base.port != 1 || vep->base.id >= RVIN_CSI_MAX)
+ return -EINVAL;
+
+ if (!of_device_is_available(to_of_node(asd->match.fwnode))) {
+ vin_dbg(vin, "OF device %pOF disabled, ignoring\n",
+ to_of_node(asd->match.fwnode));
+ return -ENOTCONN;
+ }
+
+ if (vin->group->csi[vep->base.id].fwnode) {
+ vin_dbg(vin, "OF device %pOF already handled\n",
+ to_of_node(asd->match.fwnode));
+ return -ENOTCONN;
+ }
+
+ vin->group->csi[vep->base.id].fwnode = asd->match.fwnode;
+
+ vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
+ to_of_node(asd->match.fwnode), vep->base.id);
+
+ return 0;
+}
+
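+/*
+ * Parse the group OF graph. This is a no-op until the last VIN of the
+ * group has probed; only then are the CSI-2 endpoints of all VINs parsed
+ * and the single group notifier registered.
+ */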
+static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
+{
+ unsigned int count = 0;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&vin->group->lock);
+
+ /* If not all VINs are registered, don't register the notifier. */
+ for (i = 0; i < RCAR_VIN_NUM; i++)
+ if (vin->group->vin[i])
+ count++;
+
+ if (vin->group->count != count) {
+ mutex_unlock(&vin->group->lock);
+ return 0;
+ }
+
+ /*
+ * Have all VINs look for CSI-2 subdevices. Some subdevices will
+ * overlap, but the parser function can handle it, so each subdevice
+ * will only be registered once with the group notifier.
+ */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (!vin->group->vin[i])
+ continue;
+
+ ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ vin->group->vin[i]->dev, &vin->group->notifier,
+ sizeof(struct v4l2_async_subdev), 1,
+ rvin_mc_parse_of_endpoint);
+ if (ret) {
+ mutex_unlock(&vin->group->lock);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ if (!vin->group->notifier.num_subdevs)
+ return 0;
+
+ vin->group->notifier.ops = &rvin_group_notify_ops;
+ ret = v4l2_async_notifier_register(&vin->v4l2_dev,
+ &vin->group->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rvin_mc_init(struct rvin_dev *vin)
+{
+ int ret;
+
+ vin->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
+ if (ret)
+ return ret;
+
+ ret = rvin_group_get(vin);
+ if (ret)
+ return ret;
+
+ ret = rvin_mc_parse_of_graph(vin);
+ if (ret)
+ rvin_group_put(vin);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static const struct rvin_info rcar_info_h1 = {
+ .model = RCAR_H1,
+ .use_mc = false,
+ .max_width = 2048,
+ .max_height = 2048,
+};
+
+static const struct rvin_info rcar_info_m1 = {
+ .model = RCAR_M1,
+ .use_mc = false,
+ .max_width = 2048,
+ .max_height = 2048,
+};
+
+static const struct rvin_info rcar_info_gen2 = {
+ .model = RCAR_GEN2,
+ .use_mc = false,
+ .max_width = 2048,
+ .max_height = 2048,
+};
+
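+/*
+ * Each route describes which CSI-2 receiver and virtual channel can feed
+ * which VIN. The mask is understood to be the set of channel-routing
+ * (CHSEL) register values for which the route is valid; a zero mask
+ * terminates the table.
+ */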
+static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 4, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 6, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
+ { .csi = RVIN_CSI41, .channel = 2, .vin = 6, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 7, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI41, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a7795 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a7795_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a7795es1_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 0, .vin = 0, .mask = BIT(2) | BIT(5) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
+ { .csi = RVIN_CSI21, .channel = 0, .vin = 1, .mask = BIT(1) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 1, .vin = 1, .mask = BIT(5) },
+ { .csi = RVIN_CSI21, .channel = 0, .vin = 2, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 2, .vin = 2, .mask = BIT(5) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) },
+ { .csi = RVIN_CSI21, .channel = 1, .vin = 3, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 3, .vin = 3, .mask = BIT(5) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 0, .vin = 4, .mask = BIT(2) | BIT(5) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
+ { .csi = RVIN_CSI21, .channel = 0, .vin = 5, .mask = BIT(1) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 5, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 1, .vin = 5, .mask = BIT(5) },
+ { .csi = RVIN_CSI21, .channel = 0, .vin = 6, .mask = BIT(0) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 6, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
+ { .csi = RVIN_CSI41, .channel = 2, .vin = 6, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 2, .vin = 6, .mask = BIT(5) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 7, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) },
+ { .csi = RVIN_CSI21, .channel = 1, .vin = 7, .mask = BIT(2) },
+ { .csi = RVIN_CSI41, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { .csi = RVIN_CSI21, .channel = 3, .vin = 7, .mask = BIT(5) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a7795es1 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a7795es1_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 6, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 6, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 7, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a7796 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a7796_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 4, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 6, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 6, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 7, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77965 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77965_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77970_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77970 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77970_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77995 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77995_routes,
+};
+
+static const struct of_device_id rvin_of_id_table[] = {
+ {
+ .compatible = "renesas,vin-r8a7778",
+ .data = &rcar_info_m1,
+ },
+ {
+ .compatible = "renesas,vin-r8a7779",
+ .data = &rcar_info_h1,
+ },
+ {
+ .compatible = "renesas,vin-r8a7790",
+ .data = &rcar_info_gen2,
+ },
+ {
+ .compatible = "renesas,vin-r8a7791",
+ .data = &rcar_info_gen2,
+ },
+ {
+ .compatible = "renesas,vin-r8a7793",
+ .data = &rcar_info_gen2,
+ },
+ {
+ .compatible = "renesas,vin-r8a7794",
+ .data = &rcar_info_gen2,
+ },
+ {
+ .compatible = "renesas,rcar-gen2-vin",
+ .data = &rcar_info_gen2,
+ },
+ {
+ .compatible = "renesas,vin-r8a7795",
+ .data = &rcar_info_r8a7795,
+ },
+ {
+ .compatible = "renesas,vin-r8a7796",
+ .data = &rcar_info_r8a7796,
+ },
+ {
+ .compatible = "renesas,vin-r8a77965",
+ .data = &rcar_info_r8a77965,
+ },
+ {
+ .compatible = "renesas,vin-r8a77970",
+ .data = &rcar_info_r8a77970,
+ },
+ {
+ .compatible = "renesas,vin-r8a77995",
+ .data = &rcar_info_r8a77995,
+ },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rvin_of_id_table);
+
+static const struct soc_device_attribute r8a7795es1[] = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = &rcar_info_r8a7795es1,
+ },
+ { /* Sentinel */ }
+};
+
+static int rcar_vin_probe(struct platform_device *pdev)
+{
+ const struct soc_device_attribute *attr;
+ struct rvin_dev *vin;
+ struct resource *mem;
+ int irq, ret;
+
+ vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
+ if (!vin)
+ return -ENOMEM;
+
+ vin->dev = &pdev->dev;
+ vin->info = of_device_get_match_data(&pdev->dev);
+
+ /*
+ * Special care is needed on r8a7795 ES1.x since it
+ * uses different routing than r8a7795 ES2.0.
+ */
+ attr = soc_device_match(r8a7795es1);
+ if (attr)
+ vin->info = attr->data;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL)
+ return -EINVAL;
+
+ vin->base = devm_ioremap_resource(vin->dev, mem);
+ if (IS_ERR(vin->base))
+ return PTR_ERR(vin->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = rvin_dma_register(vin, irq);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, vin);
+
+ if (vin->info->use_mc) {
+ ret = rvin_mc_init(vin);
+ if (ret)
+ goto error_dma_unregister;
+ }
+
+ ret = rvin_parallel_init(vin);
+ if (ret)
+ goto error_group_unregister;
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+error_group_unregister:
+ if (vin->info->use_mc) {
+ mutex_lock(&vin->group->lock);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_notifier_unregister(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
+ }
+ mutex_unlock(&vin->group->lock);
+ rvin_group_put(vin);
+ }
+
+error_dma_unregister:
+ rvin_dma_unregister(vin);
+
+ return ret;
+}
+
+static int rcar_vin_remove(struct platform_device *pdev)
+{
+ struct rvin_dev *vin = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ rvin_v4l2_unregister(vin);
+
+ v4l2_async_notifier_unregister(&vin->notifier);
+ v4l2_async_notifier_cleanup(&vin->notifier);
+
+ if (vin->info->use_mc) {
+ mutex_lock(&vin->group->lock);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_notifier_unregister(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
+ }
+ mutex_unlock(&vin->group->lock);
+ rvin_group_put(vin);
+ } else {
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ }
+
+ rvin_dma_unregister(vin);
+
+ return 0;
+}
+
+static struct platform_driver rcar_vin_driver = {
+ .driver = {
+ .name = "rcar-vin",
+ .of_match_table = rvin_of_id_table,
+ },
+ .probe = rcar_vin_probe,
+ .remove = rcar_vin_remove,
+};
+
+module_platform_driver(rcar_vin_driver);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
new file mode 100644
index 000000000..127658c28
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -0,0 +1,1108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Renesas R-Car MIPI CSI-2 Receiver
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sys_soc.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+struct rcar_csi2;
+
+/* Register offsets and bits */
+
+/* Control Timing Select */
+#define TREF_REG 0x00
+#define TREF_TREF BIT(0)
+
+/* Software Reset */
+#define SRST_REG 0x04
+#define SRST_SRST BIT(0)
+
+/* PHY Operation Control */
+#define PHYCNT_REG 0x08
+#define PHYCNT_SHUTDOWNZ BIT(17)
+#define PHYCNT_RSTZ BIT(16)
+#define PHYCNT_ENABLECLK BIT(4)
+#define PHYCNT_ENABLE_3 BIT(3)
+#define PHYCNT_ENABLE_2 BIT(2)
+#define PHYCNT_ENABLE_1 BIT(1)
+#define PHYCNT_ENABLE_0 BIT(0)
+
+/* Checksum Control */
+#define CHKSUM_REG 0x0c
+#define CHKSUM_ECC_EN BIT(1)
+#define CHKSUM_CRC_EN BIT(0)
+
+/*
+ * Channel Data Type Select
+ * VCDT[0-15]: Channel 1 VCDT[16-31]: Channel 2
+ * VCDT2[0-15]: Channel 3 VCDT2[16-31]: Channel 4
+ */
+#define VCDT_REG 0x10
+#define VCDT2_REG 0x14
+#define VCDT_VCDTN_EN BIT(15)
+#define VCDT_SEL_VC(n) (((n) & 0x3) << 8)
+#define VCDT_SEL_DTN_ON BIT(6)
+#define VCDT_SEL_DT(n) (((n) & 0x3f) << 0)
+
+/* Frame Data Type Select */
+#define FRDT_REG 0x18
+
+/* Field Detection Control */
+#define FLD_REG 0x1c
+#define FLD_FLD_NUM(n) (((n) & 0xff) << 16)
+#define FLD_FLD_EN4 BIT(3)
+#define FLD_FLD_EN3 BIT(2)
+#define FLD_FLD_EN2 BIT(1)
+#define FLD_FLD_EN BIT(0)
+
+/* Automatic Standby Control */
+#define ASTBY_REG 0x20
+
+/* Long Data Type Setting 0 */
+#define LNGDT0_REG 0x28
+
+/* Long Data Type Setting 1 */
+#define LNGDT1_REG 0x2c
+
+/* Interrupt Enable */
+#define INTEN_REG 0x30
+
+/* Interrupt Source Mask */
+#define INTCLOSE_REG 0x34
+
+/* Interrupt Status Monitor */
+#define INTSTATE_REG 0x38
+#define INTSTATE_INT_ULPS_START BIT(7)
+#define INTSTATE_INT_ULPS_END BIT(6)
+
+/* Interrupt Error Status Monitor */
+#define INTERRSTATE_REG 0x3c
+
+/* Short Packet Data */
+#define SHPDAT_REG 0x40
+
+/* Short Packet Count */
+#define SHPCNT_REG 0x44
+
+/* LINK Operation Control */
+#define LINKCNT_REG 0x48
+#define LINKCNT_MONITOR_EN BIT(31)
+#define LINKCNT_REG_MONI_PACT_EN BIT(25)
+#define LINKCNT_ICLK_NONSTOP BIT(24)
+
+/* Lane Swap */
+#define LSWAP_REG 0x4c
+#define LSWAP_L3SEL(n) (((n) & 0x3) << 6)
+#define LSWAP_L2SEL(n) (((n) & 0x3) << 4)
+#define LSWAP_L1SEL(n) (((n) & 0x3) << 2)
+#define LSWAP_L0SEL(n) (((n) & 0x3) << 0)
+
+/* PHY Test Interface Write Register */
+#define PHTW_REG 0x50
+#define PHTW_DWEN BIT(24)
+#define PHTW_TESTDIN_DATA(n) (((n & 0xff)) << 16)
+#define PHTW_CWEN BIT(8)
+#define PHTW_TESTDIN_CODE(n) ((n & 0xff))
+
+struct phtw_value {
+ u16 data;
+ u16 code;
+};
+
+struct rcsi2_mbps_reg {
+ u16 mbps;
+ u16 reg;
+};
+
+static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
+ { .mbps = 80, .reg = 0x86 },
+ { .mbps = 90, .reg = 0x86 },
+ { .mbps = 100, .reg = 0x87 },
+ { .mbps = 110, .reg = 0x87 },
+ { .mbps = 120, .reg = 0x88 },
+ { .mbps = 130, .reg = 0x88 },
+ { .mbps = 140, .reg = 0x89 },
+ { .mbps = 150, .reg = 0x89 },
+ { .mbps = 160, .reg = 0x8a },
+ { .mbps = 170, .reg = 0x8a },
+ { .mbps = 180, .reg = 0x8b },
+ { .mbps = 190, .reg = 0x8b },
+ { .mbps = 205, .reg = 0x8c },
+ { .mbps = 220, .reg = 0x8d },
+ { .mbps = 235, .reg = 0x8e },
+ { .mbps = 250, .reg = 0x8e },
+ { /* sentinel */ },
+};
+
+static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x20 },
+ { .mbps = 100, .reg = 0x40 },
+ { .mbps = 110, .reg = 0x02 },
+ { .mbps = 130, .reg = 0x22 },
+ { .mbps = 140, .reg = 0x42 },
+ { .mbps = 150, .reg = 0x04 },
+ { .mbps = 170, .reg = 0x24 },
+ { .mbps = 180, .reg = 0x44 },
+ { .mbps = 200, .reg = 0x06 },
+ { .mbps = 220, .reg = 0x26 },
+ { .mbps = 240, .reg = 0x46 },
+ { .mbps = 250, .reg = 0x08 },
+ { .mbps = 270, .reg = 0x28 },
+ { .mbps = 300, .reg = 0x0a },
+ { .mbps = 330, .reg = 0x2a },
+ { .mbps = 360, .reg = 0x4a },
+ { .mbps = 400, .reg = 0x0c },
+ { .mbps = 450, .reg = 0x2c },
+ { .mbps = 500, .reg = 0x0e },
+ { .mbps = 550, .reg = 0x2e },
+ { .mbps = 600, .reg = 0x10 },
+ { .mbps = 650, .reg = 0x30 },
+ { .mbps = 700, .reg = 0x12 },
+ { .mbps = 750, .reg = 0x32 },
+ { .mbps = 800, .reg = 0x52 },
+ { .mbps = 850, .reg = 0x72 },
+ { .mbps = 900, .reg = 0x14 },
+ { .mbps = 950, .reg = 0x34 },
+ { .mbps = 1000, .reg = 0x54 },
+ { .mbps = 1050, .reg = 0x74 },
+ { .mbps = 1125, .reg = 0x16 },
+ { /* sentinel */ },
+};
+
+/* PHY Test Interface Clear */
+#define PHTC_REG 0x58
+#define PHTC_TESTCLR BIT(0)
+
+/* PHY Frequency Control */
+#define PHYPLL_REG 0x68
+#define PHYPLL_HSFREQRANGE(n) ((n) << 16)
+
+static const struct rcsi2_mbps_reg hsfreqrange_h3_v3h_m3n[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x25 },
+ { .mbps = 350, .reg = 0x35 },
+ { .mbps = 400, .reg = 0x05 },
+ { .mbps = 450, .reg = 0x16 },
+ { .mbps = 500, .reg = 0x26 },
+ { .mbps = 550, .reg = 0x37 },
+ { .mbps = 600, .reg = 0x07 },
+ { .mbps = 650, .reg = 0x18 },
+ { .mbps = 700, .reg = 0x28 },
+ { .mbps = 750, .reg = 0x39 },
+ { .mbps = 800, .reg = 0x09 },
+ { .mbps = 850, .reg = 0x19 },
+ { .mbps = 900, .reg = 0x29 },
+ { .mbps = 950, .reg = 0x3a },
+ { .mbps = 1000, .reg = 0x0a },
+ { .mbps = 1050, .reg = 0x1a },
+ { .mbps = 1100, .reg = 0x2a },
+ { .mbps = 1150, .reg = 0x3b },
+ { .mbps = 1200, .reg = 0x0b },
+ { .mbps = 1250, .reg = 0x1b },
+ { .mbps = 1300, .reg = 0x2b },
+ { .mbps = 1350, .reg = 0x3c },
+ { .mbps = 1400, .reg = 0x0c },
+ { .mbps = 1450, .reg = 0x1c },
+ { .mbps = 1500, .reg = 0x2c },
+ { /* sentinel */ },
+};
+
+static const struct rcsi2_mbps_reg hsfreqrange_m3w_h3es1[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x05 },
+ { .mbps = 350, .reg = 0x15 },
+ { .mbps = 400, .reg = 0x25 },
+ { .mbps = 450, .reg = 0x06 },
+ { .mbps = 500, .reg = 0x16 },
+ { .mbps = 550, .reg = 0x07 },
+ { .mbps = 600, .reg = 0x17 },
+ { .mbps = 650, .reg = 0x08 },
+ { .mbps = 700, .reg = 0x18 },
+ { .mbps = 750, .reg = 0x09 },
+ { .mbps = 800, .reg = 0x19 },
+ { .mbps = 850, .reg = 0x29 },
+ { .mbps = 900, .reg = 0x39 },
+ { .mbps = 950, .reg = 0x0a },
+ { .mbps = 1000, .reg = 0x1a },
+ { .mbps = 1050, .reg = 0x2a },
+ { .mbps = 1100, .reg = 0x3a },
+ { .mbps = 1150, .reg = 0x0b },
+ { .mbps = 1200, .reg = 0x1b },
+ { .mbps = 1250, .reg = 0x2b },
+ { .mbps = 1300, .reg = 0x3b },
+ { .mbps = 1350, .reg = 0x0c },
+ { .mbps = 1400, .reg = 0x1c },
+ { .mbps = 1450, .reg = 0x2c },
+ { .mbps = 1500, .reg = 0x3c },
+ { /* sentinel */ },
+};
+
+/* PHY ESC Error Monitor */
+#define PHEERM_REG 0x74
+
+/* PHY Clock Lane Monitor */
+#define PHCLM_REG 0x78
+#define PHCLM_STOPSTATECKL BIT(0)
+
+/* PHY Data Lane Monitor */
+#define PHDLM_REG 0x7c
+
+/* CSI0CLK Frequency Configuration Preset Register */
+#define CSI0CLKFCPR_REG 0x260
+#define CSI0CLKFREQRANGE(n) ((n & 0x3f) << 16)
+
+struct rcar_csi2_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int bpp;
+};
+
+static const struct rcar_csi2_format rcar_csi2_formats[] = {
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = 0x24, .bpp = 24 },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .bpp = 16 },
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .bpp = 16 },
+ { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .bpp = 16 },
+ { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .bpp = 20 },
+};
+
+static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcar_csi2_formats); i++)
+ if (rcar_csi2_formats[i].code == code)
+ return &rcar_csi2_formats[i];
+
+ return NULL;
+}
+
+enum rcar_csi2_pads {
+ RCAR_CSI2_SINK,
+ RCAR_CSI2_SOURCE_VC0,
+ RCAR_CSI2_SOURCE_VC1,
+ RCAR_CSI2_SOURCE_VC2,
+ RCAR_CSI2_SOURCE_VC3,
+ NR_OF_RCAR_CSI2_PAD,
+};
+
+struct rcar_csi2_info {
+ int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
+ int (*confirm_start)(struct rcar_csi2 *priv);
+ const struct rcsi2_mbps_reg *hsfreqrange;
+ unsigned int csi0clkfreqrange;
+ bool clear_ulps;
+};
+
+struct rcar_csi2 {
+ struct device *dev;
+ void __iomem *base;
+ const struct rcar_csi2_info *info;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *remote;
+
+ struct v4l2_mbus_framefmt mf;
+
+ struct mutex lock;
+ int stream_count;
+
+ unsigned short lanes;
+ unsigned char lane_swap[4];
+};
+
+static inline struct rcar_csi2 *sd_to_csi2(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rcar_csi2, subdev);
+}
+
+static inline struct rcar_csi2 *notifier_to_csi2(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rcar_csi2, notifier);
+}
+
+static u32 rcsi2_read(struct rcar_csi2 *priv, unsigned int reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static void rcsi2_reset(struct rcar_csi2 *priv)
+{
+ rcsi2_write(priv, SRST_REG, SRST_SRST);
+ usleep_range(100, 150);
+ rcsi2_write(priv, SRST_REG, 0);
+}
+
+static int rcsi2_wait_phy_start(struct rcar_csi2 *priv)
+{
+ unsigned int timeout;
+
+ /* Wait for the clock and data lanes to enter LP-11 state. */
+ for (timeout = 0; timeout <= 20; timeout++) {
+ const u32 lane_mask = (1 << priv->lanes) - 1;
+
+ if ((rcsi2_read(priv, PHCLM_REG) & PHCLM_STOPSTATECKL) &&
+ (rcsi2_read(priv, PHDLM_REG) & lane_mask) == lane_mask)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(priv->dev, "Timeout waiting for LP-11 state\n");
+
+ return -ETIMEDOUT;
+}
+
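+/*
+ * Program the PHY PLL with the hsfreqrange entry closest to the requested
+ * rate, preferring the lower entry when the rate falls exactly between two
+ * entries.
+ */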
+static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ const struct rcsi2_mbps_reg *hsfreq;
+ const struct rcsi2_mbps_reg *hsfreq_prev = NULL;
+
+ for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) {
+ if (hsfreq->mbps >= mbps)
+ break;
+ hsfreq_prev = hsfreq;
+ }
+
+ if (!hsfreq->mbps) {
+ dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps);
+ return -ERANGE;
+ }
+
+ if (hsfreq_prev &&
+ ((mbps - hsfreq_prev->mbps) <= (hsfreq->mbps - mbps)))
+ hsfreq = hsfreq_prev;
+
+ rcsi2_write(priv, PHYPLL_REG, PHYPLL_HSFREQRANGE(hsfreq->reg));
+
+ return 0;
+}
+
+static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp)
+{
+ struct v4l2_subdev *source;
+ struct v4l2_ctrl *ctrl;
+ u64 mbps;
+
+ if (!priv->remote)
+ return -ENODEV;
+
+ source = priv->remote;
+
+ /* Read the pixel rate control from remote. */
+ ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl) {
+ dev_err(priv->dev, "no pixel rate control in subdev %s\n",
+ source->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the D-PHY link rate in Mbps.
+ * link_freq = (pixel_rate * bits_per_sample) / (2 * nr_of_lanes)
+ * bps = link_freq * 2
+ */
+ mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp;
+ do_div(mbps, priv->lanes * 1000000);
+
+ return mbps;
+}
+
+static int rcsi2_start(struct rcar_csi2 *priv)
+{
+ const struct rcar_csi2_format *format;
+ u32 phycnt, vcdt = 0, vcdt2 = 0;
+ unsigned int i;
+ int mbps, ret;
+
+ dev_dbg(priv->dev, "Input size (%ux%u%c)\n",
+ priv->mf.width, priv->mf.height,
+ priv->mf.field == V4L2_FIELD_NONE ? 'p' : 'i');
+
+ /* Code is validated in set_fmt. */
+ format = rcsi2_code_to_fmt(priv->mf.code);
+ if (!format)
+ return -EINVAL;
+
+ /*
+ * Enable all Virtual Channels.
+ *
+ * NOTE: It's not possible to get an individual datatype for each
+ * source virtual channel. Once this is possible in V4L2
+ * it should be used here.
+ */
+ for (i = 0; i < 4; i++) {
+ u32 vcdt_part;
+
+ vcdt_part = VCDT_SEL_VC(i) | VCDT_VCDTN_EN | VCDT_SEL_DTN_ON |
+ VCDT_SEL_DT(format->datatype);
+
+ /* Store in correct reg and offset. */
+ if (i < 2)
+ vcdt |= vcdt_part << ((i % 2) * 16);
+ else
+ vcdt2 |= vcdt_part << ((i % 2) * 16);
+ }
+
+ phycnt = PHYCNT_ENABLECLK;
+ phycnt |= (1 << priv->lanes) - 1;
+
+ mbps = rcsi2_calc_mbps(priv, format->bpp);
+ if (mbps < 0)
+ return mbps;
+
+ /* Init */
+ rcsi2_write(priv, TREF_REG, TREF_TREF);
+ rcsi2_reset(priv);
+ rcsi2_write(priv, PHTC_REG, 0);
+
+ /* Configure */
+ rcsi2_write(priv, FLD_REG, FLD_FLD_NUM(2) | FLD_FLD_EN4 |
+ FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN);
+ rcsi2_write(priv, VCDT_REG, vcdt);
+ rcsi2_write(priv, VCDT2_REG, vcdt2);
+ /* Lanes are zero indexed. */
+ rcsi2_write(priv, LSWAP_REG,
+ LSWAP_L0SEL(priv->lane_swap[0] - 1) |
+ LSWAP_L1SEL(priv->lane_swap[1] - 1) |
+ LSWAP_L2SEL(priv->lane_swap[2] - 1) |
+ LSWAP_L3SEL(priv->lane_swap[3] - 1));
+
+ /* Start */
+ if (priv->info->init_phtw) {
+ ret = priv->info->init_phtw(priv, mbps);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->info->hsfreqrange) {
+ ret = rcsi2_set_phypll(priv, mbps);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->info->csi0clkfreqrange)
+ rcsi2_write(priv, CSI0CLKFCPR_REG,
+ CSI0CLKFREQRANGE(priv->info->csi0clkfreqrange));
+
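+ /*
+ * Bring the D-PHY up in stages: enable the clock and data lanes, then
+ * deassert SHUTDOWNZ and finally RSTZ, before waiting for the lanes to
+ * reach the LP-11 stop state.
+ */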
+ rcsi2_write(priv, PHYCNT_REG, phycnt);
+ rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
+ LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
+ rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ);
+ rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ);
+
+ ret = rcsi2_wait_phy_start(priv);
+ if (ret)
+ return ret;
+
+ /* Confirm start */
+ if (priv->info->confirm_start) {
+ ret = priv->info->confirm_start(priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear Ultra Low Power interrupt. */
+ if (priv->info->clear_ulps)
+ rcsi2_write(priv, INTSTATE_REG,
+ INTSTATE_INT_ULPS_START |
+ INTSTATE_INT_ULPS_END);
+ return 0;
+}
+
+static void rcsi2_stop(struct rcar_csi2 *priv)
+{
+ rcsi2_write(priv, PHYCNT_REG, 0);
+
+ rcsi2_reset(priv);
+
+ rcsi2_write(priv, PHTC_REG, PHTC_TESTCLR);
+}
+
+static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ struct v4l2_subdev *nextsd;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->remote) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ nextsd = priv->remote;
+
+ if (enable && priv->stream_count == 0) {
+ pm_runtime_get_sync(priv->dev);
+
+ ret = rcsi2_start(priv);
+ if (ret) {
+ pm_runtime_put(priv->dev);
+ goto out;
+ }
+
+ ret = v4l2_subdev_call(nextsd, video, s_stream, 1);
+ if (ret) {
+ rcsi2_stop(priv);
+ pm_runtime_put(priv->dev);
+ goto out;
+ }
+ } else if (!enable && priv->stream_count == 1) {
+ rcsi2_stop(priv);
+ v4l2_subdev_call(nextsd, video, s_stream, 0);
+ pm_runtime_put(priv->dev);
+ }
+
+ priv->stream_count += enable ? 1 : -1;
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ if (!rcsi2_code_to_fmt(format->format.code))
+ format->format.code = rcar_csi2_formats[0].code;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ priv->mf = format->format;
+ } else {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ *framefmt = format->format;
+ }
+
+ return 0;
+}
+
+static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ format->format = priv->mf;
+ else
+ format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops rcar_csi2_video_ops = {
+ .s_stream = rcsi2_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops rcar_csi2_pad_ops = {
+ .set_fmt = rcsi2_set_pad_format,
+ .get_fmt = rcsi2_get_pad_format,
+};
+
+static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = {
+ .video = &rcar_csi2_video_ops,
+ .pad = &rcar_csi2_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async handling and registration of subdevices and links.
+ */
+
+static int rcsi2_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_csi2 *priv = notifier_to_csi2(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(priv->dev, "Failed to find pad for %s\n", subdev->name);
+ return pad;
+ }
+
+ priv->remote = subdev;
+
+ dev_dbg(priv->dev, "Bound %s pad: %d\n", subdev->name, pad);
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &priv->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void rcsi2_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_csi2 *priv = notifier_to_csi2(notifier);
+
+ priv->remote = NULL;
+
+ dev_dbg(priv->dev, "Unbind %s\n", subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations rcar_csi2_notify_ops = {
+ .bound = rcsi2_notify_bound,
+ .unbind = rcsi2_notify_unbind,
+};
+
+static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ unsigned int i;
+
+ /* Only port 0 endpoint 0 is valid. */
+ if (vep->base.port || vep->base.id)
+ return -ENOTCONN;
+
+ if (vep->bus_type != V4L2_MBUS_CSI2) {
+ dev_err(priv->dev, "Unsupported bus: %u\n", vep->bus_type);
+ return -EINVAL;
+ }
+
+ priv->lanes = vep->bus.mipi_csi2.num_data_lanes;
+ if (priv->lanes != 1 && priv->lanes != 2 && priv->lanes != 4) {
+ dev_err(priv->dev, "Unsupported number of data-lanes: %u\n",
+ priv->lanes);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->lane_swap); i++) {
+ priv->lane_swap[i] = i < priv->lanes ?
+ vep->bus.mipi_csi2.data_lanes[i] : i;
+
+ /* Check for valid lane number. */
+ if (priv->lane_swap[i] < 1 || priv->lane_swap[i] > 4) {
+ dev_err(priv->dev, "data-lanes must be in 1-4 range\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rcsi2_parse_dt(struct rcar_csi2 *priv)
+{
+ struct device_node *ep;
+ struct v4l2_fwnode_endpoint v4l2_ep;
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(priv->dev->of_node, 0, 0);
+ if (!ep) {
+ dev_err(priv->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep);
+ if (ret) {
+ dev_err(priv->dev, "Could not parse v4l2 endpoint\n");
+ of_node_put(ep);
+ return -EINVAL;
+ }
+
+ ret = rcsi2_parse_v4l2(priv, &v4l2_ep);
+ if (ret) {
+ of_node_put(ep);
+ return ret;
+ }
+
+ priv->asd.match.fwnode =
+ fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
+ priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+
+ of_node_put(ep);
+
+ priv->notifier.subdevs = devm_kzalloc(priv->dev,
+ sizeof(*priv->notifier.subdevs),
+ GFP_KERNEL);
+ if (!priv->notifier.subdevs)
+ return -ENOMEM;
+
+ priv->notifier.num_subdevs = 1;
+ priv->notifier.subdevs[0] = &priv->asd;
+ priv->notifier.ops = &rcar_csi2_notify_ops;
+
+ dev_dbg(priv->dev, "Found '%pOF'\n",
+ to_of_node(priv->asd.match.fwnode));
+
+ return v4l2_async_subdev_notifier_register(&priv->subdev,
+ &priv->notifier);
+}
+
+/* -----------------------------------------------------------------------------
+ * PHTW initialization sequences.
+ *
+ * NOTE: Magic values are from the datasheet and lack documentation.
+ */
+
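+/*
+ * Write a single data/code pair to the PHY test interface and wait for
+ * the hardware to clear the write-enable bits, signalling completion.
+ */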
+static int rcsi2_phtw_write(struct rcar_csi2 *priv, u16 data, u16 code)
+{
+ unsigned int timeout;
+
+ rcsi2_write(priv, PHTW_REG,
+ PHTW_DWEN | PHTW_TESTDIN_DATA(data) |
+ PHTW_CWEN | PHTW_TESTDIN_CODE(code));
+
+ /* Wait for DWEN and CWEN to be cleared by hardware. */
+ for (timeout = 0; timeout <= 20; timeout++) {
+ if (!(rcsi2_read(priv, PHTW_REG) & (PHTW_DWEN | PHTW_CWEN)))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(priv->dev, "Timeout waiting for PHTW_DWEN and/or PHTW_CWEN\n");
+
+ return -ETIMEDOUT;
+}
+
+static int rcsi2_phtw_write_array(struct rcar_csi2 *priv,
+ const struct phtw_value *values)
+{
+ const struct phtw_value *value;
+ int ret;
+
+ for (value = values; value->data || value->code; value++) {
+ ret = rcsi2_phtw_write(priv, value->data, value->code);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
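+/*
+ * Write the PHTW value for the table entry closest to the requested link
+ * rate, preferring the lower entry on a tie.
+ */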
+static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps,
+ const struct rcsi2_mbps_reg *values, u16 code)
+{
+ const struct rcsi2_mbps_reg *value;
+ const struct rcsi2_mbps_reg *prev_value = NULL;
+
+ for (value = values; value->mbps; value++) {
+ if (value->mbps >= mbps)
+ break;
+ prev_value = value;
+ }
+
+ if (prev_value &&
+ ((mbps - prev_value->mbps) <= (value->mbps - mbps)))
+ value = prev_value;
+
+ if (!value->mbps) {
+ dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps);
+ return -ERANGE;
+ }
+
+ return rcsi2_phtw_write(priv, value->reg, code);
+}
+
+static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ static const struct phtw_value step1[] = {
+ { .data = 0xcc, .code = 0xe2 },
+ { .data = 0x01, .code = 0xe3 },
+ { .data = 0x11, .code = 0xe4 },
+ { .data = 0x01, .code = 0xe5 },
+ { .data = 0x10, .code = 0x04 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step2[] = {
+ { .data = 0x38, .code = 0x08 },
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x4b, .code = 0xac },
+ { .data = 0x03, .code = 0x00 },
+ { .data = 0x80, .code = 0x07 },
+ { /* sentinel */ },
+ };
+
+ int ret;
+
+ ret = rcsi2_phtw_write_array(priv, step1);
+ if (ret)
+ return ret;
+
+ if (mbps <= 250) {
+ ret = rcsi2_phtw_write(priv, 0x39, 0x05);
+ if (ret)
+ return ret;
+
+ ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_h3_v3h_m3n,
+ 0xf1);
+ if (ret)
+ return ret;
+ }
+
+ return rcsi2_phtw_write_array(priv, step2);
+}
+
+static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
+}
+
+static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv)
+{
+ static const struct phtw_value step1[] = {
+ { .data = 0xed, .code = 0x34 },
+ { .data = 0xed, .code = 0x44 },
+ { .data = 0xed, .code = 0x54 },
+ { .data = 0xed, .code = 0x84 },
+ { .data = 0xed, .code = 0x94 },
+ { /* sentinel */ },
+ };
+
+ return rcsi2_phtw_write_array(priv, step1);
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver.
+ */
+
+static const struct media_entity_operations rcar_csi2_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int rcsi2_probe_resources(struct rcar_csi2 *priv,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return 0;
+}
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a7795 = {
+ .init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .clear_ulps = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a7795es1 = {
+ .hsfreqrange = hsfreqrange_m3w_h3es1,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = {
+ .hsfreqrange = hsfreqrange_m3w_h3es1,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
+ .init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .clear_ulps = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
+ .init_phtw = rcsi2_init_phtw_v3m_e3,
+ .confirm_start = rcsi2_confirm_start_v3m_e3,
+};
+
+static const struct of_device_id rcar_csi2_of_table[] = {
+ {
+ .compatible = "renesas,r8a7795-csi2",
+ .data = &rcar_csi2_info_r8a7795,
+ },
+ {
+ .compatible = "renesas,r8a7796-csi2",
+ .data = &rcar_csi2_info_r8a7796,
+ },
+ {
+ .compatible = "renesas,r8a77965-csi2",
+ .data = &rcar_csi2_info_r8a77965,
+ },
+ {
+ .compatible = "renesas,r8a77970-csi2",
+ .data = &rcar_csi2_info_r8a77970,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
+
+static const struct soc_device_attribute r8a7795es1[] = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = &rcar_csi2_info_r8a7795es1,
+ },
+ { /* sentinel */ },
+};
+
+static int rcsi2_probe(struct platform_device *pdev)
+{
+ const struct soc_device_attribute *attr;
+ struct rcar_csi2 *priv;
+ unsigned int i;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->info = of_device_get_match_data(&pdev->dev);
+
+ /*
+ * r8a7795 ES1.x behaves differently from ES2.0+ but doesn't
+ * have its own compatible string.
+ */
+ attr = soc_device_match(r8a7795es1);
+ if (attr)
+ priv->info = attr->data;
+
+ priv->dev = &pdev->dev;
+
+ mutex_init(&priv->lock);
+ priv->stream_count = 0;
+
+ ret = rcsi2_probe_resources(priv, pdev);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get resources\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = rcsi2_parse_dt(priv);
+ if (ret)
+ return ret;
+
+ priv->subdev.owner = THIS_MODULE;
+ priv->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&priv->subdev, &rcar_csi2_subdev_ops);
+ v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
+ snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+ priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ priv->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->subdev.entity.ops = &rcar_csi2_entity_ops;
+
+ priv->pads[RCAR_CSI2_SINK].flags = MEDIA_PAD_FL_SINK;
+ for (i = RCAR_CSI2_SOURCE_VC0; i < NR_OF_RCAR_CSI2_PAD; i++)
+ priv->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->subdev.entity, NR_OF_RCAR_CSI2_PAD,
+ priv->pads);
+ if (ret)
+ goto error;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (ret < 0)
+ goto error;
+
+ dev_info(priv->dev, "%d lanes found\n", priv->lanes);
+
+ return 0;
+
+error:
+ v4l2_async_notifier_unregister(&priv->notifier);
+ v4l2_async_notifier_cleanup(&priv->notifier);
+
+ return ret;
+}
+
+static int rcsi2_remove(struct platform_device *pdev)
+{
+ struct rcar_csi2 *priv = platform_get_drvdata(pdev);
+
+ v4l2_async_notifier_unregister(&priv->notifier);
+ v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_unregister_subdev(&priv->subdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver rcar_csi2_pdrv = {
+ .remove = rcsi2_remove,
+ .probe = rcsi2_probe,
+ .driver = {
+ .name = "rcar-csi2",
+ .of_match_table = rcar_csi2_of_table,
+ },
+};
+
+module_platform_driver(rcar_csi2_pdrv);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car MIPI CSI-2 receiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
new file mode 100644
index 000000000..70a8cc433
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "rcar-vin.h"
+
+/* -----------------------------------------------------------------------------
+ * HW Functions
+ */
+
+/* Register offsets for R-Car VIN */
+#define VNMC_REG 0x00 /* Video n Main Control Register */
+#define VNMS_REG 0x04 /* Video n Module Status Register */
+#define VNFC_REG 0x08 /* Video n Frame Capture Register */
+#define VNSLPRC_REG 0x0C /* Video n Start Line Pre-Clip Register */
+#define VNELPRC_REG 0x10 /* Video n End Line Pre-Clip Register */
+#define VNSPPRC_REG 0x14 /* Video n Start Pixel Pre-Clip Register */
+#define VNEPPRC_REG 0x18 /* Video n End Pixel Pre-Clip Register */
+#define VNIS_REG 0x2C /* Video n Image Stride Register */
+#define VNMB_REG(m) (0x30 + ((m) << 2)) /* Video n Memory Base m Register */
+#define VNIE_REG 0x40 /* Video n Interrupt Enable Register */
+#define VNINTS_REG 0x44 /* Video n Interrupt Status Register */
+#define VNSI_REG 0x48 /* Video n Scanline Interrupt Register */
+#define VNMTC_REG 0x4C /* Video n Memory Transfer Control Register */
+#define VNDMR_REG 0x58 /* Video n Data Mode Register */
+#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
+#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */
+
+/* Register offsets specific for Gen2 */
+#define VNSLPOC_REG 0x1C /* Video n Start Line Post-Clip Register */
+#define VNELPOC_REG 0x20 /* Video n End Line Post-Clip Register */
+#define VNSPPOC_REG 0x24 /* Video n Start Pixel Post-Clip Register */
+#define VNEPPOC_REG 0x28 /* Video n End Pixel Post-Clip Register */
+#define VNYS_REG 0x50 /* Video n Y Scale Register */
+#define VNXS_REG 0x54 /* Video n X Scale Register */
+#define VNC1A_REG 0x80 /* Video n Coefficient Set C1A Register */
+#define VNC1B_REG 0x84 /* Video n Coefficient Set C1B Register */
+#define VNC1C_REG 0x88 /* Video n Coefficient Set C1C Register */
+#define VNC2A_REG 0x90 /* Video n Coefficient Set C2A Register */
+#define VNC2B_REG 0x94 /* Video n Coefficient Set C2B Register */
+#define VNC2C_REG 0x98 /* Video n Coefficient Set C2C Register */
+#define VNC3A_REG 0xA0 /* Video n Coefficient Set C3A Register */
+#define VNC3B_REG 0xA4 /* Video n Coefficient Set C3B Register */
+#define VNC3C_REG 0xA8 /* Video n Coefficient Set C3C Register */
+#define VNC4A_REG 0xB0 /* Video n Coefficient Set C4A Register */
+#define VNC4B_REG 0xB4 /* Video n Coefficient Set C4B Register */
+#define VNC4C_REG 0xB8 /* Video n Coefficient Set C4C Register */
+#define VNC5A_REG 0xC0 /* Video n Coefficient Set C5A Register */
+#define VNC5B_REG 0xC4 /* Video n Coefficient Set C5B Register */
+#define VNC5C_REG 0xC8 /* Video n Coefficient Set C5C Register */
+#define VNC6A_REG 0xD0 /* Video n Coefficient Set C6A Register */
+#define VNC6B_REG 0xD4 /* Video n Coefficient Set C6B Register */
+#define VNC6C_REG 0xD8 /* Video n Coefficient Set C6C Register */
+#define VNC7A_REG 0xE0 /* Video n Coefficient Set C7A Register */
+#define VNC7B_REG 0xE4 /* Video n Coefficient Set C7B Register */
+#define VNC7C_REG 0xE8 /* Video n Coefficient Set C7C Register */
+#define VNC8A_REG 0xF0 /* Video n Coefficient Set C8A Register */
+#define VNC8B_REG 0xF4 /* Video n Coefficient Set C8B Register */
+#define VNC8C_REG 0xF8 /* Video n Coefficient Set C8C Register */
+
+/* Register offsets specific for Gen3 */
+#define VNCSI_IFMD_REG 0x20 /* Video n CSI2 Interface Mode Register */
+
+/* Register bit fields for R-Car VIN */
+/* Video n Main Control Register bits */
+#define VNMC_DPINE (1 << 27) /* Gen3 specific */
+#define VNMC_SCLE (1 << 26) /* Gen3 specific */
+#define VNMC_FOC (1 << 21)
+#define VNMC_YCAL (1 << 19)
+#define VNMC_INF_YUV8_BT656 (0 << 16)
+#define VNMC_INF_YUV8_BT601 (1 << 16)
+#define VNMC_INF_YUV10_BT656 (2 << 16)
+#define VNMC_INF_YUV10_BT601 (3 << 16)
+#define VNMC_INF_YUV16 (5 << 16)
+#define VNMC_INF_RGB888 (6 << 16)
+#define VNMC_VUP (1 << 10)
+#define VNMC_IM_ODD (0 << 3)
+#define VNMC_IM_ODD_EVEN (1 << 3)
+#define VNMC_IM_EVEN (2 << 3)
+#define VNMC_IM_FULL (3 << 3)
+#define VNMC_BPS (1 << 1)
+#define VNMC_ME (1 << 0)
+
+/* Video n Module Status Register bits */
+#define VNMS_FBS_MASK (3 << 3)
+#define VNMS_FBS_SHIFT 3
+#define VNMS_FS (1 << 2)
+#define VNMS_AV (1 << 1)
+#define VNMS_CA (1 << 0)
+
+/* Video n Frame Capture Register bits */
+#define VNFC_C_FRAME (1 << 1)
+#define VNFC_S_FRAME (1 << 0)
+
+/* Video n Interrupt Enable Register bits */
+#define VNIE_FIE (1 << 4)
+#define VNIE_EFE (1 << 1)
+
+/* Video n Data Mode Register bits */
+#define VNDMR_EXRGB (1 << 8)
+#define VNDMR_BPSM (1 << 4)
+#define VNDMR_DTMD_YCSEP (1 << 1)
+#define VNDMR_DTMD_ARGB1555 (1 << 0)
+
+/* Video n Data Mode Register 2 bits */
+#define VNDMR2_VPS (1 << 30)
+#define VNDMR2_HPS (1 << 29)
+#define VNDMR2_CES (1 << 28)
+#define VNDMR2_FTEV (1 << 17)
+#define VNDMR2_VLV(n)		(((n) & 0xf) << 12)
+
+/* Video n CSI2 Interface Mode Register (Gen3) */
+#define VNCSI_IFMD_DES1 (1 << 26)
+#define VNCSI_IFMD_DES0 (1 << 25)
+#define VNCSI_IFMD_CSI_CHSEL(n) (((n) & 0xf) << 0)
+#define VNCSI_IFMD_CSI_CHSEL_MASK 0xf
+
+struct rvin_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
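+/* Resolve a vb2_v4l2_buffer to the list head of its enclosing rvin_buffer. */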
+#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
+ struct rvin_buffer, \
+ vb)->list)
+
+static void rvin_write(struct rvin_dev *vin, u32 value, u32 offset)
+{
+ iowrite32(value, vin->base + offset);
+}
+
+static u32 rvin_read(struct rvin_dev *vin, u32 offset)
+{
+ return ioread32(vin->base + offset);
+}
+
+/* -----------------------------------------------------------------------------
+ * Crop and Scaling Gen2
+ */
+
+struct vin_coeff {
+ unsigned short xs_value;
+ u32 coeff_set[24];
+};
+
+static const struct vin_coeff vin_coeff_set[] = {
+ { 0x0000, {
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000 },
+ },
+ { 0x1000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x000003f8, 0x00000403, 0x3de0d9f0,
+ 0x001fffed, 0x00000804, 0x3cc1f9c3,
+ 0x001003de, 0x00000c01, 0x3cb34d7f,
+ 0x002003d2, 0x00000c00, 0x3d24a92d,
+ 0x00200bca, 0x00000bff, 0x3df600d2,
+ 0x002013cc, 0x000007ff, 0x3ed70c7e,
+ 0x00100fde, 0x00000000, 0x3f87c036 },
+ },
+ { 0x1200, {
+ 0x002ffff1, 0x002ffff1, 0x02a0a9c8,
+ 0x002003e7, 0x001ffffa, 0x000185bc,
+ 0x002007dc, 0x000003ff, 0x3e52859c,
+ 0x00200bd4, 0x00000002, 0x3d53996b,
+ 0x00100fd0, 0x00000403, 0x3d04ad2d,
+ 0x00000bd5, 0x00000403, 0x3d35ace7,
+ 0x3ff003e4, 0x00000801, 0x3dc674a1,
+ 0x3fffe800, 0x00000800, 0x3e76f461 },
+ },
+ { 0x1400, {
+ 0x00100be3, 0x00100be3, 0x04d1359a,
+ 0x00000fdb, 0x002003ed, 0x0211fd93,
+ 0x00000fd6, 0x002003f4, 0x0002d97b,
+ 0x000007d6, 0x002ffffb, 0x3e93b956,
+ 0x3ff003da, 0x001003ff, 0x3db49926,
+ 0x3fffefe9, 0x00100001, 0x3d655cee,
+ 0x3fffd400, 0x00000003, 0x3d65f4b6,
+ 0x000fb421, 0x00000402, 0x3dc6547e },
+ },
+ { 0x1600, {
+ 0x00000bdd, 0x00000bdd, 0x06519578,
+ 0x3ff007da, 0x00000be3, 0x03c24973,
+ 0x3ff003d9, 0x00000be9, 0x01b30d5f,
+ 0x3ffff7df, 0x001003f1, 0x0003c542,
+ 0x000fdfec, 0x001003f7, 0x3ec4711d,
+ 0x000fc400, 0x002ffffd, 0x3df504f1,
+ 0x001fa81a, 0x002ffc00, 0x3d957cc2,
+ 0x002f8c3c, 0x00100000, 0x3db5c891 },
+ },
+ { 0x1800, {
+ 0x3ff003dc, 0x3ff003dc, 0x0791e558,
+ 0x000ff7dd, 0x3ff007de, 0x05328554,
+ 0x000fe7e3, 0x3ff00be2, 0x03232546,
+ 0x000fd7ee, 0x000007e9, 0x0143bd30,
+ 0x001fb800, 0x000007ee, 0x00044511,
+ 0x002fa015, 0x000007f4, 0x3ef4bcee,
+ 0x002f8832, 0x001003f9, 0x3e4514c7,
+ 0x001f7853, 0x001003fd, 0x3de54c9f },
+ },
+ { 0x1a00, {
+ 0x000fefe0, 0x000fefe0, 0x08721d3c,
+ 0x001fdbe7, 0x000ffbde, 0x0652a139,
+ 0x001fcbf0, 0x000003df, 0x0463292e,
+ 0x002fb3ff, 0x3ff007e3, 0x0293a91d,
+ 0x002f9c12, 0x3ff00be7, 0x01241905,
+ 0x001f8c29, 0x000007ed, 0x3fe470eb,
+ 0x000f7c46, 0x000007f2, 0x3f04b8ca,
+ 0x3fef7865, 0x000007f6, 0x3e74e4a8 },
+ },
+ { 0x1c00, {
+ 0x001fd3e9, 0x001fd3e9, 0x08f23d26,
+ 0x002fbff3, 0x001fe3e4, 0x0712ad23,
+ 0x002fa800, 0x000ff3e0, 0x05631d1b,
+ 0x001f9810, 0x000ffbe1, 0x03b3890d,
+ 0x000f8c23, 0x000003e3, 0x0233e8fa,
+ 0x3fef843b, 0x000003e7, 0x00f430e4,
+ 0x3fbf8456, 0x3ff00bea, 0x00046cc8,
+ 0x3f8f8c72, 0x3ff00bef, 0x3f3490ac },
+ },
+ { 0x1e00, {
+ 0x001fbbf4, 0x001fbbf4, 0x09425112,
+ 0x001fa800, 0x002fc7ed, 0x0792b110,
+ 0x000f980e, 0x001fdbe6, 0x0613110a,
+ 0x3fff8c20, 0x001fe7e3, 0x04a368fd,
+ 0x3fcf8c33, 0x000ff7e2, 0x0343b8ed,
+ 0x3f9f8c4a, 0x000fffe3, 0x0203f8da,
+ 0x3f5f9c61, 0x000003e6, 0x00e428c5,
+ 0x3f1fb07b, 0x000003eb, 0x3fe440af },
+ },
+ { 0x2000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x3fff980c, 0x001fb7f5, 0x0812b0ff,
+ 0x3fdf901c, 0x001fc7ed, 0x06b2fcfa,
+ 0x3faf902d, 0x001fd3e8, 0x055348f1,
+ 0x3f7f983f, 0x001fe3e5, 0x04038ce3,
+ 0x3f3fa454, 0x001fefe3, 0x02e3c8d1,
+ 0x3f0fb86a, 0x001ff7e4, 0x01c3e8c0,
+ 0x3ecfd880, 0x000fffe6, 0x00c404ac },
+ },
+ { 0x2200, {
+ 0x3fdf9c0b, 0x3fdf9c0b, 0x09725cf4,
+ 0x3fbf9818, 0x3fffa400, 0x0842a8f1,
+ 0x3f8f9827, 0x000fb3f7, 0x0702f0ec,
+ 0x3f5fa037, 0x000fc3ef, 0x05d330e4,
+ 0x3f2fac49, 0x001fcfea, 0x04a364d9,
+ 0x3effc05c, 0x001fdbe7, 0x038394ca,
+ 0x3ecfdc6f, 0x001fe7e6, 0x0273b0bb,
+ 0x3ea00083, 0x001fefe6, 0x0183c0a9 },
+ },
+ { 0x2400, {
+ 0x3f9fa014, 0x3f9fa014, 0x098260e6,
+ 0x3f7f9c23, 0x3fcf9c0a, 0x08629ce5,
+ 0x3f4fa431, 0x3fefa400, 0x0742d8e1,
+ 0x3f1fb440, 0x3fffb3f8, 0x062310d9,
+ 0x3eefc850, 0x000fbbf2, 0x050340d0,
+ 0x3ecfe062, 0x000fcbec, 0x041364c2,
+ 0x3ea00073, 0x001fd3ea, 0x03037cb5,
+ 0x3e902086, 0x001fdfe8, 0x022388a5 },
+ },
+ { 0x2600, {
+ 0x3f5fa81e, 0x3f5fa81e, 0x096258da,
+ 0x3f3fac2b, 0x3f8fa412, 0x088290d8,
+ 0x3f0fbc38, 0x3fafa408, 0x0772c8d5,
+ 0x3eefcc47, 0x3fcfa800, 0x0672f4ce,
+ 0x3ecfe456, 0x3fefaffa, 0x05531cc6,
+ 0x3eb00066, 0x3fffbbf3, 0x047334bb,
+ 0x3ea01c77, 0x000fc7ee, 0x039348ae,
+ 0x3ea04486, 0x000fd3eb, 0x02b350a1 },
+ },
+ { 0x2800, {
+ 0x3f2fb426, 0x3f2fb426, 0x094250ce,
+ 0x3f0fc032, 0x3f4fac1b, 0x086284cd,
+ 0x3eefd040, 0x3f7fa811, 0x0782acc9,
+ 0x3ecfe84c, 0x3f9fa807, 0x06a2d8c4,
+ 0x3eb0005b, 0x3fbfac00, 0x05b2f4bc,
+ 0x3eb0186a, 0x3fdfb3fa, 0x04c308b4,
+ 0x3eb04077, 0x3fefbbf4, 0x03f31ca8,
+ 0x3ec06884, 0x000fbff2, 0x03031c9e },
+ },
+ { 0x2a00, {
+ 0x3f0fc42d, 0x3f0fc42d, 0x090240c4,
+ 0x3eefd439, 0x3f2fb822, 0x08526cc2,
+ 0x3edfe845, 0x3f4fb018, 0x078294bf,
+ 0x3ec00051, 0x3f6fac0f, 0x06b2b4bb,
+ 0x3ec0185f, 0x3f8fac07, 0x05e2ccb4,
+ 0x3ec0386b, 0x3fafac00, 0x0502e8ac,
+ 0x3ed05c77, 0x3fcfb3fb, 0x0432f0a3,
+ 0x3ef08482, 0x3fdfbbf6, 0x0372f898 },
+ },
+ { 0x2c00, {
+ 0x3eefdc31, 0x3eefdc31, 0x08e238b8,
+ 0x3edfec3d, 0x3f0fc828, 0x082258b9,
+ 0x3ed00049, 0x3f1fc01e, 0x077278b6,
+ 0x3ed01455, 0x3f3fb815, 0x06c294b2,
+ 0x3ed03460, 0x3f5fb40d, 0x0602acac,
+ 0x3ef0506c, 0x3f7fb006, 0x0542c0a4,
+ 0x3f107476, 0x3f9fb400, 0x0472c89d,
+ 0x3f309c80, 0x3fbfb7fc, 0x03b2cc94 },
+ },
+ { 0x2e00, {
+ 0x3eefec37, 0x3eefec37, 0x088220b0,
+ 0x3ee00041, 0x3effdc2d, 0x07f244ae,
+ 0x3ee0144c, 0x3f0fd023, 0x07625cad,
+ 0x3ef02c57, 0x3f1fc81a, 0x06c274a9,
+ 0x3f004861, 0x3f3fbc13, 0x060288a6,
+ 0x3f20686b, 0x3f5fb80c, 0x05529c9e,
+ 0x3f408c74, 0x3f6fb805, 0x04b2ac96,
+ 0x3f80ac7e, 0x3f8fb800, 0x0402ac8e },
+ },
+ { 0x3000, {
+ 0x3ef0003a, 0x3ef0003a, 0x084210a6,
+ 0x3ef01045, 0x3effec32, 0x07b228a7,
+ 0x3f00284e, 0x3f0fdc29, 0x073244a4,
+ 0x3f104058, 0x3f0fd420, 0x06a258a2,
+ 0x3f305c62, 0x3f2fc818, 0x0612689d,
+ 0x3f508069, 0x3f3fc011, 0x05728496,
+ 0x3f80a072, 0x3f4fc00a, 0x04d28c90,
+ 0x3fc0c07b, 0x3f6fbc04, 0x04429088 },
+ },
+ { 0x3200, {
+ 0x3f00103e, 0x3f00103e, 0x07f1fc9e,
+ 0x3f102447, 0x3f000035, 0x0782149d,
+ 0x3f203c4f, 0x3f0ff02c, 0x07122c9c,
+ 0x3f405458, 0x3f0fe424, 0x06924099,
+ 0x3f607061, 0x3f1fd41d, 0x06024c97,
+ 0x3f909068, 0x3f2fcc16, 0x05726490,
+ 0x3fc0b070, 0x3f3fc80f, 0x04f26c8a,
+ 0x0000d077, 0x3f4fc409, 0x04627484 },
+ },
+ { 0x3400, {
+ 0x3f202040, 0x3f202040, 0x07a1e898,
+ 0x3f303449, 0x3f100c38, 0x0741fc98,
+ 0x3f504c50, 0x3f10002f, 0x06e21495,
+ 0x3f706459, 0x3f1ff028, 0x06722492,
+ 0x3fa08060, 0x3f1fe421, 0x05f2348f,
+ 0x3fd09c67, 0x3f1fdc19, 0x05824c89,
+ 0x0000bc6e, 0x3f2fd014, 0x04f25086,
+ 0x0040dc74, 0x3f3fcc0d, 0x04825c7f },
+ },
+ { 0x3600, {
+ 0x3f403042, 0x3f403042, 0x0761d890,
+ 0x3f504848, 0x3f301c3b, 0x0701f090,
+ 0x3f805c50, 0x3f200c33, 0x06a2008f,
+ 0x3fa07458, 0x3f10002b, 0x06520c8d,
+ 0x3fd0905e, 0x3f1ff424, 0x05e22089,
+ 0x0000ac65, 0x3f1fe81d, 0x05823483,
+ 0x0030cc6a, 0x3f2fdc18, 0x04f23c81,
+ 0x0080e871, 0x3f2fd412, 0x0482407c },
+ },
+ { 0x3800, {
+ 0x3f604043, 0x3f604043, 0x0721c88a,
+ 0x3f80544a, 0x3f502c3c, 0x06d1d88a,
+ 0x3fb06851, 0x3f301c35, 0x0681e889,
+ 0x3fd08456, 0x3f30082f, 0x0611fc88,
+ 0x00009c5d, 0x3f200027, 0x05d20884,
+ 0x0030b863, 0x3f2ff421, 0x05621880,
+ 0x0070d468, 0x3f2fe81b, 0x0502247c,
+ 0x00c0ec6f, 0x3f2fe015, 0x04a22877 },
+ },
+ { 0x3a00, {
+ 0x3f904c44, 0x3f904c44, 0x06e1b884,
+ 0x3fb0604a, 0x3f70383e, 0x0691c885,
+ 0x3fe07451, 0x3f502c36, 0x0661d483,
+ 0x00009055, 0x3f401831, 0x0601ec81,
+ 0x0030a85b, 0x3f300c2a, 0x05b1f480,
+ 0x0070c061, 0x3f300024, 0x0562047a,
+ 0x00b0d867, 0x3f3ff41e, 0x05020c77,
+ 0x00f0f46b, 0x3f2fec19, 0x04a21474 },
+ },
+ { 0x3c00, {
+ 0x3fb05c43, 0x3fb05c43, 0x06c1b07e,
+ 0x3fe06c4b, 0x3f902c3f, 0x0681c081,
+ 0x0000844f, 0x3f703838, 0x0631cc7d,
+ 0x00309855, 0x3f602433, 0x05d1d47e,
+ 0x0060b459, 0x3f50142e, 0x0581e47b,
+ 0x00a0c85f, 0x3f400828, 0x0531f078,
+ 0x00e0e064, 0x3f300021, 0x0501fc73,
+ 0x00b0fc6a, 0x3f3ff41d, 0x04a20873 },
+ },
+ { 0x3e00, {
+ 0x3fe06444, 0x3fe06444, 0x0681a07a,
+ 0x00007849, 0x3fc0503f, 0x0641b07a,
+ 0x0020904d, 0x3fa0403a, 0x05f1c07a,
+ 0x0060a453, 0x3f803034, 0x05c1c878,
+ 0x0090b858, 0x3f70202f, 0x0571d477,
+ 0x00d0d05d, 0x3f501829, 0x0531e073,
+ 0x0110e462, 0x3f500825, 0x04e1e471,
+ 0x01510065, 0x3f40001f, 0x04a1f06d },
+ },
+ { 0x4000, {
+ 0x00007044, 0x00007044, 0x06519476,
+ 0x00208448, 0x3fe05c3f, 0x0621a476,
+ 0x0050984d, 0x3fc04c3a, 0x05e1b075,
+ 0x0080ac52, 0x3fa03c35, 0x05a1b875,
+ 0x00c0c056, 0x3f803030, 0x0561c473,
+ 0x0100d45b, 0x3f70202b, 0x0521d46f,
+ 0x0140e860, 0x3f601427, 0x04d1d46e,
+ 0x01810064, 0x3f500822, 0x0491dc6b },
+ },
+ { 0x5000, {
+ 0x0110a442, 0x0110a442, 0x0551545e,
+ 0x0140b045, 0x00e0983f, 0x0531585f,
+ 0x0160c047, 0x00c08c3c, 0x0511645e,
+ 0x0190cc4a, 0x00908039, 0x04f1685f,
+ 0x01c0dc4c, 0x00707436, 0x04d1705e,
+ 0x0200e850, 0x00506833, 0x04b1785b,
+ 0x0230f453, 0x00305c30, 0x0491805a,
+ 0x02710056, 0x0010542d, 0x04718059 },
+ },
+ { 0x6000, {
+ 0x01c0bc40, 0x01c0bc40, 0x04c13052,
+ 0x01e0c841, 0x01a0b43d, 0x04c13851,
+ 0x0210cc44, 0x0180a83c, 0x04a13453,
+ 0x0230d845, 0x0160a03a, 0x04913c52,
+ 0x0260e047, 0x01409838, 0x04714052,
+ 0x0280ec49, 0x01208c37, 0x04514c50,
+ 0x02b0f44b, 0x01008435, 0x04414c50,
+ 0x02d1004c, 0x00e07c33, 0x0431544f },
+ },
+ { 0x7000, {
+ 0x0230c83e, 0x0230c83e, 0x04711c4c,
+ 0x0250d03f, 0x0210c43c, 0x0471204b,
+ 0x0270d840, 0x0200b83c, 0x0451244b,
+ 0x0290dc42, 0x01e0b43a, 0x0441244c,
+ 0x02b0e443, 0x01c0b038, 0x0441284b,
+ 0x02d0ec44, 0x01b0a438, 0x0421304a,
+ 0x02f0f445, 0x0190a036, 0x04213449,
+ 0x0310f847, 0x01709c34, 0x04213848 },
+ },
+ { 0x8000, {
+ 0x0280d03d, 0x0280d03d, 0x04310c48,
+ 0x02a0d43e, 0x0270c83c, 0x04311047,
+ 0x02b0dc3e, 0x0250c83a, 0x04311447,
+ 0x02d0e040, 0x0240c03a, 0x04211446,
+ 0x02e0e840, 0x0220bc39, 0x04111847,
+ 0x0300e842, 0x0210b438, 0x04012445,
+ 0x0310f043, 0x0200b037, 0x04012045,
+ 0x0330f444, 0x01e0ac36, 0x03f12445 },
+ },
+ { 0xefff, {
+ 0x0340dc3a, 0x0340dc3a, 0x03b0ec40,
+ 0x0340e03a, 0x0330e039, 0x03c0f03e,
+ 0x0350e03b, 0x0330dc39, 0x03c0ec3e,
+ 0x0350e43a, 0x0320dc38, 0x03c0f43e,
+ 0x0360e43b, 0x0320d839, 0x03b0f03e,
+ 0x0360e83b, 0x0310d838, 0x03c0fc3b,
+ 0x0370e83b, 0x0310d439, 0x03a0f83d,
+ 0x0370e83c, 0x0300d438, 0x03b0fc3c },
+ }
+};
+
+static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
+{
+ int i;
+ const struct vin_coeff *p_prev_set = NULL;
+ const struct vin_coeff *p_set = NULL;
+
+ /* Look for suitable coefficient values */
+ for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) {
+ p_prev_set = p_set;
+ p_set = &vin_coeff_set[i];
+
+ if (xs < p_set->xs_value)
+ break;
+ }
+
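+	/* If xs is at or beyond the last table entry, the last set is kept. */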
+ /* Use previous value if its XS value is closer */
+ if (p_prev_set && p_set &&
+ xs - p_prev_set->xs_value < p_set->xs_value - xs)
+ p_set = p_prev_set;
+
+ /* Set coefficient registers */
+ rvin_write(vin, p_set->coeff_set[0], VNC1A_REG);
+ rvin_write(vin, p_set->coeff_set[1], VNC1B_REG);
+ rvin_write(vin, p_set->coeff_set[2], VNC1C_REG);
+
+ rvin_write(vin, p_set->coeff_set[3], VNC2A_REG);
+ rvin_write(vin, p_set->coeff_set[4], VNC2B_REG);
+ rvin_write(vin, p_set->coeff_set[5], VNC2C_REG);
+
+ rvin_write(vin, p_set->coeff_set[6], VNC3A_REG);
+ rvin_write(vin, p_set->coeff_set[7], VNC3B_REG);
+ rvin_write(vin, p_set->coeff_set[8], VNC3C_REG);
+
+ rvin_write(vin, p_set->coeff_set[9], VNC4A_REG);
+ rvin_write(vin, p_set->coeff_set[10], VNC4B_REG);
+ rvin_write(vin, p_set->coeff_set[11], VNC4C_REG);
+
+ rvin_write(vin, p_set->coeff_set[12], VNC5A_REG);
+ rvin_write(vin, p_set->coeff_set[13], VNC5B_REG);
+ rvin_write(vin, p_set->coeff_set[14], VNC5C_REG);
+
+ rvin_write(vin, p_set->coeff_set[15], VNC6A_REG);
+ rvin_write(vin, p_set->coeff_set[16], VNC6B_REG);
+ rvin_write(vin, p_set->coeff_set[17], VNC6C_REG);
+
+ rvin_write(vin, p_set->coeff_set[18], VNC7A_REG);
+ rvin_write(vin, p_set->coeff_set[19], VNC7B_REG);
+ rvin_write(vin, p_set->coeff_set[20], VNC7C_REG);
+
+ rvin_write(vin, p_set->coeff_set[21], VNC8A_REG);
+ rvin_write(vin, p_set->coeff_set[22], VNC8B_REG);
+ rvin_write(vin, p_set->coeff_set[23], VNC8C_REG);
+}
+
+static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
+{
+ u32 xs, ys;
+
+ /* Set scaling coefficient */
+ ys = 0;
+ if (vin->crop.height != vin->compose.height)
+ ys = (4096 * vin->crop.height) / vin->compose.height;
+ rvin_write(vin, ys, VNYS_REG);
+
+ xs = 0;
+ if (vin->crop.width != vin->compose.width)
+ xs = (4096 * vin->crop.width) / vin->compose.width;
+
+ /* Horizontal upscaling is up to double size */
+ if (xs > 0 && xs < 2048)
+ xs = 2048;
+
+ rvin_write(vin, xs, VNXS_REG);
+
+	/* Horizontal upscaling is carried out by scaling down from double size */
+ if (xs < 4096)
+ xs *= 2;
+
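+	/*
+	 * E.g. cropping 640 pixels and composing them to 1280 gives xs = 2048
+	 * (a 2x upscale); doubled to 4096 it selects the 0x1000 coefficient set.
+	 */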
+ rvin_set_coeff(vin, xs);
+
+ /* Set Start/End Pixel/Line Post-Clip */
+ rvin_write(vin, 0, VNSPPOC_REG);
+ rvin_write(vin, 0, VNSLPOC_REG);
+ rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
+ switch (vin->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
+ break;
+ default:
+ rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
+ break;
+ }
+
+ vin_dbg(vin,
+ "Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
+ vin->crop.width, vin->crop.height, vin->crop.left,
+ vin->crop.top, ys, xs, vin->format.width, vin->format.height,
+ 0, 0);
+}
+
+void rvin_crop_scale_comp(struct rvin_dev *vin)
+{
+ /* Set Start/End Pixel/Line Pre-Clip */
+ rvin_write(vin, vin->crop.left, VNSPPRC_REG);
+ rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
+
+ switch (vin->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ rvin_write(vin, vin->crop.top / 2, VNSLPRC_REG);
+ rvin_write(vin, (vin->crop.top + vin->crop.height) / 2 - 1,
+ VNELPRC_REG);
+ break;
+ default:
+ rvin_write(vin, vin->crop.top, VNSLPRC_REG);
+ rvin_write(vin, vin->crop.top + vin->crop.height - 1,
+ VNELPRC_REG);
+ break;
+ }
+
+ /* TODO: Add support for the UDS scaler. */
+ if (vin->info->model != RCAR_GEN3)
+ rvin_crop_scale_comp_gen2(vin);
+
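+	/* Image stride: NV16 needs 32-pixel alignment, other formats 16. */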
+ if (vin->format.pixelformat == V4L2_PIX_FMT_NV16)
+ rvin_write(vin, ALIGN(vin->format.width, 0x20), VNIS_REG);
+ else
+ rvin_write(vin, ALIGN(vin->format.width, 0x10), VNIS_REG);
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware setup
+ */
+
+static int rvin_setup(struct rvin_dev *vin)
+{
+ u32 vnmc, dmr, dmr2, interrupts;
+ bool progressive = false, output_is_yuv = false, input_is_yuv = false;
+
+ switch (vin->format.field) {
+ case V4L2_FIELD_TOP:
+ vnmc = VNMC_IM_ODD;
+ break;
+ case V4L2_FIELD_BOTTOM:
+ vnmc = VNMC_IM_EVEN;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ /* Default to TB */
+ vnmc = VNMC_IM_FULL;
+		/* Use BT if the video standard can be read and is a 60 Hz format */
+ if (!vin->info->use_mc && vin->std & V4L2_STD_525_60)
+ vnmc = VNMC_IM_FULL | VNMC_FOC;
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ vnmc = VNMC_IM_FULL;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ vnmc = VNMC_IM_FULL | VNMC_FOC;
+ break;
+ case V4L2_FIELD_NONE:
+ vnmc = VNMC_IM_ODD_EVEN;
+ progressive = true;
+ break;
+ default:
+ vnmc = VNMC_IM_ODD;
+ break;
+ }
+
+ /*
+ * Input interface
+ */
+ switch (vin->mbus_code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ /* BT.601/BT.1358 16bit YCbCr422 */
+ vnmc |= VNMC_INF_YUV16;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ vnmc |= VNMC_INF_YUV16 | VNMC_YCAL;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
+ if (!vin->is_csi &&
+ vin->parallel->mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV8_BT656;
+ else
+ vnmc |= VNMC_INF_YUV8_BT601;
+
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vnmc |= VNMC_INF_RGB888;
+ break;
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
+ if (!vin->is_csi &&
+ vin->parallel->mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV10_BT656;
+ else
+ vnmc |= VNMC_INF_YUV10_BT601;
+
+ input_is_yuv = true;
+ break;
+ default:
+ break;
+ }
+
+	/* Enable VSYNC Field Toggle mode after one VSYNC input */
+ if (vin->info->model == RCAR_GEN3)
+ dmr2 = VNDMR2_FTEV;
+ else
+ dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
+
+ if (!vin->is_csi) {
+ /* Hsync Signal Polarity Select */
+ if (!(vin->parallel->mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_HPS;
+
+ /* Vsync Signal Polarity Select */
+ if (!(vin->parallel->mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_VPS;
+
+ /* Data Enable Polarity Select */
+ if (vin->parallel->mbus_flags & V4L2_MBUS_DATA_ENABLE_LOW)
+ dmr2 |= VNDMR2_CES;
+ }
+
+ /*
+ * Output format
+ */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV16:
+ rvin_write(vin,
+ ALIGN(vin->format.width * vin->format.height, 0x80),
+ VNUVAOF_REG);
+ dmr = VNDMR_DTMD_YCSEP;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ dmr = VNDMR_BPSM;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ dmr = 0;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_XRGB555:
+ dmr = VNDMR_DTMD_ARGB1555;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dmr = 0;
+ break;
+ case V4L2_PIX_FMT_XBGR32:
+ /* Note: not supported on M1 */
+ dmr = VNDMR_EXRGB;
+ break;
+ default:
+ vin_err(vin, "Invalid pixelformat (0x%x)\n",
+ vin->format.pixelformat);
+ return -EINVAL;
+ }
+
+ /* Always update on field change */
+ vnmc |= VNMC_VUP;
+
+ /* If input and output use the same colorspace, use bypass mode */
+ if (input_is_yuv == output_is_yuv)
+ vnmc |= VNMC_BPS;
+
+ if (vin->info->model == RCAR_GEN3) {
+ /* Select between CSI-2 and parallel input */
+ if (vin->is_csi)
+ vnmc &= ~VNMC_DPINE;
+ else
+ vnmc |= VNMC_DPINE;
+ }
+
+ /* Progressive or interlaced mode */
+ interrupts = progressive ? VNIE_FIE : VNIE_EFE;
+
+ /* Ack interrupts */
+ rvin_write(vin, interrupts, VNINTS_REG);
+ /* Enable interrupts */
+ rvin_write(vin, interrupts, VNIE_REG);
+ /* Start capturing */
+ rvin_write(vin, dmr, VNDMR_REG);
+ rvin_write(vin, dmr2, VNDMR2_REG);
+
+ /* Enable module */
+ rvin_write(vin, vnmc | VNMC_ME, VNMC_REG);
+
+ return 0;
+}
+
+static void rvin_disable_interrupts(struct rvin_dev *vin)
+{
+ rvin_write(vin, 0, VNIE_REG);
+}
+
+static u32 rvin_get_interrupt_status(struct rvin_dev *vin)
+{
+ return rvin_read(vin, VNINTS_REG);
+}
+
+static void rvin_ack_interrupt(struct rvin_dev *vin)
+{
+ rvin_write(vin, rvin_read(vin, VNINTS_REG), VNINTS_REG);
+}
+
+static bool rvin_capture_active(struct rvin_dev *vin)
+{
+ return rvin_read(vin, VNMS_REG) & VNMS_CA;
+}
+
+static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
+{
+ const struct rvin_video_format *fmt;
+ int offsetx, offsety;
+ dma_addr_t offset;
+
+ fmt = rvin_format_from_pixel(vin->format.pixelformat);
+
+	/*
+	 * There is no HW support for composition; do the best we can
+	 * by modifying the buffer offset.
+	 */
+ offsetx = vin->compose.left * fmt->bpp;
+ offsety = vin->compose.top * vin->format.bytesperline;
+ offset = addr + offsetx + offsety;
+
+	/*
+	 * The address needs to be 128-byte aligned. The driver should never
+	 * accept settings that do not satisfy this in the first place...
+	 */
+ if (WARN_ON((offsetx | offsety | offset) & HW_BUFFER_MASK))
+ return;
+
+ rvin_write(vin, offset, VNMB_REG(slot));
+}
+
+/*
+ * Move a buffer from the queue to a HW slot. If no buffer is
+ * available, use the scratch buffer. The scratch buffer is never
+ * returned to userspace; its only function is to keep the capture
+ * loop running.
+ */
+static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
+{
+ struct rvin_buffer *buf;
+ struct vb2_v4l2_buffer *vbuf;
+ dma_addr_t phys_addr;
+
+	/* An already populated slot shall never be overwritten. */
+ if (WARN_ON(vin->queue_buf[slot] != NULL))
+ return;
+
+ vin_dbg(vin, "Filling HW slot: %d\n", slot);
+
+ if (list_empty(&vin->buf_list)) {
+ vin->queue_buf[slot] = NULL;
+ phys_addr = vin->scratch_phys;
+ } else {
+ /* Keep track of buffer we give to HW */
+ buf = list_entry(vin->buf_list.next, struct rvin_buffer, list);
+ vbuf = &buf->vb;
+ list_del_init(to_buf_list(vbuf));
+ vin->queue_buf[slot] = vbuf;
+
+ /* Setup DMA */
+ phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ }
+
+ rvin_set_slot_addr(vin, slot, phys_addr);
+}
+
+static int rvin_capture_start(struct rvin_dev *vin)
+{
+ int slot, ret;
+
+ for (slot = 0; slot < HW_BUFFER_NUM; slot++)
+ rvin_fill_hw_slot(vin, slot);
+
+ rvin_crop_scale_comp(vin);
+
+ ret = rvin_setup(vin);
+ if (ret)
+ return ret;
+
+ vin_dbg(vin, "Starting to capture\n");
+
+ /* Continuous Frame Capture Mode */
+ rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
+
+ vin->state = STARTING;
+
+ return 0;
+}
+
+static void rvin_capture_stop(struct rvin_dev *vin)
+{
+ /* Set continuous & single transfer off */
+ rvin_write(vin, 0, VNFC_REG);
+
+ /* Disable module */
+ rvin_write(vin, rvin_read(vin, VNMC_REG) & ~VNMC_ME, VNMC_REG);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA Functions
+ */
+
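+/*
+ * When stopping, the capture-active bit is polled up to RVIN_RETRIES
+ * times, sleeping RVIN_TIMEOUT_MS between attempts.
+ */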
+#define RVIN_TIMEOUT_MS 100
+#define RVIN_RETRIES 10
+
+static irqreturn_t rvin_irq(int irq, void *data)
+{
+ struct rvin_dev *vin = data;
+ u32 int_status, vnms;
+ int slot;
+ unsigned int handled = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ int_status = rvin_get_interrupt_status(vin);
+ if (!int_status)
+ goto done;
+
+ rvin_ack_interrupt(vin);
+ handled = 1;
+
+ /* Nothing to do if capture status is 'STOPPED' */
+ if (vin->state == STOPPED) {
+ vin_dbg(vin, "IRQ while state stopped\n");
+ goto done;
+ }
+
+ /* Nothing to do if capture status is 'STOPPING' */
+ if (vin->state == STOPPING) {
+ vin_dbg(vin, "IRQ while state stopping\n");
+ goto done;
+ }
+
+ /* Prepare for capture and update state */
+ vnms = rvin_read(vin, VNMS_REG);
+ slot = (vnms & VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
+
+	/*
+	 * To hand buffers back to userspace in a known order, start
+	 * capturing from slot 0.
+	 */
+ if (vin->state == STARTING) {
+ if (slot != 0) {
+ vin_dbg(vin, "Starting sync slot: %d\n", slot);
+ goto done;
+ }
+
+ vin_dbg(vin, "Capture start synced!\n");
+ vin->state = RUNNING;
+ }
+
+ /* Capture frame */
+ if (vin->queue_buf[slot]) {
+ vin->queue_buf[slot]->field = vin->format.field;
+ vin->queue_buf[slot]->sequence = vin->sequence;
+ vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ vin->queue_buf[slot] = NULL;
+ } else {
+ /* Scratch buffer was used, dropping frame. */
+ vin_dbg(vin, "Dropping frame %u\n", vin->sequence);
+ }
+
+ vin->sequence++;
+
+ /* Prepare for next frame */
+ rvin_fill_hw_slot(vin, slot);
+done:
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+/* Need to hold qlock before calling */
+static void return_all_buffers(struct rvin_dev *vin,
+ enum vb2_buffer_state state)
+{
+ struct rvin_buffer *buf, *node;
+ int i;
+
+ for (i = 0; i < HW_BUFFER_NUM; i++) {
+ if (vin->queue_buf[i]) {
+ vb2_buffer_done(&vin->queue_buf[i]->vb2_buf,
+ state);
+ vin->queue_buf[i] = NULL;
+ }
+ }
+
+ list_for_each_entry_safe(buf, node, &vin->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+}
+
+static int rvin_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+
+ /* Make sure the image size is large enough. */
+ if (*nplanes)
+ return sizes[0] < vin->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = vin->format.sizeimage;
+
+ return 0;
+}
+
+static int rvin_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = vin->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ vin_err(vin, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void rvin_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ list_add_tail(to_buf_list(vbuf), &vin->buf_list);
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+}
+
+static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
+ struct media_pad *pad)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmt.pad = pad->index;
+ if (v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt))
+ return -EPIPE;
+
+ switch (fmt.format.code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vin->mbus_code = fmt.format.code;
+ break;
+ default:
+ return -EPIPE;
+ }
+
+ switch (fmt.format.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ /* Supported natively */
+ break;
+ case V4L2_FIELD_ALTERNATE:
+ switch (vin->format.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ /* Use VIN hardware to combine the two fields */
+ fmt.format.height *= 2;
+ break;
+ default:
+ return -EPIPE;
+ }
+ break;
+ default:
+ return -EPIPE;
+ }
+
+ if (fmt.format.width != vin->format.width ||
+ fmt.format.height != vin->format.height ||
+ fmt.format.code != vin->mbus_code)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int rvin_set_stream(struct rvin_dev *vin, int on)
+{
+ struct media_pipeline *pipe;
+ struct media_device *mdev;
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ int ret;
+
+ /* No media controller used, simply pass operation to subdevice. */
+ if (!vin->info->use_mc) {
+ ret = v4l2_subdev_call(vin->parallel->subdev, video, s_stream,
+ on);
+
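+		/* A subdevice without the s_stream operation is not an error. */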
+ return ret == -ENOIOCTLCMD ? 0 : ret;
+ }
+
+ pad = media_entity_remote_pad(&vin->pad);
+ if (!pad)
+ return -EPIPE;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (!on) {
+ media_pipeline_stop(&vin->vdev.entity);
+ return v4l2_subdev_call(sd, video, s_stream, 0);
+ }
+
+ ret = rvin_mc_validate_format(vin, sd, pad);
+ if (ret)
+ return ret;
+
+	/*
+	 * The graph lock needs to be taken to protect against concurrent
+	 * starts of multiple VIN instances, as they might share a common
+	 * subdevice down the line and should then use the same pipe.
+	 */
+ mdev = vin->vdev.entity.graph_obj.mdev;
+ mutex_lock(&mdev->graph_mutex);
+ pipe = sd->entity.pipe ? sd->entity.pipe : &vin->vdev.pipe;
+ ret = __media_pipeline_start(&vin->vdev.entity, pipe);
+ mutex_unlock(&mdev->graph_mutex);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret)
+ media_pipeline_stop(&vin->vdev.entity);
+
+ return ret;
+}
+
+static int rvin_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+ unsigned long flags;
+ int ret;
+
+ /* Allocate scratch buffer. */
+ vin->scratch = dma_alloc_coherent(vin->dev, vin->format.sizeimage,
+ &vin->scratch_phys, GFP_KERNEL);
+ if (!vin->scratch) {
+ spin_lock_irqsave(&vin->qlock, flags);
+ return_all_buffers(vin, VB2_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ vin_err(vin, "Failed to allocate scratch buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = rvin_set_stream(vin, 1);
+ if (ret) {
+ spin_lock_irqsave(&vin->qlock, flags);
+ return_all_buffers(vin, VB2_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ vin->sequence = 0;
+
+ ret = rvin_capture_start(vin);
+ if (ret) {
+ return_all_buffers(vin, VB2_BUF_STATE_QUEUED);
+ rvin_set_stream(vin, 0);
+ }
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+out:
+ if (ret)
+ dma_free_coherent(vin->dev, vin->format.sizeimage, vin->scratch,
+ vin->scratch_phys);
+
+ return ret;
+}
+
+static void rvin_stop_streaming(struct vb2_queue *vq)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+ unsigned long flags;
+ int retries = 0;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ vin->state = STOPPING;
+
+ /* Wait for streaming to stop */
+ while (retries++ < RVIN_RETRIES) {
+
+ rvin_capture_stop(vin);
+
+ /* Check if HW is stopped */
+ if (!rvin_capture_active(vin)) {
+ vin->state = STOPPED;
+ break;
+ }
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ msleep(RVIN_TIMEOUT_MS);
+ spin_lock_irqsave(&vin->qlock, flags);
+ }
+
+ if (vin->state != STOPPED) {
+		/*
+		 * If this happens something has gone horribly wrong.
+		 * Set the state to stopped to prevent the interrupt handler
+		 * from making things worse...
+		 */
+		vin_err(vin, "Failed to stop HW, something is seriously broken\n");
+ vin->state = STOPPED;
+ }
+
+ /* Release all active buffers */
+ return_all_buffers(vin, VB2_BUF_STATE_ERROR);
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ rvin_set_stream(vin, 0);
+
+	/* Disable interrupts */
+ rvin_disable_interrupts(vin);
+
+ /* Free scratch buffer. */
+ dma_free_coherent(vin->dev, vin->format.sizeimage, vin->scratch,
+ vin->scratch_phys);
+}
+
+static const struct vb2_ops rvin_qops = {
+ .queue_setup = rvin_queue_setup,
+ .buf_prepare = rvin_buffer_prepare,
+ .buf_queue = rvin_buffer_queue,
+ .start_streaming = rvin_start_streaming,
+ .stop_streaming = rvin_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+void rvin_dma_unregister(struct rvin_dev *vin)
+{
+ mutex_destroy(&vin->lock);
+
+ v4l2_device_unregister(&vin->v4l2_dev);
+}
+
+int rvin_dma_register(struct rvin_dev *vin, int irq)
+{
+ struct vb2_queue *q = &vin->queue;
+ int i, ret;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(vin->dev, &vin->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&vin->lock);
+ INIT_LIST_HEAD(&vin->buf_list);
+
+ spin_lock_init(&vin->qlock);
+
+ vin->state = STOPPED;
+
+ for (i = 0; i < HW_BUFFER_NUM; i++)
+ vin->queue_buf[i] = NULL;
+
+ /* buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &vin->lock;
+ q->drv_priv = vin;
+ q->buf_struct_size = sizeof(struct rvin_buffer);
+ q->ops = &rvin_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 4;
+ q->dev = vin->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ vin_err(vin, "failed to initialize VB2 queue\n");
+ goto error;
+ }
+
+ /* irq */
+ ret = devm_request_irq(vin->dev, irq, rvin_irq, IRQF_SHARED,
+ KBUILD_MODNAME, vin);
+ if (ret) {
+ vin_err(vin, "failed to request irq\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ rvin_dma_unregister(vin);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Gen3 CHSEL manipulation
+ */
+
+/*
+ * No locking is needed around changing the routing, as it is only
+ * possible to do so when no VIN in the group is streaming, so nothing
+ * can race with the VNMC register.
+ */
+int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
+{
+ u32 ifmd, vnmc;
+ int ret;
+
+ ret = pm_runtime_get_sync(vin->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(vin->dev);
+ return ret;
+ }
+
+ /* Make register writes take effect immediately. */
+ vnmc = rvin_read(vin, VNMC_REG);
+ rvin_write(vin, vnmc & ~VNMC_VUP, VNMC_REG);
+
+ ifmd = VNCSI_IFMD_DES1 | VNCSI_IFMD_DES0 | VNCSI_IFMD_CSI_CHSEL(chsel);
+
+ rvin_write(vin, ifmd, VNCSI_IFMD_REG);
+
+ vin_dbg(vin, "Set IFMD 0x%x\n", ifmd);
+
+ /* Restore VNMC. */
+ rvin_write(vin, vnmc, VNMC_REG);
+
+ pm_runtime_put(vin->dev);
+
+ return ret;
+}
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
new file mode 100644
index 000000000..1236e6e82
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-rect.h>
+
+#include "rcar-vin.h"
+
+#define RVIN_DEFAULT_FORMAT V4L2_PIX_FMT_YUYV
+#define RVIN_DEFAULT_WIDTH 800
+#define RVIN_DEFAULT_HEIGHT 600
+#define RVIN_DEFAULT_FIELD V4L2_FIELD_NONE
+#define RVIN_DEFAULT_COLORSPACE V4L2_COLORSPACE_SRGB
+
+/* -----------------------------------------------------------------------------
+ * Format Conversions
+ */
+
+static const struct rvin_video_format rvin_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB555,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .bpp = 4,
+ },
+};
+
+const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
+ if (rvin_formats[i].fourcc == pixelformat)
+ return rvin_formats + i;
+
+ return NULL;
+}
+
+static u32 rvin_format_bytesperline(struct v4l2_pix_format *pix)
+{
+ const struct rvin_video_format *fmt;
+
+ fmt = rvin_format_from_pixel(pix->pixelformat);
+
+ if (WARN_ON(!fmt))
+ return -EINVAL;
+
+ return pix->width * fmt->bpp;
+}
+
+static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
+{
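+	/* NV16 carries a CbCr plane of the same size as the Y plane. */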
+ if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ return pix->bytesperline * pix->height * 2;
+
+ return pix->bytesperline * pix->height;
+}
+
+static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
+{
+ u32 walign;
+
+ if (!rvin_format_from_pixel(pix->pixelformat) ||
+ (vin->info->model == RCAR_M1 &&
+ pix->pixelformat == V4L2_PIX_FMT_XBGR32))
+ pix->pixelformat = RVIN_DEFAULT_FORMAT;
+
+ switch (pix->field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ break;
+ case V4L2_FIELD_ALTERNATE:
+		/*
+		 * The driver does not (yet) support outputting ALTERNATE to
+		 * userspace. It does support outputting INTERLACED, so use
+		 * the VIN hardware to combine the two fields.
+		 */
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->height *= 2;
+ break;
+ default:
+ pix->field = RVIN_DEFAULT_FIELD;
+ break;
+ }
+
+	/* HW limits width to a multiple of 32 (2^5) for NV16, else 2 (2^1) */
+ walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
+
+ /* Limit to VIN capabilities */
+ v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign,
+ &pix->height, 4, vin->info->max_height, 2, 0);
+
+ pix->bytesperline = rvin_format_bytesperline(pix);
+ pix->sizeimage = rvin_format_sizeimage(pix);
+
+ vin_dbg(vin, "Format %ux%u bpl: %u size: %u\n",
+ pix->width, pix->height, pix->bytesperline, pix->sizeimage);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2
+ */
+
+static int rvin_reset_format(struct rvin_dev *vin)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = vin->parallel->source_pad,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_pix_format(&vin->format, &fmt.format);
+
+ rvin_format_align(vin, &vin->format);
+
+ vin->source.top = 0;
+ vin->source.left = 0;
+ vin->source.width = vin->format.width;
+ vin->source.height = vin->format.height;
+
+ vin->crop = vin->source;
+ vin->compose = vin->source;
+
+ return 0;
+}
+
+static int rvin_try_format(struct rvin_dev *vin, u32 which,
+ struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_rect *compose)
+{
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ struct v4l2_subdev_pad_config *pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = which,
+ .pad = vin->parallel->source_pad,
+ };
+ enum v4l2_field field;
+ u32 width, height;
+ int ret;
+
+ pad_cfg = v4l2_subdev_alloc_pad_config(sd);
+ if (pad_cfg == NULL)
+ return -ENOMEM;
+
+ if (!rvin_format_from_pixel(pix->pixelformat) ||
+ (vin->info->model == RCAR_M1 &&
+ pix->pixelformat == V4L2_PIX_FMT_XBGR32))
+ pix->pixelformat = RVIN_DEFAULT_FORMAT;
+
+ v4l2_fill_mbus_format(&format.format, pix, vin->mbus_code);
+
+ /* Allow the video device to override field and to scale */
+ field = pix->field;
+ width = pix->width;
+ height = pix->height;
+
+ ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto done;
+ ret = 0;
+
+ v4l2_fill_pix_format(pix, &format.format);
+
+ if (crop) {
+ crop->top = 0;
+ crop->left = 0;
+ crop->width = pix->width;
+ crop->height = pix->height;
+
+ /*
+ * If source is ALTERNATE the driver will use the VIN hardware
+ * to INTERLACE it. The crop height then needs to be doubled.
+ */
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ crop->height *= 2;
+ }
+
+ if (field != V4L2_FIELD_ANY)
+ pix->field = field;
+
+ pix->width = width;
+ pix->height = height;
+
+ rvin_format_align(vin, pix);
+
+ if (compose) {
+ compose->top = 0;
+ compose->left = 0;
+ compose->width = pix->width;
+ compose->height = pix->height;
+ }
+done:
+ v4l2_subdev_free_pad_config(pad_cfg);
+
+ return ret;
+}
+
+static int rvin_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vin->dev));
+ return 0;
+}
+
+static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL,
+ NULL);
+}
+
+static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_rect crop, compose;
+ int ret;
+
+ if (vb2_is_busy(&vin->queue))
+ return -EBUSY;
+
+ ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
+ &crop, &compose);
+ if (ret)
+ return ret;
+
+ vin->format = f->fmt.pix;
+ vin->crop = crop;
+ vin->compose = compose;
+ vin->source = crop;
+
+ return 0;
+}
+
+static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ f->fmt.pix = vin->format;
+
+ return 0;
+}
+
+static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(rvin_formats))
+ return -EINVAL;
+
+ f->pixelformat = rvin_formats[f->index].fourcc;
+
+ return 0;
+}
+
+static int rvin_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vin->source.width;
+ s->r.height = vin->source.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r = vin->crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vin->format.width;
+ s->r.height = vin->format.height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r = vin->compose;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvin_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ const struct rvin_video_format *fmt;
+ struct v4l2_rect r = s->r;
+ struct v4l2_rect max_rect;
+ struct v4l2_rect min_rect = {
+ .width = 6,
+ .height = 2,
+ };
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ v4l2_rect_set_min_size(&r, &min_rect);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* Can't crop outside of source input */
+ max_rect.top = max_rect.left = 0;
+ max_rect.width = vin->source.width;
+ max_rect.height = vin->source.height;
+ v4l2_rect_map_inside(&r, &max_rect);
+
+ v4l_bound_align_image(&r.width, 6, vin->source.width, 0,
+ &r.height, 2, vin->source.height, 0, 0);
+
+ r.top = clamp_t(s32, r.top, 0, vin->source.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width);
+
+ vin->crop = s->r = r;
+
+ vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
+ r.width, r.height, r.left, r.top,
+ vin->source.width, vin->source.height);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ /* Make sure compose rect fits inside output format */
+ max_rect.top = max_rect.left = 0;
+ max_rect.width = vin->format.width;
+ max_rect.height = vin->format.height;
+ v4l2_rect_map_inside(&r, &max_rect);
+
+		/*
+		 * Composing is done by adding an offset to the buffer address;
+		 * the HW wants this address to be aligned to HW_BUFFER_MASK.
+		 * Make sure the top and left values meet this requirement.
+		 */
+ while ((r.top * vin->format.bytesperline) & HW_BUFFER_MASK)
+ r.top--;
+
+ fmt = rvin_format_from_pixel(vin->format.pixelformat);
+ while ((r.left * fmt->bpp) & HW_BUFFER_MASK)
+ r.left--;
+
+ vin->compose = s->r = r;
+
+ vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
+ r.width, r.height, r.left, r.top,
+ vin->format.width, vin->format.height);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* HW supports modifying configuration while running */
+ rvin_crop_scale_comp(vin);
+
+ return 0;
+}
+
+static int rvin_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *crop)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, video, g_pixelaspect, &crop->pixelaspect);
+}
+
+static int rvin_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (i->index != 0)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(sd, video, g_input_status, &i->status);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ if (v4l2_subdev_has_op(sd, pad, dv_timings_cap)) {
+ i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ i->std = 0;
+ } else {
+ i->capabilities = V4L2_IN_CAP_STD;
+ i->std = vin->vdev.tvnorms;
+ }
+
+ strlcpy(i->name, "Camera", sizeof(i->name));
+
+ return 0;
+}
+
+static int rvin_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int rvin_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, querystd, a);
+}
+
+static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ ret = v4l2_subdev_call(vin_to_source(vin), video, s_std, a);
+ if (ret < 0)
+ return ret;
+
+ vin->std = a;
+
+ /* Changing the standard will change the width/height */
+ return rvin_reset_format(vin);
+}
+
+static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ if (v4l2_subdev_has_op(vin_to_source(vin), pad, dv_timings_cap))
+ return -ENOIOCTLCMD;
+
+ *a = vin->std;
+
+ return 0;
+}
+
+static int rvin_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
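+		/* Allow up to 4 pending source-change events per file handle. */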
+ return v4l2_event_subscribe(fh, sub, 4, NULL);
+ }
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (timings->pad)
+ return -EINVAL;
+
+ timings->pad = vin->parallel->sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
+
+ timings->pad = 0;
+
+ return ret;
+}
+
+static int rvin_s_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
+ if (ret)
+ return ret;
+
+ /* Changing the timings will change the width/height */
+ return rvin_reset_format(vin);
+}
+
+static int rvin_g_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, g_dv_timings, timings);
+}
+
+static int rvin_query_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, query_dv_timings, timings);
+}
+
+static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (cap->pad)
+ return -EINVAL;
+
+ cap->pad = vin->parallel->sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
+
+ cap->pad = 0;
+
+ return ret;
+}
+
+static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ edid->pad = vin->parallel->sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, get_edid, edid);
+
+ edid->pad = 0;
+
+ return ret;
+}
+
+static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ edid->pad = vin->parallel->sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, set_edid, edid);
+
+ edid->pad = 0;
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
+ .vidioc_querycap = rvin_querycap,
+ .vidioc_try_fmt_vid_cap = rvin_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = rvin_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = rvin_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap,
+
+ .vidioc_g_selection = rvin_g_selection,
+ .vidioc_s_selection = rvin_s_selection,
+
+ .vidioc_cropcap = rvin_cropcap,
+
+ .vidioc_enum_input = rvin_enum_input,
+ .vidioc_g_input = rvin_g_input,
+ .vidioc_s_input = rvin_s_input,
+
+ .vidioc_dv_timings_cap = rvin_dv_timings_cap,
+ .vidioc_enum_dv_timings = rvin_enum_dv_timings,
+ .vidioc_g_dv_timings = rvin_g_dv_timings,
+ .vidioc_s_dv_timings = rvin_s_dv_timings,
+ .vidioc_query_dv_timings = rvin_query_dv_timings,
+
+ .vidioc_g_edid = rvin_g_edid,
+ .vidioc_s_edid = rvin_s_edid,
+
+ .vidioc_querystd = rvin_querystd,
+ .vidioc_g_std = rvin_g_std,
+ .vidioc_s_std = rvin_s_std,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = rvin_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Media Controller
+ */
+
+static void rvin_mc_try_format(struct rvin_dev *vin,
+ struct v4l2_pix_format *pix)
+{
+	/*
+	 * The V4L2 specification clearly documents the colorspace fields
+	 * as being set by drivers for capture devices. Using the values
+	 * supplied by userspace thus wouldn't comply with the API. Until
+	 * the API is updated, force fixed values.
+	 */
+ pix->colorspace = RVIN_DEFAULT_COLORSPACE;
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+ pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
+ pix->ycbcr_enc);
+
+ rvin_format_align(vin, pix);
+}
+
+static int rvin_mc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ rvin_mc_try_format(vin, &f->fmt.pix);
+
+ return 0;
+}
+
+static int rvin_mc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ if (vb2_is_busy(&vin->queue))
+ return -EBUSY;
+
+ rvin_mc_try_format(vin, &f->fmt.pix);
+
+ vin->format = f->fmt.pix;
+
+ vin->crop.top = 0;
+ vin->crop.left = 0;
+ vin->crop.width = vin->format.width;
+ vin->crop.height = vin->format.height;
+ vin->compose = vin->crop;
+
+ return 0;
+}
+
+static int rvin_mc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, "Camera", sizeof(i->name));
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
+ .vidioc_querycap = rvin_querycap,
+ .vidioc_try_fmt_vid_cap = rvin_mc_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = rvin_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = rvin_mc_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap,
+
+ .vidioc_enum_input = rvin_mc_enum_input,
+ .vidioc_g_input = rvin_g_input,
+ .vidioc_s_input = rvin_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = rvin_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * File Operations
+ */
+
+static int rvin_power_on(struct rvin_dev *vin)
+{
+ int ret;
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ pm_runtime_get_sync(vin->v4l2_dev.dev);
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+ return 0;
+}
+
+static int rvin_power_off(struct rvin_dev *vin)
+{
+ int ret;
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+
+ pm_runtime_put(vin->v4l2_dev.dev);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+static int rvin_initialize_device(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = vin->format.width,
+ .height = vin->format.height,
+ .field = vin->format.field,
+ .colorspace = vin->format.colorspace,
+ .pixelformat = vin->format.pixelformat,
+ },
+ };
+
+ ret = rvin_power_on(vin);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_enable(&vin->vdev.dev);
+ ret = pm_runtime_resume(&vin->vdev.dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto eresume;
+
+ /*
+ * Try to configure with default parameters. Notice: this is the
+ * very first open, so we cannot race against other calls, apart
+ * from someone else calling open() simultaneously, but the
+ * driver-wide vin->lock protects us against that.
+ */
+ ret = rvin_s_fmt_vid_cap(file, NULL, &f);
+ if (ret < 0)
+ goto esfmt;
+
+ v4l2_ctrl_handler_setup(&vin->ctrl_handler);
+
+ return 0;
+esfmt:
+ pm_runtime_disable(&vin->vdev.dev);
+eresume:
+ rvin_power_off(vin);
+
+ return ret;
+}
+
+static int rvin_open(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vin->lock);
+
+ file->private_data = vin;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ if (rvin_initialize_device(file)) {
+ v4l2_fh_release(file);
+ ret = -ENODEV;
+ }
+
+unlock:
+ mutex_unlock(&vin->lock);
+ return ret;
+}
+
+static int rvin_release(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&vin->lock);
+
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ /* The release helper will clean up any ongoing streaming. */
+ ret = _vb2_fop_release(file, NULL);
+
+ /*
+ * If this was the last open file handle,
+ * de-initialize the hardware module.
+ */
+ if (fh_singular) {
+ pm_runtime_suspend(&vin->vdev.dev);
+ pm_runtime_disable(&vin->vdev.dev);
+ rvin_power_off(vin);
+ }
+
+ mutex_unlock(&vin->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations rvin_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = rvin_open,
+ .release = rvin_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media controller file operations
+ */
+
+static int rvin_mc_open(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ ret = mutex_lock_interruptible(&vin->lock);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(vin->dev);
+ if (ret < 0)
+ goto err_unlock;
+
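+ /* Mark the whole media pipeline as in use, powering up its entities */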
+ ret = v4l2_pipeline_pm_use(&vin->vdev.entity, 1);
+ if (ret < 0)
+ goto err_pm;
+
+ file->private_data = vin;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ goto err_v4l2pm;
+
+ mutex_unlock(&vin->lock);
+
+ return 0;
+err_v4l2pm:
+ v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
+err_pm:
+ pm_runtime_put(vin->dev);
+err_unlock:
+ mutex_unlock(&vin->lock);
+
+ return ret;
+}
+
+static int rvin_mc_release(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vin->lock);
+
+ /* The release helper will clean up any ongoing streaming. */
+ ret = _vb2_fop_release(file, NULL);
+
+ v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
+ pm_runtime_put(vin->dev);
+
+ mutex_unlock(&vin->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations rvin_mc_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = rvin_mc_open,
+ .release = rvin_mc_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+void rvin_v4l2_unregister(struct rvin_dev *vin)
+{
+ if (!video_is_registered(&vin->vdev))
+ return;
+
+ v4l2_info(&vin->v4l2_dev, "Removing %s\n",
+ video_device_node_name(&vin->vdev));
+
+ /* Checks internally whether vdev has been initialized or not */
+ video_unregister_device(&vin->vdev);
+}
+
+static void rvin_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ struct rvin_dev *vin =
+ container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev);
+
+ switch (notification) {
+ case V4L2_DEVICE_NOTIFY_EVENT:
+ v4l2_event_queue(&vin->vdev, arg);
+ break;
+ default:
+ break;
+ }
+}
+
+int rvin_v4l2_register(struct rvin_dev *vin)
+{
+ struct video_device *vdev = &vin->vdev;
+ int ret;
+
+ vin->v4l2_dev.notify = rvin_notify;
+
+ /* video node */
+ vdev->v4l2_dev = &vin->v4l2_dev;
+ vdev->queue = &vin->queue;
+ snprintf(vdev->name, sizeof(vdev->name), "VIN%u output", vin->id);
+ vdev->release = video_device_release_empty;
+ vdev->lock = &vin->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+
+ /* Set a default format */
+ vin->format.pixelformat = RVIN_DEFAULT_FORMAT;
+ vin->format.width = RVIN_DEFAULT_WIDTH;
+ vin->format.height = RVIN_DEFAULT_HEIGHT;
+ vin->format.field = RVIN_DEFAULT_FIELD;
+ vin->format.colorspace = RVIN_DEFAULT_COLORSPACE;
+
+ if (vin->info->use_mc) {
+ vdev->fops = &rvin_mc_fops;
+ vdev->ioctl_ops = &rvin_mc_ioctl_ops;
+ } else {
+ vdev->fops = &rvin_fops;
+ vdev->ioctl_ops = &rvin_ioctl_ops;
+ rvin_reset_format(vin);
+ }
+
+ rvin_format_align(vin, &vin->format);
+
+ ret = video_register_device(&vin->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ vin_err(vin, "Failed to register video device\n");
+ return ret;
+ }
+
+ video_set_drvdata(&vin->vdev, vin);
+
+ v4l2_info(&vin->v4l2_dev, "Device registered as %s\n",
+ video_device_node_name(&vin->vdev));
+
+ return ret;
+}
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
new file mode 100644
index 000000000..0b13b34d0
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#ifndef __RCAR_VIN__
+#define __RCAR_VIN__
+
+#include <linux/kref.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+
+/* Number of HW buffers */
+#define HW_BUFFER_NUM 3
+
+/* Address alignment mask for HW buffers */
+#define HW_BUFFER_MASK 0x7f
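+/* (mask 0x7f: hardware buffer addresses are aligned to 128 bytes) */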
+
+/* Max number of VIN instances that can be in a system */
+#define RCAR_VIN_NUM 8
+
+struct rvin_group;
+
+enum model_id {
+ RCAR_H1,
+ RCAR_M1,
+ RCAR_GEN2,
+ RCAR_GEN3,
+};
+
+enum rvin_csi_id {
+ RVIN_CSI20,
+ RVIN_CSI21,
+ RVIN_CSI40,
+ RVIN_CSI41,
+ RVIN_CSI_MAX,
+};
+
+/**
+ * STOPPED - No operation in progress
+ * STARTING - Capture starting up
+ * RUNNING - Operation in progress, buffers are available
+ * STOPPING - Stopping operation
+ */
+enum rvin_dma_state {
+ STOPPED = 0,
+ STARTING,
+ RUNNING,
+ STOPPING,
+};
+
+/**
+ * struct rvin_video_format - Data format stored in memory
+ * @fourcc: Pixelformat
+ * @bpp: Bytes per pixel
+ */
+struct rvin_video_format {
+ u32 fourcc;
+ u8 bpp;
+};
+
+/**
+ * struct rvin_parallel_entity - Parallel video input endpoint descriptor
+ * @asd: sub-device descriptor for async framework
+ * @subdev: subdevice matched using async framework
+ * @mbus_type: media bus type
+ * @mbus_flags: media bus configuration flags
+ * @source_pad: source pad of remote subdevice
+ * @sink_pad: sink pad of remote subdevice
+ *
+ */
+struct rvin_parallel_entity {
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+
+ enum v4l2_mbus_type mbus_type;
+ unsigned int mbus_flags;
+
+ unsigned int source_pad;
+ unsigned int sink_pad;
+};
+
+/**
+ * struct rvin_group_route - describes a route from a channel of a
+ * CSI-2 receiver to a VIN
+ *
+ * @csi: CSI-2 receiver ID.
+ * @channel: Output channel of the CSI-2 receiver.
+ * @vin: VIN ID.
+ * @mask: Bitmask of the different CHSEL register values that
+ * allow for a route from @csi + @channel to @vin.
+ *
+ * .. note::
+ * Each R-Car CSI-2 receiver has four output channels facing the VIN
+ * devices, each channel can carry one CSI-2 Virtual Channel (VC).
+ * There is no correlation between channel number and CSI-2 VC. It's
+ * up to the CSI-2 receiver driver to configure which VC is output
+ * on which channel, the VIN devices only care about output channels.
+ *
+ * There are in some cases multiple CHSEL register settings which would
+ * allow for the same route from @csi + @channel to @vin. For example
+ * on R-Car H3 both the CHSEL values 0 and 3 allow for a route from
+ * CSI40/VC0 to VIN0. All possible CHSEL values for a route need to be
+ * recorded as a bitmask in @mask, in this example bit 0 and 3 should
+ * be set.
+ */
+struct rvin_group_route {
+ enum rvin_csi_id csi;
+ unsigned int channel;
+ unsigned int vin;
+ unsigned int mask;
+};
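+
+/*
+ * Purely illustrative sketch, not part of the driver: using the R-Car H3
+ * example from the comment above (CHSEL values 0 and 3 both route CSI40/VC0
+ * to VIN0), the corresponding route entry would look like:
+ *
+ *	static const struct rvin_group_route example_route = {
+ *		.csi	 = RVIN_CSI40,
+ *		.channel = 0,
+ *		.vin	 = 0,
+ *		.mask	 = BIT(0) | BIT(3),
+ *	};
+ */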
+
+/**
+ * struct rvin_info - Information about the particular VIN implementation
+ * @model: VIN model
+ * @use_mc: use media controller instead of controlling subdevice
+ * @max_width: max input width the VIN supports
+ * @max_height: max input height the VIN supports
+ * @routes: list of possible routes from the CSI-2 receivers to
+ * all VINs. The list must be NULL terminated.
+ */
+struct rvin_info {
+ enum model_id model;
+ bool use_mc;
+
+ unsigned int max_width;
+ unsigned int max_height;
+ const struct rvin_group_route *routes;
+};
+
+/**
+ * struct rvin_dev - Renesas VIN device structure
+ * @dev: (OF) device
+ * @base: device I/O register space remapped to virtual memory
+ * @info: info about VIN instance
+ *
+ * @vdev: V4L2 video device associated with VIN
+ * @v4l2_dev: V4L2 device
+ * @ctrl_handler: V4L2 control handler
+ * @notifier: V4L2 asynchronous subdevs notifier
+ *
+ * @parallel: parallel input subdevice descriptor
+ *
+ * @group: Gen3 CSI group
+ * @id: Gen3 group id for this VIN
+ * @pad: media pad for the video device entity
+ *
+ * @lock: protects @queue
+ * @queue: vb2 buffers queue
+ * @scratch: cpu address for scratch buffer
+ * @scratch_phys: physical address of the scratch buffer
+ *
+ * @qlock: protects @queue_buf, @buf_list, @sequence and @state
+ * @queue_buf: Keeps track of buffers given to HW slot
+ * @buf_list: list of queued buffers
+ * @sequence: V4L2 buffers sequence number
+ * @state: keeps track of operation state
+ *
+ * @is_csi: flag to mark the VIN as using a CSI-2 subdevice
+ *
+ * @mbus_code: media bus format code
+ * @format: active V4L2 pixel format
+ *
+ * @crop: active cropping
+ * @compose: active composing
+ * @source: active size of the video source
+ * @std: active video standard of the video source
+ */
+struct rvin_dev {
+ struct device *dev;
+ void __iomem *base;
+ const struct rvin_info *info;
+
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_async_notifier notifier;
+
+ struct rvin_parallel_entity *parallel;
+
+ struct rvin_group *group;
+ unsigned int id;
+ struct media_pad pad;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+ void *scratch;
+ dma_addr_t scratch_phys;
+
+ spinlock_t qlock;
+ struct vb2_v4l2_buffer *queue_buf[HW_BUFFER_NUM];
+ struct list_head buf_list;
+ unsigned int sequence;
+ enum rvin_dma_state state;
+
+ bool is_csi;
+
+ u32 mbus_code;
+ struct v4l2_pix_format format;
+
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_rect source;
+ v4l2_std_id std;
+};
+
+#define vin_to_source(vin) ((vin)->parallel->subdev)
+
+/* Debug */
+#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
+#define vin_info(d, fmt, arg...) dev_info(d->dev, fmt, ##arg)
+#define vin_warn(d, fmt, arg...) dev_warn(d->dev, fmt, ##arg)
+#define vin_err(d, fmt, arg...) dev_err(d->dev, fmt, ##arg)
+
+/**
+ * struct rvin_group - VIN CSI2 group information
+ * @refcount: number of VIN instances using the group
+ *
+ * @mdev: media device which represents the group
+ *
+ * @lock: protects the count, notifier, vin and csi members
+ * @count: number of enabled VIN instances found in DT
+ * @notifier: group notifier for CSI-2 async subdevices
+ * @vin: VIN instances which are part of the group
+ * @csi: array of pairs of fwnode and subdev pointers
+ * to all CSI-2 subdevices.
+ */
+struct rvin_group {
+ struct kref refcount;
+
+ struct media_device mdev;
+
+ struct mutex lock;
+ unsigned int count;
+ struct v4l2_async_notifier notifier;
+ struct rvin_dev *vin[RCAR_VIN_NUM];
+
+ struct {
+ struct fwnode_handle *fwnode;
+ struct v4l2_subdev *subdev;
+ } csi[RVIN_CSI_MAX];
+};
+
+int rvin_dma_register(struct rvin_dev *vin, int irq);
+void rvin_dma_unregister(struct rvin_dev *vin);
+
+int rvin_v4l2_register(struct rvin_dev *vin);
+void rvin_v4l2_unregister(struct rvin_dev *vin);
+
+const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat);
+
+/* Cropping, composing and scaling */
+void rvin_crop_scale_comp(struct rvin_dev *vin);
+
+int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel);
+
+#endif
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
new file mode 100644
index 000000000..81413ab52
--- /dev/null
+++ b/drivers/media/platform/rcar_drif.c
@@ -0,0 +1,1499 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * R-Car Gen3 Digital Radio Interface (DRIF) driver
+ *
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The R-Car DRIF is a receive-only, MSIOF-like controller with an
+ * external master device driving the SCK. It receives data into a FIFO,
+ * then this driver uses the SYS-DMAC engine to move the data from
+ * the device to memory.
+ *
+ * Each DRIF channel DRIFx (as per datasheet) contains two internal
+ * channels, DRIFx0 & DRIFx1, each having its own resources
+ * like module clk, register set, irq and dma. These internal channels share
+ * common CLK & SYNC from master. The two data pins D0 & D1 shall be
+ * considered to represent the two internal channels. This internal split
+ * is not visible to the master device.
+ *
+ * Depending on the master device, a DRIF channel can use
+ * (1) both internal channels (D0 & D1) to receive data in parallel, or
+ * (2) one internal channel (D0 or D1) to receive data
+ *
+ * The primary design goal of this controller is to act as a Digital Radio
+ * Interface that receives digital samples from a tuner device. Hence the
+ * driver exposes the device as a V4L2 SDR device. In order to qualify as
+ * a V4L2 SDR device, it should possess a tuner interface as mandated by the
+ * framework. This driver expects a tuner driver (sub-device) to bind
+ * asynchronously with this device and the combined drivers shall expose
+ * a V4L2 compliant SDR device. The DRIF driver is independent of the
+ * tuner vendor.
+ *
+ * The DRIF h/w can support I2S mode and Frame start synchronization pulse mode.
+ * This driver is tested for I2S mode only because of the availability of
+ * suitable master devices. Hence, not all configurable options of DRIF h/w
+ * like lsb/msb first, syncdl, dtdl etc. are exposed via DT and I2S defaults
+ * are used. These can be exposed later if needed after testing.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/ioctl.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+
+/* DRIF register offsets */
+#define RCAR_DRIF_SITMDR1 0x00
+#define RCAR_DRIF_SITMDR2 0x04
+#define RCAR_DRIF_SITMDR3 0x08
+#define RCAR_DRIF_SIRMDR1 0x10
+#define RCAR_DRIF_SIRMDR2 0x14
+#define RCAR_DRIF_SIRMDR3 0x18
+#define RCAR_DRIF_SICTR 0x28
+#define RCAR_DRIF_SIFCTR 0x30
+#define RCAR_DRIF_SISTR 0x40
+#define RCAR_DRIF_SIIER 0x44
+#define RCAR_DRIF_SIRFDR 0x60
+
+#define RCAR_DRIF_RFOVF BIT(3) /* Receive FIFO overflow */
+#define RCAR_DRIF_RFUDF BIT(4) /* Receive FIFO underflow */
+#define RCAR_DRIF_RFSERR BIT(5) /* Receive frame sync error */
+#define RCAR_DRIF_REOF BIT(7) /* Frame reception end */
+#define RCAR_DRIF_RDREQ BIT(12) /* Receive data xfer req */
+#define RCAR_DRIF_RFFUL BIT(13) /* Receive FIFO full */
+
+/* SIRMDR1 */
+#define RCAR_DRIF_SIRMDR1_SYNCMD_FRAME (0 << 28)
+#define RCAR_DRIF_SIRMDR1_SYNCMD_LR (3 << 28)
+
+#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH (0 << 25)
+#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW (1 << 25)
+
+#define RCAR_DRIF_SIRMDR1_MSB_FIRST (0 << 24)
+#define RCAR_DRIF_SIRMDR1_LSB_FIRST (1 << 24)
+
+#define RCAR_DRIF_SIRMDR1_DTDL_0 (0 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_1 (1 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_2 (2 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_0PT5 (5 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_1PT5 (6 << 20)
+
+#define RCAR_DRIF_SIRMDR1_SYNCDL_0 (0 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_1 (1 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_2 (2 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_3 (3 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_0PT5 (5 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_1PT5 (6 << 20)
+
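+/* MDR2/MDR3 field helpers: hardware encodes each count as (value - 1) */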
+#define RCAR_DRIF_MDR_GRPCNT(n) (((n) - 1) << 30)
+#define RCAR_DRIF_MDR_BITLEN(n) (((n) - 1) << 24)
+#define RCAR_DRIF_MDR_WDCNT(n) (((n) - 1) << 16)
+
+/* Hidden Transmit register that controls CLK & SYNC */
+#define RCAR_DRIF_SITMDR1_PCON BIT(30)
+
+#define RCAR_DRIF_SICTR_RX_RISING_EDGE BIT(26)
+#define RCAR_DRIF_SICTR_RX_EN BIT(8)
+#define RCAR_DRIF_SICTR_RESET BIT(0)
+
+/* Constants */
+#define RCAR_DRIF_NUM_HWBUFS 32
+#define RCAR_DRIF_MAX_DEVS 4
+#define RCAR_DRIF_DEFAULT_NUM_HWBUFS 16
+#define RCAR_DRIF_DEFAULT_HWBUF_SIZE (4 * PAGE_SIZE)
+#define RCAR_DRIF_MAX_CHANNEL 2
+#define RCAR_SDR_BUFFER_SIZE SZ_64K
+
+/* Internal buffer status flags */
+#define RCAR_DRIF_BUF_DONE BIT(0) /* DMA completed */
+#define RCAR_DRIF_BUF_OVERFLOW BIT(1) /* Overflow detected */
+
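+/*
+ * Pick the buffer with the same index on the *other* internal channel
+ * (0 <-> 1); used to pair up both channels' buffers for one frame.
+ */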
+#define to_rcar_drif_buf_pair(sdr, ch_num, idx) \
+ (&((sdr)->ch[!(ch_num)]->buf[(idx)]))
+
+#define for_each_rcar_drif_channel(ch, ch_mask) \
+ for_each_set_bit(ch, ch_mask, RCAR_DRIF_MAX_CHANNEL)
+
+/* Debug */
+#define rdrif_dbg(sdr, fmt, arg...) \
+ dev_dbg(sdr->v4l2_dev.dev, fmt, ## arg)
+
+#define rdrif_err(sdr, fmt, arg...) \
+ dev_err(sdr->v4l2_dev.dev, fmt, ## arg)
+
+/* Stream formats */
+struct rcar_drif_format {
+ u32 pixelformat;
+ u32 buffersize;
+ u32 bitlen;
+ u32 wdcnt;
+ u32 num_ch;
+};
+
+/* Format descriptions for capture */
+static const struct rcar_drif_format formats[] = {
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU16BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 16,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU18BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 18,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU20BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 20,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+};
+
+/* Buffer for a received frame from one or both internal channels */
+struct rcar_drif_frame_buf {
+ /* Common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+/* OF graph endpoint's V4L2 async data */
+struct rcar_drif_graph_ep {
+ struct v4l2_subdev *subdev; /* Async matched subdev */
+ struct v4l2_async_subdev asd; /* Async sub-device descriptor */
+};
+
+/* DMA buffer */
+struct rcar_drif_hwbuf {
+ void *addr; /* CPU-side address */
+ unsigned int status; /* Buffer status flags */
+};
+
+/* Internal channel */
+struct rcar_drif {
+ struct rcar_drif_sdr *sdr; /* Group device */
+ struct platform_device *pdev; /* Channel's pdev */
+ void __iomem *base; /* Base register address */
+ resource_size_t start; /* I/O resource offset */
+ struct dma_chan *dmach; /* Reserved DMA channel */
+ struct clk *clk; /* Module clock */
+ struct rcar_drif_hwbuf buf[RCAR_DRIF_NUM_HWBUFS]; /* H/W bufs */
+ dma_addr_t dma_handle; /* Handle for all bufs */
+ unsigned int num; /* Channel number */
+ bool acting_sdr; /* Channel acting as SDR device */
+};
+
+/* DRIF V4L2 SDR */
+struct rcar_drif_sdr {
+ struct device *dev; /* Platform device */
+ struct video_device *vdev; /* V4L2 SDR device */
+ struct v4l2_device v4l2_dev; /* V4L2 device */
+
+ /* Videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+ spinlock_t dma_lock; /* To serialize DMA cb of channels */
+
+ struct mutex v4l2_mutex; /* To serialize ioctls */
+ struct mutex vb_queue_mutex; /* To serialize streaming ioctls */
+ struct v4l2_ctrl_handler ctrl_hdl; /* SDR control handler */
+ struct v4l2_async_notifier notifier; /* For subdev (tuner) */
+ struct rcar_drif_graph_ep ep; /* Endpoint V4L2 async data */
+
+ /* Current V4L2 SDR format ptr */
+ const struct rcar_drif_format *fmt;
+
+ /* Device tree SYNC properties */
+ u32 mdr1;
+
+ /* Internals */
+ struct rcar_drif *ch[RCAR_DRIF_MAX_CHANNEL]; /* DRIFx0,1 */
+ unsigned long hw_ch_mask; /* Enabled channels per DT */
+ unsigned long cur_ch_mask; /* Used channels for an SDR FMT */
+ u32 num_hw_ch; /* Num of DT enabled channels */
+ u32 num_cur_ch; /* Num of used channels */
+ u32 hwbuf_size; /* Each DMA buffer size */
+ u32 produced; /* Buffers produced by sdr dev */
+};
+
+/* Register access functions */
+static void rcar_drif_write(struct rcar_drif *ch, u32 offset, u32 data)
+{
+ writel(data, ch->base + offset);
+}
+
+static u32 rcar_drif_read(struct rcar_drif *ch, u32 offset)
+{
+ return readl(ch->base + offset);
+}
+
+/* Release DMA channels */
+static void rcar_drif_release_dmachannels(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ if (sdr->ch[i]->dmach) {
+ dma_release_channel(sdr->ch[i]->dmach);
+ sdr->ch[i]->dmach = NULL;
+ }
+}
+
+/* Allocate DMA channels */
+static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr)
+{
+ struct dma_slave_config dma_cfg;
+ unsigned int i;
+ int ret;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ ch->dmach = dma_request_slave_channel(&ch->pdev->dev, "rx");
+ if (!ch->dmach) {
+ rdrif_err(sdr, "ch%u: dma channel req failed\n", i);
+ ret = -ENODEV;
+ goto dmach_error;
+ }
+
+ /* Configure slave */
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = (phys_addr_t)(ch->start + RCAR_DRIF_SIRFDR);
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(ch->dmach, &dma_cfg);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: dma slave config failed\n", i);
+ goto dmach_error;
+ }
+ }
+ return 0;
+
+dmach_error:
+ rcar_drif_release_dmachannels(sdr);
+ return ret;
+}
+
+/* Release queued vb2 buffers */
+static void rcar_drif_release_queued_bufs(struct rcar_drif_sdr *sdr,
+ enum vb2_buffer_state state)
+{
+ struct rcar_drif_frame_buf *fbuf, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ list_for_each_entry_safe(fbuf, tmp, &sdr->queued_bufs, list) {
+ list_del(&fbuf->list);
+ vb2_buffer_done(&fbuf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+}
+
+/* Set MDR defaults */
+static inline void rcar_drif_set_mdr1(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ /* Set defaults for enabled internal channels */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ /* Refer to the MSIOF section in the manual for this register setting */
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SITMDR1,
+ RCAR_DRIF_SITMDR1_PCON);
+
+ /* Setup MDR1 value */
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR1, sdr->mdr1);
+
+ rdrif_dbg(sdr, "ch%u: mdr1 = 0x%08x",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR1));
+ }
+}
+
+/* Set DRIF receive format */
+static int rcar_drif_set_format(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ rdrif_dbg(sdr, "setfmt: bitlen %u wdcnt %u num_ch %u\n",
+ sdr->fmt->bitlen, sdr->fmt->wdcnt, sdr->fmt->num_ch);
+
+ /* Sanity check */
+ if (sdr->fmt->num_ch > sdr->num_cur_ch) {
+ rdrif_err(sdr, "fmt num_ch %u cur_ch %u mismatch\n",
+ sdr->fmt->num_ch, sdr->num_cur_ch);
+ return -EINVAL;
+ }
+
+ /* Setup group, bitlen & wdcnt */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ u32 mdr;
+
+ /* Two groups */
+ mdr = RCAR_DRIF_MDR_GRPCNT(2) |
+ RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
+ RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR2, mdr);
+
+ mdr = RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
+ RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR3, mdr);
+
+ rdrif_dbg(sdr, "ch%u: new mdr[2,3] = 0x%08x, 0x%08x\n",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR2),
+ rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR3));
+ }
+ return 0;
+}
+
+/* Release DMA buffers */
+static void rcar_drif_release_buf(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ /* First entry contains the dma buf ptr */
+ if (ch->buf[0].addr) {
+ dma_free_coherent(&ch->pdev->dev,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ ch->buf[0].addr, ch->dma_handle);
+ ch->buf[0].addr = NULL;
+ }
+ }
+}
+
+/* Request DMA buffers */
+static int rcar_drif_request_buf(struct rcar_drif_sdr *sdr)
+{
+ int ret = -ENOMEM;
+ unsigned int i, j;
+ void *addr;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ /* Allocate DMA buffers */
+ addr = dma_alloc_coherent(&ch->pdev->dev,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ &ch->dma_handle, GFP_KERNEL);
+ if (!addr) {
+ rdrif_err(sdr,
+ "ch%u: dma alloc failed. num hwbufs %u size %u\n",
+ i, RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
+ goto error;
+ }
+
+ /* Split the chunk and populate bufctxt */
+ for (j = 0; j < RCAR_DRIF_NUM_HWBUFS; j++) {
+ ch->buf[j].addr = addr + (j * sdr->hwbuf_size);
+ ch->buf[j].status = 0;
+ }
+ }
+ return 0;
+error:
+ return ret;
+}
+
+/* Setup vb_queue minimum buffer requirements */
+static int rcar_drif_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+
+ /* Need at least 16 buffers */
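+ /* (e.g. a request for 4 buffers on an empty queue is raised to 16) */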
+ if (vq->num_buffers + *num_buffers < 16)
+ *num_buffers = 16 - vq->num_buffers;
+
+ *num_planes = 1;
+ sizes[0] = PAGE_ALIGN(sdr->fmt->buffersize);
+ rdrif_dbg(sdr, "num_bufs %d sizes[0] %d\n", *num_buffers, sizes[0]);
+
+ return 0;
+}
+
+/* Enqueue buffer */
+static void rcar_drif_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vb->vb2_queue);
+ struct rcar_drif_frame_buf *fbuf =
+ container_of(vbuf, struct rcar_drif_frame_buf, vb);
+ unsigned long flags;
+
+ rdrif_dbg(sdr, "buf_queue idx %u\n", vb->index);
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ list_add_tail(&fbuf->list, &sdr->queued_bufs);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+}
+
+/* Get a frame buf from list */
+static struct rcar_drif_frame_buf *
+rcar_drif_get_fbuf(struct rcar_drif_sdr *sdr)
+{
+ struct rcar_drif_frame_buf *fbuf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ fbuf = list_first_entry_or_null(&sdr->queued_bufs,
+ struct rcar_drif_frame_buf, list);
+ if (!fbuf) {
+ /*
+ * The app is late in enqueueing buffers. Samples are lost and there
+ * will be a gap in the sequence numbers when the app recovers.
+ */
+ rdrif_dbg(sdr, "\napp late: prod %u\n", sdr->produced);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+ return NULL;
+ }
+ list_del(&fbuf->list);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+
+ return fbuf;
+}
+
+/* Helpers to set/clear buf pair status */
+static inline bool rcar_drif_bufs_done(struct rcar_drif_hwbuf **buf)
+{
+ return (buf[0]->status & buf[1]->status & RCAR_DRIF_BUF_DONE);
+}
+
+static inline bool rcar_drif_bufs_overflow(struct rcar_drif_hwbuf **buf)
+{
+ return ((buf[0]->status | buf[1]->status) & RCAR_DRIF_BUF_OVERFLOW);
+}
+
+static inline void rcar_drif_bufs_clear(struct rcar_drif_hwbuf **buf,
+ unsigned int bit)
+{
+ unsigned int i;
+
+ for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
+ buf[i]->status &= ~bit;
+}
+
+/* Channel DMA complete */
+static void rcar_drif_channel_complete(struct rcar_drif *ch, u32 idx)
+{
+ u32 str;
+
+ ch->buf[idx].status |= RCAR_DRIF_BUF_DONE;
+
+ /* Check for DRIF errors */
+ str = rcar_drif_read(ch, RCAR_DRIF_SISTR);
+ if (unlikely(str & RCAR_DRIF_RFOVF)) {
+ /* Writing the same clears it */
+ rcar_drif_write(ch, RCAR_DRIF_SISTR, str);
+
+ /* Overflow: some samples are lost */
+ ch->buf[idx].status |= RCAR_DRIF_BUF_OVERFLOW;
+ }
+}
+
+/* DMA callback for each stage */
+static void rcar_drif_dma_complete(void *dma_async_param)
+{
+ struct rcar_drif *ch = dma_async_param;
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ struct rcar_drif_hwbuf *buf[RCAR_DRIF_MAX_CHANNEL];
+ struct rcar_drif_frame_buf *fbuf;
+ bool overflow = false;
+ u32 idx, produced;
+ unsigned int i;
+
+ spin_lock(&sdr->dma_lock);
+
+ /* DMA can be terminated while the callback was waiting on lock */
+ if (!vb2_is_streaming(&sdr->vb_queue)) {
+ spin_unlock(&sdr->dma_lock);
+ return;
+ }
+
+ idx = sdr->produced % RCAR_DRIF_NUM_HWBUFS;
+ rcar_drif_channel_complete(ch, idx);
+
+ if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL) {
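+ /*
+ * Order the pair so buf[0] always refers to internal channel 0 and
+ * buf[1] to channel 1, no matter which channel's DMA completion
+ * triggered this callback.
+ */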
+ buf[0] = ch->num ? to_rcar_drif_buf_pair(sdr, ch->num, idx) :
+ &ch->buf[idx];
+ buf[1] = ch->num ? &ch->buf[idx] :
+ to_rcar_drif_buf_pair(sdr, ch->num, idx);
+
+ /* Check if both DMA buffers are done */
+ if (!rcar_drif_bufs_done(buf)) {
+ spin_unlock(&sdr->dma_lock);
+ return;
+ }
+
+ /* Clear buf done status */
+ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_DONE);
+
+ if (rcar_drif_bufs_overflow(buf)) {
+ overflow = true;
+ /* Clear the flag in status */
+ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_OVERFLOW);
+ }
+ } else {
+ buf[0] = &ch->buf[idx];
+ if (buf[0]->status & RCAR_DRIF_BUF_OVERFLOW) {
+ overflow = true;
+ /* Clear the flag in status */
+ buf[0]->status &= ~RCAR_DRIF_BUF_OVERFLOW;
+ }
+ }
+
+ /* Buffer produced for consumption */
+ produced = sdr->produced++;
+ spin_unlock(&sdr->dma_lock);
+
+ rdrif_dbg(sdr, "ch%u: prod %u\n", ch->num, produced);
+
+ /* Get fbuf */
+ fbuf = rcar_drif_get_fbuf(sdr);
+ if (!fbuf)
+ return;
+
+ for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
+ memcpy(vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0) +
+ i * sdr->hwbuf_size, buf[i]->addr, sdr->hwbuf_size);
+
+ fbuf->vb.field = V4L2_FIELD_NONE;
+ fbuf->vb.sequence = produced;
+ fbuf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, sdr->fmt->buffersize);
+
+ /* Set error state on overflow */
+ vb2_buffer_done(&fbuf->vb.vb2_buf,
+ overflow ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+}
+
+static int rcar_drif_qbuf(struct rcar_drif *ch)
+{
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ dma_addr_t addr = ch->dma_handle;
+ struct dma_async_tx_descriptor *rxd;
+ dma_cookie_t cookie;
+ int ret = -EIO;
+
+ /* Setup cyclic DMA with given buffers */
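+ /*
+ * The per-channel DMA area is one cyclic transfer split into
+ * RCAR_DRIF_NUM_HWBUFS periods of hwbuf_size bytes each; the engine
+ * calls rcar_drif_dma_complete() once per completed period.
+ */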
+ rxd = dmaengine_prep_dma_cyclic(ch->dmach, addr,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ sdr->hwbuf_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxd) {
+ rdrif_err(sdr, "ch%u: prep dma cyclic failed\n", ch->num);
+ return ret;
+ }
+
+ /* Submit descriptor */
+ rxd->callback = rcar_drif_dma_complete;
+ rxd->callback_param = ch;
+ cookie = dmaengine_submit(rxd);
+ if (dma_submit_error(cookie)) {
+ rdrif_err(sdr, "ch%u: dma submit failed\n", ch->num);
+ return ret;
+ }
+
+ dma_async_issue_pending(ch->dmach);
+ return 0;
+}
+
+/* Enable reception */
+static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+ u32 ctr;
+ int ret = -EINVAL;
+
+ /*
+ * When both internal channels are enabled, they can be synchronized
+ * only by the master
+ */
+
+ /* Enable receive */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
+ ctr |= (RCAR_DRIF_SICTR_RX_RISING_EDGE |
+ RCAR_DRIF_SICTR_RX_EN);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
+ }
+
+ /* Check receive enabled */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
+ ctr, ctr & RCAR_DRIF_SICTR_RX_EN, 7, 100000);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: rx en failed. ctr 0x%08x\n", i,
+ rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
+ break;
+ }
+ }
+ return ret;
+}
+
+/* Disable reception */
+static void rcar_drif_disable_rx(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+ u32 ctr;
+ int ret;
+
+ /* Disable receive */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
+ ctr &= ~RCAR_DRIF_SICTR_RX_EN;
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
+ }
+
+ /* Check receive disabled */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
+ ctr, !(ctr & RCAR_DRIF_SICTR_RX_EN), 7, 100000);
+ if (ret)
+ dev_warn(&sdr->vdev->dev,
+ "ch%u: failed to disable rx. ctr 0x%08x\n",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
+ }
+}
+
+/* Stop channel */
+static void rcar_drif_stop_channel(struct rcar_drif *ch)
+{
+ /* Disable DMA receive interrupt */
+ rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00000000);
+
+ /* Terminate all DMA transfers */
+ dmaengine_terminate_sync(ch->dmach);
+}
+
+/* Stop receive operation */
+static void rcar_drif_stop(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ /* Disable Rx */
+ rcar_drif_disable_rx(sdr);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ rcar_drif_stop_channel(sdr->ch[i]);
+}
+
+/* Start channel */
+static int rcar_drif_start_channel(struct rcar_drif *ch)
+{
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ u32 ctr, str;
+ int ret;
+
+ /* Reset receive */
+ rcar_drif_write(ch, RCAR_DRIF_SICTR, RCAR_DRIF_SICTR_RESET);
+ ret = readl_poll_timeout(ch->base + RCAR_DRIF_SICTR, ctr,
+ !(ctr & RCAR_DRIF_SICTR_RESET), 7, 100000);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: failed to reset rx. ctr 0x%08x\n",
+ ch->num, rcar_drif_read(ch, RCAR_DRIF_SICTR));
+ return ret;
+ }
+
+ /* Queue buffers for DMA */
+ ret = rcar_drif_qbuf(ch);
+ if (ret)
+ return ret;
+
+ /* Clear status register flags */
+ str = RCAR_DRIF_RFFUL | RCAR_DRIF_REOF | RCAR_DRIF_RFSERR |
+ RCAR_DRIF_RFUDF | RCAR_DRIF_RFOVF;
+ rcar_drif_write(ch, RCAR_DRIF_SISTR, str);
+
+ /* Enable DMA receive interrupt */
+ rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00009000);
+
+ return ret;
+}
+
+/* Start receive operation */
+static int rcar_drif_start(struct rcar_drif_sdr *sdr)
+{
+ unsigned long enabled = 0;
+ unsigned int i;
+ int ret;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = rcar_drif_start_channel(sdr->ch[i]);
+ if (ret)
+ goto start_error;
+ enabled |= BIT(i);
+ }
+
+ ret = rcar_drif_enable_rx(sdr);
+ if (ret)
+ goto enable_error;
+
+ sdr->produced = 0;
+ return ret;
+
+enable_error:
+ rcar_drif_disable_rx(sdr);
+start_error:
+ for_each_rcar_drif_channel(i, &enabled)
+ rcar_drif_stop_channel(sdr->ch[i]);
+
+ return ret;
+}
+
+/* Start streaming */
+static int rcar_drif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+ unsigned long enabled = 0;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&sdr->v4l2_mutex);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = clk_prepare_enable(sdr->ch[i]->clk);
+ if (ret)
+ goto error;
+ enabled |= BIT(i);
+ }
+
+ /* Set default MDRx settings */
+ rcar_drif_set_mdr1(sdr);
+
+ /* Set new format */
+ ret = rcar_drif_set_format(sdr);
+ if (ret)
+ goto error;
+
+ if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL)
+ sdr->hwbuf_size = sdr->fmt->buffersize / RCAR_DRIF_MAX_CHANNEL;
+ else
+ sdr->hwbuf_size = sdr->fmt->buffersize;
+
+ rdrif_dbg(sdr, "num hwbufs %u, hwbuf_size %u\n",
+ RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
+
+ /* Alloc DMA channel */
+ ret = rcar_drif_alloc_dmachannels(sdr);
+ if (ret)
+ goto error;
+
+ /* Request buffers */
+ ret = rcar_drif_request_buf(sdr);
+ if (ret)
+ goto error;
+
+ /* Start Rx */
+ ret = rcar_drif_start(sdr);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&sdr->v4l2_mutex);
+
+ return ret;
+
+error:
+ rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_QUEUED);
+ rcar_drif_release_buf(sdr);
+ rcar_drif_release_dmachannels(sdr);
+ for_each_rcar_drif_channel(i, &enabled)
+ clk_disable_unprepare(sdr->ch[i]->clk);
+
+ mutex_unlock(&sdr->v4l2_mutex);
+
+ return ret;
+}
+
+/* Stop streaming */
+static void rcar_drif_stop_streaming(struct vb2_queue *vq)
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+ unsigned int i;
+
+ mutex_lock(&sdr->v4l2_mutex);
+
+ /* Stop hardware streaming */
+ rcar_drif_stop(sdr);
+
+ /* Return all queued buffers to vb2 */
+ rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_ERROR);
+
+ /* Release buf */
+ rcar_drif_release_buf(sdr);
+
+ /* Release DMA channel resources */
+ rcar_drif_release_dmachannels(sdr);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ clk_disable_unprepare(sdr->ch[i]->clk);
+
+ mutex_unlock(&sdr->v4l2_mutex);
+}
+
+/* Vb2 ops */
+static const struct vb2_ops rcar_drif_vb2_ops = {
+ .queue_setup = rcar_drif_queue_setup,
+ .buf_queue = rcar_drif_buf_queue,
+ .start_streaming = rcar_drif_start_streaming,
+ .stop_streaming = rcar_drif_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int rcar_drif_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, sdr->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ sdr->vdev->name);
+
+ return 0;
+}
+
+static int rcar_drif_set_default_format(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ /* The first format matching the number of enabled channels is the default */
+ if (sdr->num_hw_ch == formats[i].num_ch) {
+ sdr->fmt = &formats[i];
+ sdr->cur_ch_mask = sdr->hw_ch_mask;
+ sdr->num_cur_ch = sdr->num_hw_ch;
+ dev_dbg(sdr->dev, "default fmt[%u]: mask %lu num %u\n",
+ i, sdr->cur_ch_mask, sdr->num_cur_ch);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int rcar_drif_enum_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].pixelformat;
+
+ return 0;
+}
+
+static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ f->fmt.sdr.buffersize = sdr->fmt->buffersize;
+
+ return 0;
+}
+
+static int rcar_drif_s_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+ struct vb2_queue *q = &sdr->vb_queue;
+ unsigned int i;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ i = 0; /* Set the 1st format as default on no match */
+
+ sdr->fmt = &formats[i];
+ f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+
+ /*
+ * If a format demands one channel only out of two
+ * enabled channels, pick the 0th channel.
+ */
+ if (formats[i].num_ch < sdr->num_hw_ch) {
+ sdr->cur_ch_mask = BIT(0);
+ sdr->num_cur_ch = formats[i].num_ch;
+ } else {
+ sdr->cur_ch_mask = sdr->hw_ch_mask;
+ sdr->num_cur_ch = sdr->num_hw_ch;
+ }
+
+ rdrif_dbg(sdr, "cur: idx %u mask %lu num %u\n",
+ i, sdr->cur_ch_mask, sdr->num_cur_ch);
+
+ return 0;
+}
+
+static int rcar_drif_try_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ return 0;
+ }
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ f->fmt.sdr.buffersize = formats[0].buffersize;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+
+ return 0;
+}
+
+/* Tuner subdev ioctls */
+static int rcar_drif_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, enum_freq_bands, band);
+}
+
+static int rcar_drif_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, g_frequency, f);
+}
+
+static int rcar_drif_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, s_frequency, f);
+}
+
+static int rcar_drif_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *vt)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, g_tuner, vt);
+}
+
+static int rcar_drif_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *vt)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, s_tuner, vt);
+}
+
+static const struct v4l2_ioctl_ops rcar_drif_ioctl_ops = {
+ .vidioc_querycap = rcar_drif_querycap,
+
+ .vidioc_enum_fmt_sdr_cap = rcar_drif_enum_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = rcar_drif_g_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = rcar_drif_s_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = rcar_drif_try_fmt_sdr_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_s_frequency = rcar_drif_s_frequency,
+ .vidioc_g_frequency = rcar_drif_g_frequency,
+ .vidioc_s_tuner = rcar_drif_s_tuner,
+ .vidioc_g_tuner = rcar_drif_g_tuner,
+ .vidioc_enum_freq_bands = rcar_drif_enum_freq_bands,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+static const struct v4l2_file_operations rcar_drif_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static int rcar_drif_sdr_register(struct rcar_drif_sdr *sdr)
+{
+ int ret;
+
+ /* Init video_device structure */
+ sdr->vdev = video_device_alloc();
+ if (!sdr->vdev)
+ return -ENOMEM;
+
+ snprintf(sdr->vdev->name, sizeof(sdr->vdev->name), "R-Car DRIF");
+ sdr->vdev->fops = &rcar_drif_fops;
+ sdr->vdev->ioctl_ops = &rcar_drif_ioctl_ops;
+ sdr->vdev->release = video_device_release;
+ sdr->vdev->lock = &sdr->v4l2_mutex;
+ sdr->vdev->queue = &sdr->vb_queue;
+ sdr->vdev->queue->lock = &sdr->vb_queue_mutex;
+ sdr->vdev->ctrl_handler = &sdr->ctrl_hdl;
+ sdr->vdev->v4l2_dev = &sdr->v4l2_dev;
+ sdr->vdev->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ video_set_drvdata(sdr->vdev, sdr);
+
+ /* Register V4L2 SDR device */
+ ret = video_register_device(sdr->vdev, VFL_TYPE_SDR, -1);
+ if (ret) {
+ video_device_release(sdr->vdev);
+ sdr->vdev = NULL;
+ dev_err(sdr->dev, "failed video_register_device (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static void rcar_drif_sdr_unregister(struct rcar_drif_sdr *sdr)
+{
+ video_unregister_device(sdr->vdev);
+ sdr->vdev = NULL;
+}
+
+/* Sub-device bound callback */
+static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+
+ if (sdr->ep.asd.match.fwnode !=
+ of_fwnode_handle(subdev->dev->of_node)) {
+ rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
+ return -EINVAL;
+ }
+
+ v4l2_set_subdev_hostdata(subdev, sdr);
+ sdr->ep.subdev = subdev;
+ rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
+
+ return 0;
+}
+
+/* Sub-device unbind callback */
+static void rcar_drif_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+
+ if (sdr->ep.subdev != subdev) {
+ rdrif_err(sdr, "subdev %s is not bound\n", subdev->name);
+ return;
+ }
+
+ /* Free ctrl handler if initialized */
+ v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
+ sdr->v4l2_dev.ctrl_handler = NULL;
+ sdr->ep.subdev = NULL;
+
+ rcar_drif_sdr_unregister(sdr);
+ rdrif_dbg(sdr, "unbind asd %s\n", subdev->name);
+}
+
+/* Sub-device registered notification callback */
+static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+ int ret;
+
+ /*
+ * The subdev tested at this point uses 4 controls. Using 10 as a worst
+ * case scenario hint. When fewer controls are needed there will be some
+ * unused memory, and when more controls are needed the framework uses
+ * a hash to manage controls within this number.
+ */
+ ret = v4l2_ctrl_handler_init(&sdr->ctrl_hdl, 10);
+ if (ret)
+ return -ENOMEM;
+
+ sdr->v4l2_dev.ctrl_handler = &sdr->ctrl_hdl;
+ ret = v4l2_device_register_subdev_nodes(&sdr->v4l2_dev);
+ if (ret) {
+ rdrif_err(sdr, "failed: register subdev nodes ret %d\n", ret);
+ goto error;
+ }
+
+ ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl,
+ sdr->ep.subdev->ctrl_handler, NULL);
+ if (ret) {
+ rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret);
+ goto error;
+ }
+
+ ret = rcar_drif_sdr_register(sdr);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
+
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
+ .bound = rcar_drif_notify_bound,
+ .unbind = rcar_drif_notify_unbind,
+ .complete = rcar_drif_notify_complete,
+};
+
+/* Read endpoint properties */
+static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
+ struct fwnode_handle *fwnode)
+{
+ u32 val;
+
+ /* Set the I2S defaults for SIRMDR1 */
+ sdr->mdr1 = RCAR_DRIF_SIRMDR1_SYNCMD_LR | RCAR_DRIF_SIRMDR1_MSB_FIRST |
+ RCAR_DRIF_SIRMDR1_DTDL_1 | RCAR_DRIF_SIRMDR1_SYNCDL_0;
+
+ /* Parse sync polarity from endpoint */
+ if (!fwnode_property_read_u32(fwnode, "sync-active", &val))
+ sdr->mdr1 |= val ? RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH :
+ RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW;
+ else
+ sdr->mdr1 |= RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH; /* default */
+
+ dev_dbg(sdr->dev, "mdr1 0x%08x\n", sdr->mdr1);
+}
+
+/* Parse sub-devs (tuner) to find a matching device */
+static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
+{
+ struct v4l2_async_notifier *notifier = &sdr->notifier;
+ struct fwnode_handle *fwnode, *ep;
+
+ notifier->subdevs = devm_kzalloc(sdr->dev, sizeof(*notifier->subdevs),
+ GFP_KERNEL);
+ if (!notifier->subdevs)
+ return -ENOMEM;
+
+ ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
+ NULL);
+ if (!ep)
+ return 0;
+
+ notifier->subdevs[notifier->num_subdevs] = &sdr->ep.asd;
+ fwnode = fwnode_graph_get_remote_port_parent(ep);
+ if (!fwnode) {
+ dev_warn(sdr->dev, "bad remote port parent\n");
+ fwnode_handle_put(ep);
+ return -EINVAL;
+ }
+
+ sdr->ep.asd.match.fwnode = fwnode;
+ sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+
+ /* Get the endpoint properties */
+ rcar_drif_get_ep_properties(sdr, ep);
+
+ fwnode_handle_put(fwnode);
+ fwnode_handle_put(ep);
+
+ return 0;
+}
+
+/* Check if the given device is the primary bond */
+static bool rcar_drif_primary_bond(struct platform_device *pdev)
+{
+ return of_property_read_bool(pdev->dev.of_node, "renesas,primary-bond");
+}
+
+/* Check if both devices of the bond are enabled */
+static struct device_node *rcar_drif_bond_enabled(struct platform_device *p)
+{
+ struct device_node *np;
+
+ np = of_parse_phandle(p->dev.of_node, "renesas,bonding", 0);
+ if (np && of_device_is_available(np))
+ return np;
+
+ return NULL;
+}
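+
+/*
+ * Illustrative sketch of the bonding properties this driver reads; the node
+ * and label names below are hypothetical, only the properties matter:
+ *
+ *	drif00: rif0 {
+ *		compatible = "renesas,rcar-gen3-drif";
+ *		renesas,bonding = <&drif01>;
+ *		renesas,primary-bond;
+ *	};
+ *
+ *	drif01: rif1 {
+ *		compatible = "renesas,rcar-gen3-drif";
+ *		renesas,bonding = <&drif00>;
+ *	};
+ */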
+
+/* Check if the bonded device is probed */
+static int rcar_drif_bond_available(struct rcar_drif_sdr *sdr,
+ struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct rcar_drif *ch;
+ int ret = 0;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(sdr->dev, "failed to get bonded device from node\n");
+ return -ENODEV;
+ }
+
+ device_lock(&pdev->dev);
+ ch = platform_get_drvdata(pdev);
+ if (ch) {
+ /* Update sdr data in the bonded device */
+ ch->sdr = sdr;
+
+ /* Update sdr with bonded device data */
+ sdr->ch[ch->num] = ch;
+ sdr->hw_ch_mask |= BIT(ch->num);
+ } else {
+ /* Defer */
+ dev_info(sdr->dev, "defer probe\n");
+ ret = -EPROBE_DEFER;
+ }
+ device_unlock(&pdev->dev);
+
+ put_device(&pdev->dev);
+
+ return ret;
+}
+
+/* V4L2 SDR device probe */
+static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
+{
+ int ret;
+
+ /* Validate any supported format for enabled channels */
+ ret = rcar_drif_set_default_format(sdr);
+ if (ret) {
+ dev_err(sdr->dev, "failed to set default format\n");
+ return ret;
+ }
+
+ /* Set defaults */
+ sdr->hwbuf_size = RCAR_DRIF_DEFAULT_HWBUF_SIZE;
+
+ mutex_init(&sdr->v4l2_mutex);
+ mutex_init(&sdr->vb_queue_mutex);
+ spin_lock_init(&sdr->queued_bufs_lock);
+ spin_lock_init(&sdr->dma_lock);
+ INIT_LIST_HEAD(&sdr->queued_bufs);
+
+ /* Init videobuf2 queue structure */
+ sdr->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ sdr->vb_queue.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
+ sdr->vb_queue.drv_priv = sdr;
+ sdr->vb_queue.buf_struct_size = sizeof(struct rcar_drif_frame_buf);
+ sdr->vb_queue.ops = &rcar_drif_vb2_ops;
+ sdr->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ sdr->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ /* Init videobuf2 queue */
+ ret = vb2_queue_init(&sdr->vb_queue);
+ if (ret) {
+ dev_err(sdr->dev, "failed: vb2_queue_init ret %d\n", ret);
+ return ret;
+ }
+
+ /* Register the v4l2_device */
+ ret = v4l2_device_register(sdr->dev, &sdr->v4l2_dev);
+ if (ret) {
+ dev_err(sdr->dev, "failed: v4l2_device_register ret %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Parse subdevs after v4l2_device_register because if the subdev
+ * is already probed, bound and complete will be called immediately
+ */
+ ret = rcar_drif_parse_subdevs(sdr);
+ if (ret)
+ goto error;
+
+ sdr->notifier.ops = &rcar_drif_notify_ops;
+
+ /* Register notifier */
+ ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
+ if (ret < 0) {
+ dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
+ goto error;
+ }
+
+ return ret;
+
+error:
+ v4l2_device_unregister(&sdr->v4l2_dev);
+
+ return ret;
+}
+
+/* V4L2 SDR device remove */
+static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
+{
+ v4l2_async_notifier_unregister(&sdr->notifier);
+ v4l2_device_unregister(&sdr->v4l2_dev);
+}
+
+/* DRIF channel probe */
+static int rcar_drif_probe(struct platform_device *pdev)
+{
+ struct rcar_drif_sdr *sdr;
+ struct device_node *np;
+ struct rcar_drif *ch;
+ struct resource *res;
+ int ret;
+
+ /* Reserve memory for enabled channel */
+ ch = devm_kzalloc(&pdev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->pdev = pdev;
+
+ /* Module clock */
+ ch->clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(ch->clk)) {
+ ret = PTR_ERR(ch->clk);
+ dev_err(&pdev->dev, "clk get failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Register map */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ch->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ch->base)) {
+ ret = PTR_ERR(ch->base);
+ dev_err(&pdev->dev, "ioremap failed (%d)\n", ret);
+ return ret;
+ }
+ ch->start = res->start;
+ platform_set_drvdata(pdev, ch);
+
+ /* Check if both channels of the bond are enabled */
+ np = rcar_drif_bond_enabled(pdev);
+ if (np) {
+ /* Check if the current channel is acting as the primary bond */
+ if (!rcar_drif_primary_bond(pdev)) {
+ ch->num = 1; /* Primary bond is channel 0 always */
+ of_node_put(np);
+ return 0;
+ }
+ }
+
+ /* Reserve memory for SDR structure */
+ sdr = devm_kzalloc(&pdev->dev, sizeof(*sdr), GFP_KERNEL);
+ if (!sdr) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+ ch->sdr = sdr;
+ sdr->dev = &pdev->dev;
+
+ /* Establish links between SDR and channel(s) */
+ sdr->ch[ch->num] = ch;
+ sdr->hw_ch_mask = BIT(ch->num);
+ if (np) {
+ /* Check if bonded device is ready */
+ ret = rcar_drif_bond_available(sdr, np);
+ of_node_put(np);
+ if (ret)
+ return ret;
+ }
+ sdr->num_hw_ch = hweight_long(sdr->hw_ch_mask);
+
+ return rcar_drif_sdr_probe(sdr);
+}
+
+/* DRIF channel remove */
+static int rcar_drif_remove(struct platform_device *pdev)
+{
+ struct rcar_drif *ch = platform_get_drvdata(pdev);
+ struct rcar_drif_sdr *sdr = ch->sdr;
+
+ /* Channel 0 will be the SDR instance */
+ if (ch->num)
+ return 0;
+
+ /* SDR instance */
+ rcar_drif_sdr_remove(sdr);
+
+ return 0;
+}
+
+/* FIXME: Implement suspend/resume support */
+static int __maybe_unused rcar_drif_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused rcar_drif_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
+ rcar_drif_resume);
+
+static const struct of_device_id rcar_drif_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-drif" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rcar_drif_of_table);
+
+#define RCAR_DRIF_DRV_NAME "rcar_drif"
+static struct platform_driver rcar_drif_driver = {
+ .driver = {
+ .name = RCAR_DRIF_DRV_NAME,
+ .of_match_table = of_match_ptr(rcar_drif_of_table),
+ .pm = &rcar_drif_pm_ops,
+ },
+ .probe = rcar_drif_probe,
+ .remove = rcar_drif_remove,
+};
+
+module_platform_driver(rcar_drif_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver");
+MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
new file mode 100644
index 000000000..2bd5898a6
--- /dev/null
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -0,0 +1,2453 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Renesas R-Car Fine Display Processor
+ *
+ * Video format converter and frame deinterlacer device.
+ *
+ * Author: Kieran Bingham, <kieran@bingham.xyz>
+ * Copyright (c) 2016 Renesas Electronics Corporation.
+ *
+ * This code is developed and inspired from the vim2m, rcar_jpu,
+ * m2m-deinterlace, and vsp1 drivers.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <media/rcar-fcp.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activate debug info");
+
+/* Minimum and maximum frame width/height */
+#define FDP1_MIN_W 80U
+#define FDP1_MIN_H 80U
+
+#define FDP1_MAX_W 3840U
+#define FDP1_MAX_H 2160U
+
+#define FDP1_MAX_PLANES 3U
+#define FDP1_MAX_STRIDE 8190U
+
+/* Flags that indicate a format can be used for capture/output */
+#define FDP1_CAPTURE BIT(0)
+#define FDP1_OUTPUT BIT(1)
+
+#define DRIVER_NAME "rcar_fdp1"
+
+/* Number of jobs to have available on the processing queue */
+#define FDP1_NUMBER_JOBS 8
+
+#define dprintk(fdp1, fmt, arg...) \
+ v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/*
+ * FDP1 registers and bits
+ */
+
+/* FDP1 start register - Imm */
+#define FD1_CTL_CMD 0x0000
+#define FD1_CTL_CMD_STRCMD BIT(0)
+
+/* Sync generator register - Imm */
+#define FD1_CTL_SGCMD 0x0004
+#define FD1_CTL_SGCMD_SGEN BIT(0)
+
+/* Register set end register - Imm */
+#define FD1_CTL_REGEND 0x0008
+#define FD1_CTL_REGEND_REGEND BIT(0)
+
+/* Channel activation register - Vupdt */
+#define FD1_CTL_CHACT 0x000c
+#define FD1_CTL_CHACT_SMW BIT(9)
+#define FD1_CTL_CHACT_WR BIT(8)
+#define FD1_CTL_CHACT_SMR BIT(3)
+#define FD1_CTL_CHACT_RD2 BIT(2)
+#define FD1_CTL_CHACT_RD1 BIT(1)
+#define FD1_CTL_CHACT_RD0 BIT(0)
+
+/* Operation Mode Register - Vupdt */
+#define FD1_CTL_OPMODE 0x0010
+#define FD1_CTL_OPMODE_PRG BIT(4)
+#define FD1_CTL_OPMODE_VIMD_INTERRUPT (0 << 0)
+#define FD1_CTL_OPMODE_VIMD_BESTEFFORT (1 << 0)
+#define FD1_CTL_OPMODE_VIMD_NOINTERRUPT (2 << 0)
+
+#define FD1_CTL_VPERIOD 0x0014
+#define FD1_CTL_CLKCTRL 0x0018
+#define FD1_CTL_CLKCTRL_CSTP_N BIT(0)
+
+/* Software reset register */
+#define FD1_CTL_SRESET 0x001c
+#define FD1_CTL_SRESET_SRST BIT(0)
+
+/* Control status register (V-update-status) */
+#define FD1_CTL_STATUS 0x0024
+#define FD1_CTL_STATUS_VINT_CNT_MASK GENMASK(31, 16)
+#define FD1_CTL_STATUS_VINT_CNT_SHIFT 16
+#define FD1_CTL_STATUS_SGREGSET BIT(10)
+#define FD1_CTL_STATUS_SGVERR BIT(9)
+#define FD1_CTL_STATUS_SGFREND BIT(8)
+#define FD1_CTL_STATUS_BSY BIT(0)
+
+#define FD1_CTL_VCYCLE_STAT 0x0028
+
+/* Interrupt enable register */
+#define FD1_CTL_IRQENB 0x0038
+/* Interrupt status register */
+#define FD1_CTL_IRQSTA 0x003c
+/* Interrupt control register */
+#define FD1_CTL_IRQFSET 0x0040
+
+/* Common IRQ Bit settings */
+#define FD1_CTL_IRQ_VERE BIT(16)
+#define FD1_CTL_IRQ_VINTE BIT(4)
+#define FD1_CTL_IRQ_FREE BIT(0)
+#define FD1_CTL_IRQ_MASK (FD1_CTL_IRQ_VERE | \
+ FD1_CTL_IRQ_VINTE | \
+ FD1_CTL_IRQ_FREE)
+
+/* RPF */
+#define FD1_RPF_SIZE 0x0060
+#define FD1_RPF_SIZE_MASK GENMASK(12, 0)
+#define FD1_RPF_SIZE_H_SHIFT 16
+#define FD1_RPF_SIZE_V_SHIFT 0
+
+#define FD1_RPF_FORMAT 0x0064
+#define FD1_RPF_FORMAT_CIPM BIT(16)
+#define FD1_RPF_FORMAT_RSPYCS BIT(13)
+#define FD1_RPF_FORMAT_RSPUVS BIT(12)
+#define FD1_RPF_FORMAT_CF BIT(8)
+
+#define FD1_RPF_PSTRIDE 0x0068
+#define FD1_RPF_PSTRIDE_Y_SHIFT 16
+#define FD1_RPF_PSTRIDE_C_SHIFT 0
+
+/* RPF0 Source Component Y Address register */
+#define FD1_RPF0_ADDR_Y 0x006c
+
+/* RPF1 Current Picture Registers */
+#define FD1_RPF1_ADDR_Y 0x0078
+#define FD1_RPF1_ADDR_C0 0x007c
+#define FD1_RPF1_ADDR_C1 0x0080
+
+/* RPF2 next picture register */
+#define FD1_RPF2_ADDR_Y 0x0084
+
+#define FD1_RPF_SMSK_ADDR 0x0090
+#define FD1_RPF_SWAP 0x0094
+
+/* WPF */
+#define FD1_WPF_FORMAT 0x00c0
+#define FD1_WPF_FORMAT_PDV_SHIFT 24
+#define FD1_WPF_FORMAT_FCNL BIT(20)
+#define FD1_WPF_FORMAT_WSPYCS BIT(15)
+#define FD1_WPF_FORMAT_WSPUVS BIT(14)
+#define FD1_WPF_FORMAT_WRTM_601_16 (0 << 9)
+#define FD1_WPF_FORMAT_WRTM_601_0 (1 << 9)
+#define FD1_WPF_FORMAT_WRTM_709_16 (2 << 9)
+#define FD1_WPF_FORMAT_CSC BIT(8)
+
+#define FD1_WPF_RNDCTL 0x00c4
+#define FD1_WPF_RNDCTL_CBRM BIT(28)
+#define FD1_WPF_RNDCTL_CLMD_NOCLIP (0 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_16_235 (1 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_1_254 (2 << 12)
+
+#define FD1_WPF_PSTRIDE 0x00c8
+#define FD1_WPF_PSTRIDE_Y_SHIFT 16
+#define FD1_WPF_PSTRIDE_C_SHIFT 0
+
+/* WPF Destination picture */
+#define FD1_WPF_ADDR_Y 0x00cc
+#define FD1_WPF_ADDR_C0 0x00d0
+#define FD1_WPF_ADDR_C1 0x00d4
+#define FD1_WPF_SWAP 0x00d8
+#define FD1_WPF_SWAP_OSWAP_SHIFT 0
+#define FD1_WPF_SWAP_SSWAP_SHIFT 4
+
+/* WPF/RPF Common */
+#define FD1_RWPF_SWAP_BYTE BIT(0)
+#define FD1_RWPF_SWAP_WORD BIT(1)
+#define FD1_RWPF_SWAP_LWRD BIT(2)
+#define FD1_RWPF_SWAP_LLWD BIT(3)
+
+/* IPC */
+#define FD1_IPC_MODE 0x0100
+#define FD1_IPC_MODE_DLI BIT(8)
+#define FD1_IPC_MODE_DIM_ADAPT2D3D (0 << 0)
+#define FD1_IPC_MODE_DIM_FIXED2D (1 << 0)
+#define FD1_IPC_MODE_DIM_FIXED3D (2 << 0)
+#define FD1_IPC_MODE_DIM_PREVFIELD (3 << 0)
+#define FD1_IPC_MODE_DIM_NEXTFIELD (4 << 0)
+
+#define FD1_IPC_SMSK_THRESH 0x0104
+#define FD1_IPC_SMSK_THRESH_CONST 0x00010002
+
+#define FD1_IPC_COMB_DET 0x0108
+#define FD1_IPC_COMB_DET_CONST 0x00200040
+
+#define FD1_IPC_MOTDEC 0x010c
+#define FD1_IPC_MOTDEC_CONST 0x00008020
+
+/* DLI registers */
+#define FD1_IPC_DLI_BLEND 0x0120
+#define FD1_IPC_DLI_BLEND_CONST 0x0080ff02
+
+#define FD1_IPC_DLI_HGAIN 0x0124
+#define FD1_IPC_DLI_HGAIN_CONST 0x001000ff
+
+#define FD1_IPC_DLI_SPRS 0x0128
+#define FD1_IPC_DLI_SPRS_CONST 0x009004ff
+
+#define FD1_IPC_DLI_ANGLE 0x012c
+#define FD1_IPC_DLI_ANGLE_CONST 0x0004080c
+
+#define FD1_IPC_DLI_ISOPIX0 0x0130
+#define FD1_IPC_DLI_ISOPIX0_CONST 0xff10ff10
+
+#define FD1_IPC_DLI_ISOPIX1 0x0134
+#define FD1_IPC_DLI_ISOPIX1_CONST 0x0000ff10
+
+/* Sensor registers */
+#define FD1_IPC_SENSOR_TH0 0x0140
+#define FD1_IPC_SENSOR_TH0_CONST 0x20208080
+
+#define FD1_IPC_SENSOR_TH1 0x0144
+#define FD1_IPC_SENSOR_TH1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL0 0x0170
+#define FD1_IPC_SENSOR_CTL0_CONST 0x00002201
+
+#define FD1_IPC_SENSOR_CTL1 0x0174
+#define FD1_IPC_SENSOR_CTL1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL2 0x0178
+#define FD1_IPC_SENSOR_CTL2_X_SHIFT 16
+#define FD1_IPC_SENSOR_CTL2_Y_SHIFT 0
+
+#define FD1_IPC_SENSOR_CTL3 0x017c
+#define FD1_IPC_SENSOR_CTL3_0_SHIFT 16
+#define FD1_IPC_SENSOR_CTL3_1_SHIFT 0
+
+/* Line memory pixel number register */
+#define FD1_IPC_LMEM 0x01e0
+#define FD1_IPC_LMEM_LINEAR 1024
+#define FD1_IPC_LMEM_TILE 960
+
+/* Internal Data (HW Version) */
+#define FD1_IP_INTDATA 0x0800
+#define FD1_IP_H3_ES1 0x02010101
+#define FD1_IP_M3W 0x02010202
+#define FD1_IP_H3 0x02010203
+#define FD1_IP_M3N 0x02010204
+#define FD1_IP_E3 0x02010205
+
+/* LUTs */
+#define FD1_LUT_DIF_ADJ 0x1000
+#define FD1_LUT_SAD_ADJ 0x1400
+#define FD1_LUT_BLD_GAIN 0x1800
+#define FD1_LUT_DIF_GAIN 0x1c00
+#define FD1_LUT_MDET 0x2000
+
+/**
+ * struct fdp1_fmt - The FDP1 internal format data
+ * @fourcc: the fourcc code, to match the V4L2 API
+ * @bpp: bits per pixel per plane
+ * @num_planes: number of planes
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @fmt: 7-bit format code for the fdp1 hardware
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @swap: swap register control
+ * @types: types of queue this format is applicable to
+ */
+struct fdp1_fmt {
+ u32 fourcc;
+ u8 bpp[3];
+ u8 num_planes;
+ u8 hsub;
+ u8 vsub;
+ u8 fmt;
+ bool swap_yc;
+ bool swap_uv;
+ u8 swap;
+ u8 types;
+};
+
+static const struct fdp1_fmt fdp1_formats[] = {
+ /* RGB formats are only supported by the Write Pixel Formatter */
+
+ { V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+
+ /* YUV Formats are supported by Read and Write Pixel Formatters */
+
+ { V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+};
+
+static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
+{
+ return fmt->fmt <= 0x1b; /* Last RGB code */
+}
+
+/*
+ * FDP1 Lookup tables range from 0...255 only
+ *
+ * Each table holds at most 256 entries, and all tables
+ * are padded out to 256 entries by duplicating the last value.
+ */
+static const u8 fdp1_diff_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_sad_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_bld_gain[] = {
+ 0x80,
+};
+
+static const u8 fdp1_dif_gain[] = {
+ 0x80,
+};
+
+static const u8 fdp1_mdet[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+};
+
+/* Per-queue, driver-specific private data */
+struct fdp1_q_data {
+ const struct fdp1_fmt *fmt;
+ struct v4l2_pix_format_mplane format;
+
+ unsigned int vsize;
+ unsigned int stride_y;
+ unsigned int stride_c;
+};
+
+static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
+ fmt = &fdp1_formats[i];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+enum fdp1_deint_mode {
+ FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
+ FDP1_ADAPT2D3D,
+ FDP1_FIXED2D,
+ FDP1_FIXED3D,
+ FDP1_PREVFIELD,
+ FDP1_NEXTFIELD,
+};
+
+#define FDP1_DEINT_MODE_USES_NEXT(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_NEXTFIELD)
+
+#define FDP1_DEINT_MODE_USES_PREV(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_PREVFIELD)
+
+/*
+ * FDP1 operates on potentially 3 fields, which are tracked
+ * from the VB buffers using this context structure.
+ * Each entry always holds a single field or a full frame, never two fields.
+ */
+struct fdp1_field_buffer {
+ struct vb2_v4l2_buffer *vb;
+ dma_addr_t addrs[3];
+
+ /* Should be NONE:TOP:BOTTOM only */
+ enum v4l2_field field;
+
+ /* Flag to indicate this is the last field in the vb */
+ bool last_field;
+
+ /* Buffer queue lists */
+ struct list_head list;
+};
+
+struct fdp1_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+ struct fdp1_field_buffer fields[2];
+ unsigned int num_fields;
+};
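+
+/*
+ * Note: a single vb2 buffer can carry one or two fields (see
+ * fdp1_buf_prepare()). Each field gets its own fdp1_field_buffer,
+ * and only the entry flagged last_field returns the vb2 buffer to
+ * userspace via fdp1_field_complete().
+ */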
+
+static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
+}
+
+struct fdp1_job {
+ struct fdp1_field_buffer *previous;
+ struct fdp1_field_buffer *active;
+ struct fdp1_field_buffer *next;
+ struct fdp1_field_buffer *dst;
+
+ /* A job can only be on one list at a time */
+ struct list_head list;
+};
+
+struct fdp1_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+ spinlock_t device_process_lock;
+
+ void __iomem *regs;
+ unsigned int irq;
+ struct device *dev;
+
+ /* Job Queues */
+ struct fdp1_job jobs[FDP1_NUMBER_JOBS];
+ struct list_head free_job_list;
+ struct list_head queued_job_list;
+ struct list_head hw_job_list;
+
+ unsigned int clk_rate;
+
+ struct rcar_fcp_device *fcp;
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct fdp1_ctx {
+ struct v4l2_fh fh;
+ struct fdp1_dev *fdp1;
+
+ struct v4l2_ctrl_handler hdl;
+ unsigned int sequence;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ /* Deinterlace processing mode */
+ enum fdp1_deint_mode deint_mode;
+
+ /*
+ * Adaptive 2D/3D mode uses a shared mask
+ * This is allocated at streamon, if the ADAPT2D3D mode
+ * is requested
+ */
+ unsigned int smsk_size;
+ dma_addr_t smsk_addr[2];
+ void *smsk_cpu;
+
+	/* The capture pipeline can specify an alpha value
+	 * for supported formats; 0-255 only.
+	 */
+ unsigned char alpha;
+
+ /* Source and destination queue data */
+ struct fdp1_q_data out_q; /* HW Source */
+ struct fdp1_q_data cap_q; /* HW Destination */
+
+ /*
+ * Field Queues
+	 * Interlaced fields can be used by up to 3 jobs (as previous,
+	 * active and next) and are tracked in this list.
+ *
+ * V4L2 Buffers are tracked inside the fdp1_buffer
+ * and released when the last 'field' completes
+ */
+ struct list_head fields_queue;
+ unsigned int buffers_queued;
+
+ /*
+ * For de-interlacing we need to track our previous buffer
+ * while preparing our job lists.
+ */
+ struct fdp1_field_buffer *previous;
+};
+
+static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct fdp1_ctx, fh);
+}
+
+static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ else
+ return &ctx->cap_q;
+}
+
+/*
+ * list_remove_job: Take the first item off the specified job list
+ *
+ * Returns: pointer to a job, or NULL if the list is empty.
+ */
+static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
+ struct list_head *list)
+{
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ job = list_first_entry_or_null(list, struct fdp1_job, list);
+ if (job)
+ list_del(&job->list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ return job;
+}
+
+/*
+ * list_add_job: Add a job to the specified job list
+ *
+ * Returns: void - always succeeds
+ */
+static void list_add_job(struct fdp1_dev *fdp1,
+ struct list_head *list,
+ struct fdp1_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ list_add_tail(&job->list, list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+}
+
+static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->free_job_list);
+}
+
+static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ /* Ensure that all residue from previous jobs is gone */
+ memset(job, 0, sizeof(struct fdp1_job));
+
+ list_add_job(fdp1, &fdp1->free_job_list, job);
+}
+
+static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->queued_job_list, job);
+}
+
+static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->queued_job_list);
+}
+
+static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->hw_job_list, job);
+}
+
+static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->hw_job_list);
+}
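+
+/*
+ * Job lifecycle (as implemented by the helpers above):
+ *
+ *   free_job_list --(fdp1_job_alloc)--> prepared --(queue_job)-->
+ *   queued_job_list --(fdp1_device_process)--> hw_job_list
+ *   --(device_frame_end)--> back to free_job_list via fdp1_job_free()
+ */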
+
+/*
+ * Buffer lists handling
+ */
+static void fdp1_field_complete(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ /* job->previous may be on the first field */
+ if (!fbuf)
+ return;
+
+ if (fbuf->last_field)
+ v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
+}
+
+static void fdp1_queue_field(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ list_add_tail(&fbuf->list, &ctx->fields_queue);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ ctx->buffers_queued++;
+}
+
+static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ ctx->buffers_queued--;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ if (fbuf)
+ list_del(&fbuf->list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+/*
+ * Return the next field in the queue - or NULL,
+ * without removing the item from the list
+ */
+static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
+{
+ u32 value = ioread32(fdp1->regs + reg);
+
+ if (debug >= 2)
+ dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
+
+ return value;
+}
+
+static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
+{
+ if (debug >= 2)
+ dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
+
+ iowrite32(val, fdp1->regs + reg);
+}
+
+/* IPC registers are to be programmed with constant values */
+static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+
+ fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST, FD1_IPC_SMSK_THRESH);
+ fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST, FD1_IPC_COMB_DET);
+ fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST, FD1_IPC_MOTDEC);
+
+ fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST, FD1_IPC_DLI_BLEND);
+ fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST, FD1_IPC_DLI_HGAIN);
+ fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST, FD1_IPC_DLI_SPRS);
+ fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST, FD1_IPC_DLI_ANGLE);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST, FD1_IPC_DLI_ISOPIX0);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST, FD1_IPC_DLI_ISOPIX1);
+}
+
+static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ unsigned int x0, x1;
+ unsigned int hsize = src_q_data->format.width;
+ unsigned int vsize = src_q_data->format.height;
+
+ x0 = hsize / 3;
+ x1 = 2 * hsize / 3;
+
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
+
+ fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
+ ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
+ FD1_IPC_SENSOR_CTL2);
+
+ fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
+ (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
+ FD1_IPC_SENSOR_CTL3);
+}
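+
+/*
+ * For illustration (editorial example, not from the datasheet): with a
+ * hypothetical 1920x1080 source, the values above evaluate to x0 = 640
+ * and x1 = 1280 (the two horizontal division points programmed into
+ * CTL3), while CTL2 receives the last pixel/line indices 1919 and 1079.
+ */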
+
+/*
+ * fdp1_write_lut: Write a padded LUT to the hw
+ *
+ * FDP1 uses constant data for de-interlacing processing,
+ * with large tables. These hardware tables are all 256 bytes
+ * long, however they often contain repeated data at the end.
+ *
+ * The last byte of the table is written to all remaining entries.
+ */
+static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
+ unsigned int len, unsigned int base)
+{
+ unsigned int i;
+ u8 pad;
+
+ /* Tables larger than the hw are clipped */
+ len = min(len, 256u);
+
+ for (i = 0; i < len; i++)
+ fdp1_write(fdp1, lut[i], base + (i*4));
+
+ /* Tables are padded with the last entry */
+ pad = lut[i-1];
+
+ for (; i < 256; i++)
+ fdp1_write(fdp1, pad, base + (i*4));
+}
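+
+/*
+ * Example (derived from the tables above): fdp1_diff_adj has 24 entries,
+ * so entries 0-23 are written as-is and entries 24-255 all receive the
+ * last value, 0xff. fdp1_bld_gain has a single entry, so the whole
+ * 256-entry hardware table is filled with 0x80.
+ */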
+
+static void fdp1_set_lut(struct fdp1_dev *fdp1)
+{
+ fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
+ FD1_LUT_DIF_ADJ);
+ fdp1_write_lut(fdp1, fdp1_sad_adj, ARRAY_SIZE(fdp1_sad_adj),
+ FD1_LUT_SAD_ADJ);
+ fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
+ FD1_LUT_BLD_GAIN);
+ fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
+ FD1_LUT_DIF_GAIN);
+ fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
+ FD1_LUT_MDET);
+}
+
+static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 picture_size;
+ u32 pstride;
+ u32 format;
+ u32 smsk_addr;
+
+ struct fdp1_q_data *q_data = &ctx->out_q;
+
+ /* Picture size is common to Source and Destination frames */
+ picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
+ | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
+
+ /* Strides */
+ pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
+
+ /* Format control */
+ format = q_data->fmt->fmt;
+ if (q_data->fmt->swap_yc)
+ format |= FD1_RPF_FORMAT_RSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_RPF_FORMAT_RSPUVS;
+
+ if (job->active->field == V4L2_FIELD_BOTTOM) {
+ format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
+ smsk_addr = ctx->smsk_addr[0];
+ } else {
+ smsk_addr = ctx->smsk_addr[1];
+ }
+
+ /* Deint mode is non-zero when deinterlacing */
+ if (ctx->deint_mode)
+ format |= FD1_RPF_FORMAT_CIPM;
+
+ fdp1_write(fdp1, format, FD1_RPF_FORMAT);
+ fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
+ fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
+ fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
+ fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
+
+ /* Previous Field Channel (CH0) */
+ if (job->previous)
+ fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
+
+ /* Current Field Channel (CH1) */
+ fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
+ fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
+ fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
+
+ /* Next Field Channel (CH2) */
+ if (job->next)
+ fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
+}
+
+static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ struct fdp1_q_data *q_data = &ctx->cap_q;
+ u32 pstride;
+ u32 format;
+ u32 swap;
+ u32 rndctl;
+
+ pstride = q_data->format.plane_fmt[0].bytesperline
+ << FD1_WPF_PSTRIDE_Y_SHIFT;
+
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->format.plane_fmt[1].bytesperline
+ << FD1_WPF_PSTRIDE_C_SHIFT;
+
+ format = q_data->fmt->fmt; /* Output Format Code */
+
+ if (q_data->fmt->swap_yc)
+ format |= FD1_WPF_FORMAT_WSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_WPF_FORMAT_WSPUVS;
+
+ if (fdp1_fmt_is_rgb(q_data->fmt)) {
+ /* Enable Colour Space conversion */
+ format |= FD1_WPF_FORMAT_CSC;
+
+ /* Set WRTM */
+ if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
+ format |= FD1_WPF_FORMAT_WRTM_709_16;
+ else if (src_q_data->format.quantization ==
+ V4L2_QUANTIZATION_FULL_RANGE)
+ format |= FD1_WPF_FORMAT_WRTM_601_0;
+ else
+ format |= FD1_WPF_FORMAT_WRTM_601_16;
+ }
+
+ /* Set an alpha value into the Pad Value */
+ format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
+
+ /* Determine picture rounding and clipping */
+ rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
+ rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
+
+ /* WPF Swap needs both ISWAP and OSWAP setting */
+ swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
+ swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
+
+ fdp1_write(fdp1, format, FD1_WPF_FORMAT);
+ fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
+ fdp1_write(fdp1, swap, FD1_WPF_SWAP);
+ fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
+
+ fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
+ fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
+ fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
+}
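+
+/*
+ * Note: the WRTM conversion matrix above is selected from the output
+ * (source) queue's encoding and quantization: REC.709 input selects
+ * WRTM_709_16, full-range REC.601 selects WRTM_601_0, and everything
+ * else falls back to limited-range REC.601 (WRTM_601_16). This matches
+ * the combinations accepted in fdp1_try_fmt_capture().
+ */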
+
+static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
+ u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
+ u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
+
+ /* De-interlacing Mode */
+ switch (ctx->deint_mode) {
+ default:
+ case FDP1_PROGRESSIVE:
+ dprintk(fdp1, "Progressive Mode\n");
+ opmode |= FD1_CTL_OPMODE_PRG;
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ break;
+ case FDP1_ADAPT2D3D:
+ dprintk(fdp1, "Adapt2D3D Mode\n");
+ if (ctx->sequence == 0 || ctx->aborting)
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ else
+ ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
+
+ if (ctx->sequence > 1) {
+ channels |= FD1_CTL_CHACT_SMW;
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ }
+
+ if (ctx->sequence > 2)
+ channels |= FD1_CTL_CHACT_SMR;
+
+ break;
+ case FDP1_FIXED3D:
+ dprintk(fdp1, "Fixed 3D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
+ /* Except for first and last frame, enable all channels */
+ if (!(ctx->sequence == 0 || ctx->aborting))
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ break;
+ case FDP1_FIXED2D:
+ dprintk(fdp1, "Fixed 2D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ /* No extra channels enabled */
+ break;
+ case FDP1_PREVFIELD:
+ dprintk(fdp1, "Previous Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
+ channels |= FD1_CTL_CHACT_RD0; /* Previous */
+ break;
+ case FDP1_NEXTFIELD:
+ dprintk(fdp1, "Next Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
+ channels |= FD1_CTL_CHACT_RD2; /* Next */
+ break;
+ }
+
+ fdp1_write(fdp1, channels, FD1_CTL_CHACT);
+ fdp1_write(fdp1, opmode, FD1_CTL_OPMODE);
+ fdp1_write(fdp1, ipcmode, FD1_IPC_MODE);
+}
+
+/*
+ * fdp1_device_process() - Run the hardware
+ *
+ * Configure and start the hardware to generate a single frame
+ * of output given our input parameters.
+ */
+static int fdp1_device_process(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->device_process_lock, flags);
+
+ /* Get a job to process */
+ job = get_queued_job(fdp1);
+ if (!job) {
+ /*
+ * VINT can call us to see if we can queue another job.
+ * If we have no work to do, we simply return.
+ */
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+ return 0;
+ }
+
+	/* Ungate the FDP1 clock (CSTP_N) - first frame only? ... */
+ fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
+
+ /* Set the mode, and configuration */
+ fdp1_configure_deint_mode(ctx, job);
+
+ /* DLI Static Configuration */
+ fdp1_set_ipc_dli(ctx);
+
+ /* Sensor Configuration */
+ fdp1_set_ipc_sensor(ctx);
+
+ /* Setup the source picture */
+ fdp1_configure_rpf(ctx, job);
+
+ /* Setup the destination picture */
+ fdp1_configure_wpf(ctx, job);
+
+ /* Line Memory Pixel Number Register for linear access */
+ fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
+
+ /* Enable Interrupts */
+ fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
+
+ /* Finally, the Immediate Registers */
+
+ /* This job is now in the HW queue */
+ queue_hw_job(fdp1, job);
+
+ /* Start the command */
+ fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
+
+ /* Registers will update to HW at next VINT */
+ fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
+
+ /* Enable VINT Generator */
+ fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
+
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/*
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int fdp1_m2m_job_ready(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ int srcbufs = 1;
+ int dstbufs = 1;
+
+ dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
+ v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
+
+ /* One output buffer is required for each field */
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ dstbufs = 2;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
+ dprintk(ctx->fdp1, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void fdp1_m2m_job_abort(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+
+ dprintk(ctx->fdp1, "+\n");
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+
+ /* Immediate abort sequence */
+ fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
+ fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
+}
+
+/*
+ * fdp1_prepare_job: Prepare and queue a new job for a single action of work
+ *
+ * Prepare the next field, (or frame in progressive) and an output
+ * buffer for the hardware to perform a single operation.
+ */
+static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct fdp1_buffer *fbuf;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned int buffers_required = 1;
+
+ dprintk(fdp1, "+\n");
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
+ buffers_required = 2;
+
+ if (ctx->buffers_queued < buffers_required)
+ return NULL;
+
+ job = fdp1_job_alloc(fdp1);
+ if (!job) {
+ dprintk(fdp1, "No free jobs currently available\n");
+ return NULL;
+ }
+
+ job->active = fdp1_dequeue_field(ctx);
+ if (!job->active) {
+ /* Buffer check should prevent this ever happening */
+ dprintk(fdp1, "No input buffers currently available\n");
+
+ fdp1_job_free(fdp1, job);
+ return NULL;
+ }
+
+ dprintk(fdp1, "+ Buffer en-route...\n");
+
+	/* Source buffers have been prepared on our buffer_queue;
+	 * prepare our output buffer.
+	 */
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ fbuf = to_fdp1_buffer(vbuf);
+ job->dst = &fbuf->fields[0];
+
+ job->active->vb->sequence = ctx->sequence;
+ job->dst->vb->sequence = ctx->sequence;
+ ctx->sequence++;
+
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
+ job->previous = ctx->previous;
+
+ /* Active buffer becomes the next job's previous buffer */
+ ctx->previous = job->active;
+ }
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
+ /* Must be called after 'active' is dequeued */
+ job->next = fdp1_peek_queued_field(ctx);
+ }
+
+ /* Transfer timestamps and flags from src->dst */
+
+ job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
+
+ job->dst->vb->flags = job->active->vb->flags &
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ /* Ideally, the frame-end function will just 'check' to see
+ * if there are more jobs instead
+ */
+ ctx->translen++;
+
+ /* Finally, Put this job on the processing queue */
+ queue_job(fdp1, job);
+
+ dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
+
+ return job;
+}
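+
+/*
+ * Note: the 'next' field is only peeked, not dequeued, so it becomes the
+ * 'active' field of the following job; when the mode needs a previous
+ * field, the current 'active' field is remembered in ctx->previous for
+ * the next job.
+ */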
+
+/* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
+ *
+ * A single input buffer is taken and serialised into our fdp1_buffer
+ * queue. The queue is then processed to create as many jobs as possible
+ * from our available input.
+ */
+static void fdp1_m2m_device_run(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct vb2_v4l2_buffer *src_vb;
+ struct fdp1_buffer *buf;
+ unsigned int i;
+
+ dprintk(fdp1, "+\n");
+
+ ctx->translen = 0;
+
+ /* Get our incoming buffer of either one or two fields, or one frame */
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ buf = to_fdp1_buffer(src_vb);
+
+ for (i = 0; i < buf->num_fields; i++) {
+ struct fdp1_field_buffer *fbuf = &buf->fields[i];
+
+ fdp1_queue_field(ctx, fbuf);
+ dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
+ i, fbuf->last_field);
+ }
+
+ /* Queue as many jobs as our data provides for */
+ while (fdp1_prepare_job(ctx))
+ ;
+
+ if (ctx->translen == 0) {
+ dprintk(fdp1, "No jobs were processed. M2M action complete\n");
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ /* Kick the job processing action */
+ fdp1_device_process(ctx);
+}
+
+/*
+ * device_frame_end:
+ *
+ * Handles the M2M level after a buffer completion event.
+ */
+static void device_frame_end(struct fdp1_dev *fdp1,
+ enum vb2_buffer_state state)
+{
+ struct fdp1_ctx *ctx;
+ unsigned long flags;
+ struct fdp1_job *job = get_hw_queued_job(fdp1);
+
+ dprintk(fdp1, "+\n");
+
+ ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
+
+ if (ctx == NULL) {
+ v4l2_err(&fdp1->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ ctx->num_processed++;
+
+ /*
+ * fdp1_field_complete will call buf_done only when the last vb2_buffer
+ * reference is complete
+ */
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(job->dst->vb, state);
+ job->dst = NULL;
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ /* Move this job back to the free job list */
+ fdp1_job_free(fdp1, job);
+
+ dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
+ ctx->num_processed, ctx->translen);
+
+ if (ctx->num_processed == ctx->translen ||
+ ctx->aborting) {
+ dprintk(ctx->fdp1, "Finishing transaction\n");
+ ctx->num_processed = 0;
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ /*
+ * For pipelined performance support, this would
+ * be called from a VINT handler
+ */
+ fdp1_device_process(ctx);
+ }
+}
+
+/*
+ * video ioctls
+ */
+static int fdp1_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DRIVER_NAME);
+ return 0;
+}
+
+static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, num;
+
+ num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
+ if (fdp1_formats[i].types & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ /* Format not found */
+ if (i >= ARRAY_SIZE(fdp1_formats))
+ return -EINVAL;
+
+ /* Format found */
+ f->pixelformat = fdp1_formats[i].fourcc;
+
+ return 0;
+}
+
+static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_CAPTURE);
+}
+
+static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_OUTPUT);
+}
+
+static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_q_data *q_data;
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix_mp = q_data->format;
+
+ return 0;
+}
+
+static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
+ const struct fdp1_fmt *fmt)
+{
+ unsigned int i;
+
+ /* Compute and clamp the stride and image size. */
+ for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
+ unsigned int hsub = i > 0 ? fmt->hsub : 1;
+ unsigned int vsub = i > 0 ? fmt->vsub : 1;
+ /* From VSP : TODO: Confirm alignment limits for FDP1 */
+ unsigned int align = 128;
+ unsigned int bpl;
+
+ bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+ pix->width / hsub * fmt->bpp[i] / 8,
+ round_down(FDP1_MAX_STRIDE, align));
+
+ pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+ pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+ * pix->height / vsub;
+
+ memset(pix->plane_fmt[i].reserved, 0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+
+ if (fmt->num_planes == 3) {
+ /* The two chroma planes must have the same stride. */
+ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+ pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+
+ memset(pix->plane_fmt[2].reserved, 0,
+ sizeof(pix->plane_fmt[2].reserved));
+ }
+}
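+
+/*
+ * Worked example (editorial, assuming a hypothetical 1920x1080 NV12M
+ * frame): plane 0 needs at least 1920 * 8 / 8 = 1920 bytes per line,
+ * already a multiple of the 128-byte alignment, giving a sizeimage of
+ * 1920 * 1080 bytes; plane 1 (hsub = 2, 16 bpp) also needs 1920 bytes
+ * per line, but with vsub = 2 its sizeimage is 1920 * 540 bytes.
+ */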
+
+static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int width;
+ unsigned int height;
+
+ /* Validate the pixel format to ensure the output queue supports it. */
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || !(fmt->types & FDP1_OUTPUT))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+
+ /*
+ * Progressive video and all interlaced field orders are acceptable.
+ * Default to V4L2_FIELD_INTERLACED.
+ */
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ !V4L2_FIELD_HAS_BOTH(pix->field))
+ pix->field = V4L2_FIELD_INTERLACED;
+
+ /*
+	 * The deinterlacer doesn't care about the colorspace; accept all values
+	 * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion
+ * at the output of the deinterlacer supports a subset of encodings and
+ * quantization methods and will only be available when the colorspace
+ * allows it.
+ */
+ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ /*
+ * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
+	 * them to the supported frame size range. The height boundaries are
+	 * related to the full frame; divide them by two when the format passes
+ * fields in separate buffers.
+ */
+ width = round_down(pix->width, fmt->hsub);
+ pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);
+
+ height = round_down(pix->height, fmt->vsub);
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
+ else
+ pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ struct fdp1_q_data *src_data = &ctx->out_q;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ const struct fdp1_fmt *fmt;
+ bool allow_rgb;
+
+ /*
+ * Validate the pixel format. We can only accept RGB output formats if
+ * the input encoding and quantization are compatible with the format
+ * conversions supported by the hardware. The supported combinations are
+ *
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
+ * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
+ */
+ colorspace = src_data->format.colorspace;
+
+ ycbcr_enc = src_data->format.ycbcr_enc;
+ if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+ quantization = src_data->format.quantization;
+ if (quantization == V4L2_QUANTIZATION_DEFAULT)
+ quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
+ ycbcr_enc);
+
+ allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
+ (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE);
+
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+ pix->field = V4L2_FIELD_NONE;
+
+ /*
+ * The colorspace on the capture queue is copied from the output queue
+ * as the hardware can't change the colorspace. It can convert YCbCr to
+ * RGB though, in which case the encoding and quantization are set to
+ * default values as anything else wouldn't make sense.
+ */
+ pix->colorspace = src_data->format.colorspace;
+ pix->xfer_func = src_data->format.xfer_func;
+
+ if (fdp1_fmt_is_rgb(fmt)) {
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ } else {
+ pix->ycbcr_enc = src_data->format.ycbcr_enc;
+ pix->quantization = src_data->format.quantization;
+ }
+
+ /*
+ * The frame width is identical to the output queue, and the height is
+ * either doubled or identical depending on whether the output queue
+ * field order contains one or two fields per frame.
+ */
+ pix->width = src_data->format.width;
+ if (src_data->format.field == V4L2_FIELD_ALTERNATE)
+ pix->height = 2 * src_data->format.height;
+ else
+ pix->height = src_data->format.height;
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
+ else
+ fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
+
+ dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static void fdp1_set_format(struct fdp1_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix,
+ enum v4l2_buf_type type)
+{
+ struct fdp1_q_data *q_data = get_q_data(ctx, type);
+ const struct fdp1_fmt *fmtinfo;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, &fmtinfo, pix);
+ else
+ fdp1_try_fmt_capture(ctx, &fmtinfo, pix);
+
+ q_data->fmt = fmtinfo;
+ q_data->format = *pix;
+
+ q_data->vsize = pix->height;
+ if (pix->field != V4L2_FIELD_NONE)
+ q_data->vsize /= 2;
+
+ q_data->stride_y = pix->plane_fmt[0].bytesperline;
+ q_data->stride_c = pix->plane_fmt[1].bytesperline;
+
+ /* Adjust strides for interleaved buffers */
+ if (pix->field == V4L2_FIELD_INTERLACED ||
+ pix->field == V4L2_FIELD_INTERLACED_TB ||
+ pix->field == V4L2_FIELD_INTERLACED_BT) {
+ q_data->stride_y *= 2;
+ q_data->stride_c *= 2;
+ }
+
+ /* Propagate the format from the output node to the capture node. */
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct fdp1_q_data *dst_data = &ctx->cap_q;
+
+ /*
+ * Copy the format, clear the per-plane bytes per line and image
+ * size, override the field and double the height if needed.
+ */
+ dst_data->format = q_data->format;
+ memset(dst_data->format.plane_fmt, 0,
+ sizeof(dst_data->format.plane_fmt));
+
+ dst_data->format.field = V4L2_FIELD_NONE;
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ dst_data->format.height *= 2;
+
+ fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);
+
+ dst_data->vsize = dst_data->format.height;
+ dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
+ dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
+ }
+}
+
+static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
+
+ dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ ctrl->val = 2;
+ else
+ ctrl->val = 1;
+ return 0;
+ }
+
+ return 1;
+}
+
+static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->alpha = ctrl->val;
+ break;
+
+ case V4L2_CID_DEINTERLACING_MODE:
+ ctx->deint_mode = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
+ .s_ctrl = fdp1_s_ctrl,
+ .g_volatile_ctrl = fdp1_g_ctrl,
+};
+
+static const char * const fdp1_ctrl_deint_menu[] = {
+ "Progressive",
+ "Adaptive 2D/3D",
+ "Fixed 2D",
+ "Fixed 3D",
+ "Previous field",
+ "Next field",
+ NULL
+};
+
+static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
+ .vidioc_querycap = fdp1_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = fdp1_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = fdp1_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = fdp1_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = fdp1_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = fdp1_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int fdp1_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fdp1_q_data *q_data;
+ unsigned int i;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ if (*nplanes) {
+ if (*nplanes > FDP1_MAX_PLANES)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = q_data->format.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
+ struct vb2_v4l2_buffer *vbuf,
+ unsigned int field_num)
+{
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
+ unsigned int num_fields;
+ unsigned int i;
+
+ num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+
+ fbuf->vb = vbuf;
+ fbuf->last_field = (field_num + 1) == num_fields;
+
+ for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
+ fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);
+
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ /*
+ * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
+ * top-bottom for 50Hz. As TV standards are not applicable to
+ * the mem-to-mem API, use the height as a heuristic.
+ */
+ fbuf->field = (q_data->format.height < 576) == field_num
+ ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_SEQ_TB:
+ fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_BT:
+ fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ default:
+ fbuf->field = vbuf->field;
+ break;
+ }
+
+ /* Buffer is completed */
+ if (!field_num)
+ return;
+
+ /* Adjust buffer addresses for second field */
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] +=
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] += q_data->vsize *
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ }
+}
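+
+/*
+ * Note on the address adjustment above: for the sequential SEQ_TB/SEQ_BT
+ * layouts the second field starts a whole field (vsize lines) into each
+ * plane, while for the interleaved INTERLACED* layouts it is offset by
+ * the per-field stride set up in fdp1_set_format().
+ */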
+
+static int fdp1_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ unsigned int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ bool field_valid = true;
+
+ /* Validate the buffer field. */
+ switch (q_data->format.field) {
+ case V4L2_FIELD_NONE:
+ if (vbuf->field != V4L2_FIELD_NONE)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_ALTERNATE:
+ if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ if (vbuf->field != q_data->format.field)
+ field_valid = false;
+ break;
+ }
+
+ if (!field_valid) {
+ dprintk(ctx->fdp1,
+ "buffer field %u invalid for format field %u\n",
+ vbuf->field, q_data->format.field);
+ return -EINVAL;
+ }
+ } else {
+ vbuf->field = V4L2_FIELD_NONE;
+ }
+
+ /* Validate the planes sizes. */
+ for (i = 0; i < q_data->format.num_planes; i++) {
+ unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dprintk(ctx->fdp1,
+ "data will not fit into plane [%u/%u] (%lu < %lu)\n",
+ i, q_data->format.num_planes,
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ /* We have known size formats all around */
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+ for (i = 0; i < buf->num_fields; ++i)
+ fdp1_buf_prepare_field(q_data, vbuf, i);
+
+ return 0;
+}
+
+static void fdp1_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * Force our deint_mode when we are progressive,
+		 * ignoring any setting on the device from the user.
+		 * Otherwise, lock in the requested de-interlace mode.
+ */
+ if (q_data->format.field == V4L2_FIELD_NONE)
+ ctx->deint_mode = FDP1_PROGRESSIVE;
+
+ if (ctx->deint_mode == FDP1_ADAPT2D3D) {
+ u32 stride;
+ dma_addr_t smsk_base;
+ const u32 bpp = 2; /* bytes per pixel */
+
+ stride = round_up(q_data->format.width, 8);
+
+ ctx->smsk_size = bpp * stride * q_data->vsize;
+
+ ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
+ ctx->smsk_size, &smsk_base, GFP_KERNEL);
+
+ if (ctx->smsk_cpu == NULL) {
+ dprintk(ctx->fdp1, "Failed to alloc smsk\n");
+ return -ENOMEM;
+ }
+
+ ctx->smsk_addr[0] = smsk_base;
+ ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
+ }
+ }
+
+ return 0;
+}
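+
+/*
+ * For reference (editorial, assuming a hypothetical 1920-pixel-wide
+ * interlaced source with vsize = 540): the ADAPT2D3D mask above is
+ * 2 * round_up(1920, 8) * 540 = 2073600 bytes, split in half so each
+ * field parity gets its own 1036800-byte spatial mask.
+ */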
+
+static void fdp1_stop_streaming(struct vb2_queue *q)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned long flags;
+
+ while (1) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ break;
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+ }
+
+ /* Empty Output queues */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /* Empty our internal queues */
+ struct fdp1_field_buffer *fbuf;
+
+ /* Free any queued buffers */
+ fbuf = fdp1_dequeue_field(ctx);
+ while (fbuf != NULL) {
+ fdp1_field_complete(ctx, fbuf);
+ fbuf = fdp1_dequeue_field(ctx);
+ }
+
+ /* Free smsk_data */
+ if (ctx->smsk_cpu) {
+ dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
+ ctx->smsk_cpu, ctx->smsk_addr[0]);
+ ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
+ ctx->smsk_cpu = NULL;
+ }
+
+ WARN(!list_empty(&ctx->fields_queue),
+ "Buffer queue not empty");
+ } else {
+ /* Empty Capture queues (Jobs) */
+ struct fdp1_job *job;
+
+ job = get_queued_job(ctx->fdp1);
+ while (job) {
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
+ job->dst = NULL;
+
+ job = get_queued_job(ctx->fdp1);
+ }
+
+ /* Free any held buffer in the ctx */
+ fdp1_field_complete(ctx, ctx->previous);
+
+ WARN(!list_empty(&ctx->fdp1->queued_job_list),
+ "Queued Job List not empty");
+
+ WARN(!list_empty(&ctx->fdp1->hw_job_list),
+ "HW Job list not empty");
+ }
+}
+
+static const struct vb2_ops fdp1_qops = {
+ .queue_setup = fdp1_queue_setup,
+ .buf_prepare = fdp1_buf_prepare,
+ .buf_queue = fdp1_buf_queue,
+ .start_streaming = fdp1_start_streaming,
+ .stop_streaming = fdp1_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fdp1_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ src_vq->ops = &fdp1_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fdp1->dev_mutex;
+ src_vq->dev = ctx->fdp1->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ dst_vq->ops = &fdp1_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fdp1->dev_mutex;
+ dst_vq->dev = ctx->fdp1->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int fdp1_open(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct v4l2_pix_format_mplane format;
+ struct fdp1_ctx *ctx = NULL;
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&fdp1->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->fdp1 = fdp1;
+
+ /* Initialise Queues */
+ INIT_LIST_HEAD(&ctx->fields_queue);
+
+ ctx->translen = 1;
+ ctx->sequence = 0;
+
+ /* Initialise controls */
+
+ v4l2_ctrl_handler_init(&ctx->hdl, 3);
+ v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_DEINTERLACING_MODE,
+ FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
+ fdp1_ctrl_deint_menu);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (ctx->hdl.error) {
+ ret = ctx->hdl.error;
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ goto done;
+ }
+
+ ctx->fh.ctrl_handler = &ctx->hdl;
+ v4l2_ctrl_handler_setup(&ctx->hdl);
+
+ /* Configure default parameters. */
+ memset(&format, 0, sizeof(format));
+ fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
+ goto done;
+ }
+
+ /* Perform any power management required */
+ pm_runtime_get_sync(fdp1->dev);
+
+ v4l2_fh_add(&ctx->fh);
+
+ dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
+
+done:
+ mutex_unlock(&fdp1->dev_mutex);
+ return ret;
+}
+
+static int fdp1_release(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
+
+ dprintk(fdp1, "Releasing instance %p\n", ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(&fdp1->dev_mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&fdp1->dev_mutex);
+ kfree(ctx);
+
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations fdp1_fops = {
+ .owner = THIS_MODULE,
+ .open = fdp1_open,
+ .release = fdp1_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device fdp1_videodev = {
+ .name = DRIVER_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &fdp1_fops,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+ .ioctl_ops = &fdp1_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fdp1_m2m_device_run,
+ .job_ready = fdp1_m2m_job_ready,
+ .job_abort = fdp1_m2m_job_abort,
+};
+
+static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
+{
+ struct fdp1_dev *fdp1 = dev_id;
+ u32 int_status;
+ u32 ctl_status;
+ u32 vint_cnt;
+ u32 cycles;
+
+ int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
+ cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
+ ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
+ vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
+ FD1_CTL_STATUS_VINT_CNT_SHIFT;
+
+ /* Clear interrupts */
+ fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);
+
+ if (debug >= 2) {
+ dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
+ int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
+ int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
+ int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");
+
+ dprintk(fdp1, "CycleStatus = %d (%dms)\n",
+ cycles, cycles/(fdp1->clk_rate/1000));
+
+ dprintk(fdp1,
+ "Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
+ ctl_status, vint_cnt,
+ ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
+ ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
+ ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
+ ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
+ dprintk(fdp1, "***********************************\n");
+ }
+
+ /* Spurious interrupt */
+ if (!(FD1_CTL_IRQ_MASK & int_status))
+ return IRQ_NONE;
+
+ /* Work completed, release the frame */
+ if (FD1_CTL_IRQ_VERE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
+ else if (FD1_CTL_IRQ_FREE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static int fdp1_probe(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1;
+ struct video_device *vfd;
+ struct device_node *fcp_node;
+ struct resource *res;
+ struct clk *clk;
+ unsigned int i;
+
+ int ret;
+ int hw_version;
+
+ fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
+ if (!fdp1)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fdp1->free_job_list);
+ INIT_LIST_HEAD(&fdp1->queued_job_list);
+ INIT_LIST_HEAD(&fdp1->hw_job_list);
+
+ /* Initialise the jobs on the free list */
+ for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
+ list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);
+
+ mutex_init(&fdp1->dev_mutex);
+
+ spin_lock_init(&fdp1->irqlock);
+ spin_lock_init(&fdp1->device_process_lock);
+ fdp1->dev = &pdev->dev;
+ platform_set_drvdata(pdev, fdp1);
+
+ /* Memory-mapped registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fdp1->regs))
+ return PTR_ERR(fdp1->regs);
+
+ /* Interrupt service routine registration */
+ fdp1->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
+ dev_name(&pdev->dev), fdp1);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
+ return ret;
+ }
+
+ /* FCP */
+ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+ if (fcp_node) {
+ fdp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(fdp1->fcp)) {
+ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(fdp1->fcp));
+ return PTR_ERR(fdp1->fcp);
+ }
+ }
+
+ /* Determine our clock rate */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ fdp1->clk_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ /* V4L2 device registration */
+ ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
+ if (ret) {
+		v4l2_err(&fdp1->v4l2_dev, "Failed to register v4l2 device\n");
+ return ret;
+ }
+
+ /* M2M registration */
+ fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fdp1->m2m_dev)) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(fdp1->m2m_dev);
+ goto unreg_dev;
+ }
+
+ /* Video registration */
+ fdp1->vfd = fdp1_videodev;
+ vfd = &fdp1->vfd;
+ vfd->lock = &fdp1->dev_mutex;
+ vfd->v4l2_dev = &fdp1->v4l2_dev;
+ video_set_drvdata(vfd, fdp1);
+ strlcpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+ goto release_m2m;
+ }
+
+ v4l2_info(&fdp1->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ /* Power up the cells to read HW */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(fdp1->dev);
+
+ hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
+ switch (hw_version) {
+ case FD1_IP_H3_ES1:
+ dprintk(fdp1, "FDP1 Version R-Car H3 ES1\n");
+ break;
+ case FD1_IP_M3W:
+ dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
+ break;
+ case FD1_IP_H3:
+ dprintk(fdp1, "FDP1 Version R-Car H3\n");
+ break;
+ case FD1_IP_M3N:
+ dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
+ break;
+ case FD1_IP_E3:
+ dprintk(fdp1, "FDP1 Version R-Car E3\n");
+ break;
+ default:
+ dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
+ hw_version);
+ }
+
+ /* Allow the hw to sleep until an open call puts it to use */
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+
+release_m2m:
+ v4l2_m2m_release(fdp1->m2m_dev);
+
+unreg_dev:
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+
+ return ret;
+}
+
+static int fdp1_remove(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(fdp1->m2m_dev);
+ video_unregister_device(&fdp1->vfd);
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ rcar_fcp_disable(fdp1->fcp);
+
+ return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ /* Program in the static LUTs */
+ fdp1_set_lut(fdp1);
+
+ return rcar_fcp_enable(fdp1->fcp);
+}
+
+static const struct dev_pm_ops fdp1_pm_ops = {
+ SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
+ fdp1_pm_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id fdp1_dt_ids[] = {
+ { .compatible = "renesas,fdp1" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
+
+static struct platform_driver fdp1_pdrv = {
+ .probe = fdp1_probe,
+ .remove = fdp1_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = fdp1_dt_ids,
+ .pm = &fdp1_pm_ops,
+ },
+};
+
+module_platform_driver(fdp1_pdrv);
+
+MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
+MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
new file mode 100644
index 000000000..e5c882423
--- /dev/null
+++ b/drivers/media/platform/rcar_jpu.c
@@ -0,0 +1,1768 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Mikhail Ulyanov
+ * Copyright (C) 2014-2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ *
+ * This is based on the drivers/media/platform/s5p-jpeg driver by
+ * Andrzej Pietrasiewicz and Jacek Anaszewski.
+ * Some portions of code inspired by VSP1 driver by Laurent Pinchart.
+ *
+ * TODO in order of priority:
+ * 1) Rotation
+ * 2) Cropping
+ * 3) V4L2_CID_JPEG_ACTIVE_MARKER
+ */
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+
+#define DRV_NAME "rcar_jpu"
+
+/*
+ * Align the JPEG header end to a cache line, to make sure we will not have
+ * any cache issues; this is in addition to the requirement in
+ * (33.3.27 R01UH0501EJ0100 Rev.1.00)
+ */
+#define JPU_JPEG_HDR_SIZE (ALIGN(0x258, L1_CACHE_BYTES))
+#define JPU_JPEG_MAX_BYTES_PER_PIXEL 2 /* 16 bit precision format */
+#define JPU_JPEG_MIN_SIZE 25 /* SOI + SOF + EOI */
+#define JPU_JPEG_QTBL_SIZE 0x40
+#define JPU_JPEG_HDCTBL_SIZE 0x1c
+#define JPU_JPEG_HACTBL_SIZE 0xb2
+#define JPU_JPEG_HEIGHT_OFFSET 0x91
+#define JPU_JPEG_WIDTH_OFFSET 0x93
+#define JPU_JPEG_SUBS_OFFSET 0x97
+#define JPU_JPEG_QTBL_LUM_OFFSET 0x07
+#define JPU_JPEG_QTBL_CHR_OFFSET 0x4c
+#define JPU_JPEG_HDCTBL_LUM_OFFSET 0xa4
+#define JPU_JPEG_HACTBL_LUM_OFFSET 0xc5
+#define JPU_JPEG_HDCTBL_CHR_OFFSET 0x17c
+#define JPU_JPEG_HACTBL_CHR_OFFSET 0x19d
+#define JPU_JPEG_PADDING_OFFSET 0x24f
+#define JPU_JPEG_LUM 0x00
+#define JPU_JPEG_CHR 0x01
+#define JPU_JPEG_DC 0x00
+#define JPU_JPEG_AC 0x10
+
+#define JPU_JPEG_422 0x21
+#define JPU_JPEG_420 0x22
+
+#define JPU_JPEG_DEFAULT_422_PIX_FMT V4L2_PIX_FMT_NV16M
+#define JPU_JPEG_DEFAULT_420_PIX_FMT V4L2_PIX_FMT_NV12M
+
+/* JPEG markers */
+#define TEM 0x01
+#define SOF0 0xc0
+#define RST 0xd0
+#define SOI 0xd8
+#define EOI 0xd9
+#define DHP 0xde
+#define DHT 0xc4
+#define COM 0xfe
+#define DQT 0xdb
+#define DRI 0xdd
+#define APP0 0xe0
+
+#define JPU_RESET_TIMEOUT 100 /* ms */
+#define JPU_JOB_TIMEOUT 300 /* ms */
+#define JPU_MAX_QUALITY 4
+#define JPU_WIDTH_MIN 16
+#define JPU_HEIGHT_MIN 16
+#define JPU_WIDTH_MAX 4096
+#define JPU_HEIGHT_MAX 4096
+#define JPU_MEMALIGN 8
+
+/* Flags that indicate a format can be used for capture/output */
+#define JPU_FMT_TYPE_OUTPUT 0
+#define JPU_FMT_TYPE_CAPTURE 1
+#define JPU_ENC_CAPTURE (1 << 0)
+#define JPU_ENC_OUTPUT (1 << 1)
+#define JPU_DEC_CAPTURE (1 << 2)
+#define JPU_DEC_OUTPUT (1 << 3)
+
+/*
+ * JPEG registers and bits
+ */
+
+/* JPEG code mode register */
+#define JCMOD 0x00
+#define JCMOD_PCTR (1 << 7)
+#define JCMOD_MSKIP_ENABLE (1 << 5)
+#define JCMOD_DSP_ENC (0 << 3)
+#define JCMOD_DSP_DEC (1 << 3)
+#define JCMOD_REDU (7 << 0)
+#define JCMOD_REDU_422 (1 << 0)
+#define JCMOD_REDU_420 (2 << 0)
+
+/* JPEG code command register */
+#define JCCMD 0x04
+#define JCCMD_SRST (1 << 12)
+#define JCCMD_JEND (1 << 2)
+#define JCCMD_JSRT (1 << 0)
+
+/* JPEG code quantization table number register */
+#define JCQTN 0x0c
+#define JCQTN_SHIFT(t) (((t) - 1) << 1)
+
+/* JPEG code Huffman table number register */
+#define JCHTN 0x10
+#define JCHTN_AC_SHIFT(t) (((t) << 1) - 1)
+#define JCHTN_DC_SHIFT(t) (((t) - 1) << 1)
+
+#define JCVSZU 0x1c /* JPEG code vertical size upper register */
+#define JCVSZD 0x20 /* JPEG code vertical size lower register */
+#define JCHSZU 0x24 /* JPEG code horizontal size upper register */
+#define JCHSZD 0x28 /* JPEG code horizontal size lower register */
+#define JCSZ_MASK 0xff /* JPEG code h/v size register contains only 1 byte */
+
+#define JCDTCU 0x2c /* JPEG code data count upper register */
+#define JCDTCM 0x30 /* JPEG code data count middle register */
+#define JCDTCD 0x34 /* JPEG code data count lower register */
+
+/* JPEG interrupt enable register */
+#define JINTE 0x38
+#define JINTE_ERR (7 << 5) /* INT5 + INT6 + INT7 */
+#define JINTE_TRANSF_COMPL (1 << 10)
+
+/* JPEG interrupt status register */
+#define JINTS 0x3c
+#define JINTS_MASK 0x7c68
+#define JINTS_ERR (1 << 5)
+#define JINTS_PROCESS_COMPL (1 << 6)
+#define JINTS_TRANSF_COMPL (1 << 10)
+
+#define JCDERR 0x40 /* JPEG code decode error register */
+#define JCDERR_MASK 0xf /* JPEG code decode error register mask */
+
+/* JPEG interface encoding */
+#define JIFECNT 0x70
+#define JIFECNT_INFT_422 0
+#define JIFECNT_INFT_420 1
+#define JIFECNT_SWAP_WB (3 << 4) /* to JPU */
+
+#define JIFESYA1 0x74 /* encode source Y address register 1 */
+#define JIFESCA1 0x78 /* encode source C address register 1 */
+#define JIFESYA2 0x7c /* encode source Y address register 2 */
+#define JIFESCA2 0x80 /* encode source C address register 2 */
+#define JIFESMW 0x84 /* encode source memory width register */
+#define JIFESVSZ 0x88 /* encode source vertical size register */
+#define JIFESHSZ 0x8c /* encode source horizontal size register */
+#define JIFEDA1 0x90 /* encode destination address register 1 */
+#define JIFEDA2 0x94 /* encode destination address register 2 */
+
+/* JPEG decoding control register */
+#define JIFDCNT 0xa0
+#define JIFDCNT_SWAP_WB (3 << 1) /* from JPU */
+
+#define JIFDSA1 0xa4 /* decode source address register 1 */
+#define JIFDDMW 0xb0 /* decode destination memory width register */
+#define JIFDDVSZ 0xb4 /* decode destination vert. size register */
+#define JIFDDHSZ 0xb8 /* decode destination horiz. size register */
+#define JIFDDYA1 0xbc /* decode destination Y address register 1 */
+#define JIFDDCA1 0xc0 /* decode destination C address register 1 */
+
+#define JCQTBL(n) (0x10000 + (n) * 0x40) /* quantization tables regs */
+#define JCHTBD(n) (0x10100 + (n) * 0x100) /* Huffman table DC regs */
+#define JCHTBA(n) (0x10120 + (n) * 0x100) /* Huffman table AC regs */
+
+/**
+ * struct jpu - JPEG IP abstraction
+ * @mutex: the mutex protecting this structure
+ * @lock: spinlock protecting the device contexts
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @vfd_encoder: video device node for encoder mem2mem mode
+ * @vfd_decoder: video device node for decoder mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @curr: pointer to current context
+ * @regs: JPEG IP registers mapping
+ * @irq: JPEG IP irq
+ * @clk: JPEG IP clock
+ * @dev: JPEG IP struct device
+ * @ref_count: reference counter
+ */
+struct jpu {
+ struct mutex mutex;
+ spinlock_t lock;
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd_encoder;
+ struct video_device vfd_decoder;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct jpu_ctx *curr;
+
+ void __iomem *regs;
+ unsigned int irq;
+ struct clk *clk;
+ struct device *dev;
+ int ref_count;
+};
+
+/**
+ * struct jpu_buffer - driver's specific video buffer
+ * @buf: m2m buffer
+ * @compr_quality: destination image quality in compression mode
+ * @subsampling: source image subsampling in decompression mode
+ */
+struct jpu_buffer {
+ struct v4l2_m2m_buffer buf;
+ unsigned short compr_quality;
+ unsigned char subsampling;
+};
+
+/**
+ * struct jpu_fmt - driver's internal format data
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @colorspace: the colorspace specifier
+ * @bpp: number of bits per pixel per plane
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
+ * @subsampling: (horizontal:4 | vertical:4) subsampling factor
+ * @num_planes: number of planes
+ * @types: types of queue this format is applicable to
+ */
+struct jpu_fmt {
+ u32 fourcc;
+ u32 colorspace;
+ u8 bpp[2];
+ u8 h_align;
+ u8 v_align;
+ u8 subsampling;
+ u8 num_planes;
+ u16 types;
+};
+
+/**
+ * struct jpu_q_data - parameters of one queue
+ * @fmtinfo: driver-specific format of this queue
+ * @format: multiplanar format of this queue
+ * @sequence: sequence number
+ */
+struct jpu_q_data {
+ struct jpu_fmt *fmtinfo;
+ struct v4l2_pix_format_mplane format;
+ unsigned int sequence;
+};
+
+/**
+ * struct jpu_ctx - the device context data
+ * @jpu: JPEG IP device for this context
+ * @encoder: compression (encode) operation or decompression (decode)
+ * @compr_quality: destination image quality in compression (encode) mode
+ * @out_q: source (output) queue information
+ * @cap_q: destination (capture) queue information
+ * @fh: file handler
+ * @ctrl_handler: controls handler
+ */
+struct jpu_ctx {
+ struct jpu *jpu;
+ bool encoder;
+ unsigned short compr_quality;
+ struct jpu_q_data out_q;
+ struct jpu_q_data cap_q;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/**
+ * struct jpeg_buffer - description of memory containing input JPEG data
+ * @end: end position in the buffer
+ * @curr: current position in the buffer
+ */
+struct jpeg_buffer {
+ void *end;
+ void *curr;
+};
+
+static struct jpu_fmt jpu_formats[] = {
+ { V4L2_PIX_FMT_JPEG, V4L2_COLORSPACE_JPEG,
+ {0, 0}, 0, 0, 0, 1, JPU_ENC_CAPTURE | JPU_DEC_OUTPUT },
+ { V4L2_PIX_FMT_NV16M, V4L2_COLORSPACE_SRGB,
+ {8, 8}, 2, 2, JPU_JPEG_422, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV12M, V4L2_COLORSPACE_SRGB,
+ {8, 4}, 2, 2, JPU_JPEG_420, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV16, V4L2_COLORSPACE_SRGB,
+ {16, 0}, 2, 2, JPU_JPEG_422, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV12, V4L2_COLORSPACE_SRGB,
+ {12, 0}, 2, 2, JPU_JPEG_420, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+};
+
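+/*
+ * Index table used by put_qtbl(): for each position of the JPEG zigzag scan
+ * it gives the offset of the matching byte in the 32-bit packed
+ * qtbl_lum[]/qtbl_chr[] tables, so the header is emitted in zigzag order.
+ */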
+static const u8 zigzag[] = {
+ 0x03, 0x02, 0x0b, 0x13, 0x0a, 0x01, 0x00, 0x09,
+ 0x12, 0x1b, 0x23, 0x1a, 0x11, 0x08, 0x07, 0x06,
+ 0x0f, 0x10, 0x19, 0x22, 0x2b, 0x33, 0x2a, 0x21,
+ 0x18, 0x17, 0x0e, 0x05, 0x04, 0x0d, 0x16, 0x1f,
+ 0x20, 0x29, 0x32, 0x3b, 0x3a, 0x31, 0x28, 0x27,
+	0x1e, 0x15, 0x0c, 0x14, 0x1d, 0x26, 0x2f, 0x30,
+	0x39, 0x38, 0x37, 0x2e, 0x25, 0x1c, 0x24, 0x2d,
+ 0x36, 0x3f, 0x3e, 0x35, 0x2c, 0x34, 0x3d, 0x3c
+};
+
+#define QTBL_SIZE (ALIGN(JPU_JPEG_QTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+#define HDCTBL_SIZE (ALIGN(JPU_JPEG_HDCTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+#define HACTBL_SIZE (ALIGN(JPU_JPEG_HACTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+/*
+ * Start of image; Quantization tables
+ * SOF0 (17 bytes payload) is Baseline DCT - Sample precision, height, width,
+ * Number of image components, (Ci:8 - Hi:4 - Vi:4 - Tq:8) * 3 - Y,Cb,Cr;
+ * Huffman tables; Padding with 0xff (33.3.27 R01UH0501EJ0100 Rev.1.00)
+ */
+#define JPU_JPEG_HDR_BLOB { \
+ 0xff, SOI, 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM, \
+ [JPU_JPEG_QTBL_LUM_OFFSET ... \
+ JPU_JPEG_QTBL_LUM_OFFSET + JPU_JPEG_QTBL_SIZE - 1] = 0x00, \
+ 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR, \
+ [JPU_JPEG_QTBL_CHR_OFFSET ... JPU_JPEG_QTBL_CHR_OFFSET + \
+ JPU_JPEG_QTBL_SIZE - 1] = 0x00, 0xff, SOF0, 0x00, 0x11, 0x08, \
+ [JPU_JPEG_HEIGHT_OFFSET ... JPU_JPEG_HEIGHT_OFFSET + 1] = 0x00, \
+ [JPU_JPEG_WIDTH_OFFSET ... JPU_JPEG_WIDTH_OFFSET + 1] = 0x00, \
+ 0x03, 0x01, [JPU_JPEG_SUBS_OFFSET] = 0x00, JPU_JPEG_LUM, \
+ 0x02, 0x11, JPU_JPEG_CHR, 0x03, 0x11, JPU_JPEG_CHR, \
+ 0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_DC, \
+ [JPU_JPEG_HDCTBL_LUM_OFFSET ... \
+ JPU_JPEG_HDCTBL_LUM_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+ 0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_AC, \
+ [JPU_JPEG_HACTBL_LUM_OFFSET ... \
+ JPU_JPEG_HACTBL_LUM_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+ 0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_DC, \
+ [JPU_JPEG_HDCTBL_CHR_OFFSET ... \
+ JPU_JPEG_HDCTBL_CHR_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+ 0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_AC, \
+ [JPU_JPEG_HACTBL_CHR_OFFSET ... \
+ JPU_JPEG_HACTBL_CHR_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+ [JPU_JPEG_PADDING_OFFSET ... JPU_JPEG_HDR_SIZE - 1] = 0xff \
+}
+
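+/* Per-quality JPEG header templates; the tables are filled in at probe time */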
+static unsigned char jpeg_hdrs[JPU_MAX_QUALITY][JPU_JPEG_HDR_SIZE] = {
+ [0 ... JPU_MAX_QUALITY - 1] = JPU_JPEG_HDR_BLOB
+};
+
+static const unsigned int qtbl_lum[JPU_MAX_QUALITY][QTBL_SIZE] = {
+ {
+ 0x14101927, 0x322e3e44, 0x10121726, 0x26354144,
+ 0x19171f26, 0x35414444, 0x27262635, 0x41444444,
+ 0x32263541, 0x44444444, 0x2e354144, 0x44444444,
+ 0x3e414444, 0x44444444, 0x44444444, 0x44444444
+ },
+ {
+ 0x100b0b10, 0x171b1f1e, 0x0b0c0c0f, 0x1417171e,
+ 0x0b0c0d10, 0x171a232f, 0x100f1017, 0x1a252f40,
+ 0x1714171a, 0x27334040, 0x1b171a25, 0x33404040,
+ 0x1f17232f, 0x40404040, 0x1e1e2f40, 0x40404040
+ },
+ {
+ 0x0c08080c, 0x11151817, 0x0809090b, 0x0f131217,
+ 0x08090a0c, 0x13141b24, 0x0c0b0c15, 0x141c2435,
+ 0x110f1314, 0x1e27333b, 0x1513141c, 0x27333b3b,
+ 0x18121b24, 0x333b3b3b, 0x17172435, 0x3b3b3b3b
+ },
+ {
+ 0x08060608, 0x0c0e1011, 0x06060608, 0x0a0d0c0f,
+ 0x06060708, 0x0d0e1218, 0x0808080e, 0x0d131823,
+ 0x0c0a0d0d, 0x141a2227, 0x0e0d0e13, 0x1a222727,
+ 0x100c1318, 0x22272727, 0x110f1823, 0x27272727
+ }
+};
+
+static const unsigned int qtbl_chr[JPU_MAX_QUALITY][QTBL_SIZE] = {
+ {
+ 0x15192026, 0x36444444, 0x191c1826, 0x36444444,
+ 0x2018202b, 0x42444444, 0x26262b35, 0x44444444,
+ 0x36424444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444
+ },
+ {
+ 0x110f1115, 0x141a2630, 0x0f131211, 0x141a232b,
+ 0x11121416, 0x1a1e2e35, 0x1511161c, 0x1e273540,
+ 0x14141a1e, 0x27304040, 0x1a1a1e27, 0x303f4040,
+ 0x26232e35, 0x40404040, 0x302b3540, 0x40404040
+ },
+ {
+ 0x0d0b0d10, 0x14141d25, 0x0b0e0e0e, 0x10141a20,
+ 0x0d0e0f11, 0x14172328, 0x100e1115, 0x171e2832,
+ 0x14101417, 0x1e25323b, 0x1414171e, 0x25303b3b,
+ 0x1d1a2328, 0x323b3b3b, 0x25202832, 0x3b3b3b3b
+ },
+ {
+ 0x0908090b, 0x0e111318, 0x080a090b, 0x0e0d1116,
+ 0x09090d0e, 0x0d0f171a, 0x0b0b0e0e, 0x0f141a21,
+ 0x0e0e0d0f, 0x14182127, 0x110d0f14, 0x18202727,
+ 0x1311171a, 0x21272727, 0x18161a21, 0x27272727
+ }
+};
+
+static const unsigned int hdctbl_lum[HDCTBL_SIZE] = {
+ 0x00010501, 0x01010101, 0x01000000, 0x00000000,
+ 0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hdctbl_chr[HDCTBL_SIZE] = {
+ 0x00010501, 0x01010101, 0x01000000, 0x00000000,
+ 0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hactbl_lum[HACTBL_SIZE] = {
+ 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+ 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+ 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+ 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+ 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+ 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+ 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+ 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const unsigned int hactbl_chr[HACTBL_SIZE] = {
+ 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+ 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+ 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+ 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+ 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+ 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+ 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+ 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const char *error_to_text[16] = {
+ "Normal",
+ "SOI not detected",
+ "SOF1 to SOFF detected",
+ "Subsampling not detected",
+ "SOF accuracy error",
+ "DQT accuracy error",
+ "Component error 1",
+ "Component error 2",
+ "SOF0, DQT, and DHT not detected when SOS detected",
+ "SOS not detected",
+ "EOI not detected",
+ "Restart interval data number error detected",
+ "Image size error",
+ "Last MCU data number error",
+ "Block data number error",
+ "Unknown"
+};
+
+static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_v4l2_buffer *vb)
+{
+ struct v4l2_m2m_buffer *b =
+ container_of(vb, struct v4l2_m2m_buffer, vb);
+
+ return container_of(b, struct jpu_buffer, buf);
+}
+
+static u32 jpu_read(struct jpu *jpu, unsigned int reg)
+{
+ return ioread32(jpu->regs + reg);
+}
+
+static void jpu_write(struct jpu *jpu, u32 val, unsigned int reg)
+{
+ iowrite32(val, jpu->regs + reg);
+}
+
+static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
+{
+ return container_of(c->handler, struct jpu_ctx, ctrl_handler);
+}
+
+static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct jpu_ctx, fh);
+}
+
+static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
+			unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ jpu_write(jpu, tbl[i], reg + (i << 2));
+}
+
+static void jpu_set_qtbl(struct jpu *jpu, unsigned short quality)
+{
+ jpu_set_tbl(jpu, JCQTBL(0), qtbl_lum[quality], QTBL_SIZE);
+ jpu_set_tbl(jpu, JCQTBL(1), qtbl_chr[quality], QTBL_SIZE);
+}
+
+static void jpu_set_htbl(struct jpu *jpu)
+{
+ jpu_set_tbl(jpu, JCHTBD(0), hdctbl_lum, HDCTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBA(0), hactbl_lum, HACTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBD(1), hdctbl_chr, HDCTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBA(1), hactbl_chr, HACTBL_SIZE);
+}
+
+static int jpu_wait_reset(struct jpu *jpu)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(JPU_RESET_TIMEOUT);
+
+ while (jpu_read(jpu, JCCMD) & JCCMD_SRST) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(jpu->dev, "timed out in reset\n");
+ return -ETIMEDOUT;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static int jpu_reset(struct jpu *jpu)
+{
+ jpu_write(jpu, JCCMD_SRST, JCCMD);
+ return jpu_wait_reset(jpu);
+}
+
+/*
+ * ============================================================================
+ * video ioctl operations
+ * ============================================================================
+ */
+static void put_qtbl(u8 *p, const u8 *qtbl)
+{
+ unsigned int i;
+
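+	/* emit the 64 quantization coefficients in JPEG zigzag scan order */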
+ for (i = 0; i < ARRAY_SIZE(zigzag); i++)
+ p[i] = *(qtbl + zigzag[i]);
+}
+
+static void put_htbl(u8 *p, const u8 *htbl, unsigned int len)
+{
+ unsigned int i, j;
+
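+	/* copy the table, reversing the bytes within each 32-bit word */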
+ for (i = 0; i < len; i += 4)
+ for (j = 0; j < 4 && (i + j) < len; ++j)
+ p[i + j] = htbl[i + 3 - j];
+}
+
+static void jpu_generate_hdr(unsigned short quality, unsigned char *p)
+{
+ put_qtbl(p + JPU_JPEG_QTBL_LUM_OFFSET, (const u8 *)qtbl_lum[quality]);
+ put_qtbl(p + JPU_JPEG_QTBL_CHR_OFFSET, (const u8 *)qtbl_chr[quality]);
+
+ put_htbl(p + JPU_JPEG_HDCTBL_LUM_OFFSET, (const u8 *)hdctbl_lum,
+ JPU_JPEG_HDCTBL_SIZE);
+ put_htbl(p + JPU_JPEG_HACTBL_LUM_OFFSET, (const u8 *)hactbl_lum,
+ JPU_JPEG_HACTBL_SIZE);
+
+ put_htbl(p + JPU_JPEG_HDCTBL_CHR_OFFSET, (const u8 *)hdctbl_chr,
+ JPU_JPEG_HDCTBL_SIZE);
+ put_htbl(p + JPU_JPEG_HACTBL_CHR_OFFSET, (const u8 *)hactbl_chr,
+ JPU_JPEG_HACTBL_SIZE);
+}
+
+static int get_byte(struct jpeg_buffer *buf)
+{
+ if (buf->curr >= buf->end)
+ return -1;
+
+ return *(u8 *)buf->curr++;
+}
+
+static int get_word_be(struct jpeg_buffer *buf, unsigned int *word)
+{
+ if (buf->end - buf->curr < 2)
+ return -1;
+
+ *word = get_unaligned_be16(buf->curr);
+ buf->curr += 2;
+
+ return 0;
+}
+
+static void skip(struct jpeg_buffer *buf, unsigned long len)
+{
+ buf->curr += min((unsigned long)(buf->end - buf->curr), len);
+}
+
+static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
+ unsigned int *height)
+{
+ struct jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ bool soi = false;
+
+ jpeg_buffer.end = buffer + size;
+ jpeg_buffer.curr = buffer;
+
+ /*
+	 * Basic size check and EOI check - we must not let the JPU read past
+	 * the buffer bounds in any case, and we rely on decoding stopping at
+	 * the EOI marker.
+ */
+ if (size < JPU_JPEG_MIN_SIZE || *(u8 *)(buffer + size - 1) != EOI)
+ return 0;
+
+ for (;;) {
+ int c;
+
+ /* skip preceding filler bytes */
+ do
+ c = get_byte(&jpeg_buffer);
+ while (c == 0xff || c == 0);
+
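+		/* accept the first SOI; reject a second SOI or data before SOI */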
+ if (!soi && c == SOI) {
+ soi = true;
+ continue;
+ } else if (soi != (c != SOI))
+ return 0;
+
+ switch (c) {
+ case SOF0: /* SOF0: baseline JPEG */
+ skip(&jpeg_buffer, 3); /* segment length and bpp */
+ if (get_word_be(&jpeg_buffer, height) ||
+ get_word_be(&jpeg_buffer, width) ||
+ get_byte(&jpeg_buffer) != 3) /* YCbCr only */
+ return 0;
+
+ skip(&jpeg_buffer, 1);
+ return get_byte(&jpeg_buffer);
+ case DHT:
+ case DQT:
+ case COM:
+ case DRI:
+ case APP0 ... APP0 + 0x0f:
+ if (get_word_be(&jpeg_buffer, &word))
+ return 0;
+ skip(&jpeg_buffer, (long)word - 2);
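+			/* fall through */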
+ case 0:
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int jpu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->encoder)
+ strlcpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
+ else
+ strlcpy(cap->card, DRV_NAME " decoder", sizeof(cap->card));
+
+ strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(ctx->jpu->dev));
+ cap->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | cap->device_caps;
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+
+ return 0;
+}
+
+static struct jpu_fmt *jpu_find_format(bool encoder, u32 pixelformat,
+ unsigned int fmt_type)
+{
+ unsigned int i, fmt_flag;
+
+ if (encoder)
+ fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_ENC_OUTPUT :
+ JPU_ENC_CAPTURE;
+ else
+ fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_DEC_OUTPUT :
+ JPU_DEC_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(jpu_formats); i++) {
+ struct jpu_fmt *fmt = &jpu_formats[i];
+
+ if (fmt->fourcc == pixelformat && fmt->types & fmt_flag)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(jpu_formats); ++i) {
+ if (jpu_formats[i].types & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(jpu_formats))
+ return -EINVAL;
+
+ f->pixelformat = jpu_formats[i].fourcc;
+
+ return 0;
+}
+
+static int jpu_enum_fmt_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
+ JPU_DEC_CAPTURE);
+}
+
+static int jpu_enum_fmt_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
+}
+
+static struct jpu_q_data *jpu_get_q_data(struct jpu_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ else
+ return &ctx->cap_q;
+}
+
+static void jpu_bound_align_image(u32 *w, unsigned int w_min,
+ unsigned int w_max, unsigned int w_align,
+ u32 *h, unsigned int h_min,
+ unsigned int h_max, unsigned int h_align)
+{
+ unsigned int width, height, w_step, h_step;
+
+ width = *w;
+ height = *h;
+
+ w_step = 1U << w_align;
+ h_step = 1U << h_align;
+ v4l_bound_align_image(w, w_min, w_max, w_align, h, h_min, h_max,
+ h_align, 3);
+
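+	/* if alignment shrank the image, grow it by one step if it still fits */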
+ if (*w < width && *w + w_step < w_max)
+ *w += w_step;
+ if (*h < height && *h + h_step < h_max)
+ *h += h_step;
+}
+
+static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix,
+ enum v4l2_buf_type type)
+{
+ struct jpu_fmt *fmt;
+ unsigned int f_type, w, h;
+
+ f_type = V4L2_TYPE_IS_OUTPUT(type) ? JPU_FMT_TYPE_OUTPUT :
+ JPU_FMT_TYPE_CAPTURE;
+
+ fmt = jpu_find_format(ctx->encoder, pix->pixelformat, f_type);
+ if (!fmt) {
+ unsigned int pixelformat;
+
+ dev_dbg(ctx->jpu->dev, "unknown format; set default format\n");
+ if (ctx->encoder)
+ pixelformat = f_type == JPU_FMT_TYPE_OUTPUT ?
+ V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+ else
+ pixelformat = f_type == JPU_FMT_TYPE_CAPTURE ?
+ V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+ fmt = jpu_find_format(ctx->encoder, pixelformat, f_type);
+ }
+
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = fmt->colorspace;
+ pix->field = V4L2_FIELD_NONE;
+ pix->num_planes = fmt->num_planes;
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+
+ jpu_bound_align_image(&pix->width, JPU_WIDTH_MIN, JPU_WIDTH_MAX,
+ fmt->h_align, &pix->height, JPU_HEIGHT_MIN,
+ JPU_HEIGHT_MAX, fmt->v_align);
+
+ w = pix->width;
+ h = pix->height;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
+		/* ignore userspace's sizeimage for encoding */
+ if (pix->plane_fmt[0].sizeimage <= 0 || ctx->encoder)
+ pix->plane_fmt[0].sizeimage = JPU_JPEG_HDR_SIZE +
+ (JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h);
+ pix->plane_fmt[0].bytesperline = 0;
+ memset(pix->plane_fmt[0].reserved, 0,
+ sizeof(pix->plane_fmt[0].reserved));
+ } else {
+ unsigned int i, bpl = 0;
+
+ for (i = 0; i < pix->num_planes; ++i)
+ bpl = max(bpl, pix->plane_fmt[i].bytesperline);
+
+ bpl = clamp_t(unsigned int, bpl, w, JPU_WIDTH_MAX);
+ bpl = round_up(bpl, JPU_MEMALIGN);
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ pix->plane_fmt[i].bytesperline = bpl;
+ pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8;
+ memset(pix->plane_fmt[i].reserved, 0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+ }
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ return 0;
+}
+
+static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ return __jpu_try_fmt(ctx, NULL, &f->fmt.pix_mp, f->type);
+}
+
+static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct jpu_fmt *fmtinfo;
+ struct jpu_q_data *q_data;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->jpu->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = __jpu_try_fmt(ctx, &fmtinfo, &f->fmt.pix_mp, f->type);
+ if (ret < 0)
+ return ret;
+
+ q_data = jpu_get_q_data(ctx, f->type);
+
+ q_data->format = f->fmt.pix_mp;
+ q_data->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct jpu_q_data *q_data;
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ q_data = jpu_get_q_data(ctx, f->type);
+ f->fmt.pix_mp = q_data->format;
+
+ return 0;
+}
+
+/*
+ * V4L2 controls
+ */
+static int jpu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct jpu_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+ if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY)
+ ctx->compr_quality = ctrl->val;
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops jpu_ctrl_ops = {
+ .s_ctrl = jpu_s_ctrl,
+};
+
+static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref;
+ enum v4l2_buf_type adj_type;
+
+ src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ dst_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ if (ctx->encoder) {
+ adj = *src_q_data;
+ orig = src_q_data;
+ ref = dst_q_data;
+ adj_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ } else {
+ adj = *dst_q_data;
+ orig = dst_q_data;
+ ref = src_q_data;
+ adj_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ }
+
+ adj.format.width = ref->format.width;
+ adj.format.height = ref->format.height;
+
+ __jpu_try_fmt(ctx, NULL, &adj.format, adj_type);
+
+ if (adj.format.width != orig->format.width ||
+ adj.format.height != orig->format.height) {
+ dev_err(ctx->jpu->dev, "src and dst formats do not match.\n");
+ /* maybe we can return -EPIPE here? */
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops jpu_ioctl_ops = {
+ .vidioc_querycap = jpu_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = jpu_enum_fmt_cap,
+ .vidioc_enum_fmt_vid_out_mplane = jpu_enum_fmt_out,
+ .vidioc_g_fmt_vid_cap_mplane = jpu_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = jpu_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = jpu_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = jpu_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = jpu_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = jpu_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = jpu_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe
+};
+
+static int jpu_controls_create(struct jpu_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler, &jpu_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ 0, JPU_MAX_QUALITY - 1, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ ret = ctx->ctrl_handler.error;
+ goto error_free;
+ }
+
+ if (!ctx->encoder)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+
+ ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ret < 0)
+ goto error_free;
+
+ return 0;
+
+error_free:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ret;
+}
+
+/*
+ * ============================================================================
+ * Queue operations
+ * ============================================================================
+ */
+static int jpu_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct jpu_q_data *q_data;
+ unsigned int i;
+
+ q_data = jpu_get_q_data(ctx, vq->type);
+
+ if (*nplanes) {
+ if (*nplanes != q_data->format.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ unsigned int q_size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (sizes[i] < q_size)
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ *nplanes = q_data->format.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int jpu_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct jpu_q_data *q_data;
+ unsigned int i;
+
+ q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_err(ctx->jpu->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < q_data->format.num_planes; i++) {
+ unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dev_err(ctx->jpu->dev,
+ "%s: data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ /* decoder capture queue */
+ if (!ctx->encoder && !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void jpu_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
+ struct jpu_q_data *q_data, adjust;
+ void *buffer = vb2_plane_vaddr(vb, 0);
+ unsigned long buf_size = vb2_get_plane_payload(vb, 0);
+ unsigned int width, height;
+
+ u8 subsampling = jpu_parse_hdr(buffer, buf_size, &width,
+ &height);
+
+ /* check if JPEG data basic parsing was successful */
+ if (subsampling != JPU_JPEG_422 && subsampling != JPU_JPEG_420)
+ goto format_error;
+
+ q_data = &ctx->out_q;
+
+ adjust = *q_data;
+ adjust.format.width = width;
+ adjust.format.height = height;
+
+ __jpu_try_fmt(ctx, &adjust.fmtinfo, &adjust.format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ if (adjust.format.width != q_data->format.width ||
+ adjust.format.height != q_data->format.height)
+ goto format_error;
+
+ /*
+ * keep subsampling in buffer to check it
+ * for compatibility in device_run
+ */
+ jpu_buf->subsampling = subsampling;
+ }
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+
+ return;
+
+format_error:
+ dev_err(ctx->jpu->dev, "incompatible or corrupted JPEG data\n");
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void jpu_buf_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct jpu_q_data *q_data = &ctx->out_q;
+ enum v4l2_buf_type type = vb->vb2_queue->type;
+ u8 *buffer;
+
+ if (vb->state == VB2_BUF_STATE_DONE)
+ vbuf->sequence = jpu_get_q_data(ctx, type)->sequence++;
+
+ if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
+ V4L2_TYPE_IS_OUTPUT(type))
+ return;
+
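+	/*
+	 * Encoder capture buffer: prepend the pre-built JPEG header and patch
+	 * in the actual image size and subsampling.
+	 */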
+ buffer = vb2_plane_vaddr(vb, 0);
+
+ memcpy(buffer, jpeg_hdrs[jpu_buf->compr_quality], JPU_JPEG_HDR_SIZE);
+ *(__be16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
+ cpu_to_be16(q_data->format.height);
+ *(__be16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
+ cpu_to_be16(q_data->format.width);
+ *(buffer + JPU_JPEG_SUBS_OFFSET) = q_data->fmtinfo->subsampling;
+}
+
+static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct jpu_q_data *q_data = jpu_get_q_data(ctx, vq->type);
+
+ q_data->sequence = 0;
+ return 0;
+}
+
+static void jpu_stop_streaming(struct vb2_queue *vq)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vb == NULL)
+ return;
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+ }
+}
+
+static const struct vb2_ops jpu_qops = {
+ .queue_setup = jpu_queue_setup,
+ .buf_prepare = jpu_buf_prepare,
+ .buf_queue = jpu_buf_queue,
+ .buf_finish = jpu_buf_finish,
+ .start_streaming = jpu_start_streaming,
+ .stop_streaming = jpu_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int jpu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct jpu_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct jpu_buffer);
+ src_vq->ops = &jpu_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpu->mutex;
+ src_vq->dev = ctx->jpu->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct jpu_buffer);
+ dst_vq->ops = &jpu_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpu->mutex;
+ dst_vq->dev = ctx->jpu->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * ============================================================================
+ * Device file operations
+ * ============================================================================
+ */
+static int jpu_open(struct file *file)
+{
+ struct jpu *jpu = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct jpu_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ v4l2_fh_init(&ctx->fh, vfd);
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->jpu = jpu;
+ ctx->encoder = vfd == &jpu->vfd_encoder;
+
+ __jpu_try_fmt(ctx, &ctx->out_q.fmtinfo, &ctx->out_q.format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ __jpu_try_fmt(ctx, &ctx->cap_q.fmtinfo, &ctx->cap_q.format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpu->m2m_dev, ctx, jpu_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto v4l_prepare_rollback;
+ }
+
+ ret = jpu_controls_create(ctx);
+ if (ret < 0)
+ goto v4l_prepare_rollback;
+
+ if (mutex_lock_interruptible(&jpu->mutex)) {
+ ret = -ERESTARTSYS;
+ goto v4l_prepare_rollback;
+ }
+
+ if (jpu->ref_count == 0) {
+ ret = clk_prepare_enable(jpu->clk);
+ if (ret < 0)
+ goto device_prepare_rollback;
+ /* ...issue software reset */
+ ret = jpu_reset(jpu);
+ if (ret)
+ goto jpu_reset_rollback;
+ }
+
+ jpu->ref_count++;
+
+ mutex_unlock(&jpu->mutex);
+ return 0;
+
+jpu_reset_rollback:
+ clk_disable_unprepare(jpu->clk);
+device_prepare_rollback:
+ mutex_unlock(&jpu->mutex);
+v4l_prepare_rollback:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int jpu_release(struct file *file)
+{
+ struct jpu *jpu = video_drvdata(file);
+ struct jpu_ctx *ctx = fh_to_ctx(file->private_data);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ mutex_lock(&jpu->mutex);
+ if (--jpu->ref_count == 0)
+ clk_disable_unprepare(jpu->clk);
+ mutex_unlock(&jpu->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations jpu_fops = {
+ .owner = THIS_MODULE,
+ .open = jpu_open,
+ .release = jpu_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+/*
+ * ============================================================================
+ * mem2mem callbacks
+ * ============================================================================
+ */
+static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
+{
+ /* remove current buffers and finish job */
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+
+ /* ...and give it a chance on next run */
+ if (reset)
+ jpu_write(ctx->jpu, JCCMD_SRST, JCCMD);
+
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+ v4l2_m2m_job_finish(ctx->jpu->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void jpu_device_run(void *priv)
+{
+ struct jpu_ctx *ctx = priv;
+ struct jpu *jpu = ctx->jpu;
+ struct jpu_buffer *jpu_buf;
+ struct jpu_q_data *q_data;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned int w, h, bpl;
+ unsigned char num_planes, subsampling;
+ unsigned long flags;
+
+ /* ...wait until module reset completes; we have mutex locked here */
+ if (jpu_wait_reset(jpu)) {
+ jpu_cleanup(ctx, true);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+ jpu->curr = ctx;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ if (ctx->encoder) {
+ jpu_buf = vb2_to_jpu_buffer(dst_buf);
+ q_data = &ctx->out_q;
+ } else {
+ jpu_buf = vb2_to_jpu_buffer(src_buf);
+ q_data = &ctx->cap_q;
+ }
+
+ w = q_data->format.width;
+ h = q_data->format.height;
+ bpl = q_data->format.plane_fmt[0].bytesperline;
+ num_planes = q_data->fmtinfo->num_planes;
+ subsampling = q_data->fmtinfo->subsampling;
+
+ if (ctx->encoder) {
+ unsigned long src_1_addr, src_2_addr, dst_addr;
+ unsigned int redu, inft;
+
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ src_1_addr =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
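+		/* single-plane formats: chroma is assumed to follow the luma samples */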
+ if (num_planes > 1)
+ src_2_addr = vb2_dma_contig_plane_dma_addr(
+ &src_buf->vb2_buf, 1);
+ else
+ src_2_addr = src_1_addr + w * h;
+
+ jpu_buf->compr_quality = ctx->compr_quality;
+
+ if (subsampling == JPU_JPEG_420) {
+ redu = JCMOD_REDU_420;
+ inft = JIFECNT_INFT_420;
+ } else {
+ redu = JCMOD_REDU_422;
+ inft = JIFECNT_INFT_422;
+ }
+
+ /* only no marker mode works for encoding */
+ jpu_write(jpu, JCMOD_DSP_ENC | JCMOD_PCTR | redu |
+ JCMOD_MSKIP_ENABLE, JCMOD);
+
+ jpu_write(jpu, JIFECNT_SWAP_WB | inft, JIFECNT);
+ jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+ jpu_write(jpu, JINTE_TRANSF_COMPL, JINTE);
+
+ /* Y and C components source addresses */
+ jpu_write(jpu, src_1_addr, JIFESYA1);
+ jpu_write(jpu, src_2_addr, JIFESCA1);
+
+ /* memory width */
+ jpu_write(jpu, bpl, JIFESMW);
+
+ jpu_write(jpu, (w >> 8) & JCSZ_MASK, JCHSZU);
+ jpu_write(jpu, w & JCSZ_MASK, JCHSZD);
+
+ jpu_write(jpu, (h >> 8) & JCSZ_MASK, JCVSZU);
+ jpu_write(jpu, h & JCSZ_MASK, JCVSZD);
+
+ jpu_write(jpu, w, JIFESHSZ);
+ jpu_write(jpu, h, JIFESVSZ);
+
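+		/* write the coded data after the space reserved for the JPEG header */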
+ jpu_write(jpu, dst_addr + JPU_JPEG_HDR_SIZE, JIFEDA1);
+
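+		/* component 1 (Y) uses table 0, components 2 and 3 (Cb/Cr) table 1 */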
+ jpu_write(jpu, 0 << JCQTN_SHIFT(1) | 1 << JCQTN_SHIFT(2) |
+ 1 << JCQTN_SHIFT(3), JCQTN);
+
+ jpu_write(jpu, 0 << JCHTN_AC_SHIFT(1) | 0 << JCHTN_DC_SHIFT(1) |
+ 1 << JCHTN_AC_SHIFT(2) | 1 << JCHTN_DC_SHIFT(2) |
+ 1 << JCHTN_AC_SHIFT(3) | 1 << JCHTN_DC_SHIFT(3),
+ JCHTN);
+
+ jpu_set_qtbl(jpu, ctx->compr_quality);
+ jpu_set_htbl(jpu);
+ } else {
+ unsigned long src_addr, dst_1_addr, dst_2_addr;
+
+ if (jpu_buf->subsampling != subsampling) {
+ dev_err(ctx->jpu->dev,
+ "src and dst formats do not match.\n");
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+ jpu_cleanup(ctx, false);
+ return;
+ }
+
+ src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ dst_1_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ if (q_data->fmtinfo->num_planes > 1)
+ dst_2_addr = vb2_dma_contig_plane_dma_addr(
+ &dst_buf->vb2_buf, 1);
+ else
+ dst_2_addr = dst_1_addr + w * h;
+
+ /* ...set up decoder operation */
+ jpu_write(jpu, JCMOD_DSP_DEC | JCMOD_PCTR, JCMOD);
+ jpu_write(jpu, JIFECNT_SWAP_WB, JIFECNT);
+ jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+
+		/* ...enable interrupts on transfer completion and decode error */
+ jpu_write(jpu, JINTE_TRANSF_COMPL | JINTE_ERR, JINTE);
+
+ /* ...set source/destination addresses of encoded data */
+ jpu_write(jpu, src_addr, JIFDSA1);
+ jpu_write(jpu, dst_1_addr, JIFDDYA1);
+ jpu_write(jpu, dst_2_addr, JIFDDCA1);
+
+ jpu_write(jpu, bpl, JIFDDMW);
+ }
+
+ /* ...start encoder/decoder operation */
+ jpu_write(jpu, JCCMD_JSRT, JCCMD);
+
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+}
+
+static const struct v4l2_m2m_ops jpu_m2m_ops = {
+ .device_run = jpu_device_run,
+};
+
+/*
+ * ============================================================================
+ * IRQ handler
+ * ============================================================================
+ */
+static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
+{
+ struct jpu *jpu = dev_id;
+ struct jpu_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned int int_status;
+
+ int_status = jpu_read(jpu, JINTS);
+
+ /* ...spurious interrupt */
+ if (!((JINTS_TRANSF_COMPL | JINTS_PROCESS_COMPL | JINTS_ERR) &
+ int_status))
+ return IRQ_NONE;
+
+ /* ...clear interrupts */
+ jpu_write(jpu, ~(int_status & JINTS_MASK), JINTS);
+ if (int_status & (JINTS_ERR | JINTS_PROCESS_COMPL))
+ jpu_write(jpu, JCCMD_JEND, JCCMD);
+
+ spin_lock(&jpu->lock);
+
+ if ((int_status & JINTS_PROCESS_COMPL) &&
+ !(int_status & JINTS_TRANSF_COMPL))
+ goto handled;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpu->m2m_dev);
+ if (!curr_ctx) {
+ /* ...instance is not running */
+ dev_err(jpu->dev, "no active context for m2m\n");
+ goto handled;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ if (int_status & JINTS_TRANSF_COMPL) {
+ if (curr_ctx->encoder) {
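+			/* the coded data count is split across three 8-bit registers */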
+ unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
+ | jpu_read(jpu, JCDTCM) << 8
+ | jpu_read(jpu, JCDTCD);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ payload_size + JPU_JPEG_HDR_SIZE);
+ }
+
+ dst_buf->field = src_buf->field;
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE)
+ dst_buf->timecode = src_buf->timecode;
+ dst_buf->flags = src_buf->flags &
+ (V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ } else if (int_status & JINTS_ERR) {
+ unsigned char error = jpu_read(jpu, JCDERR) & JCDERR_MASK;
+
+ dev_dbg(jpu->dev, "processing error: %#X: %s\n", error,
+ error_to_text[error]);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ jpu->curr = NULL;
+
+ /* ...reset JPU after completion */
+ jpu_write(jpu, JCCMD_SRST, JCCMD);
+ spin_unlock(&jpu->lock);
+
+ v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx);
+
+ return IRQ_HANDLED;
+
+handled:
+ spin_unlock(&jpu->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * ============================================================================
+ * Driver basic infrastructure
+ * ============================================================================
+ */
+static const struct of_device_id jpu_dt_ids[] = {
+ { .compatible = "renesas,jpu-r8a7790" }, /* H2 */
+ { .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
+ { .compatible = "renesas,jpu-r8a7792" }, /* V2H */
+ { .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
+ { .compatible = "renesas,rcar-gen2-jpu" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, jpu_dt_ids);
+
+static int jpu_probe(struct platform_device *pdev)
+{
+ struct jpu *jpu;
+ struct resource *res;
+ int ret;
+ unsigned int i;
+
+ jpu = devm_kzalloc(&pdev->dev, sizeof(*jpu), GFP_KERNEL);
+ if (!jpu)
+ return -ENOMEM;
+
+ mutex_init(&jpu->mutex);
+ spin_lock_init(&jpu->lock);
+ jpu->dev = &pdev->dev;
+
+ /* memory-mapped registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ jpu->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(jpu->regs))
+ return PTR_ERR(jpu->regs);
+
+ /* interrupt service routine registration */
+ jpu->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, jpu->irq, jpu_irq_handler, 0,
+ dev_name(&pdev->dev), jpu);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpu->irq);
+ return ret;
+ }
+
+ /* clocks */
+ jpu->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(jpu->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(jpu->clk);
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &jpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ return ret;
+ }
+
+ /* mem2mem device */
+ jpu->m2m_dev = v4l2_m2m_init(&jpu_m2m_ops);
+ if (IS_ERR(jpu->m2m_dev)) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpu->m2m_dev);
+ goto device_register_rollback;
+ }
+
+	/* fill in quantization and Huffman tables for the encoder */
+ for (i = 0; i < JPU_MAX_QUALITY; i++)
+ jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]);
+
+ strlcpy(jpu->vfd_encoder.name, DRV_NAME, sizeof(jpu->vfd_encoder.name));
+ jpu->vfd_encoder.fops = &jpu_fops;
+ jpu->vfd_encoder.ioctl_ops = &jpu_ioctl_ops;
+ jpu->vfd_encoder.minor = -1;
+ jpu->vfd_encoder.release = video_device_release_empty;
+ jpu->vfd_encoder.lock = &jpu->mutex;
+ jpu->vfd_encoder.v4l2_dev = &jpu->v4l2_dev;
+ jpu->vfd_encoder.vfl_dir = VFL_DIR_M2M;
+
+ ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+ goto m2m_init_rollback;
+ }
+
+ video_set_drvdata(&jpu->vfd_encoder, jpu);
+
+ strlcpy(jpu->vfd_decoder.name, DRV_NAME, sizeof(jpu->vfd_decoder.name));
+ jpu->vfd_decoder.fops = &jpu_fops;
+ jpu->vfd_decoder.ioctl_ops = &jpu_ioctl_ops;
+ jpu->vfd_decoder.minor = -1;
+ jpu->vfd_decoder.release = video_device_release_empty;
+ jpu->vfd_decoder.lock = &jpu->mutex;
+ jpu->vfd_decoder.v4l2_dev = &jpu->v4l2_dev;
+ jpu->vfd_decoder.vfl_dir = VFL_DIR_M2M;
+
+ ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+ goto enc_vdev_register_rollback;
+ }
+
+ video_set_drvdata(&jpu->vfd_decoder, jpu);
+ platform_set_drvdata(pdev, jpu);
+
+ v4l2_info(&jpu->v4l2_dev, "encoder device registered as /dev/video%d\n",
+ jpu->vfd_encoder.num);
+ v4l2_info(&jpu->v4l2_dev, "decoder device registered as /dev/video%d\n",
+ jpu->vfd_decoder.num);
+
+ return 0;
+
+enc_vdev_register_rollback:
+ video_unregister_device(&jpu->vfd_encoder);
+
+m2m_init_rollback:
+ v4l2_m2m_release(jpu->m2m_dev);
+
+device_register_rollback:
+ v4l2_device_unregister(&jpu->v4l2_dev);
+
+ return ret;
+}
+
+static int jpu_remove(struct platform_device *pdev)
+{
+ struct jpu *jpu = platform_get_drvdata(pdev);
+
+ video_unregister_device(&jpu->vfd_decoder);
+ video_unregister_device(&jpu->vfd_encoder);
+ v4l2_m2m_release(jpu->m2m_dev);
+ v4l2_device_unregister(&jpu->v4l2_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int jpu_suspend(struct device *dev)
+{
+ struct jpu *jpu = dev_get_drvdata(dev);
+
+ if (jpu->ref_count == 0)
+ return 0;
+
+ clk_disable_unprepare(jpu->clk);
+
+ return 0;
+}
+
+static int jpu_resume(struct device *dev)
+{
+ struct jpu *jpu = dev_get_drvdata(dev);
+
+ if (jpu->ref_count == 0)
+ return 0;
+
+ clk_prepare_enable(jpu->clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops jpu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(jpu_suspend, jpu_resume)
+};
+
+static struct platform_driver jpu_driver = {
+ .probe = jpu_probe,
+ .remove = jpu_remove,
+ .driver = {
+ .of_match_table = jpu_dt_ids,
+ .name = DRV_NAME,
+ .pm = &jpu_pm_ops,
+ },
+};
+
+module_platform_driver(jpu_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Mikhail Ulianov <mikhail.ulyanov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
new file mode 100644
index 000000000..ad782901c
--- /dev/null
+++ b/drivers/media/platform/renesas-ceu.c
@@ -0,0 +1,1753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Driver for Renesas Capture Engine Unit (CEU) interface
+ * Copyright (C) 2017-2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ *
+ * Based on soc-camera driver "soc_camera/sh_mobile_ceu_camera.c"
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <media/drv-intf/renesas-ceu.h>
+
+#define DRIVER_NAME "renesas-ceu"
+
+/* CEU registers offsets and masks. */
+#define CEU_CAPSR 0x00 /* Capture start register */
+#define CEU_CAPCR 0x04 /* Capture control register */
+#define CEU_CAMCR 0x08 /* Capture interface control register */
+#define CEU_CAMOR 0x10 /* Capture interface offset register */
+#define CEU_CAPWR 0x14 /* Capture interface width register */
+#define CEU_CAIFR 0x18 /* Capture interface input format register */
+#define CEU_CRCNTR 0x28 /* CEU register control register */
+#define CEU_CRCMPR 0x2c /* CEU register forcible control register */
+#define CEU_CFLCR 0x30 /* Capture filter control register */
+#define CEU_CFSZR 0x34 /* Capture filter size clip register */
+#define CEU_CDWDR 0x38 /* Capture destination width register */
+#define CEU_CDAYR 0x3c /* Capture data address Y register */
+#define CEU_CDACR 0x40 /* Capture data address C register */
+#define CEU_CFWCR 0x5c /* Firewall operation control register */
+#define CEU_CDOCR 0x64 /* Capture data output control register */
+#define CEU_CEIER 0x70 /* Capture event interrupt enable register */
+#define CEU_CETCR 0x74 /* Capture event flag clear register */
+#define CEU_CSTSR 0x7c /* Capture status register */
+#define CEU_CSRTR 0x80 /* Capture software reset register */
+
+/* Data synchronous fetch mode. */
+#define CEU_CAMCR_JPEG BIT(4)
+
+/* Input components ordering: CEU_CAMCR.DTARY field. */
+#define CEU_CAMCR_DTARY_8_UYVY (0x00 << 8)
+#define CEU_CAMCR_DTARY_8_VYUY (0x01 << 8)
+#define CEU_CAMCR_DTARY_8_YUYV (0x02 << 8)
+#define CEU_CAMCR_DTARY_8_YVYU (0x03 << 8)
+/* TODO: input components ordering for 16 bits input. */
+
+/* Bus transfer MTU. */
+#define CEU_CAPCR_BUS_WIDTH256 (0x3 << 20)
+
+/* Bus width configuration. */
+#define CEU_CAMCR_DTIF_16BITS BIT(12)
+
+/* No downsampling to planar YUV420 in image fetch mode. */
+#define CEU_CDOCR_NO_DOWSAMPLE BIT(4)
+
+/* Swap all input data in 8-bit, 16-bit and 32-bit units (Figure 46.45). */
+#define CEU_CDOCR_SWAP_ENDIANNESS (7)
+
+/* Capture reset and enable bits. */
+#define CEU_CAPSR_CPKIL BIT(16)
+#define CEU_CAPSR_CE BIT(0)
+
+/* CEU operating flag bit. */
+#define CEU_CAPCR_CTNCP BIT(16)
+#define CEU_CSTRST_CPTON BIT(0)
+
+/* Platform specific IRQ source flags. */
+#define CEU_CETCR_ALL_IRQS_RZ 0x397f313
+#define CEU_CETCR_ALL_IRQS_SH4 0x3d7f313
+
+/* Prohibited register access interrupt bit. */
+#define CEU_CETCR_IGRW BIT(4)
+/* One-frame capture end interrupt. */
+#define CEU_CEIER_CPE BIT(0)
+/* VBP error. */
+#define CEU_CEIER_VBP BIT(20)
+#define CEU_CEIER_MASK (CEU_CEIER_CPE | CEU_CEIER_VBP)
+
+#define CEU_MAX_WIDTH 2560
+#define CEU_MAX_HEIGHT 1920
+#define CEU_MAX_BPL 8188
+#define CEU_W_MAX(w) ((w) < CEU_MAX_WIDTH ? (w) : CEU_MAX_WIDTH)
+#define CEU_H_MAX(h) ((h) < CEU_MAX_HEIGHT ? (h) : CEU_MAX_HEIGHT)
+
+/*
+ * ceu_mbus_fmt - describe an 8-bit YUYV bus format the sensor can produce
+ *
+ * @mbus_code: bus format code
+ * @fmt_order: CEU_CAMCR.DTARY ordering of input components (Y, Cb, Cr)
+ * @fmt_order_swap: swapped CEU_CAMCR.DTARY ordering of input components
+ * (Y, Cr, Cb)
+ * @swapped: does Cr appear before Cb?
+ * @bps: number of bits sent over bus for each sample
+ * @bpp: number of bits per pixel
+ */
+struct ceu_mbus_fmt {
+ u32 mbus_code;
+ u32 fmt_order;
+ u32 fmt_order_swap;
+ bool swapped;
+ u8 bps;
+ u8 bpp;
+};
+
+/*
+ * ceu_buffer - Link vb2 buffer to the list of available buffers.
+ */
+struct ceu_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+};
+
+static inline struct ceu_buffer *vb2_to_ceu(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct ceu_buffer, vb);
+}
+
+/*
+ * ceu_subdev - Wraps v4l2 sub-device and provides async subdevice.
+ */
+struct ceu_subdev {
+ struct v4l2_subdev *v4l2_sd;
+ struct v4l2_async_subdev asd;
+
+ /* per-subdevice mbus configuration options */
+ unsigned int mbus_flags;
+ struct ceu_mbus_fmt mbus_fmt;
+};
+
+static struct ceu_subdev *to_ceu_subdev(struct v4l2_async_subdev *asd)
+{
+ return container_of(asd, struct ceu_subdev, asd);
+}
+
+/*
+ * ceu_device - CEU device instance
+ */
+struct ceu_device {
+ struct device *dev;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+
+ /* subdevices descriptors */
+ struct ceu_subdev *subdevs;
+ /* the subdevice currently in use */
+ struct ceu_subdev *sd;
+ unsigned int sd_index;
+ unsigned int num_sd;
+
+ /* platform specific mask with all IRQ sources flagged */
+ u32 irq_mask;
+
+ /* currently configured field and pixel format */
+ enum v4l2_field field;
+ struct v4l2_pix_format_mplane v4l2_pix;
+
+ /* async subdev notification helpers */
+ struct v4l2_async_notifier notifier;
+ /* pointers to "struct ceu_subdevice -> asd" */
+ struct v4l2_async_subdev **asds;
+
+ /* vb2 queue, capture buffer list and active buffer pointer */
+ struct vb2_queue vb2_vq;
+ struct list_head capture;
+ struct vb2_v4l2_buffer *active;
+ unsigned int sequence;
+
+ /* mlock - lock access to interface reset and vb2 queue */
+ struct mutex mlock;
+
+ /* lock - lock access to capture buffer queue and active buffer */
+ spinlock_t lock;
+
+ /* base - CEU memory base address */
+ void __iomem *base;
+};
+
+static inline struct ceu_device *v4l2_to_ceu(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct ceu_device, v4l2_dev);
+}
+
+/* --- CEU memory output formats --- */
+
+/*
+ * ceu_fmt - describe a memory output format supported by CEU interface.
+ *
+ * @fourcc: memory layout fourcc format code
+ * @bpp: number of bits for each pixel stored in memory
+ */
+struct ceu_fmt {
+ u32 fourcc;
+ u32 bpp;
+};
+
+/*
+ * ceu_format_list - List of supported memory output formats
+ *
+ * If sensor provides any YUYV bus format, all the following planar memory
+ * formats are available thanks to CEU re-ordering and sub-sampling
+ * capabilities.
+ */
+static const struct ceu_fmt ceu_fmt_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .bpp = 16,
+ },
+};
+
+static const struct ceu_fmt *get_ceu_fmt_from_fourcc(unsigned int fourcc)
+{
+ const struct ceu_fmt *fmt = &ceu_fmt_list[0];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ceu_fmt_list); i++, fmt++)
+ if (fmt->fourcc == fourcc)
+ return fmt;
+
+ return NULL;
+}
+
+static bool ceu_fmt_mplane(struct v4l2_pix_format_mplane *pix)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ return false;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* --- CEU HW operations --- */
+
+static void ceu_write(struct ceu_device *priv, unsigned int reg_offs, u32 data)
+{
+ iowrite32(data, priv->base + reg_offs);
+}
+
+static u32 ceu_read(struct ceu_device *priv, unsigned int reg_offs)
+{
+ return ioread32(priv->base + reg_offs);
+}
+
+/*
+ * ceu_soft_reset() - Software reset of the CEU interface.
+ * @ceudev: CEU device.
+ *
+ * Returns 0 for success, -EIO for error.
+ */
+static int ceu_soft_reset(struct ceu_device *ceudev)
+{
+ unsigned int i;
+
+ ceu_write(ceudev, CEU_CAPSR, CEU_CAPSR_CPKIL);
+
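+	/* Wait for any in-progress capture operation to stop. */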
+ for (i = 0; i < 100; i++) {
+ if (!(ceu_read(ceudev, CEU_CSTSR) & CEU_CSTRST_CPTON))
+ break;
+ udelay(1);
+ }
+
+ if (i == 100) {
+ dev_err(ceudev->dev, "soft reset time out\n");
+ return -EIO;
+ }
+
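+	/* Wait for the capture kill (CPKIL) bit to be cleared by hardware. */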
+ for (i = 0; i < 100; i++) {
+ if (!(ceu_read(ceudev, CEU_CAPSR) & CEU_CAPSR_CPKIL))
+ return 0;
+ udelay(1);
+ }
+
+ /* If we get here, CEU has not reset properly. */
+ return -EIO;
+}
+
+/* --- CEU Capture Operations --- */
+
+/*
+ * ceu_hw_config() - Configure CEU interface registers.
+ */
+static int ceu_hw_config(struct ceu_device *ceudev)
+{
+ u32 camcr, cdocr, cfzsr, cdwdr, capwr;
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct ceu_mbus_fmt *mbus_fmt = &ceu_sd->mbus_fmt;
+ unsigned int mbus_flags = ceu_sd->mbus_flags;
+
+ /* Start configuring CEU registers */
+ ceu_write(ceudev, CEU_CAIFR, 0);
+ ceu_write(ceudev, CEU_CFWCR, 0);
+ ceu_write(ceudev, CEU_CRCNTR, 0);
+ ceu_write(ceudev, CEU_CRCMPR, 0);
+
+ /* Set the frame capture period for both image capture and data sync. */
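+	/* CAPWR: number of lines in bits [31:16], line length in bytes in bits [15:0]. */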
+ capwr = (pix->height << 16) | pix->width * mbus_fmt->bpp / 8;
+
+ /*
+ * Swap input data endianness by default.
+ * In data fetch mode bytes are received in chunks of 8 bytes.
+ * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
+ * The data is however by default written to memory in reverse order:
+ * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
+ *
+ * Use CEU_CDOCR[2:0] to swap data ordering.
+ */
+ cdocr = CEU_CDOCR_SWAP_ENDIANNESS;
+
+ /*
+ * Configure CAMCR and CDOCR:
+ * match input components ordering with memory output format and
+ * handle downsampling to YUV420.
+ *
+ * If the memory output planar format is 'swapped' (Cr before Cb) and
+ * input format is not, use the swapped version of CAMCR.DTARY.
+ *
+ * If the memory output planar format is not 'swapped' (Cb before Cr)
+ * and input format is, use the swapped version of CAMCR.DTARY.
+ *
+	 * The CEU downsamples to planar YUV420 by default (CDOCR[4] = 0).
+	 * If the output is planar YUV422, set CDOCR[4] = 1.
+	 *
+	 * No downsampling is performed in data fetch sync mode.
+ */
+ switch (pix->pixelformat) {
+ /* Data fetch sync mode */
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ camcr = CEU_CAMCR_JPEG;
+ cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
+ cfzsr = (pix->height << 16) | pix->width;
+ cdwdr = pix->plane_fmt[0].bytesperline;
+ break;
+
+ /* Non-swapped planar image capture mode. */
+ case V4L2_PIX_FMT_NV16:
+ cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
+ /* fall-through */
+ case V4L2_PIX_FMT_NV12:
+ if (mbus_fmt->swapped)
+ camcr = mbus_fmt->fmt_order_swap;
+ else
+ camcr = mbus_fmt->fmt_order;
+
+ cfzsr = (pix->height << 16) | pix->width;
+ cdwdr = pix->width;
+ break;
+
+ /* Swapped planar image capture mode. */
+ case V4L2_PIX_FMT_NV61:
+ cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
+ /* fall-through */
+ case V4L2_PIX_FMT_NV21:
+ if (mbus_fmt->swapped)
+ camcr = mbus_fmt->fmt_order;
+ else
+ camcr = mbus_fmt->fmt_order_swap;
+
+ cfzsr = (pix->height << 16) | pix->width;
+ cdwdr = pix->width;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ camcr |= mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
+ camcr |= mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
+
+ /* TODO: handle 16 bit bus width with DTIF bit in CAMCR */
+ ceu_write(ceudev, CEU_CAMCR, camcr);
+ ceu_write(ceudev, CEU_CDOCR, cdocr);
+ ceu_write(ceudev, CEU_CAPCR, CEU_CAPCR_BUS_WIDTH256);
+
+ /*
+ * TODO: make CAMOR offsets configurable.
+ * CAMOR wants to know the number of blanks between a VS/HS signal
+ * and valid data. This value should actually come from the sensor...
+ */
+ ceu_write(ceudev, CEU_CAMOR, 0);
+
+ /* TODO: 16 bit bus width require re-calculation of cdwdr and cfzsr */
+ ceu_write(ceudev, CEU_CAPWR, capwr);
+ ceu_write(ceudev, CEU_CFSZR, cfzsr);
+ ceu_write(ceudev, CEU_CDWDR, cdwdr);
+
+ return 0;
+}
+
+/*
+ * ceu_capture() - Trigger start of a capture sequence.
+ *
+ * Program the CEU DMA registers with addresses where to transfer image data.
+ */
+static int ceu_capture(struct ceu_device *ceudev)
+{
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ dma_addr_t phys_addr_top;
+
+ phys_addr_top =
+ vb2_dma_contig_plane_dma_addr(&ceudev->active->vb2_buf, 0);
+ ceu_write(ceudev, CEU_CDAYR, phys_addr_top);
+
+ /* Ignore CbCr plane for non multi-planar image formats. */
+ if (ceu_fmt_mplane(pix)) {
+ phys_addr_top =
+ vb2_dma_contig_plane_dma_addr(&ceudev->active->vb2_buf,
+ 1);
+ ceu_write(ceudev, CEU_CDACR, phys_addr_top);
+ }
+
+ /*
+ * Trigger new capture start: once for each frame, as we work in
+ * one-frame capture mode.
+ */
+ ceu_write(ceudev, CEU_CAPSR, CEU_CAPSR_CE);
+
+ return 0;
+}
+
+static irqreturn_t ceu_irq(int irq, void *data)
+{
+ struct ceu_device *ceudev = data;
+ struct vb2_v4l2_buffer *vbuf;
+ struct ceu_buffer *buf;
+ u32 status;
+
+	/* Clear the interrupt status. */
+ status = ceu_read(ceudev, CEU_CETCR);
+ ceu_write(ceudev, CEU_CETCR, ~ceudev->irq_mask);
+
+ /* Unexpected interrupt. */
+ if (!(status & CEU_CEIER_MASK))
+ return IRQ_NONE;
+
+ spin_lock(&ceudev->lock);
+
+ /* Stale interrupt from a released buffer, ignore it. */
+ vbuf = ceudev->active;
+ if (!vbuf) {
+ spin_unlock(&ceudev->lock);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * When a VBP interrupt occurs, no capture end interrupt will occur
+ * and the image of that frame is not captured correctly.
+ */
+ if (status & CEU_CEIER_VBP) {
+ dev_err(ceudev->dev, "VBP interrupt: abort capture\n");
+ goto error_irq_out;
+ }
+
+ /* Prepare to return the 'previous' buffer. */
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vbuf->sequence = ceudev->sequence++;
+ vbuf->field = ceudev->field;
+
+ /* Prepare a new 'active' buffer and trigger a new capture. */
+ if (!list_empty(&ceudev->capture)) {
+ buf = list_first_entry(&ceudev->capture, struct ceu_buffer,
+ queue);
+ list_del(&buf->queue);
+ ceudev->active = &buf->vb;
+
+ ceu_capture(ceudev);
+ }
+
+ /* Return the 'previous' buffer. */
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+
+ spin_unlock(&ceudev->lock);
+
+ return IRQ_HANDLED;
+
+error_irq_out:
+ /* Return the 'previous' buffer and all queued ones. */
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
+
+ list_for_each_entry(buf, &ceudev->capture, queue)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+
+ spin_unlock(&ceudev->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* --- CEU Videobuf2 operations --- */
+
+static void ceu_update_plane_sizes(struct v4l2_plane_pix_format *plane,
+ unsigned int bpl, unsigned int szimage)
+{
+ memset(plane, 0, sizeof(*plane));
+
+ plane->sizeimage = szimage;
+ if (plane->bytesperline < bpl || plane->bytesperline > CEU_MAX_BPL)
+ plane->bytesperline = bpl;
+}
+
+/*
+ * ceu_calc_plane_sizes() - Fill per-plane 'struct v4l2_plane_pix_format'
+ * information according to the currently configured
+ * pixel format.
+ * @ceudev: CEU device.
+ * @ceu_fmt: Active image format.
+ * @pix: Pixel format information (stores line width and image sizes).
+ */
+static void ceu_calc_plane_sizes(struct ceu_device *ceudev,
+ const struct ceu_fmt *ceu_fmt,
+ struct v4l2_pix_format_mplane *pix)
+{
+ unsigned int bpl, szimage;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ pix->num_planes = 1;
+ bpl = pix->width * ceu_fmt->bpp / 8;
+ szimage = pix->height * bpl;
+ ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage);
+ break;
+
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ pix->num_planes = 2;
+ bpl = pix->width;
+ szimage = pix->height * pix->width;
+ ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage);
+ ceu_update_plane_sizes(&pix->plane_fmt[1], bpl, szimage / 2);
+ break;
+
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ default:
+ pix->num_planes = 2;
+ bpl = pix->width;
+ szimage = pix->height * pix->width;
+ ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage);
+ ceu_update_plane_sizes(&pix->plane_fmt[1], bpl, szimage);
+ break;
+ }
+}
+
+/*
+ * ceu_vb2_setup() - Check whether the driver can accept the requested
+ *		     number of buffers and fill in the plane sizes for the
+ *		     current frame format, if required.
+ */
+static int ceu_vb2_setup(struct vb2_queue *vq, unsigned int *count,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ unsigned int i;
+
+ /* num_planes is set: just check plane sizes. */
+ if (*num_planes) {
+ for (i = 0; i < pix->num_planes; i++)
+ if (sizes[i] < pix->plane_fmt[i].sizeimage)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* num_planes not set: called from REQBUFS, just set plane sizes. */
+ *num_planes = pix->num_planes;
+ for (i = 0; i < pix->num_planes; i++)
+ sizes[i] = pix->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void ceu_vb2_queue(struct vb2_buffer *vb)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ceu_buffer *buf = vb2_to_ceu(vbuf);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ list_add_tail(&buf->queue, &ceudev->capture);
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+}
+
+static int ceu_vb2_prepare(struct vb2_buffer *vb)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ unsigned int i;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
+ dev_err(ceudev->dev,
+ "Plane size too small (%lu < %u)\n",
+ vb2_plane_size(vb, i),
+ pix->plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static int ceu_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+ struct ceu_buffer *buf;
+ unsigned long irqflags;
+ int ret;
+
+ /* Program the CEU interface according to the CEU image format. */
+ ret = ceu_hw_config(ceudev);
+ if (ret)
+ goto error_return_bufs;
+
+ ret = v4l2_subdev_call(v4l2_sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ dev_dbg(ceudev->dev,
+ "Subdevice failed to start streaming: %d\n", ret);
+ goto error_return_bufs;
+ }
+
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ ceudev->sequence = 0;
+
+ /* Grab the first available buffer and trigger the first capture. */
+	buf = list_first_entry_or_null(&ceudev->capture, struct ceu_buffer,
+				       queue);
+ if (!buf) {
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+ dev_dbg(ceudev->dev,
+ "No buffer available for capture.\n");
+ goto error_stop_sensor;
+ }
+
+ list_del(&buf->queue);
+ ceudev->active = &buf->vb;
+
+	/* Clear and program interrupts for the first capture. */
+ ceu_write(ceudev, CEU_CETCR, ~ceudev->irq_mask);
+ ceu_write(ceudev, CEU_CEIER, CEU_CEIER_MASK);
+
+ ceu_capture(ceudev);
+
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+
+ return 0;
+
+error_stop_sensor:
+ v4l2_subdev_call(v4l2_sd, video, s_stream, 0);
+
+error_return_bufs:
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ list_for_each_entry(buf, &ceudev->capture, queue)
+		vb2_buffer_done(&buf->vb.vb2_buf,
+				VB2_BUF_STATE_QUEUED);
+ ceudev->active = NULL;
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+
+ return ret;
+}
+
+static void ceu_stop_streaming(struct vb2_queue *vq)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+ struct ceu_buffer *buf;
+ unsigned long irqflags;
+
+	/* Clear and disable interrupt sources. */
+ ceu_write(ceudev, CEU_CETCR,
+ ceu_read(ceudev, CEU_CETCR) & ceudev->irq_mask);
+ ceu_write(ceudev, CEU_CEIER, CEU_CEIER_MASK);
+
+ v4l2_subdev_call(v4l2_sd, video, s_stream, 0);
+
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ if (ceudev->active) {
+ vb2_buffer_done(&ceudev->active->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ ceudev->active = NULL;
+ }
+
+ /* Release all queued buffers. */
+ list_for_each_entry(buf, &ceudev->capture, queue)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&ceudev->capture);
+
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+
+ ceu_soft_reset(ceudev);
+}
+
+static const struct vb2_ops ceu_vb2_ops = {
+ .queue_setup = ceu_vb2_setup,
+ .buf_queue = ceu_vb2_queue,
+ .buf_prepare = ceu_vb2_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = ceu_start_streaming,
+ .stop_streaming = ceu_stop_streaming,
+};
+
+/* --- CEU image formats handling --- */
+
+/*
+ * __ceu_try_fmt() - test format on CEU and sensor
+ * @ceudev: The CEU device.
+ * @v4l2_fmt: format to test.
+ * @sd_mbus_code: the media bus code accepted by the subdevice; output param.
+ *
+ * Returns 0 for success, < 0 for errors.
+ */
+static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt,
+ u32 *sd_mbus_code)
+{
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct v4l2_pix_format_mplane *pix = &v4l2_fmt->fmt.pix_mp;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ struct v4l2_subdev_pad_config pad_cfg;
+ const struct ceu_fmt *ceu_fmt;
+ u32 mbus_code_old;
+ u32 mbus_code;
+ int ret;
+
+ /*
+	 * Set the format on the sensor subdevice: the bus format used to
+	 * produce the memory format is selected depending on the YUV
+	 * component ordering, or falls back to the one chosen at
+	 * initialization time.
+ */
+ struct v4l2_subdev_format sd_format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+
+ mbus_code_old = ceu_sd->mbus_fmt.mbus_code;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mbus_code = MEDIA_BUS_FMT_UYVY8_2X8;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ mbus_code = MEDIA_BUS_FMT_YVYU8_2X8;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ mbus_code = MEDIA_BUS_FMT_VYUY8_2X8;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
+ break;
+
+ default:
+ pix->pixelformat = V4L2_PIX_FMT_NV16;
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
+ break;
+ }
+
+ ceu_fmt = get_ceu_fmt_from_fourcc(pix->pixelformat);
+
+ /* CFSZR requires height and width to be 4-pixel aligned. */
+ v4l_bound_align_image(&pix->width, 2, CEU_MAX_WIDTH, 4,
+ &pix->height, 4, CEU_MAX_HEIGHT, 4, 0);
+
+ v4l2_fill_mbus_format_mplane(&sd_format.format, pix);
+
+ /*
+ * Try with the mbus_code matching YUYV components ordering first,
+ * if that one fails, fallback to default selected at initialization
+ * time.
+ */
+ sd_format.format.code = mbus_code;
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_cfg, &sd_format);
+ if (ret) {
+ if (ret == -EINVAL) {
+ /* fallback */
+ sd_format.format.code = mbus_code_old;
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt,
+ &pad_cfg, &sd_format);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ /* Apply size returned by sensor as the CEU can't scale. */
+ v4l2_fill_pix_format_mplane(pix, &sd_format.format);
+
+ /* Calculate per-plane sizes based on image format. */
+ ceu_calc_plane_sizes(ceudev, ceu_fmt, pix);
+
+ /* Report to caller the configured mbus format. */
+ *sd_mbus_code = sd_format.format.code;
+
+ return 0;
+}
+
+/*
+ * ceu_try_fmt() - Wrapper for __ceu_try_fmt; discard configured mbus_fmt
+ */
+static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+{
+ u32 mbus_code;
+
+ return __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
+}
+
+/*
+ * ceu_set_fmt() - Apply the supplied format to both sensor and CEU
+ */
+static int ceu_set_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+{
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ u32 mbus_code;
+ int ret;
+
+ /*
+	 * Set the format on the sensor subdevice: the bus format used to
+	 * produce the memory format was selected at initialization time.
+ */
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ ret = __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
+ if (ret)
+ return ret;
+
+ format.format.code = mbus_code;
+ v4l2_fill_mbus_format_mplane(&format.format, &v4l2_fmt->fmt.pix_mp);
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, NULL, &format);
+ if (ret)
+ return ret;
+
+ ceudev->v4l2_pix = v4l2_fmt->fmt.pix_mp;
+ ceudev->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/*
+ * ceu_set_default_fmt() - Apply default NV16 memory output format with VGA
+ * sizes.
+ */
+static int ceu_set_default_fmt(struct ceu_device *ceudev)
+{
+ int ret;
+
+ struct v4l2_format v4l2_fmt = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ .fmt.pix_mp = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .num_planes = 2,
+ .plane_fmt = {
+ [0] = {
+ .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+ .bytesperline = VGA_WIDTH * 2,
+ },
+ [1] = {
+ .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+ .bytesperline = VGA_WIDTH * 2,
+ },
+ },
+ },
+ };
+
+ ret = ceu_try_fmt(ceudev, &v4l2_fmt);
+ if (ret)
+ return ret;
+
+ ceudev->v4l2_pix = v4l2_fmt.fmt.pix_mp;
+ ceudev->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/*
+ * ceu_init_mbus_fmt() - Query sensor for supported formats and initialize
+ * CEU media bus format used to produce memory formats.
+ *
+ * Find out if the sensor can produce a permutation of the 8-bit YUYV bus format.
+ * From a single 8-bit YUYV bus format the CEU can produce several memory
+ * output formats:
+ * - NV[12|21|16|61] through image fetch mode;
+ * - YUYV422 if sensor provides YUYV422
+ *
+ * TODO: Other YUYV422 permutations through data fetch sync mode and DTARY
+ * TODO: Binary data (eg. JPEG) and raw formats through data fetch sync mode
+ */
+static int ceu_init_mbus_fmt(struct ceu_device *ceudev)
+{
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct ceu_mbus_fmt *mbus_fmt = &ceu_sd->mbus_fmt;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ bool yuyv_bus_fmt = false;
+
+ struct v4l2_subdev_mbus_code_enum sd_mbus_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .index = 0,
+ };
+
+	/* Find out if the sensor can produce any permutation of 8-bit YUYV422. */
+ while (!yuyv_bus_fmt &&
+ !v4l2_subdev_call(v4l2_sd, pad, enum_mbus_code,
+ NULL, &sd_mbus_fmt)) {
+ switch (sd_mbus_fmt.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ yuyv_bus_fmt = true;
+ break;
+ default:
+ /*
+			 * Only support 8-bit YUYV bus formats at the moment;
+ *
+ * TODO: add support for binary formats (data sync
+ * fetch mode).
+ */
+ break;
+ }
+
+ sd_mbus_fmt.index++;
+ }
+
+ if (!yuyv_bus_fmt)
+ return -ENXIO;
+
+ /*
+ * Save the first encountered YUYV format as "mbus_fmt" and use it
+ * to output all planar YUV422 and YUV420 (NV*) formats to memory as
+	 * well as for data sync fetch mode (YUYV, YVYU, etc.).
+ */
+ mbus_fmt->mbus_code = sd_mbus_fmt.code;
+ mbus_fmt->bps = 8;
+
+ /* Annotate the selected bus format components ordering. */
+ switch (sd_mbus_fmt.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_YUYV;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_YVYU;
+ mbus_fmt->swapped = false;
+ mbus_fmt->bpp = 16;
+ break;
+
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_YVYU;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_YUYV;
+ mbus_fmt->swapped = true;
+ mbus_fmt->bpp = 16;
+ break;
+
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_UYVY;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_VYUY;
+ mbus_fmt->swapped = false;
+ mbus_fmt->bpp = 16;
+ break;
+
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_VYUY;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_UYVY;
+ mbus_fmt->swapped = true;
+ mbus_fmt->bpp = 16;
+ break;
+ }
+
+ return 0;
+}
+
+/* --- Runtime PM Handlers --- */
+
+/*
+ * ceu_runtime_resume() - soft-reset the interface and turn sensor power on.
+ */
+static int __maybe_unused ceu_runtime_resume(struct device *dev)
+{
+ struct ceu_device *ceudev = dev_get_drvdata(dev);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+
+ v4l2_subdev_call(v4l2_sd, core, s_power, 1);
+
+ ceu_soft_reset(ceudev);
+
+ return 0;
+}
+
+/*
+ * ceu_runtime_suspend() - disable capture and interrupts and soft-reset.
+ * Turn sensor power off.
+ */
+static int __maybe_unused ceu_runtime_suspend(struct device *dev)
+{
+ struct ceu_device *ceudev = dev_get_drvdata(dev);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+
+ v4l2_subdev_call(v4l2_sd, core, s_power, 0);
+
+ ceu_write(ceudev, CEU_CEIER, 0);
+ ceu_soft_reset(ceudev);
+
+ return 0;
+}
+
+/* --- File Operations --- */
+
+static int ceu_open(struct file *file)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ int ret;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ceudev->mlock);
+	/* Causes a soft reset and sensor power-on on the first open. */
+ pm_runtime_get_sync(ceudev->dev);
+ mutex_unlock(&ceudev->mlock);
+
+ return 0;
+}
+
+static int ceu_release(struct file *file)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ vb2_fop_release(file);
+
+ mutex_lock(&ceudev->mlock);
+	/* Causes a soft reset and sensor power-down on the last close. */
+ pm_runtime_put(ceudev->dev);
+ mutex_unlock(&ceudev->mlock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations ceu_fops = {
+ .owner = THIS_MODULE,
+ .open = ceu_open,
+ .release = ceu_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+/* --- Video Device IOCTLs --- */
+
+static int ceu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ strlcpy(cap->card, "Renesas CEU", sizeof(cap->card));
+ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:renesas-ceu-%s", dev_name(ceudev->dev));
+
+ return 0;
+}
+
+static int ceu_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct ceu_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(ceu_fmt_list))
+ return -EINVAL;
+
+ fmt = &ceu_fmt_list[f->index];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int ceu_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ return ceu_try_fmt(ceudev, f);
+}
+
+static int ceu_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ if (vb2_is_streaming(&ceudev->vb2_vq))
+ return -EBUSY;
+
+ return ceu_set_fmt(ceudev, f);
+}
+
+static int ceu_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ f->fmt.pix_mp = ceudev->v4l2_pix;
+
+ return 0;
+}
+
+static int ceu_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceusd;
+
+ if (inp->index >= ceudev->num_sd)
+ return -EINVAL;
+
+ ceusd = &ceudev->subdevs[inp->index];
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = 0;
+ snprintf(inp->name, sizeof(inp->name), "Camera%u: %s",
+ inp->index, ceusd->v4l2_sd->name);
+
+ return 0;
+}
+
+static int ceu_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ *i = ceudev->sd_index;
+
+ return 0;
+}
+
+static int ceu_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceu_sd_old;
+ int ret;
+
+ if (i >= ceudev->num_sd)
+ return -EINVAL;
+
+ if (vb2_is_streaming(&ceudev->vb2_vq))
+ return -EBUSY;
+
+ if (i == ceudev->sd_index)
+ return 0;
+
+ ceu_sd_old = ceudev->sd;
+ ceudev->sd = &ceudev->subdevs[i];
+
+ /*
+ * Make sure we can generate output image formats and apply
+ * default one.
+ */
+ ret = ceu_init_mbus_fmt(ceudev);
+ if (ret) {
+ ceudev->sd = ceu_sd_old;
+ return -EINVAL;
+ }
+
+ ret = ceu_set_default_fmt(ceudev);
+ if (ret) {
+ ceudev->sd = ceu_sd_old;
+ return -EINVAL;
+ }
+
+ /* Now that we're sure we can use the sensor, power off the old one. */
+ v4l2_subdev_call(ceu_sd_old->v4l2_sd, core, s_power, 0);
+ v4l2_subdev_call(ceudev->sd->v4l2_sd, core, s_power, 1);
+
+ ceudev->sd_index = i;
+
+ return 0;
+}
+
+static int ceu_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), ceudev->sd->v4l2_sd, a);
+}
+
+static int ceu_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), ceudev->sd->v4l2_sd, a);
+}
+
+static int ceu_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ const struct ceu_fmt *ceu_fmt;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ int ret;
+
+ struct v4l2_subdev_frame_size_enum fse = {
+ .code = ceu_sd->mbus_fmt.mbus_code,
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ /* Just check if user supplied pixel format is supported. */
+ ceu_fmt = get_ceu_fmt_from_fourcc(fsize->pixel_format);
+ if (!ceu_fmt)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(v4l2_sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = CEU_W_MAX(fse.max_width);
+ fsize->discrete.height = CEU_H_MAX(fse.max_height);
+
+ return 0;
+}
+
+static int ceu_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ const struct ceu_fmt *ceu_fmt;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ int ret;
+
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .code = ceu_sd->mbus_fmt.mbus_code,
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ /* Just check if user supplied pixel format is supported. */
+ ceu_fmt = get_ceu_fmt_from_fourcc(fival->pixel_format);
+ if (!ceu_fmt)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(v4l2_sd, pad, enum_frame_interval, NULL,
+ &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops ceu_ioctl_ops = {
+ .vidioc_querycap = ceu_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = ceu_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = ceu_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = ceu_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = ceu_g_fmt_vid_cap,
+
+ .vidioc_enum_input = ceu_enum_input,
+ .vidioc_g_input = ceu_g_input,
+ .vidioc_s_input = ceu_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_parm = ceu_g_parm,
+ .vidioc_s_parm = ceu_s_parm,
+ .vidioc_enum_framesizes = ceu_enum_framesizes,
+ .vidioc_enum_frameintervals = ceu_enum_frameintervals,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * ceu_vdev_release() - release CEU video device memory when last reference
+ * to this driver is closed
+ */
+static void ceu_vdev_release(struct video_device *vdev)
+{
+ struct ceu_device *ceudev = video_get_drvdata(vdev);
+
+ kfree(ceudev);
+}
+
+static int ceu_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *v4l2_sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
+ struct ceu_device *ceudev = v4l2_to_ceu(v4l2_dev);
+ struct ceu_subdev *ceu_sd = to_ceu_subdev(asd);
+
+ ceu_sd->v4l2_sd = v4l2_sd;
+ ceudev->num_sd++;
+
+ return 0;
+}
+
+static int ceu_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
+ struct ceu_device *ceudev = v4l2_to_ceu(v4l2_dev);
+ struct video_device *vdev = &ceudev->vdev;
+ struct vb2_queue *q = &ceudev->vb2_vq;
+ struct v4l2_subdev *v4l2_sd;
+ int ret;
+
+ /* Initialize vb2 queue. */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ceudev;
+ q->ops = &ceu_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct ceu_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &ceudev->mlock;
+ q->dev = ceudev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure at least one sensor is primary and use it to initialize
+ * ceu formats.
+ */
+ if (!ceudev->sd) {
+ ceudev->sd = &ceudev->subdevs[0];
+ ceudev->sd_index = 0;
+ }
+
+ v4l2_sd = ceudev->sd->v4l2_sd;
+
+ ret = ceu_init_mbus_fmt(ceudev);
+ if (ret)
+ return ret;
+
+ ret = ceu_set_default_fmt(ceudev);
+ if (ret)
+ return ret;
+
+ /* Register the video device. */
+ strlcpy(vdev->name, DRIVER_NAME, sizeof(vdev->name));
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->lock = &ceudev->mlock;
+ vdev->queue = &ceudev->vb2_vq;
+ vdev->ctrl_handler = v4l2_sd->ctrl_handler;
+ vdev->fops = &ceu_fops;
+ vdev->ioctl_ops = &ceu_ioctl_ops;
+ vdev->release = ceu_vdev_release;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ video_set_drvdata(vdev, ceudev);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ v4l2_err(vdev->v4l2_dev,
+ "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations ceu_notify_ops = {
+ .bound = ceu_notify_bound,
+ .complete = ceu_notify_complete,
+};
+
+/*
+ * ceu_init_async_subdevs() - Initialize CEU subdevices and async_subdevs in
+ * ceu device. Both DT and platform data parsing use
+ * this routine.
+ *
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+static int ceu_init_async_subdevs(struct ceu_device *ceudev, unsigned int n_sd)
+{
+ /* Reserve memory for 'n_sd' ceu_subdev descriptors. */
+ ceudev->subdevs = devm_kcalloc(ceudev->dev, n_sd,
+ sizeof(*ceudev->subdevs), GFP_KERNEL);
+ if (!ceudev->subdevs)
+ return -ENOMEM;
+
+ /*
+ * Reserve memory for 'n_sd' pointers to async_subdevices.
+ * ceudev->asds members will point to &ceu_subdev.asd
+ */
+ ceudev->asds = devm_kcalloc(ceudev->dev, n_sd,
+ sizeof(*ceudev->asds), GFP_KERNEL);
+ if (!ceudev->asds)
+ return -ENOMEM;
+
+ ceudev->sd = NULL;
+ ceudev->sd_index = 0;
+ ceudev->num_sd = 0;
+
+ return 0;
+}
+
+/*
+ * ceu_parse_platform_data() - Initialize async_subdevices using platform
+ * device provided data.
+ */
+static int ceu_parse_platform_data(struct ceu_device *ceudev,
+ const struct ceu_platform_data *pdata)
+{
+ const struct ceu_async_subdev *async_sd;
+ struct ceu_subdev *ceu_sd;
+ unsigned int i;
+ int ret;
+
+ if (pdata->num_subdevs == 0)
+ return -ENODEV;
+
+ ret = ceu_init_async_subdevs(ceudev, pdata->num_subdevs);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ /* Setup the ceu subdevice and the async subdevice. */
+ async_sd = &pdata->subdevs[i];
+ ceu_sd = &ceudev->subdevs[i];
+
+ INIT_LIST_HEAD(&ceu_sd->asd.list);
+
+ ceu_sd->mbus_flags = async_sd->flags;
+ ceu_sd->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+ ceu_sd->asd.match.i2c.adapter_id = async_sd->i2c_adapter_id;
+ ceu_sd->asd.match.i2c.address = async_sd->i2c_address;
+
+ ceudev->asds[i] = &ceu_sd->asd;
+ }
+
+ return pdata->num_subdevs;
+}
+
+/*
+ * ceu_parse_dt() - Initialize async_subdevs by parsing the device tree graph.
+ */
+static int ceu_parse_dt(struct ceu_device *ceudev)
+{
+ struct device_node *of = ceudev->dev->of_node;
+ struct v4l2_fwnode_endpoint fw_ep;
+ struct ceu_subdev *ceu_sd;
+ struct device_node *ep;
+ unsigned int i;
+ int num_ep;
+ int ret;
+
+ num_ep = of_graph_get_endpoint_count(of);
+ if (!num_ep)
+ return -ENODEV;
+
+ ret = ceu_init_async_subdevs(ceudev, num_ep);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_ep; i++) {
+ ep = of_graph_get_endpoint_by_regs(of, 0, i);
+ if (!ep) {
+ dev_err(ceudev->dev,
+ "No subdevice connected on endpoint %u.\n", i);
+ ret = -ENODEV;
+ goto error_put_node;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &fw_ep);
+ if (ret) {
+ dev_err(ceudev->dev,
+ "Unable to parse endpoint #%u.\n", i);
+ goto error_put_node;
+ }
+
+ if (fw_ep.bus_type != V4L2_MBUS_PARALLEL) {
+ dev_err(ceudev->dev,
+ "Only parallel input supported.\n");
+ ret = -EINVAL;
+ goto error_put_node;
+ }
+
+ /* Setup the ceu subdevice and the async subdevice. */
+ ceu_sd = &ceudev->subdevs[i];
+ INIT_LIST_HEAD(&ceu_sd->asd.list);
+
+ ceu_sd->mbus_flags = fw_ep.bus.parallel.flags;
+ ceu_sd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ ceu_sd->asd.match.fwnode =
+ fwnode_graph_get_remote_port_parent(
+ of_fwnode_handle(ep));
+
+ ceudev->asds[i] = &ceu_sd->asd;
+ of_node_put(ep);
+ }
+
+ return num_ep;
+
+error_put_node:
+ of_node_put(ep);
+ return ret;
+}
+
+/*
+ * struct ceu_data - Platform specific CEU data
+ * @irq_mask: CETCR mask with all interrupt sources enabled. The mask differs
+ * between SH4 and RZ platforms.
+ */
+struct ceu_data {
+ u32 irq_mask;
+};
+
+static const struct ceu_data ceu_data_rz = {
+ .irq_mask = CEU_CETCR_ALL_IRQS_RZ,
+};
+
+static const struct ceu_data ceu_data_sh4 = {
+ .irq_mask = CEU_CETCR_ALL_IRQS_SH4,
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ceu_of_match[] = {
+ { .compatible = "renesas,r7s72100-ceu", .data = &ceu_data_rz },
+ { .compatible = "renesas,r8a7740-ceu", .data = &ceu_data_rz },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ceu_of_match);
+#endif
+
+static int ceu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ceu_data *ceu_data;
+ struct ceu_device *ceudev;
+ struct resource *res;
+ unsigned int irq;
+ int num_subdevs;
+ int ret;
+
+ ceudev = kzalloc(sizeof(*ceudev), GFP_KERNEL);
+ if (!ceudev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ceudev);
+ ceudev->dev = dev;
+
+ INIT_LIST_HEAD(&ceudev->capture);
+ spin_lock_init(&ceudev->lock);
+ mutex_init(&ceudev->mlock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ceudev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ceudev->base)) {
+ ret = PTR_ERR(ceudev->base);
+ goto error_free_ceudev;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get irq: %d\n", ret);
+ goto error_free_ceudev;
+ }
+ irq = ret;
+
+ ret = devm_request_irq(dev, irq, ceu_irq,
+ 0, dev_name(dev), ceudev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request CEU interrupt.\n");
+ goto error_free_ceudev;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = v4l2_device_register(dev, &ceudev->v4l2_dev);
+ if (ret)
+ goto error_pm_disable;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ ceu_data = of_match_device(ceu_of_match, dev)->data;
+ num_subdevs = ceu_parse_dt(ceudev);
+ } else if (dev->platform_data) {
+ /* Assume SH4 if booting with platform data. */
+ ceu_data = &ceu_data_sh4;
+ num_subdevs = ceu_parse_platform_data(ceudev,
+ dev->platform_data);
+ } else {
+ num_subdevs = -EINVAL;
+ }
+
+ if (num_subdevs < 0) {
+ ret = num_subdevs;
+ goto error_v4l2_unregister;
+ }
+ ceudev->irq_mask = ceu_data->irq_mask;
+
+ ceudev->notifier.v4l2_dev = &ceudev->v4l2_dev;
+ ceudev->notifier.subdevs = ceudev->asds;
+ ceudev->notifier.num_subdevs = num_subdevs;
+ ceudev->notifier.ops = &ceu_notify_ops;
+ ret = v4l2_async_notifier_register(&ceudev->v4l2_dev,
+ &ceudev->notifier);
+ if (ret)
+ goto error_v4l2_unregister;
+
+ dev_info(dev, "Renesas Capture Engine Unit %s\n", dev_name(dev));
+
+ return 0;
+
+error_v4l2_unregister:
+ v4l2_device_unregister(&ceudev->v4l2_dev);
+error_pm_disable:
+ pm_runtime_disable(dev);
+error_free_ceudev:
+ kfree(ceudev);
+
+ return ret;
+}
+
+static int ceu_remove(struct platform_device *pdev)
+{
+ struct ceu_device *ceudev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(ceudev->dev);
+
+ v4l2_async_notifier_unregister(&ceudev->notifier);
+
+ v4l2_device_unregister(&ceudev->v4l2_dev);
+
+ video_unregister_device(&ceudev->vdev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ceu_pm_ops = {
+ SET_RUNTIME_PM_OPS(ceu_runtime_suspend,
+ ceu_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver ceu_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &ceu_pm_ops,
+ .of_match_table = of_match_ptr(ceu_of_match),
+ },
+ .probe = ceu_probe,
+ .remove = ceu_remove,
+};
+
+module_platform_driver(ceu_driver);
+
+MODULE_DESCRIPTION("Renesas CEU camera driver");
+MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/rockchip/rga/Makefile b/drivers/media/platform/rockchip/rga/Makefile
new file mode 100644
index 000000000..92fe25490
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/Makefile
@@ -0,0 +1,3 @@
+rockchip-rga-objs := rga.o rga-hw.o rga-buf.o
+
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip-rga.o
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
new file mode 100644
index 000000000..0932f1445
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int
+rga_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rga_frame *f = rga_get_frame(ctx, vq->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ if (*nplanes)
+ return sizes[0] < f->size ? -EINVAL : 0;
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+
+ return 0;
+}
+
+static int rga_buf_prepare(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ vb2_set_plane_payload(vb, 0, f->size);
+
+ return 0;
+}
+
+static void rga_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void rga_buf_return_buffers(struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
+static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+ int ret;
+
+ ret = pm_runtime_get_sync(rga->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(rga->dev);
+ rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rga_buf_stop_streaming(struct vb2_queue *q)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+
+ rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
+ pm_runtime_put(rga->dev);
+}
+
+const struct vb2_ops rga_qops = {
+ .queue_setup = rga_queue_setup,
+ .buf_prepare = rga_buf_prepare,
+ .buf_queue = rga_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = rga_buf_start_streaming,
+ .stop_streaming = rga_buf_stop_streaming,
+};
+
+/*
+ * The RGA MMU is a 1-level MMU, so it can't be used through the IOMMU API.
+ * We use it more like a scatter-gather list.
+ */
+void rga_buf_map(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rockchip_rga *rga = ctx->rga;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ unsigned int *pages;
+ unsigned int address, len, i, p;
+ unsigned int mapped_size = 0;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pages = rga->src_mmu_pages;
+ else
+ pages = rga->dst_mmu_pages;
+
+ /* Create local MMU table for RGA */
+ sgt = vb2_plane_cookie(vb, 0);
+
+ for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;
+ address = sg_phys(sgl);
+
+ for (p = 0; p < len; p++) {
+ dma_addr_t phys = address +
+ ((dma_addr_t)p << PAGE_SHIFT);
+
+ pages[mapped_size + p] = phys;
+ }
+
+ mapped_size += len;
+ }
+
+ /* sync local MMU table for RGA */
+ dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
+ 8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
new file mode 100644
index 000000000..681de42f1
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+enum e_rga_start_pos {
+ LT = 0,
+ LB = 1,
+ RT = 2,
+ RB = 3,
+};
+
+struct rga_addr_offset {
+ unsigned int y_off;
+ unsigned int u_off;
+ unsigned int v_off;
+};
+
+struct rga_corners_addr_offset {
+ struct rga_addr_offset left_top;
+ struct rga_addr_offset right_top;
+ struct rga_addr_offset left_bottom;
+ struct rga_addr_offset right_bottom;
+};
+
+static unsigned int rga_get_scaling(unsigned int src, unsigned int dst)
+{
+ /*
+ * The rga hw scaling factor is a normalized inverse of the
+ * scaling factor.
+ * For example: When source width is 100 and destination width is 200
+ * (scaling of 2x), then the hw factor is NC * 100 / 200.
+ * The normalization factor (NC) is 2^16 = 0x10000.
+ */
+
+ return (src > dst) ? ((dst << 16) / src) : ((src << 16) / dst);
+}
+
+static struct rga_corners_addr_offset
+rga_get_addr_offset(struct rga_frame *frm, unsigned int x, unsigned int y,
+ unsigned int w, unsigned int h)
+{
+ struct rga_corners_addr_offset offsets;
+ struct rga_addr_offset *lt, *lb, *rt, *rb;
+	unsigned int x_div, y_div, uv_stride, pixel_width, uv_factor;
+
+ lt = &offsets.left_top;
+ lb = &offsets.left_bottom;
+ rt = &offsets.right_top;
+ rb = &offsets.right_bottom;
+
+ x_div = frm->fmt->x_div;
+ y_div = frm->fmt->y_div;
+ uv_factor = frm->fmt->uv_factor;
+ uv_stride = frm->stride / x_div;
+ pixel_width = frm->stride / frm->width;
+
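+	/*
+	 * Compute the byte offsets of the four corners of the crop rectangle
+	 * for the Y plane and for the U and V planes.
+	 */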
+ lt->y_off = y * frm->stride + x * pixel_width;
+ lt->u_off =
+ frm->width * frm->height + (y / y_div) * uv_stride + x / x_div;
+ lt->v_off = lt->u_off + frm->width * frm->height / uv_factor;
+
+ lb->y_off = lt->y_off + (h - 1) * frm->stride;
+ lb->u_off = lt->u_off + (h / y_div - 1) * uv_stride;
+ lb->v_off = lt->v_off + (h / y_div - 1) * uv_stride;
+
+ rt->y_off = lt->y_off + (w - 1) * pixel_width;
+ rt->u_off = lt->u_off + w / x_div - 1;
+ rt->v_off = lt->v_off + w / x_div - 1;
+
+ rb->y_off = lb->y_off + (w - 1) * pixel_width;
+ rb->u_off = lb->u_off + w / x_div - 1;
+ rb->v_off = lb->v_off + w / x_div - 1;
+
+ return offsets;
+}
+
+static struct rga_addr_offset *
+rga_lookup_draw_pos(struct rga_corners_addr_offset *offsets,
+		    u32 rotate_mode, u32 mirr_mode)
+{
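+	/*
+	 * Select which corner of the rectangle becomes the transfer start
+	 * position, indexed by [rotate_mode][mirror_mode].
+	 */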
+ static enum e_rga_start_pos rot_mir_point_matrix[4][4] = {
+ {
+ LT, RT, LB, RB,
+ },
+ {
+ RT, LT, RB, LB,
+ },
+ {
+ RB, LB, RT, LT,
+ },
+ {
+ LB, RB, LT, RT,
+ },
+ };
+
+ if (!offsets)
+ return NULL;
+
+ switch (rot_mir_point_matrix[rotate_mode][mirr_mode]) {
+ case LT:
+ return &offsets->left_top;
+ case LB:
+ return &offsets->left_bottom;
+ case RT:
+ return &offsets->right_top;
+ case RB:
+ return &offsets->right_bottom;
+ }
+
+ return NULL;
+}
+
+static void rga_cmd_set_src_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7;
+}
+
+static void rga_cmd_set_src1_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 4;
+}
+
+static void rga_cmd_set_dst_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 8;
+}
+
+static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int scale_dst_w, scale_dst_h;
+ unsigned int src_h, src_w, src_x, src_y, dst_h, dst_w, dst_x, dst_y;
+ union rga_src_info src_info;
+ union rga_dst_info dst_info;
+ union rga_src_x_factor x_factor;
+ union rga_src_y_factor y_factor;
+ union rga_src_vir_info src_vir_info;
+ union rga_src_act_info src_act_info;
+ union rga_dst_vir_info dst_vir_info;
+ union rga_dst_act_info dst_act_info;
+
+ struct rga_addr_offset *dst_offset;
+ struct rga_corners_addr_offset offsets;
+ struct rga_corners_addr_offset src_offsets;
+
+ src_h = ctx->in.crop.height;
+ src_w = ctx->in.crop.width;
+ src_x = ctx->in.crop.left;
+ src_y = ctx->in.crop.top;
+ dst_h = ctx->out.crop.height;
+ dst_w = ctx->out.crop.width;
+ dst_x = ctx->out.crop.left;
+ dst_y = ctx->out.crop.top;
+
+ src_info.val = dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_info.val = dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2];
+ x_factor.val = dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ y_factor.val = dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ src_vir_info.val = dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ src_act_info.val = dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_vir_info.val = dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_act_info.val = dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+
+ src_info.data.format = ctx->in.fmt->hw_format;
+ src_info.data.swap = ctx->in.fmt->color_swap;
+ dst_info.data.format = ctx->out.fmt->hw_format;
+ dst_info.data.swap = ctx->out.fmt->color_swap;
+
+ /*
+ * CSC mode must only be set when the colorspace families differ between
+ * input and output. It must remain unset (zeroed) if both are the same.
+ */
+
+ if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) &&
+ RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) {
+ switch (ctx->in.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+
+ if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) &&
+ RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) {
+ switch (ctx->out.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ dst_info.data.csc_mode = RGA_DST_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+
+ if (ctx->vflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_X;
+
+ if (ctx->hflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_Y;
+
+ switch (ctx->rotate) {
+ case 90:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_90_DEGREE;
+ break;
+ case 180:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_180_DEGREE;
+ break;
+ case 270:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_270_DEGREE;
+ break;
+ default:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_0_DEGREE;
+ break;
+ }
+
+ /*
+ * Calculate the up/down scaling mode/factor.
+ *
+ * The RGA hardware scales the picture first and rotates it
+ * afterwards, so we need to swap the w/h when the rotation
+ * angle is 90 or 270 degrees.
+ */
+ if (src_info.data.rot_mode == RGA_SRC_ROT_MODE_90_DEGREE ||
+ src_info.data.rot_mode == RGA_SRC_ROT_MODE_270_DEGREE) {
+ if (rga->version.major == 0 || rga->version.minor == 0) {
+ if (dst_w == src_h)
+ src_h -= 8;
+ if (abs(src_w - dst_h) < 16)
+ src_w -= 16;
+ }
+
+ scale_dst_h = dst_w;
+ scale_dst_w = dst_h;
+ } else {
+ scale_dst_w = dst_w;
+ scale_dst_h = dst_h;
+ }
+
+ if (src_w == scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_NO;
+ x_factor.val = 0;
+ } else if (src_w > scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_DOWN;
+ x_factor.data.down_scale_factor =
+ rga_get_scaling(src_w, scale_dst_w) + 1;
+ } else {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_UP;
+ x_factor.data.up_scale_factor =
+ rga_get_scaling(src_w - 1, scale_dst_w - 1);
+ }
+
+ if (src_h == scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_NO;
+ y_factor.val = 0;
+ } else if (src_h > scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_DOWN;
+ y_factor.data.down_scale_factor =
+ rga_get_scaling(src_h, scale_dst_h) + 1;
+ } else {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_UP;
+ y_factor.data.up_scale_factor =
+ rga_get_scaling(src_h - 1, scale_dst_h - 1);
+ }
+
+ /*
+ * Calculate the framebuffer virtual strides and active size;
+ * note that vir_stride / vir_width are expressed in 4-byte words.
+ */
+ src_vir_info.data.vir_stride = ctx->in.stride >> 2;
+ src_vir_info.data.vir_width = ctx->in.stride >> 2;
+
+ src_act_info.data.act_height = src_h - 1;
+ src_act_info.data.act_width = src_w - 1;
+
+ dst_vir_info.data.vir_stride = ctx->out.stride >> 2;
+ dst_act_info.data.act_height = dst_h - 1;
+ dst_act_info.data.act_width = dst_w - 1;
+
+ /*
+ * Calculate the source framebuffer base address with the pixel offset.
+ */
+ src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y,
+ src_w, src_h);
+
+ /*
+ * Configure the destination framebuffer base address with the pixel offset.
+ */
+ offsets = rga_get_addr_offset(&ctx->out, dst_x, dst_y, dst_w, dst_h);
+ dst_offset = rga_lookup_draw_pos(&offsets, src_info.data.rot_mode,
+ src_info.data.mir_mode);
+
+ dest[(RGA_SRC_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.y_off;
+ dest[(RGA_SRC_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.u_off;
+ dest[(RGA_SRC_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.v_off;
+
+ dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2] = x_factor.val;
+ dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2] = y_factor.val;
+ dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = src_vir_info.val;
+ dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = src_act_info.val;
+
+ dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2] = src_info.val;
+
+ dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->y_off;
+ dest[(RGA_DST_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->u_off;
+ dest[(RGA_DST_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->v_off;
+
+ dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = dst_vir_info.val;
+ dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = dst_act_info.val;
+
+ dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2] = dst_info.val;
+}
+
+static void rga_cmd_set_mode(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ union rga_mode_ctrl mode;
+ union rga_alpha_ctrl0 alpha_ctrl0;
+ union rga_alpha_ctrl1 alpha_ctrl1;
+
+ mode.val = 0;
+ alpha_ctrl0.val = 0;
+ alpha_ctrl1.val = 0;
+
+ mode.data.gradient_sat = 1;
+ mode.data.render = RGA_MODE_RENDER_BITBLT;
+ mode.data.bitblt = RGA_MODE_BITBLT_MODE_SRC_TO_DST;
+
+ /* disable alpha blending */
+ dest[(RGA_ALPHA_CTRL0 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl0.val;
+ dest[(RGA_ALPHA_CTRL1 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl1.val;
+
+ dest[(RGA_MODE_CTRL - RGA_MODE_BASE_REG) >> 2] = mode.val;
+}
+
+static void rga_cmd_set(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ memset(rga->cmdbuf_virt, 0, RGA_CMDBUF_SIZE * 4);
+
+ rga_cmd_set_src_addr(ctx, rga->src_mmu_pages);
+ /*
+ * Due to a hardware bug, the src1 MMU also has to be configured
+ * when alpha blending is used.
+ */
+ rga_cmd_set_src1_addr(ctx, rga->dst_mmu_pages);
+
+ rga_cmd_set_dst_addr(ctx, rga->dst_mmu_pages);
+ rga_cmd_set_mode(ctx);
+
+ rga_cmd_set_trans_info(ctx);
+
+ rga_write(rga, RGA_CMD_BASE, rga->cmdbuf_phy);
+
+ /* sync CMD buf for RGA */
+ dma_sync_single_for_device(rga->dev, rga->cmdbuf_phy,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+void rga_hw_start(struct rockchip_rga *rga)
+{
+ struct rga_ctx *ctx = rga->curr;
+
+ rga_cmd_set(ctx);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x00);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x22);
+
+ rga_write(rga, RGA_INT, 0x600);
+
+ rga_write(rga, RGA_CMD_CTRL, 0x1);
+}
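
A worked illustration of the 16.16 fixed-point factors produced by rga_get_scaling() and rga_cmd_set_trans_info() above; the resolutions are arbitrary examples:

/*
 * Illustration only (not part of the patch):
 *
 *	downscale 1920 -> 1280:
 *		rga_get_scaling(1920, 1280) + 1 = ((1280 << 16) / 1920) + 1
 *		                                = 0xaaaa + 1 = 0xaaab
 *	upscale 640 -> 1280:
 *		rga_get_scaling(639, 1279)      = (639 << 16) / 1279
 *		                                = 0x7fe6
 */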
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
new file mode 100644
index 000000000..3e4b70eb9
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_HW_H__
+#define __RGA_HW_H__
+
+#define RGA_CMDBUF_SIZE 0x20
+
+/* Hardware limits */
+#define MAX_WIDTH 8192
+#define MAX_HEIGHT 8192
+
+#define MIN_WIDTH 34
+#define MIN_HEIGHT 34
+
+#define DEFAULT_WIDTH 100
+#define DEFAULT_HEIGHT 100
+
+#define RGA_TIMEOUT 500
+
+/* Registers address */
+#define RGA_SYS_CTRL 0x0000
+#define RGA_CMD_CTRL 0x0004
+#define RGA_CMD_BASE 0x0008
+#define RGA_INT 0x0010
+#define RGA_MMU_CTRL0 0x0014
+#define RGA_VERSION_INFO 0x0028
+
+#define RGA_MODE_BASE_REG 0x0100
+#define RGA_MODE_MAX_REG 0x017C
+
+#define RGA_MODE_CTRL 0x0100
+#define RGA_SRC_INFO 0x0104
+#define RGA_SRC_Y_RGB_BASE_ADDR 0x0108
+#define RGA_SRC_CB_BASE_ADDR 0x010c
+#define RGA_SRC_CR_BASE_ADDR 0x0110
+#define RGA_SRC1_RGB_BASE_ADDR 0x0114
+#define RGA_SRC_VIR_INFO 0x0118
+#define RGA_SRC_ACT_INFO 0x011c
+#define RGA_SRC_X_FACTOR 0x0120
+#define RGA_SRC_Y_FACTOR 0x0124
+#define RGA_SRC_BG_COLOR 0x0128
+#define RGA_SRC_FG_COLOR 0x012c
+#define RGA_SRC_TR_COLOR0 0x0130
+#define RGA_SRC_TR_COLOR1 0x0134
+
+#define RGA_DST_INFO 0x0138
+#define RGA_DST_Y_RGB_BASE_ADDR 0x013c
+#define RGA_DST_CB_BASE_ADDR 0x0140
+#define RGA_DST_CR_BASE_ADDR 0x0144
+#define RGA_DST_VIR_INFO 0x0148
+#define RGA_DST_ACT_INFO 0x014c
+
+#define RGA_ALPHA_CTRL0 0x0150
+#define RGA_ALPHA_CTRL1 0x0154
+#define RGA_FADING_CTRL 0x0158
+#define RGA_PAT_CON 0x015c
+#define RGA_ROP_CON0 0x0160
+#define RGA_ROP_CON1 0x0164
+#define RGA_MASK_BASE 0x0168
+
+#define RGA_MMU_CTRL1 0x016C
+#define RGA_MMU_SRC_BASE 0x0170
+#define RGA_MMU_SRC1_BASE 0x0174
+#define RGA_MMU_DST_BASE 0x0178
+
+/* Registers value */
+#define RGA_MODE_RENDER_BITBLT 0
+#define RGA_MODE_RENDER_COLOR_PALETTE 1
+#define RGA_MODE_RENDER_RECTANGLE_FILL 2
+#define RGA_MODE_RENDER_UPDATE_PALETTE_LUT_RAM 3
+
+#define RGA_MODE_BITBLT_MODE_SRC_TO_DST 0
+#define RGA_MODE_BITBLT_MODE_SRC_SRC1_TO_DST 1
+
+#define RGA_MODE_CF_ROP4_SOLID 0
+#define RGA_MODE_CF_ROP4_PATTERN 1
+
+#define RGA_COLOR_FMT_ABGR8888 0
+#define RGA_COLOR_FMT_XBGR8888 1
+#define RGA_COLOR_FMT_RGB888 2
+#define RGA_COLOR_FMT_BGR565 4
+#define RGA_COLOR_FMT_ABGR1555 5
+#define RGA_COLOR_FMT_ABGR4444 6
+#define RGA_COLOR_FMT_YUV422SP 8
+#define RGA_COLOR_FMT_YUV422P 9
+#define RGA_COLOR_FMT_YUV420SP 10
+#define RGA_COLOR_FMT_YUV420P 11
+/* SRC_COLOR Palette */
+#define RGA_COLOR_FMT_CP_1BPP 12
+#define RGA_COLOR_FMT_CP_2BPP 13
+#define RGA_COLOR_FMT_CP_4BPP 14
+#define RGA_COLOR_FMT_CP_8BPP 15
+#define RGA_COLOR_FMT_MASK 15
+
+#define RGA_COLOR_FMT_IS_YUV(fmt) \
+ (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP))
+#define RGA_COLOR_FMT_IS_RGB(fmt) \
+ ((fmt) < RGA_COLOR_FMT_YUV422SP)
+
+#define RGA_COLOR_NONE_SWAP 0
+#define RGA_COLOR_RB_SWAP 1
+#define RGA_COLOR_ALPHA_SWAP 2
+#define RGA_COLOR_UV_SWAP 4
+
+#define RGA_SRC_CSC_MODE_BYPASS 0
+#define RGA_SRC_CSC_MODE_BT601_R0 1
+#define RGA_SRC_CSC_MODE_BT601_R1 2
+#define RGA_SRC_CSC_MODE_BT709_R0 3
+#define RGA_SRC_CSC_MODE_BT709_R1 4
+
+#define RGA_SRC_ROT_MODE_0_DEGREE 0
+#define RGA_SRC_ROT_MODE_90_DEGREE 1
+#define RGA_SRC_ROT_MODE_180_DEGREE 2
+#define RGA_SRC_ROT_MODE_270_DEGREE 3
+
+#define RGA_SRC_MIRR_MODE_NO 0
+#define RGA_SRC_MIRR_MODE_X 1
+#define RGA_SRC_MIRR_MODE_Y 2
+#define RGA_SRC_MIRR_MODE_X_Y 3
+
+#define RGA_SRC_HSCL_MODE_NO 0
+#define RGA_SRC_HSCL_MODE_DOWN 1
+#define RGA_SRC_HSCL_MODE_UP 2
+
+#define RGA_SRC_VSCL_MODE_NO 0
+#define RGA_SRC_VSCL_MODE_DOWN 1
+#define RGA_SRC_VSCL_MODE_UP 2
+
+#define RGA_SRC_TRANS_ENABLE_R 1
+#define RGA_SRC_TRANS_ENABLE_G 2
+#define RGA_SRC_TRANS_ENABLE_B 4
+#define RGA_SRC_TRANS_ENABLE_A 8
+
+#define RGA_SRC_BIC_COE_SELEC_CATROM 0
+#define RGA_SRC_BIC_COE_SELEC_MITCHELL 1
+#define RGA_SRC_BIC_COE_SELEC_HERMITE 2
+#define RGA_SRC_BIC_COE_SELEC_BSPLINE 3
+
+#define RGA_DST_DITHER_MODE_888_TO_666 0
+#define RGA_DST_DITHER_MODE_888_TO_565 1
+#define RGA_DST_DITHER_MODE_888_TO_555 2
+#define RGA_DST_DITHER_MODE_888_TO_444 3
+
+#define RGA_DST_CSC_MODE_BYPASS 0
+#define RGA_DST_CSC_MODE_BT601_R0 1
+#define RGA_DST_CSC_MODE_BT601_R1 2
+#define RGA_DST_CSC_MODE_BT709_R0 3
+
+#define RGA_ALPHA_ROP_MODE_2 0
+#define RGA_ALPHA_ROP_MODE_3 1
+#define RGA_ALPHA_ROP_MODE_4 2
+
+#define RGA_ALPHA_SELECT_ALPHA 0
+#define RGA_ALPHA_SELECT_ROP 1
+
+#define RGA_ALPHA_MASK_BIG_ENDIAN 0
+#define RGA_ALPHA_MASK_LITTLE_ENDIAN 1
+
+#define RGA_ALPHA_NORMAL 0
+#define RGA_ALPHA_REVERSE 1
+
+#define RGA_ALPHA_BLEND_GLOBAL 0
+#define RGA_ALPHA_BLEND_NORMAL 1
+#define RGA_ALPHA_BLEND_MULTIPLY 2
+
+#define RGA_ALPHA_CAL_CUT 0
+#define RGA_ALPHA_CAL_NORMAL 1
+
+#define RGA_ALPHA_FACTOR_ZERO 0
+#define RGA_ALPHA_FACTOR_ONE 1
+#define RGA_ALPHA_FACTOR_OTHER 2
+#define RGA_ALPHA_FACTOR_OTHER_REVERSE 3
+#define RGA_ALPHA_FACTOR_SELF 4
+
+#define RGA_ALPHA_COLOR_NORMAL 0
+#define RGA_ALPHA_COLOR_MULTIPLY_CAL 1
+
+/* Registers union */
+union rga_mode_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:2] */
+ unsigned int render:3;
+ /* [3:6] */
+ unsigned int bitblt:1;
+ unsigned int cf_rop4_pat:1;
+ unsigned int alpha_zero_key:1;
+ unsigned int gradient_sat:1;
+ /* [7:31] */
+ unsigned int reserved:25;
+ } data;
+};
+
+union rga_src_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:7] */
+ unsigned int swap:3;
+ unsigned int cp_endian:1;
+ /* [8:17] */
+ unsigned int csc_mode:2;
+ unsigned int rot_mode:2;
+ unsigned int mir_mode:2;
+ unsigned int hscl_mode:2;
+ unsigned int vscl_mode:2;
+ /* [18:22] */
+ unsigned int trans_mode:1;
+ unsigned int trans_enable:4;
+ /* [23:25] */
+ unsigned int dither_up_en:1;
+ unsigned int bic_coe_sel:2;
+ /* [26:31] */
+ unsigned int reserved:6;
+ } data;
+};
+
+union rga_src_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_width:15;
+ unsigned int reserved:1;
+ /* [16:25] */
+ unsigned int vir_stride:10;
+ /* [26:31] */
+ unsigned int reserved1:6;
+ } data;
+};
+
+union rga_src_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:13;
+ unsigned int reserved:3;
+ /* [16:31] */
+ unsigned int act_height:13;
+ unsigned int reserved1:3;
+ } data;
+};
+
+union rga_src_x_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+union rga_src_y_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+/* Alpha / Red / Green / Blue */
+union rga_src_cp_gr_color {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int gradient_x:16;
+ /* [16:31] */
+ unsigned int gradient_y:16;
+ } data;
+};
+
+union rga_src_transparency_color0 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmin:8;
+ /* [8:15] */
+ unsigned int trans_gmin:8;
+ /* [16:23] */
+ unsigned int trans_bmin:8;
+ /* [24:31] */
+ unsigned int trans_amin:8;
+ } data;
+};
+
+union rga_src_transparency_color1 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmax:8;
+ /* [8:15] */
+ unsigned int trans_gmax:8;
+ /* [16:23] */
+ unsigned int trans_bmax:8;
+ /* [24:31] */
+ unsigned int trans_amax:8;
+ } data;
+};
+
+union rga_dst_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:6] */
+ unsigned int swap:3;
+ /* [7:9] */
+ unsigned int src1_format:3;
+ /* [10:11] */
+ unsigned int src1_swap:2;
+ /* [12:15] */
+ unsigned int dither_up_en:1;
+ unsigned int dither_down_en:1;
+ unsigned int dither_down_mode:2;
+ /* [16:18] */
+ unsigned int csc_mode:2;
+ unsigned int csc_clip:1;
+ /* [19:31] */
+ unsigned int reserved:13;
+ } data;
+};
+
+union rga_dst_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_stride:15;
+ unsigned int reserved:1;
+ /* [16:31] */
+ unsigned int src1_vir_stride:15;
+ unsigned int reserved1:1;
+ } data;
+};
+
+union rga_dst_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:12;
+ unsigned int reserved:4;
+ /* [16:31] */
+ unsigned int act_height:12;
+ unsigned int reserved1:4;
+ } data;
+};
+
+union rga_alpha_ctrl0 {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int rop_en:1;
+ unsigned int rop_select:1;
+ unsigned int rop_mode:2;
+ /* [4:11] */
+ unsigned int src_fading_val:8;
+ /* [12:20] */
+ unsigned int dst_fading_val:8;
+ unsigned int mask_endian:1;
+ /* [21:31] */
+ unsigned int reserved:11;
+ } data;
+};
+
+union rga_alpha_ctrl1 {
+ unsigned int val;
+ struct {
+ /* [0:1] */
+ unsigned int dst_color_m0:1;
+ unsigned int src_color_m0:1;
+ /* [2:7] */
+ unsigned int dst_factor_m0:3;
+ unsigned int src_factor_m0:3;
+ /* [8:9] */
+ unsigned int dst_alpha_cal_m0:1;
+ unsigned int src_alpha_cal_m0:1;
+ /* [10:13] */
+ unsigned int dst_blend_m0:2;
+ unsigned int src_blend_m0:2;
+ /* [14:15] */
+ unsigned int dst_alpha_m0:1;
+ unsigned int src_alpha_m0:1;
+ /* [16:21] */
+ unsigned int dst_factor_m1:3;
+ unsigned int src_factor_m1:3;
+ /* [22:23] */
+ unsigned int dst_alpha_cal_m1:1;
+ unsigned int src_alpha_cal_m1:1;
+ /* [24:27] */
+ unsigned int dst_blend_m1:2;
+ unsigned int src_blend_m1:2;
+ /* [28:29] */
+ unsigned int dst_alpha_m1:1;
+ unsigned int src_alpha_m1:1;
+ /* [30:31] */
+ unsigned int reserved:2;
+ } data;
+};
+
+union rga_fading_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int fading_offset_r:8;
+ /* [8:15] */
+ unsigned int fading_offset_g:8;
+ /* [16:23] */
+ unsigned int fading_offset_b:8;
+ /* [24:31] */
+ unsigned int fading_en:1;
+ unsigned int reserved:7;
+ } data;
+};
+
+union rga_pat_con {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int width:8;
+ /* [8:15] */
+ unsigned int height:8;
+ /* [16:23] */
+ unsigned int offset_x:8;
+ /* [24:31] */
+ unsigned int offset_y:8;
+ } data;
+};
+
+#endif
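
The RGA_MODE_BASE_REG..RGA_MODE_MAX_REG window above spans 32 registers (0x80 bytes), which matches RGA_CMDBUF_SIZE (0x20 32-bit words): the command buffer in rga-hw.c mirrors that register window word for word. A minimal sketch of the index mapping used there (the helper name is hypothetical):

/* Illustration only: map a mode register offset to its command-buffer word. */
static inline unsigned int example_rga_cmdbuf_index(unsigned int reg)
{
	/* e.g. RGA_DST_INFO: (0x0138 - 0x0100) / 4 = word 14 */
	return (reg - RGA_MODE_BASE_REG) >> 2;
}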
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
new file mode 100644
index 000000000..86a76f35a
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -0,0 +1,992 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static void device_run(void *prv)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rga->ctrl_lock, flags);
+
+ rga->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ rga_buf_map(&src->vb2_buf);
+ rga_buf_map(&dst->vb2_buf);
+
+ rga_hw_start(rga);
+
+ spin_unlock_irqrestore(&rga->ctrl_lock, flags);
+}
+
+static irqreturn_t rga_isr(int irq, void *prv)
+{
+ struct rockchip_rga *rga = prv;
+ int intr;
+
+ intr = rga_read(rga, RGA_INT) & 0xf;
+
+ rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);
+
+ if (intr & 0x04) {
+ struct vb2_v4l2_buffer *src, *dst;
+ struct rga_ctx *ctx = rga->curr;
+
+ WARN_ON(!ctx);
+
+ rga->curr = NULL;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ WARN_ON(!src);
+ WARN_ON(!dst);
+
+ dst->timecode = src->timecode;
+ dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(rga->m2m_dev, ctx->fh.m2m_ctx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct v4l2_m2m_ops rga_m2m_ops = {
+ .device_run = device_run,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct rga_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &rga_qops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->rga->mutex;
+ src_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &rga_qops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->rga->mutex;
+ dst_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int rga_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rga_ctx *ctx = container_of(ctrl->handler, struct rga_ctx,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->rga->ctrl_lock, flags);
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ ctx->rotate = ctrl->val;
+ break;
+ case V4L2_CID_BG_COLOR:
+ ctx->fill_color = ctrl->val;
+ break;
+ }
+ spin_unlock_irqrestore(&ctx->rga->ctrl_lock, flags);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rga_ctrl_ops = {
+ .s_ctrl = rga_s_ctrl,
+};
+
+static int rga_setup_ctrls(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_BG_COLOR, 0, 0xffffffff, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_err(&rga->v4l2_dev, "%s failed\n", __func__);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct rga_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR4444,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR1555,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_BGR565,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422P,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct rga_fmt *rga_fmt_find(struct v4l2_format *f)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct rga_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .crop.left = 0,
+ .crop.top = 0,
+ .crop.width = DEFAULT_WIDTH,
+ .crop.height = DEFAULT_HEIGHT,
+ .fmt = &formats[0],
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->in;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->out;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int rga_open(struct file *file)
+{
+ struct rockchip_rga *rga = video_drvdata(file);
+ struct rga_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->rga = rga;
+ /* Set default formats */
+ ctx->in = def_frame;
+ ctx->out = def_frame;
+
+ if (mutex_lock_interruptible(&rga->mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rga->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ mutex_unlock(&rga->mutex);
+ kfree(ctx);
+ return ret;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ rga_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static int rga_release(struct file *file)
+{
+ struct rga_ctx *ctx =
+ container_of(file->private_data, struct rga_ctx, fh);
+ struct rockchip_rga *rga = ctx->rga;
+
+ mutex_lock(&rga->mutex);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations rga_fops = {
+ .owner = THIS_MODULE,
+ .open = rga_open,
+ .release = rga_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int
+vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, RGA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "rockchip-rga", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:rga", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ struct rga_fmt *fmt;
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &formats[f->index];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = frm->stride;
+ f->fmt.pix.sizeimage = frm->size;
+ f->fmt.pix.colorspace = frm->colorspace;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_fmt *fmt;
+
+ fmt = rga_fmt_find(f);
+ if (!fmt) {
+ fmt = &formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < MIN_WIDTH)
+ f->fmt.pix.width = MIN_WIDTH;
+ if (f->fmt.pix.height < MIN_HEIGHT)
+ f->fmt.pix.height = MIN_HEIGHT;
+
+ if (fmt->hw_format >= RGA_COLOR_FMT_YUV422SP)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * (f->fmt.pix.width * fmt->depth) >> 3;
+
+ return 0;
+}
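
A worked example of what vidioc_try_fmt() above computes for a hypothetical 640x480 V4L2_PIX_FMT_NV12 request (hw_format RGA_COLOR_FMT_YUV420SP, depth 12):

/*
 * Illustration only (not part of the patch):
 *
 *	bytesperline = width                       = 640
 *	sizeimage    = height * (width * 12) >> 3  = 480 * 7680 >> 3 = 460800
 *
 * i.e. 1.5 bytes per pixel, as expected for 4:2:0 data.
 */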
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+ struct rga_fmt *fmt;
+ int ret = 0;
+
+ /* Adjust all values according to the hardware capabilities
+ * and chosen format.
+ */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&rga->v4l2_dev, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+ fmt = rga_fmt_find(f);
+ if (!fmt)
+ return -EINVAL;
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ frm->fmt = fmt;
+ frm->stride = f->fmt.pix.bytesperline;
+ frm->colorspace = f->fmt.pix.colorspace;
+
+ /* Reset crop settings */
+ frm->crop.left = 0;
+ frm->crop.top = 0;
+ frm->crop.width = frm->width;
+ frm->crop.height = frm->height;
+
+ return 0;
+}
+
+static int vidioc_g_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rga_frame *f;
+ bool use_frame = false;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_frame) {
+ s->r = f->crop;
+ } else {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct rga_frame *f;
+ int ret = 0;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * COMPOSE target is only valid for capture buffer type, return
+ * error for output buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * CROP target is only valid for output buffer type, return
+ * error for capture buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ /*
+ * Bounds and default crop/compose targets are invalid targets to
+ * try/set.
+ */
+ default:
+ return -EINVAL;
+ }
+
+ if (s->r.top < 0 || s->r.left < 0) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev,
+ "doesn't support negative values for top & left.\n");
+ return -EINVAL;
+ }
+
+ if (s->r.left + s->r.width > f->width ||
+ s->r.top + s->r.height > f->height ||
+ s->r.width < MIN_WIDTH || s->r.height < MIN_HEIGHT) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev, "unsupported crop value.\n");
+ return -EINVAL;
+ }
+
+ f->crop = s->r;
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops rga_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+};
+
+static struct video_device rga_videodev = {
+ .name = "rockchip-rga",
+ .fops = &rga_fops,
+ .ioctl_ops = &rga_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static int rga_enable_clocks(struct rockchip_rga *rga)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rga->sclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rga->aclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
+ goto err_disable_sclk;
+ }
+
+ ret = clk_prepare_enable(rga->hclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
+ goto err_disable_aclk;
+ }
+
+ return 0;
+
+err_disable_aclk:
+ clk_disable_unprepare(rga->aclk);
+err_disable_sclk:
+ clk_disable_unprepare(rga->sclk);
+
+ return ret;
+}
+
+static void rga_disable_clocks(struct rockchip_rga *rga)
+{
+ clk_disable_unprepare(rga->sclk);
+ clk_disable_unprepare(rga->hclk);
+ clk_disable_unprepare(rga->aclk);
+}
+
+static int rga_parse_dt(struct rockchip_rga *rga)
+{
+ struct reset_control *core_rst, *axi_rst, *ahb_rst;
+
+ core_rst = devm_reset_control_get(rga->dev, "core");
+ if (IS_ERR(core_rst)) {
+ dev_err(rga->dev, "failed to get core reset controller\n");
+ return PTR_ERR(core_rst);
+ }
+
+ axi_rst = devm_reset_control_get(rga->dev, "axi");
+ if (IS_ERR(axi_rst)) {
+ dev_err(rga->dev, "failed to get axi reset controller\n");
+ return PTR_ERR(axi_rst);
+ }
+
+ ahb_rst = devm_reset_control_get(rga->dev, "ahb");
+ if (IS_ERR(ahb_rst)) {
+ dev_err(rga->dev, "failed to get ahb reset controller\n");
+ return PTR_ERR(ahb_rst);
+ }
+
+ reset_control_assert(core_rst);
+ udelay(1);
+ reset_control_deassert(core_rst);
+
+ reset_control_assert(axi_rst);
+ udelay(1);
+ reset_control_deassert(axi_rst);
+
+ reset_control_assert(ahb_rst);
+ udelay(1);
+ reset_control_deassert(ahb_rst);
+
+ rga->sclk = devm_clk_get(rga->dev, "sclk");
+ if (IS_ERR(rga->sclk)) {
+ dev_err(rga->dev, "failed to get sclk clock\n");
+ return PTR_ERR(rga->sclk);
+ }
+
+ rga->aclk = devm_clk_get(rga->dev, "aclk");
+ if (IS_ERR(rga->aclk)) {
+ dev_err(rga->dev, "failed to get aclk clock\n");
+ return PTR_ERR(rga->aclk);
+ }
+
+ rga->hclk = devm_clk_get(rga->dev, "hclk");
+ if (IS_ERR(rga->hclk)) {
+ dev_err(rga->dev, "failed to get hclk clock\n");
+ return PTR_ERR(rga->hclk);
+ }
+
+ return 0;
+}
+
+static int rga_probe(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret = 0;
+ int irq;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
+ if (!rga)
+ return -ENOMEM;
+
+ rga->dev = &pdev->dev;
+ spin_lock_init(&rga->ctrl_lock);
+ mutex_init(&rga->mutex);
+
+ ret = rga_parse_dt(rga);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+
+ pm_runtime_enable(rga->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ rga->regs = devm_ioremap_resource(rga->dev, res);
+ if (IS_ERR(rga->regs)) {
+ ret = PTR_ERR(rga->regs);
+ goto err_put_clk;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(rga->dev, "failed to get irq\n");
+ ret = irq;
+ goto err_put_clk;
+ }
+
+ ret = devm_request_irq(rga->dev, irq, rga_isr, 0,
+ dev_name(rga->dev), rga);
+ if (ret < 0) {
+ dev_err(rga->dev, "failed to request irq\n");
+ goto err_put_clk;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &rga->v4l2_dev);
+ if (ret)
+ goto err_put_clk;
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&rga->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+ *vfd = rga_videodev;
+ vfd->lock = &rga->mutex;
+ vfd->v4l2_dev = &rga->v4l2_dev;
+
+ video_set_drvdata(vfd, rga);
+ rga->vfd = vfd;
+
+ platform_set_drvdata(pdev, rga);
+ rga->m2m_dev = v4l2_m2m_init(&rga_m2m_ops);
+ if (IS_ERR(rga->m2m_dev)) {
+ v4l2_err(&rga->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(rga->m2m_dev);
+ goto unreg_video_dev;
+ }
+
+ pm_runtime_get_sync(rga->dev);
+
+ rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
+ rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
+
+ v4l2_info(&rga->v4l2_dev, "HW Version: 0x%02x.%02x\n",
+ rga->version.major, rga->version.minor);
+
+ pm_runtime_put(rga->dev);
+
+ /* Create CMD buffer */
+ rga->cmdbuf_virt = dma_alloc_attrs(rga->dev, RGA_CMDBUF_SIZE,
+ &rga->cmdbuf_phy, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
+ rga->src_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+ rga->dst_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+
+ def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+ def_frame.size = def_frame.stride * def_frame.height;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ return 0;
+
+rel_vdev:
+ video_device_release(vfd);
+unreg_video_dev:
+ video_unregister_device(rga->vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&rga->v4l2_dev);
+err_put_clk:
+ pm_runtime_disable(rga->dev);
+
+ return ret;
+}
+
+static int rga_remove(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga = platform_get_drvdata(pdev);
+
+ dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
+ rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
+
+ free_pages((unsigned long)rga->src_mmu_pages, 3);
+ free_pages((unsigned long)rga->dst_mmu_pages, 3);
+
+ v4l2_info(&rga->v4l2_dev, "Removing\n");
+
+ v4l2_m2m_release(rga->m2m_dev);
+ video_unregister_device(rga->vfd);
+ v4l2_device_unregister(&rga->v4l2_dev);
+
+ pm_runtime_disable(rga->dev);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_suspend(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ rga_disable_clocks(rga);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_resume(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ return rga_enable_clocks(rga);
+}
+
+static const struct dev_pm_ops rga_pm = {
+ SET_RUNTIME_PM_OPS(rga_runtime_suspend,
+ rga_runtime_resume, NULL)
+};
+
+static const struct of_device_id rockchip_rga_match[] = {
+ {
+ .compatible = "rockchip,rk3288-rga",
+ },
+ {
+ .compatible = "rockchip,rk3399-rga",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_rga_match);
+
+static struct platform_driver rga_pdrv = {
+ .probe = rga_probe,
+ .remove = rga_remove,
+ .driver = {
+ .name = RGA_NAME,
+ .pm = &rga_pm,
+ .of_match_table = rockchip_rga_match,
+ },
+};
+
+module_platform_driver(rga_pdrv);
+
+MODULE_AUTHOR("Jacob Chen <jacob-chen@iotwrt.com>");
+MODULE_DESCRIPTION("Rockchip Raster 2D Graphics Acceleration Unit");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
new file mode 100644
index 000000000..72d8a159f
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_H__
+#define __RGA_H__
+
+#include <linux/platform_device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define RGA_NAME "rockchip-rga"
+
+struct rga_fmt {
+ u32 fourcc;
+ int depth;
+ u8 uv_factor;
+ u8 y_div;
+ u8 x_div;
+ u8 color_swap;
+ u8 hw_format;
+};
+
+struct rga_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ u32 colorspace;
+
+ /* Crop */
+ struct v4l2_rect crop;
+
+ /* Image format */
+ struct rga_fmt *fmt;
+
+ /* Variables that can be calculated once and reused */
+ u32 stride;
+ u32 size;
+};
+
+struct rockchip_rga_version {
+ u32 major;
+ u32 minor;
+};
+
+struct rga_ctx {
+ struct v4l2_fh fh;
+ struct rockchip_rga *rga;
+ struct rga_frame in;
+ struct rga_frame out;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Control values */
+ u32 op;
+ u32 hflip;
+ u32 vflip;
+ u32 rotate;
+ u32 fill_color;
+};
+
+struct rockchip_rga {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+
+ struct device *dev;
+ struct regmap *grf;
+ void __iomem *regs;
+ struct clk *sclk;
+ struct clk *aclk;
+ struct clk *hclk;
+ struct rockchip_rga_version version;
+
+ /* vfd lock */
+ struct mutex mutex;
+ /* ctrl parm lock */
+ spinlock_t ctrl_lock;
+
+ struct rga_ctx *curr;
+ dma_addr_t cmdbuf_phy;
+ void *cmdbuf_virt;
+ unsigned int *src_mmu_pages;
+ unsigned int *dst_mmu_pages;
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type);
+
+/* RGA buffer management */
+extern const struct vb2_ops rga_qops;
+void rga_buf_map(struct vb2_buffer *vb);
+
+/* RGA Hardware */
+static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
+{
+ writel(value, rga->regs + reg);
+};
+
+static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
+{
+ return readl(rga->regs + reg);
+};
+
+static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
+{
+ u32 temp = rga_read(rga, reg) & ~(mask);
+
+ temp |= val & mask;
+ rga_write(rga, reg, temp);
+};
+
+void rga_hw_start(struct rockchip_rga *rga);
+
+#endif
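
rga_mod() above is a read-modify-write helper that touches only the bits selected by the mask. As one usage, a sketch of how the interrupt handler in rga.c acknowledges pending interrupts by mirroring status bits [3:0] into the clear bits [7:4] (the wrapper name is hypothetical):

/* Illustration only: acknowledge RGA interrupts, as rga_isr() does. */
static inline void example_rga_ack_irq(struct rockchip_rga *rga)
{
	unsigned int intr = rga_read(rga, RGA_INT) & 0xf;

	rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);
}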
diff --git a/drivers/media/platform/s3c-camif/Makefile b/drivers/media/platform/s3c-camif/Makefile
new file mode 100644
index 000000000..50bf8c59b
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/Makefile
@@ -0,0 +1,5 @@
+# Makefile for s3c244x/s3c64xx CAMIF driver
+
+s3c-camif-objs := camif-core.o camif-capture.o camif-regs.o
+
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif.o
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
new file mode 100644
index 000000000..c02dce8b4
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -0,0 +1,1661 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Based on drivers/media/platform/s5p-fimc,
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+#include "camif-regs.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+/* Locking: called with vp->camif->slock spinlock held */
+static void camif_cfg_video_path(struct camif_vp *vp)
+{
+ WARN_ON(s3c_camif_get_scaler_config(vp, &vp->scaler));
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_target_format(vp);
+ camif_hw_set_output_dma(vp);
+}
+
+static void camif_prepare_dma_offset(struct camif_vp *vp)
+{
+ struct camif_frame *f = &vp->out_frame;
+
+ f->dma_offset.initial = f->rect.top * f->f_width + f->rect.left;
+ f->dma_offset.line = f->f_width - (f->rect.left + f->rect.width);
+
+ pr_debug("dma_offset: initial: %d, line: %d\n",
+ f->dma_offset.initial, f->dma_offset.line);
+}
+
+/* Locking: called with camif->slock spinlock held */
+static int s3c_camif_hw_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+
+ if (camif->sensor.sd == NULL || vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (variant->ip_revision == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_hw_set_camera_bus(camif);
+ camif_hw_set_source_format(camif);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cr, camif->colorfx_cb);
+ if (variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ camif_hw_set_input_path(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+
+ return 0;
+}
+
+/*
+ * Initialize the video path, but only from the scaler stage onwards. The
+ * camera input interface setup is skipped. This is useful to enable one of the
+ * video paths when the other is already running.
+ * Locking: called with camif->slock spinlock held.
+ */
+static int s3c_camif_hw_vp_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ camif_prepare_dma_offset(vp);
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+ return 0;
+}
+
+static int sensor_set_power(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
+ if (camif->sensor.power_count == !on)
+ err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (err == -ENOIOCTLCMD)
+ err = 0;
+ if (!err)
+ sensor->power_count += on ? 1 : -1;
+
+ pr_debug("on: %d, power_count: %d, err: %d\n",
+ on, sensor->power_count, err);
+
+ return err;
+}
+
+static int sensor_set_streaming(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
+ if (camif->sensor.stream_count == !on)
+ err = v4l2_subdev_call(sensor->sd, video, s_stream, on);
+ if (!err)
+ sensor->stream_count += on ? 1 : -1;
+
+ pr_debug("on: %d, stream_count: %d, err: %d\n",
+ on, sensor->stream_count, err);
+
+ return err;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start streaming again.
+ * Return any buffers to vb2, perform CAMIF software reset and
+ * turn off streaming at the data pipeline (sensor) if required.
+ */
+static int camif_reinitialize(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ streaming = vp->state & ST_VP_SENSOR_STREAMING;
+
+ vp->state &= ~(ST_VP_PENDING | ST_VP_RUNNING | ST_VP_OFF |
+ ST_VP_ABORTING | ST_VP_STREAMING |
+ ST_VP_SENSOR_STREAMING | ST_VP_LASTIRQ);
+
+ /* Release unused buffers */
+ while (!list_empty(&vp->pending_buf_q)) {
+ buf = camif_pending_queue_pop(vp);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&vp->active_buf_q)) {
+ buf = camif_active_queue_pop(vp);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!streaming)
+ return 0;
+
+ return sensor_set_streaming(camif, 0);
+}
+
+static bool s3c_vp_active(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ ret = (vp->state & ST_VP_RUNNING) || (vp->state & ST_VP_PENDING);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return ret;
+}
+
+static bool camif_is_streaming(struct camif_dev *camif)
+{
+ unsigned long flags;
+ bool status;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ status = camif->stream_count > 0;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return status;
+}
+
+static int camif_stop_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+ if (!s3c_vp_active(vp))
+ return 0;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->state &= ~(ST_VP_OFF | ST_VP_LASTIRQ);
+ vp->state |= ST_VP_ABORTING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ ret = wait_event_timeout(vp->irq_queue,
+ !(vp->state & ST_VP_ABORTING),
+ msecs_to_jiffies(CAMIF_STOP_TIMEOUT));
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ if (ret == 0 && !(vp->state & ST_VP_OFF)) {
+ /* Timed out, forcibly stop capture */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return camif_reinitialize(vp);
+}
+
+static int camif_prepare_addr(struct camif_vp *vp, struct vb2_buffer *vb,
+ struct camif_addr *paddr)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 pix_size;
+
+ if (vb == NULL || frame == NULL)
+ return -EINVAL;
+
+ pix_size = frame->rect.width * frame->rect.height;
+
+ pr_debug("colplanes: %d, pix_size: %u\n",
+ vp->out_fmt->colplanes, pix_size);
+
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ switch (vp->out_fmt->colplanes) {
+ case 1:
+ paddr->cb = 0;
+ paddr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ paddr->cb = (u32)(paddr->y + pix_size);
+ paddr->cr = 0;
+ break;
+ case 3:
+ paddr->cb = (u32)(paddr->y + pix_size);
+ /* decompose Y into Y/Cb/Cr */
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 1));
+ else /* 420 */
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 2));
+
+ if (vp->out_fmt->color == IMG_FMT_YCRCB420)
+ swap(paddr->cb, paddr->cr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_debug("DMA address: y: %pad cb: %pad cr: %pad\n",
+ &paddr->y, &paddr->cb, &paddr->cr);
+
+ return 0;
+}
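
A worked example of the plane addresses camif_prepare_addr() above derives for a hypothetical 640x480 three-plane 4:2:0 buffer whose luma plane starts at DMA address Y:

/*
 * Illustration only (not part of the patch):
 *
 *	pix_size = 640 * 480         = 307200 bytes (luma plane)
 *	cb       = Y + pix_size      = Y + 307200
 *	cr       = cb + pix_size / 4 = Y + 307200 + 76800
 *
 * For IMG_FMT_YCRCB420 the Cb and Cr addresses are then swapped.
 */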
+
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
+{
+ struct camif_vp *vp = priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ unsigned int status;
+
+ spin_lock(&camif->slock);
+
+ if (ip_rev == S3C6410_CAMIF_IP_REV)
+ camif_hw_clear_pending_irq(vp);
+
+ status = camif_hw_get_status(vp);
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV && (status & CISTATUS_OVF_MASK)) {
+ camif_hw_clear_fifo_overflow(vp);
+ goto unlock;
+ }
+
+ if (vp->state & ST_VP_ABORTING) {
+ if (vp->state & ST_VP_OFF) {
+ /* Last IRQ */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+ wake_up(&vp->irq_queue);
+ goto unlock;
+ } else if (vp->state & ST_VP_LASTIRQ) {
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ camif_hw_set_lastirq(vp, false);
+ vp->state |= ST_VP_OFF;
+ } else {
+ /* Disable capture, enable last IRQ */
+ camif_hw_set_lastirq(vp, true);
+ vp->state |= ST_VP_LASTIRQ;
+ }
+ }
+
+ if (!list_empty(&vp->pending_buf_q) && (vp->state & ST_VP_RUNNING) &&
+ !list_empty(&vp->active_buf_q)) {
+ unsigned int index;
+ struct camif_buffer *vbuf;
+ /*
+ * Get previous DMA write buffer index:
+ * 0 => DMA buffer 0, 2;
+ * 1 => DMA buffer 1, 3.
+ */
+ index = (CISTATUS_FRAMECNT(status) + 2) & 1;
+ vbuf = camif_active_queue_peek(vp, index);
+
+ if (!WARN_ON(vbuf == NULL)) {
+ /* Dequeue a filled buffer */
+ vbuf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vbuf->vb.sequence = vp->frame_sequence++;
+ vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ /* Set up an empty buffer at the DMA engine */
+ vbuf = camif_pending_queue_pop(vp);
+ vbuf->index = index;
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index);
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index + 2);
+
+ /* Scheduled in H/W, add to the queue */
+ camif_active_queue_add(vp, vbuf);
+ }
+ } else if (!(vp->state & ST_VP_ABORTING) &&
+ (vp->state & ST_VP_PENDING)) {
+ vp->state |= ST_VP_RUNNING;
+ }
+
+ if (vp->state & ST_VP_CONFIG) {
+ camif_prepare_dma_offset(vp);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (camif->variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cr, camif->colorfx_cb);
+ vp->state &= ~ST_VP_CONFIG;
+ }
+unlock:
+ spin_unlock(&camif->slock);
+ return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+	/*
+	 * We assume the codec capture path is always activated
+	 * first, before the preview path starts streaming. This
+	 * is required to avoid an internal FIFO overflow and the
+	 * need for a CAMIF software reset.
+	 */
+ spin_lock_irqsave(&camif->slock, flags);
+
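+	/* A full CAMIF reset and initialization is done only when the first path starts */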
+ if (camif->stream_count == 0) {
+ camif_hw_reset(camif);
+ ret = s3c_camif_hw_init(camif, vp);
+ } else {
+ ret = s3c_camif_hw_vp_init(camif, vp);
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (ret < 0) {
+ camif_reinitialize(vp);
+ return ret;
+ }
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->frame_sequence = 0;
+ vp->state |= ST_VP_PENDING;
+
+ if (!list_empty(&vp->pending_buf_q) &&
+ (!(vp->state & ST_VP_STREAMING) ||
+ !(vp->state & ST_VP_SENSOR_STREAMING))) {
+
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ vp->state |= ST_VP_STREAMING;
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ ret = sensor_set_streaming(camif, 1);
+ if (ret)
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+
+ return ret;
+ }
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+static void stop_streaming(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ camif_stop_capture(vp);
+}
+
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int size;
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ size = (frame->f_width * frame->f_height * fmt->depth) / 8;
+
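+	/* A non-zero *num_planes means a create_bufs() call; only validate the plane size */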
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *num_planes = 1;
+ sizes[0] = size;
+
+ pr_debug("size: %u\n", sizes[0]);
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (vb2_plane_size(vb, 0) < vp->payload) {
+ v4l2_err(&vp->vdev, "buffer too small: %lu, required: %u\n",
+ vb2_plane_size(vb, 0), vp->payload);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, vp->payload);
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct camif_buffer *buf = container_of(vbuf, struct camif_buffer, vb);
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ WARN_ON(camif_prepare_addr(vp, &buf->vb.vb2_buf, &buf->paddr));
+
+ if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
+ /* Schedule an empty buffer in H/W */
+ buf->index = vp->buf_index;
+
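+		/* Program the buffer into both H/W DMA address slots: index and index + 2 */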
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index);
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index + 2);
+
+ camif_active_queue_add(vp, buf);
+ vp->buf_index = !vp->buf_index;
+ } else {
+ camif_pending_queue_add(vp, buf);
+ }
+
+ if (vb2_is_streaming(&vp->vb_queue) && !list_empty(&vp->pending_buf_q)
+ && !(vp->state & ST_VP_STREAMING)) {
+
+ vp->state |= ST_VP_STREAMING;
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ if (sensor_set_streaming(camif, 1) == 0)
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ else
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+ }
+ return;
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+}
+
+static const struct vb2_ops s3c_camif_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static int s3c_camif_open(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ if (mutex_lock_interruptible(&camif->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = pm_runtime_get_sync(camif->dev);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = sensor_set_power(camif, 1);
+ if (!ret)
+ goto unlock;
+
+ pm_runtime_put(camif->dev);
+err_pm:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_close(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ mutex_lock(&camif->lock);
+
+ if (vp->owner == file->private_data) {
+ camif_stop_capture(vp);
+ vb2_queue_release(&vp->vb_queue);
+ vp->owner = NULL;
+ }
+
+ sensor_set_power(camif, 0);
+
+ pm_runtime_put(camif->dev);
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static __poll_t s3c_camif_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ __poll_t ret;
+
+ mutex_lock(&camif->lock);
+ if (vp->owner && vp->owner != file->private_data)
+ ret = EPOLLERR;
+ else
+ ret = vb2_poll(&vp->vb_queue, file, wait);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != file->private_data)
+ ret = -EBUSY;
+ else
+ ret = vb2_mmap(&vp->vb_queue, vma);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations s3c_camif_fops = {
+ .owner = THIS_MODULE,
+ .open = s3c_camif_open,
+ .release = s3c_camif_close,
+ .poll = s3c_camif_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s3c_camif_mmap,
+};
+
+/*
+ * Video node IOCTLs
+ */
+
+static int s3c_camif_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ strlcpy(cap->driver, S3C_CAMIF_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, S3C_CAMIF_DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
+ dev_name(vp->camif->dev), vp->id);
+
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_subdev *sensor = vp->camif->sensor.sd;
+
+ if (input->index || sensor == NULL)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(input->name, sensor->name, sizeof(input->name));
+ return 0;
+}
+
+static int s3c_camif_vidioc_s_input(struct file *file, void *priv,
+ unsigned int i)
+{
+ return i == 0 ? 0 : -EINVAL;
+}
+
+static int s3c_camif_vidioc_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, NULL, f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ pr_debug("fmt(%d): %s\n", f->index, f->description);
+ return 0;
+}
+
+static int s3c_camif_vidioc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+
+ pix->bytesperline = frame->f_width * fmt->ybpp;
+ pix->sizeimage = vp->payload;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->width = frame->f_width;
+ pix->height = frame->f_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ return 0;
+}
+
+static int __camif_video_try_format(struct camif_vp *vp,
+ struct v4l2_pix_format *pix,
+ const struct camif_fmt **ffmt)
+{
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ unsigned int wmin, hmin, sc_hrmax, sc_vrmax;
+ const struct vp_pix_limits *pix_lim;
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, &pix->pixelformat, 0);
+
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+
+ if (ffmt)
+ *ffmt = fmt;
+
+ pix_lim = &camif->variant->vp_pix_limits[vp->id];
+
+ pr_debug("fmt: %ux%u, crop: %ux%u, bytesperline: %u\n",
+ pix->width, pix->height, crop->width, crop->height,
+ pix->bytesperline);
+ /*
+ * Calculate minimum width and height according to the configured
+ * camera input interface crop rectangle and the resizer's capabilities.
+ */
+ sc_hrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->width) - 3));
+ sc_vrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->height) - 1));
+
+ wmin = max_t(u32, pix_lim->min_out_width, crop->width / sc_hrmax);
+ wmin = round_up(wmin, pix_lim->out_width_align);
+ hmin = max_t(u32, 8, crop->height / sc_vrmax);
+ hmin = round_up(hmin, 8);
+
+ v4l_bound_align_image(&pix->width, wmin, pix_lim->max_sc_out_width,
+ ffs(pix_lim->out_width_align) - 1,
+ &pix->height, hmin, pix_lim->max_height, 0, 0);
+
+ pix->bytesperline = pix->width * fmt->ybpp;
+ pix->sizeimage = (pix->width * pix->height * fmt->depth) / 8;
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->field = V4L2_FIELD_NONE;
+
+ pr_debug("%ux%u, wmin: %d, hmin: %d, sc_hrmax: %d, sc_vrmax: %d\n",
+ pix->width, pix->height, wmin, hmin, sc_hrmax, sc_vrmax);
+
+ return 0;
+}
+
+static int s3c_camif_vidioc_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return __camif_video_try_format(vp, &f->fmt.pix, NULL);
+}
+
+static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_frame *out_frame = &vp->out_frame;
+ const struct camif_fmt *fmt = NULL;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vb2_is_busy(&vp->vb_queue))
+ return -EBUSY;
+
+ ret = __camif_video_try_format(vp, &f->fmt.pix, &fmt);
+ if (ret < 0)
+ return ret;
+
+ vp->out_fmt = fmt;
+ vp->payload = pix->sizeimage;
+ out_frame->f_width = pix->width;
+ out_frame->f_height = pix->height;
+
+ /* Reset composition rectangle */
+ out_frame->rect.width = pix->width;
+ out_frame->rect.height = pix->height;
+ out_frame->rect.left = 0;
+ out_frame->rect.top = 0;
+
+ if (vp->owner == NULL)
+ vp->owner = priv;
+
+ pr_debug("%ux%u. payload: %u. fmt: %s. %d %d. sizeimage: %d. bpl: %d\n",
+ out_frame->f_width, out_frame->f_height, vp->payload, fmt->name,
+ pix->width * pix->height * fmt->depth, fmt->depth,
+ pix->sizeimage, pix->bytesperline);
+
+ return 0;
+}
+
+/* Only check pixel formats at the sensor and the camif subdev pads */
+static int camif_pipeline_validate(struct camif_dev *camif)
+{
+ struct v4l2_subdev_format src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ /* Retrieve format at the sensor subdev source pad */
+ pad = media_entity_remote_pad(&camif->pads[0]);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ return -EPIPE;
+
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(camif->sensor.sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != camif->mbus_fmt.width ||
+ src_fmt.format.height != camif->mbus_fmt.height ||
+ src_fmt.format.code != camif->mbus_fmt.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int s3c_camif_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct media_entity *sensor = &camif->sensor.sd->entity;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ if (s3c_vp_active(vp))
+ return 0;
+
+ ret = media_pipeline_start(sensor, camif->m_pipeline);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_pipeline_validate(camif);
+ if (ret < 0) {
+ media_pipeline_stop(sensor);
+ return ret;
+ }
+
+ return vb2_streamon(&vp->vb_queue, type);
+}
+
+static int s3c_camif_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ ret = vb2_streamoff(&vp->vb_queue, type);
+ if (ret == 0)
+ media_pipeline_stop(&camif->sensor.sd->entity);
+ return ret;
+}
+
+static int s3c_camif_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n",
+ vp->id, rb->count, vp->owner, priv);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
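+	/* Ensure at least CAMIF_REQ_BUFS_MIN buffers get allocated, or fail */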
+ if (rb->count)
+ rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count);
+ else
+ vp->owner = NULL;
+
+ ret = vb2_reqbufs(&vp->vb_queue, rb);
+ if (ret < 0)
+ return ret;
+
+ if (rb->count && rb->count < CAMIF_REQ_BUFS_MIN) {
+ rb->count = 0;
+ vb2_reqbufs(&vp->vb_queue, rb);
+ ret = -ENOMEM;
+ }
+
+ vp->reqbufs_count = rb->count;
+ if (vp->owner == NULL && rb->count > 0)
+ vp->owner = priv;
+
+ return ret;
+}
+
+static int s3c_camif_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_querybuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_qbuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d] sequence: %d\n", vp->id, vp->frame_sequence);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_dqbuf(&vp->vb_queue, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int s3c_camif_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ create->count = max_t(u32, 1, create->count);
+ ret = vb2_create_bufs(&vp->vb_queue, create);
+
+ if (!ret && vp->owner == NULL)
+ vp->owner = priv;
+
+ return ret;
+}
+
+static int s3c_camif_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_prepare_buf(&vp->vb_queue, b);
+}
+
+static int s3c_camif_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = vp->out_frame.f_width;
+ sel->r.height = vp->out_frame.f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = vp->out_frame.rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void __camif_try_compose(struct camif_dev *camif, struct camif_vp *vp,
+ struct v4l2_rect *r)
+{
+ /* s3c244x doesn't support composition */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ *r = vp->out_frame.rect;
+ return;
+ }
+
+ /* TODO: s3c64xx */
+}
+
+static int s3c_camif_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ __camif_try_compose(camif, vp, &rect);
+
+ sel->r = rect;
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->out_frame.rect = rect;
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%dx%d\n",
+ sel->type, sel->target, sel->flags,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops s3c_camif_ioctl_ops = {
+ .vidioc_querycap = s3c_camif_vidioc_querycap,
+ .vidioc_enum_input = s3c_camif_vidioc_enum_input,
+ .vidioc_g_input = s3c_camif_vidioc_g_input,
+ .vidioc_s_input = s3c_camif_vidioc_s_input,
+ .vidioc_enum_fmt_vid_cap = s3c_camif_vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = s3c_camif_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = s3c_camif_vidioc_s_fmt,
+ .vidioc_g_fmt_vid_cap = s3c_camif_vidioc_g_fmt,
+ .vidioc_g_selection = s3c_camif_g_selection,
+ .vidioc_s_selection = s3c_camif_s_selection,
+ .vidioc_reqbufs = s3c_camif_reqbufs,
+ .vidioc_querybuf = s3c_camif_querybuf,
+ .vidioc_prepare_buf = s3c_camif_prepare_buf,
+ .vidioc_create_bufs = s3c_camif_create_bufs,
+ .vidioc_qbuf = s3c_camif_qbuf,
+ .vidioc_dqbuf = s3c_camif_dqbuf,
+ .vidioc_streamon = s3c_camif_streamon,
+ .vidioc_streamoff = s3c_camif_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+/*
+ * Video node controls
+ */
+static int s3c_camif_video_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_vp *vp = ctrl->priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ pr_debug("[vp%d] ctrl: %s, value: %d\n", vp->id,
+ ctrl->name, ctrl->val);
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ vp->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ vp->vflip = ctrl->val;
+ break;
+ }
+
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+/* Codec and preview video node control ops */
+static const struct v4l2_ctrl_ops s3c_camif_video_ctrl_ops = {
+ .s_ctrl = s3c_camif_video_s_ctrl,
+};
+
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+{
+ struct camif_vp *vp = &camif->vp[idx];
+ struct vb2_queue *q = &vp->vb_queue;
+ struct video_device *vfd = &vp->vdev;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ memset(vfd, 0, sizeof(*vfd));
+ snprintf(vfd->name, sizeof(vfd->name), "camif-%s",
+ vp->id == 0 ? "codec" : "preview");
+
+ vfd->fops = &s3c_camif_fops;
+ vfd->ioctl_ops = &s3c_camif_ioctl_ops;
+ vfd->v4l2_dev = &camif->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &camif->lock;
+ vp->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&vp->pending_buf_q);
+ INIT_LIST_HEAD(&vp->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &s3c_camif_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct camif_buffer);
+ q->drv_priv = vp;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &vp->camif->lock;
+ q->dev = camif->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_vd_rel;
+
+ vp->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
+ if (ret)
+ goto err_vd_rel;
+
+ video_set_drvdata(vfd, vp);
+
+ v4l2_ctrl_handler_init(&vp->ctrl_handler, 1);
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+
+ ret = vp->ctrl_handler.error;
+ if (ret < 0)
+ goto err_me_cleanup;
+
+ vfd->ctrl_handler = &vp->ctrl_handler;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_ctrlh_free;
+
+ v4l2_info(&camif->v4l2_dev, "registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_ctrlh_free:
+ v4l2_ctrl_handler_free(&vp->ctrl_handler);
+err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+err_vd_rel:
+ video_device_release(vfd);
+ return ret;
+}
+
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx)
+{
+ struct video_device *vfd = &camif->vp[idx].vdev;
+
+ if (video_is_registered(vfd)) {
+ video_unregister_device(vfd);
+ media_entity_cleanup(&vfd->entity);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ }
+}
+
+/* Media bus pixel formats supported at the camif input */
+static const u32 camif_mbus_formats[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+};
+
+/*
+ * Camera input interface subdev operations
+ */
+
+static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(camif_mbus_formats))
+ return -EINVAL;
+
+ code->code = camif_mbus_formats[code->index];
+ return 0;
+}
+
+static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ /* full camera input pixel size */
+ *mf = camif->mbus_fmt;
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* crop rectangle at camera interface input */
+ mf->width = camif->camif_crop.width;
+ mf->height = camif->camif_crop.height;
+ mf->code = camif->mbus_fmt.code;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ return 0;
+}
+
+static void __camif_subdev_try_format(struct camif_dev *camif,
+ struct v4l2_mbus_framefmt *mf, int pad)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+ const struct vp_pix_limits *pix_lim;
+ unsigned int i;
+
+ /* FIXME: constraints against codec or preview path ? */
+ pix_lim = &variant->vp_pix_limits[VP_CODEC];
+
+ for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
+ if (camif_mbus_formats[i] == mf->code)
+ break;
+
+ if (i == ARRAY_SIZE(camif_mbus_formats))
+ mf->code = camif_mbus_formats[0];
+
+ if (pad == CAMIF_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, CAMIF_MAX_PIX_HEIGHT, 0,
+ 0);
+ } else {
+ struct v4l2_rect *crop = &camif->camif_crop;
+ v4l_bound_align_image(&mf->width, 8, crop->width,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, crop->height,
+ 0, 0);
+ }
+
+ v4l2_dbg(1, debug, &camif->subdev, "%ux%u\n", mf->width, mf->height);
+}
+
+static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ int i;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %ux%u\n",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mutex_lock(&camif->lock);
+
+ /*
+ * No pixel format change at the camera input is allowed
+ * while streaming.
+ */
+ if (vb2_is_busy(&camif->vp[VP_CODEC].vb_queue) ||
+ vb2_is_busy(&camif->vp[VP_PREVIEW].vb_queue)) {
+ mutex_unlock(&camif->lock);
+ return -EBUSY;
+ }
+
+ __camif_subdev_try_format(camif, mf, fmt->pad);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *mf = fmt->format;
+ mutex_unlock(&camif->lock);
+ return 0;
+ }
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ camif->mbus_fmt = *mf;
+ /* Reset sink crop rectangle. */
+ crop->width = mf->width;
+ crop->height = mf->height;
+ crop->left = 0;
+ crop->top = 0;
+ /*
+ * Reset source format (the camif's crop rectangle)
+ * and the video output resolution.
+ */
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_frame *frame = &camif->vp[i].out_frame;
+ frame->rect = *crop;
+ frame->f_width = mf->width;
+ frame->f_height = mf->height;
+ }
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* Pixel format can be only changed on the sink pad. */
+ mf->code = camif->mbus_fmt.code;
+ mf->width = crop->width;
+ mf->height = crop->height;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ return 0;
+}
+
+static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ if (sel->target == V4L2_SEL_TGT_CROP) {
+ sel->r = *crop;
+ } else { /* crop bounds */
+ sel->r.width = mf->width;
+ sel->r.height = mf->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d) %dx%d, size: %ux%u\n",
+ __func__, crop->left, crop->top, crop->width,
+ crop->height, mf->width, mf->height);
+
+ return 0;
+}
+
+static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ const struct camif_pix_limits *pix_lim = &camif->variant->pix_limits;
+ unsigned int left = 2 * r->left;
+ unsigned int top = 2 * r->top;
+
+ /*
+	 * The following constraints must be met:
+ * - r->width + 2 * r->left = mf->width;
+ * - r->height + 2 * r->top = mf->height;
+ * - crop rectangle size and position must be aligned
+ * to 8 or 2 pixels, depending on SoC version.
+ */
+ v4l_bound_align_image(&r->width, 0, mf->width,
+ ffs(pix_lim->win_hor_offset_align) - 1,
+ &r->height, 0, mf->height, 1, 0);
+
+ v4l_bound_align_image(&left, 0, mf->width - r->width,
+ ffs(pix_lim->win_hor_offset_align),
+ &top, 0, mf->height - r->height, 2, 0);
+
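+	/* Halve the aligned offsets and shrink the rectangle so it stays centered */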
+ r->left = left / 2;
+ r->top = top / 2;
+ r->width = mf->width - left;
+ r->height = mf->height - top;
+ /*
+ * Make sure we either downscale or upscale both the pixel
+	 * width and height. Just return the current crop rectangle if
+ * this scaler constraint is not met.
+ */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV &&
+ camif_is_streaming(camif)) {
+ unsigned int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct v4l2_rect *or = &camif->vp[i].out_frame.rect;
+ if ((or->width > r->width) == (or->height > r->height))
+ continue;
+ *r = camif->camif_crop;
+ pr_debug("Width/height scaling direction limitation\n");
+ break;
+ }
+ }
+
+ v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%dx%d, fmt: %ux%u\n",
+ r->left, r->top, r->width, r->height, mf->width, mf->height);
+}
+
+static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct camif_scaler scaler;
+
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&camif->lock);
+ __camif_try_crop(camif, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ *crop = sel->r;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ scaler = vp->scaler;
+ if (s3c_camif_get_scaler_config(vp, &scaler))
+ continue;
+ vp->scaler = scaler;
+ vp->state |= ST_VP_CONFIG;
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ }
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %u, f_h: %u\n",
+ __func__, crop->left, crop->top, crop->width, crop->height,
+ camif->mbus_fmt.width, camif->mbus_fmt.height);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops s3c_camif_subdev_pad_ops = {
+ .enum_mbus_code = s3c_camif_subdev_enum_mbus_code,
+ .get_selection = s3c_camif_subdev_get_selection,
+ .set_selection = s3c_camif_subdev_set_selection,
+ .get_fmt = s3c_camif_subdev_get_fmt,
+ .set_fmt = s3c_camif_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_ops s3c_camif_subdev_ops = {
+ .pad = &s3c_camif_subdev_pad_ops,
+};
+
+static int s3c_camif_subdev_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_dev *camif = container_of(ctrl->handler, struct camif_dev,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ camif->colorfx = camif->ctrl_colorfx->val;
+ /* Set Cb, Cr */
+ switch (ctrl->val) {
+ case V4L2_COLORFX_SEPIA:
+ camif->colorfx_cb = 115;
+ camif->colorfx_cr = 145;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ camif->colorfx_cb = camif->ctrl_colorfx_cbcr->val >> 8;
+ camif->colorfx_cr = camif->ctrl_colorfx_cbcr->val & 0xff;
+ break;
+ default:
+ /* for V4L2_COLORFX_BW and others */
+ camif->colorfx_cb = 128;
+ camif->colorfx_cr = 128;
+ }
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ camif->test_pattern = camif->ctrl_test_pattern->val;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ camif->vp[VP_CODEC].state |= ST_VP_CONFIG;
+ camif->vp[VP_PREVIEW].state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops s3c_camif_subdev_ctrl_ops = {
+ .s_ctrl = s3c_camif_subdev_s_ctrl,
+};
+
+static const char * const s3c_camif_test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+ "Horizontal increment",
+ "Vertical increment",
+};
+
+int s3c_camif_create_subdev(struct camif_dev *camif)
+{
+ struct v4l2_ctrl_handler *handler = &camif->ctrl_handler;
+ struct v4l2_subdev *sd = &camif->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &s3c_camif_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strlcpy(sd->name, "S3C-CAMIF", sizeof(sd->name));
+
+ camif->pads[CAMIF_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ camif->pads[CAMIF_SD_PAD_SOURCE_C].flags = MEDIA_PAD_FL_SOURCE;
+ camif->pads[CAMIF_SD_PAD_SOURCE_P].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&sd->entity, CAMIF_SD_PADS_NUM,
+ camif->pads);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 3);
+ camif->ctrl_test_pattern = v4l2_ctrl_new_std_menu_items(handler,
+ &s3c_camif_subdev_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(s3c_camif_test_pattern_menu) - 1, 0, 0,
+ s3c_camif_test_pattern_menu);
+
+ if (camif->variant->has_img_effect) {
+ camif->ctrl_colorfx = v4l2_ctrl_new_std_menu(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x981f, V4L2_COLORFX_NONE);
+
+ camif->ctrl_colorfx_cbcr = v4l2_ctrl_new_std(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+ }
+
+ if (handler->error) {
+ v4l2_ctrl_handler_free(handler);
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ if (camif->variant->has_img_effect)
+ v4l2_ctrl_auto_cluster(2, &camif->ctrl_colorfx,
+ V4L2_COLORFX_SET_CBCR, false);
+
+ sd->ctrl_handler = handler;
+ v4l2_set_subdevdata(sd, camif);
+
+ return 0;
+}
+
+void s3c_camif_unregister_subdev(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = &camif->subdev;
+
+ /* Return if not registered */
+ if (v4l2_get_subdevdata(sd) == NULL)
+ return;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&camif->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+int s3c_camif_set_defaults(struct camif_dev *camif)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ struct camif_frame *f = &vp->out_frame;
+
+ vp->camif = camif;
+ vp->id = i;
+ vp->offset = camif->variant->vp_offset;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ vp->fmt_flags = i ? FMT_FL_S3C24XX_PREVIEW :
+ FMT_FL_S3C24XX_CODEC;
+ else
+ vp->fmt_flags = FMT_FL_S3C64XX;
+
+ vp->out_fmt = s3c_camif_find_format(vp, NULL, 0);
+ BUG_ON(vp->out_fmt == NULL);
+
+ memset(f, 0, sizeof(*f));
+ f->f_width = CAMIF_DEF_WIDTH;
+ f->f_height = CAMIF_DEF_HEIGHT;
+ f->rect.width = CAMIF_DEF_WIDTH;
+ f->rect.height = CAMIF_DEF_HEIGHT;
+
+ /* Scaler is always enabled */
+ vp->scaler.enable = 1;
+
+ vp->payload = (f->f_width * f->f_height *
+ vp->out_fmt->depth) / 8;
+ }
+
+ memset(&camif->mbus_fmt, 0, sizeof(camif->mbus_fmt));
+ camif->mbus_fmt.width = CAMIF_DEF_WIDTH;
+ camif->mbus_fmt.height = CAMIF_DEF_HEIGHT;
+ camif->mbus_fmt.code = camif_mbus_formats[0];
+
+ memset(&camif->camif_crop, 0, sizeof(camif->camif_crop));
+ camif->camif_crop.width = CAMIF_DEF_WIDTH;
+ camif->camif_crop.height = CAMIF_DEF_HEIGHT;
+
+ return 0;
+}
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
new file mode 100644
index 000000000..8d8ed72bd
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -0,0 +1,652 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+
+static char *camif_clocks[CLK_MAX_NUM] = {
+ /* HCLK CAMIF clock */
+ [CLK_GATE] = "camif",
+ /* CAMIF / external camera sensor master clock */
+ [CLK_CAM] = "camera",
+};
+
+static const struct camif_fmt camif_formats[] = {
+ {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = 16,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR422P,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YVU 4:2:0 planar, Y/Cr/Cb",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCRCB420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "RGB565, 16 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .ybpp = 2,
+ .color = IMG_FMT_RGB565,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "XRGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_XRGB8888,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_RGB666,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C64XX,
+ }
+};
+
+/**
+ * s3c_camif_find_format() - lookup camif color format by fourcc or an index
+ * @vp: video path (DMA) description (codec/preview)
+ * @pixelformat: fourcc to match, ignored if null
+ * @index: index to the camif_formats array, ignored if negative
+ */
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat,
+ int index)
+{
+ const struct camif_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(camif_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) {
+ fmt = &camif_formats[i];
+ if (vp && !(vp->fmt_flags & fmt->flags))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+ unsigned int sh = 6;
+
+ if (src >= 64 * tar)
+ return -EINVAL;
+
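+	/*
+	 * Find the largest power-of-two pre-scaler ratio (up to 32) for
+	 * which the source size is still at least ratio * target size.
+	 */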
+ while (sh--) {
+ unsigned int tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
+ }
+ *shift = 0, *ratio = 1;
+ return 0;
+}
+
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler)
+{
+ struct v4l2_rect *camif_crop = &vp->camif->camif_crop;
+ int source_x = camif_crop->width;
+ int source_y = camif_crop->height;
+ int target_x = vp->out_frame.rect.width;
+ int target_y = vp->out_frame.rect.height;
+ int ret;
+
+ if (vp->rotation == 90 || vp->rotation == 270)
+ swap(target_x, target_y);
+
+ ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio,
+ &scaler->h_shift);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio,
+ &scaler->v_shift);
+ if (ret < 0)
+ return ret;
+
+ scaler->pre_dst_width = source_x / scaler->pre_h_ratio;
+ scaler->pre_dst_height = source_y / scaler->pre_v_ratio;
+
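+	/* Main scaler ratios in 8-bit fixed point, relative to the pre-scaler output */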
+ scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift);
+ scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift);
+
+ scaler->scaleup_h = (target_x >= source_x);
+ scaler->scaleup_v = (target_y >= source_y);
+
+ scaler->copy = 0;
+
+ pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n",
+ scaler->pre_h_ratio, scaler->h_shift,
+ scaler->pre_v_ratio, scaler->v_shift);
+
+ pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n",
+ source_x, source_y, target_x, target_y,
+ scaler->scaleup_h, scaler->scaleup_v);
+
+ return 0;
+}
+
+static int camif_register_sensor(struct camif_dev *camif)
+{
+ struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ camif->sensor.sd = NULL;
+
+ if (sensor->i2c_board_info.addr == 0)
+ return -EINVAL;
+
+ adapter = i2c_get_adapter(sensor->i2c_bus_num);
+ if (adapter == NULL) {
+ v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n",
+ sensor->i2c_bus_num);
+ return -EPROBE_DEFER;
+ }
+
+ sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter,
+ &sensor->i2c_board_info, NULL);
+ if (sd == NULL) {
+ i2c_put_adapter(adapter);
+ v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n",
+ sensor->i2c_board_info.type);
+ return -EPROBE_DEFER;
+ }
+ camif->sensor.sd = sd;
+
+ v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name);
+
+ /* Get initial pixel format and set it at the camif sink pad */
+ format.pad = 0;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+
+ if (ret < 0)
+ return 0;
+
+ format.pad = CAMIF_SD_PAD_SINK;
+ v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format);
+
+ v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n",
+ format.format.width, format.format.height,
+ format.format.code);
+ return 0;
+}
+
+static void camif_unregister_sensor(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = camif->sensor.sd;
+ struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL;
+ struct i2c_adapter *adapter;
+
+ if (client == NULL)
+ return;
+
+ adapter = client->adapter;
+ v4l2_device_unregister_subdev(sd);
+ camif->sensor.sd = NULL;
+ i2c_unregister_device(client);
+ i2c_put_adapter(adapter);
+}
+
+static int camif_create_media_links(struct camif_dev *camif)
+{
+ int i, ret;
+
+ ret = media_create_pad_link(&camif->sensor.sd->entity, 0,
+ &camif->subdev.entity, CAMIF_SD_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
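+	/* Subdev source pads 1 and 2 feed the codec and preview video nodes respectively */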
+ for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) {
+ ret = media_create_pad_link(&camif->subdev.entity, i,
+ &camif->vp[i - 1].vdev.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ }
+
+ return ret;
+}
+
+static int camif_register_video_nodes(struct camif_dev *camif)
+{
+ int ret = s3c_camif_register_video_node(camif, VP_CODEC);
+ if (ret < 0)
+ return ret;
+
+ return s3c_camif_register_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_video_nodes(struct camif_dev *camif)
+{
+ s3c_camif_unregister_video_node(camif, VP_CODEC);
+ s3c_camif_unregister_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_media_entities(struct camif_dev *camif)
+{
+ camif_unregister_video_nodes(camif);
+ camif_unregister_sensor(camif);
+ s3c_camif_unregister_subdev(camif);
+}
+
+/*
+ * Media device
+ */
+static int camif_media_dev_init(struct camif_dev *camif)
+{
+ struct media_device *md = &camif->media_dev;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int ret;
+
+ memset(md, 0, sizeof(*md));
+ snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF",
+ ip_rev == S3C6410_CAMIF_IP_REV ? "6410" : "244X");
+ strlcpy(md->bus_info, "platform", sizeof(md->bus_info));
+ md->hw_revision = ip_rev;
+
+ md->dev = camif->dev;
+
+ strlcpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name));
+ v4l2_dev->mdev = md;
+
+ media_device_init(md);
+
+ ret = v4l2_device_register(camif->dev, v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static void camif_clk_put(struct camif_dev *camif)
+{
+ int i;
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ if (IS_ERR(camif->clock[i]))
+ continue;
+ clk_unprepare(camif->clock[i]);
+ clk_put(camif->clock[i]);
+ camif->clock[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int camif_clk_get(struct camif_dev *camif)
+{
+ int ret, i;
+
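+	/* Pre-set error values so camif_clk_put() skips clocks that were not acquired */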
+ for (i = 1; i < CLK_MAX_NUM; i++)
+ camif->clock[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ camif->clock[i] = clk_get(camif->dev, camif_clocks[i]);
+ if (IS_ERR(camif->clock[i])) {
+ ret = PTR_ERR(camif->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(camif->clock[i]);
+ if (ret < 0) {
+ clk_put(camif->clock[i]);
+ camif->clock[i] = NULL;
+ goto err;
+ }
+ }
+ return 0;
+err:
+ camif_clk_put(camif);
+ dev_err(camif->dev, "failed to get clock: %s\n",
+ camif_clocks[i]);
+ return ret;
+}
+
+/*
+ * The CAMIF device has two relatively independent data processing paths
+ * that can source data from memory or the common camera input frontend.
+ * Register interrupts for each data processing path (camif_vp).
+ */
+static int camif_request_irqs(struct platform_device *pdev,
+ struct camif_dev *camif)
+{
+ int irq, ret, i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+
+ init_waitqueue_head(&vp->irq_queue);
+
+ irq = platform_get_irq(pdev, i);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get IRQ %d\n", i);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
+ 0, dev_name(&pdev->dev), vp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int s3c_camif_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s3c_camif_plat_data *pdata = dev->platform_data;
+ struct s3c_camif_drvdata *drvdata;
+ struct camif_dev *camif;
+ struct resource *mres;
+ int ret = 0;
+
+ camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
+ if (!camif)
+ return -ENOMEM;
+
+ spin_lock_init(&camif->slock);
+ mutex_init(&camif->lock);
+
+ camif->dev = dev;
+
+ if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
+ dev_err(dev, "wrong platform data\n");
+ return -EINVAL;
+ }
+
+ camif->pdata = *pdata;
+ drvdata = (void *)platform_get_device_id(pdev)->driver_data;
+ camif->variant = drvdata->variant;
+
+ mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ camif->io_base = devm_ioremap_resource(dev, mres);
+ if (IS_ERR(camif->io_base))
+ return PTR_ERR(camif->io_base);
+
+ ret = camif_request_irqs(pdev, camif);
+ if (ret < 0)
+ return ret;
+
+ ret = pdata->gpio_get();
+ if (ret < 0)
+ return ret;
+
+ ret = s3c_camif_create_subdev(camif);
+ if (ret < 0)
+ goto err_sd;
+
+ ret = camif_clk_get(camif);
+ if (ret < 0)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, camif);
+ clk_set_rate(camif->clock[CLK_CAM],
+ camif->pdata.sensor.clock_frequency);
+
+ dev_info(dev, "sensor clock frequency: %lu\n",
+ clk_get_rate(camif->clock[CLK_CAM]));
+ /*
+	 * Set the initial pixel format, resolution and crop rectangle.
+	 * This must be done before the sensor subdev is registered, as
+	 * some settings are overridden with values from the sensor subdev.
+ */
+ s3c_camif_set_defaults(camif);
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = camif_media_dev_init(camif);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = camif_register_sensor(camif);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = camif_register_video_nodes(camif);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = camif_create_media_links(camif);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = media_device_register(&camif->media_dev);
+ if (ret < 0)
+ goto err_sens;
+
+ pm_runtime_put(dev);
+ return 0;
+
+err_sens:
+ v4l2_device_unregister(&camif->v4l2_dev);
+ media_device_unregister(&camif->media_dev);
+ media_device_cleanup(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+err_pm:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ camif_clk_put(camif);
+err_clk:
+ s3c_camif_unregister_subdev(camif);
+err_sd:
+ pdata->gpio_put();
+ return ret;
+}
+
+static int s3c_camif_remove(struct platform_device *pdev)
+{
+ struct camif_dev *camif = platform_get_drvdata(pdev);
+ struct s3c_camif_plat_data *pdata = &camif->pdata;
+
+ media_device_unregister(&camif->media_dev);
+ media_device_cleanup(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+ v4l2_device_unregister(&camif->v4l2_dev);
+
+ pm_runtime_disable(&pdev->dev);
+ camif_clk_put(camif);
+ pdata->gpio_put();
+
+ return 0;
+}
+
+static int s3c_camif_runtime_resume(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ clk_enable(camif->clock[CLK_GATE]);
+ /* null op on s3c244x */
+ clk_enable(camif->clock[CLK_CAM]);
+ return 0;
+}
+
+static int s3c_camif_runtime_suspend(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ /* null op on s3c244x */
+ clk_disable(camif->clock[CLK_CAM]);
+
+ clk_disable(camif->clock[CLK_GATE]);
+ return 0;
+}
+
+static const struct s3c_camif_variant s3c244x_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 640,
+ .max_sc_out_width = 640,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 480,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C244X_CAMIF_IP_REV,
+};
+
+static struct s3c_camif_drvdata s3c244x_camif_drvdata = {
+ .variant = &s3c244x_camif_variant,
+ .bus_clk_freq = 24000000UL,
+};
+
+static const struct s3c_camif_variant s3c6410_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 720,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C6410_CAMIF_IP_REV,
+ .has_img_effect = 1,
+ .vp_offset = 0x20,
+};
+
+static struct s3c_camif_drvdata s3c6410_camif_drvdata = {
+ .variant = &s3c6410_camif_variant,
+ .bus_clk_freq = 133000000UL,
+};
+
+static const struct platform_device_id s3c_camif_driver_ids[] = {
+ {
+ .name = "s3c2440-camif",
+ .driver_data = (unsigned long)&s3c244x_camif_drvdata,
+ }, {
+ .name = "s3c6410-camif",
+ .driver_data = (unsigned long)&s3c6410_camif_drvdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids);
+
+static const struct dev_pm_ops s3c_camif_pm_ops = {
+ .runtime_suspend = s3c_camif_runtime_suspend,
+ .runtime_resume = s3c_camif_runtime_resume,
+};
+
+static struct platform_driver s3c_camif_driver = {
+ .probe = s3c_camif_probe,
+ .remove = s3c_camif_remove,
+ .id_table = s3c_camif_driver_ids,
+ .driver = {
+ .name = S3C_CAMIF_DRIVER_NAME,
+ .pm = &s3c_camif_pm_ops,
+ }
+};
+
+module_platform_driver(s3c_camif_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
new file mode 100644
index 000000000..1f5c8c94c
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -0,0 +1,391 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_CORE_H_
+#define CAMIF_CORE_H_
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/drv-intf/s3c_camif.h>
+
+#define S3C_CAMIF_DRIVER_NAME "s3c-camif"
+#define CAMIF_REQ_BUFS_MIN 3
+#define CAMIF_MAX_OUT_BUFS 4
+#define CAMIF_MAX_PIX_WIDTH 4096
+#define CAMIF_MAX_PIX_HEIGHT 4096
+#define SCALER_MAX_RATIO 64
+#define CAMIF_DEF_WIDTH 640
+#define CAMIF_DEF_HEIGHT 480
+#define CAMIF_STOP_TIMEOUT 1500 /* ms */
+
+#define S3C244X_CAMIF_IP_REV 0x20 /* 2.0 */
+#define S3C2450_CAMIF_IP_REV 0x30 /* 3.0 - not implemented, not tested */
+#define S3C6400_CAMIF_IP_REV 0x31 /* 3.1 - not implemented, not tested */
+#define S3C6410_CAMIF_IP_REV 0x32 /* 3.2 */
+
+/* struct camif_vp::state */
+
+#define ST_VP_PENDING (1 << 0)
+#define ST_VP_RUNNING (1 << 1)
+#define ST_VP_STREAMING (1 << 2)
+#define ST_VP_SENSOR_STREAMING (1 << 3)
+
+#define ST_VP_ABORTING (1 << 4)
+#define ST_VP_OFF (1 << 5)
+#define ST_VP_LASTIRQ (1 << 6)
+
+#define ST_VP_CONFIG (1 << 8)
+
+#define CAMIF_SD_PAD_SINK 0
+#define CAMIF_SD_PAD_SOURCE_C 1
+#define CAMIF_SD_PAD_SOURCE_P 2
+#define CAMIF_SD_PADS_NUM 3
+
+enum img_fmt {
+ IMG_FMT_RGB565 = 0x0010,
+ IMG_FMT_RGB666,
+ IMG_FMT_XRGB8888,
+ IMG_FMT_YCBCR420 = 0x0020,
+ IMG_FMT_YCRCB420,
+ IMG_FMT_YCBCR422P,
+ IMG_FMT_YCBYCR422 = 0x0040,
+ IMG_FMT_YCRYCB422,
+ IMG_FMT_CBYCRY422,
+ IMG_FMT_CRYCBY422,
+};
+
+#define img_fmt_is_rgb(x) ((x) & 0x10)
+#define img_fmt_is_ycbcr(x) ((x) & 0x60)
+
+/* Possible values for struct camif_fmt::flags */
+#define FMT_FL_S3C24XX_CODEC (1 << 0)
+#define FMT_FL_S3C24XX_PREVIEW (1 << 1)
+#define FMT_FL_S3C64XX (1 << 2)
+
+/**
+ * struct camif_fmt - pixel format description
+ * @name: format description string
+ * @fourcc: fourcc code for this format, 0 if not applicable
+ * @color: a corresponding enum img_fmt
+ * @colplanes: number of physically contiguous data planes
+ * @flags: indicate for which SoCs revisions this format is valid
+ * @depth: bits per pixel (total)
+ * @ybpp: number of luminance bytes per pixel
+ */
+struct camif_fmt {
+ char *name;
+ u32 fourcc;
+ u32 color;
+ u16 colplanes;
+ u16 flags;
+ u8 depth;
+ u8 ybpp;
+};
+
+/**
+ * struct camif_dma_offset - pixel offset information for DMA
+ * @initial: offset (in pixels) to first pixel
+ * @line: offset (in pixels) from end of line to start of next line
+ */
+struct camif_dma_offset {
+ int initial;
+ int line;
+};
+
+/**
+ * struct camif_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ * @dma_offset: DMA offset configuration
+ */
+struct camif_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+ struct camif_dma_offset dma_offset;
+};
+
+/* CAMIF clocks enumeration */
+enum {
+ CLK_GATE,
+ CLK_CAM,
+ CLK_MAX_NUM,
+};
+
+struct vp_pix_limits {
+ u16 max_out_width;
+ u16 max_sc_out_width;
+ u16 out_width_align;
+ u16 max_height;
+ u8 min_out_width;
+ u16 out_hor_offset_align;
+};
+
+struct camif_pix_limits {
+ u16 win_hor_offset_align;
+};
+
+/**
+ * struct s3c_camif_variant - CAMIF variant structure
+ * @vp_pix_limits: pixel limits for the codec and preview paths
+ * @pix_limits: pixel limits for the camera input interface
+ * @ip_revision: the CAMIF IP revision: 0x20 for s3c244x, 0x32 for s3c6410
+ * @has_img_effect: set if this variant supports the image effects function
+ */
+struct s3c_camif_variant {
+ struct vp_pix_limits vp_pix_limits[2];
+ struct camif_pix_limits pix_limits;
+ u8 ip_revision;
+ u8 has_img_effect;
+ unsigned int vp_offset;
+};
+
+struct s3c_camif_drvdata {
+ const struct s3c_camif_variant *variant;
+ unsigned long bus_clk_freq;
+};
+
+struct camif_scaler {
+ u8 scaleup_h;
+ u8 scaleup_v;
+ u8 copy;
+ u8 enable;
+ u32 h_shift;
+ u32 v_shift;
+ u32 pre_h_ratio;
+ u32 pre_v_ratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 main_h_ratio;
+ u32 main_v_ratio;
+};
+
+struct camif_dev;
+
+/**
+ * struct camif_vp - CAMIF data processing path structure (codec/preview)
+ * @irq_queue: interrupt handling waitqueue
+ * @irq: interrupt number for this data path
+ * @camif: pointer to the camif structure
+ * @pad: media pad for the video node
+ * @vdev: video device
+ * @ctrl_handler: video node controls handler
+ * @owner: file handle that owns the streaming
+ * @vb_queue: vb2 buffer queue
+ * @pending_buf_q: pending (empty) buffers queue head
+ * @active_buf_q: active (being written) buffers queue head
+ * @active_buffers: counter of buffers set up at the DMA engine
+ * @buf_index: identifier of the last empty buffer set up in H/W
+ * @frame_sequence: image frame sequence counter
+ * @reqbufs_count: the number of buffers requested
+ * @scaler: the scaler structure
+ * @out_fmt: pixel format at this video path output
+ * @payload: the output data frame payload size
+ * @out_frame: the output pixel resolution
+ * @state: the video path's state
+ * @fmt_flags: flags determining supported pixel formats
+ * @id: CAMIF id, 0 - codec, 1 - preview
+ * @rotation: current image rotation value
+ * @hflip: apply horizontal flip if set
+ * @vflip: apply vertical flip if set
+ */
+struct camif_vp {
+ wait_queue_head_t irq_queue;
+ int irq;
+ struct camif_dev *camif;
+ struct media_pad pad;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_fh *owner;
+ struct vb2_queue vb_queue;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ unsigned int active_buffers;
+ unsigned int buf_index;
+ unsigned int frame_sequence;
+ unsigned int reqbufs_count;
+ struct camif_scaler scaler;
+ const struct camif_fmt *out_fmt;
+ unsigned int payload;
+ struct camif_frame out_frame;
+ unsigned int state;
+ u16 fmt_flags;
+ u8 id;
+ u16 rotation;
+ u8 hflip;
+ u8 vflip;
+ unsigned int offset;
+};
+
+/* Video processing path enumeration */
+#define VP_CODEC 0
+#define VP_PREVIEW 1
+#define CAMIF_VP_NUM 2
+
+/**
+ * struct camif_dev - the CAMIF driver private data structure
+ * @media_dev: top-level media device structure
+ * @v4l2_dev: root v4l2_device
+ * @subdev: camera interface ("catchcam") subdev
+ * @mbus_fmt: camera input media bus format
+ * @camif_crop: camera input interface crop rectangle
+ * @pads: the camif subdev's media pads
+ * @stream_count: the camera interface streaming reference counter
+ * @sensor: image sensor data structure
+ * @m_pipeline: video entity pipeline description
+ * @ctrl_handler: v4l2 control handler (owned by @subdev)
+ * @test_pattern: test pattern controls
+ * @vp: video path (DMA) description (codec/preview)
+ * @variant: variant information for this device
+ * @dev: pointer to the CAMIF device struct
+ * @pdata: a copy of the driver's platform data
+ * @clock: clocks required for the CAMIF operation
+ * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting CAMIF registers
+ * @io_base: start address of the memory-mapped CAMIF registers
+ */
+struct camif_dev {
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_rect camif_crop;
+ struct media_pad pads[CAMIF_SD_PADS_NUM];
+ int stream_count;
+
+ struct cam_sensor {
+ struct v4l2_subdev *sd;
+ short power_count;
+ short stream_count;
+ } sensor;
+ struct media_pipeline *m_pipeline;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_test_pattern;
+ struct {
+ struct v4l2_ctrl *ctrl_colorfx;
+ struct v4l2_ctrl *ctrl_colorfx_cbcr;
+ };
+ u8 test_pattern;
+ u8 colorfx;
+ u8 colorfx_cb;
+ u8 colorfx_cr;
+
+ struct camif_vp vp[CAMIF_VP_NUM];
+
+ const struct s3c_camif_variant *variant;
+ struct device *dev;
+ struct s3c_camif_plat_data pdata;
+ struct clk *clock[CLK_MAX_NUM];
+ struct mutex lock;
+ spinlock_t slock;
+ void __iomem *io_base;
+};
+
+/**
+ * struct camif_addr - Y/Cb/Cr DMA start address structure
+ * @y: luminance plane dma address
+ * @cb: Cb plane dma address
+ * @cr: Cr plane dma address
+ */
+struct camif_addr {
+ dma_addr_t y;
+ dma_addr_t cb;
+ dma_addr_t cr;
+};
+
+/**
+ * struct camif_buffer - the camif video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: DMA start addresses
+ * @index: an identifier of this buffer at the DMA engine
+ */
+struct camif_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ struct camif_addr paddr;
+ unsigned int index;
+};
+
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat, int index);
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx);
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx);
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv);
+int s3c_camif_create_subdev(struct camif_dev *camif);
+void s3c_camif_unregister_subdev(struct camif_dev *camif);
+int s3c_camif_set_defaults(struct camif_dev *camif);
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler);
+
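+/*
+ * Buffer queue helpers. These are plain list operations; callers are
+ * expected to provide the necessary locking (e.g. the camif spinlock).
+ */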
+static inline void camif_active_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->active_buf_q);
+ vp->active_buffers++;
+}
+
+static inline struct camif_buffer *camif_active_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->active_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+}
+
+static inline struct camif_buffer *camif_active_queue_peek(
+ struct camif_vp *vp, int index)
+{
+ struct camif_buffer *tmp, *buf;
+
+ if (WARN_ON(list_empty(&vp->active_buf_q)))
+ return NULL;
+
+ list_for_each_entry_safe(buf, tmp, &vp->active_buf_q, list) {
+ if (buf->index == index) {
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+ }
+ }
+
+ return NULL;
+}
+
+static inline void camif_pending_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->pending_buf_q);
+}
+
+static inline struct camif_buffer *camif_pending_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->pending_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* CAMIF_CORE_H_ */
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
new file mode 100644
index 000000000..812fb3a7c
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -0,0 +1,606 @@
+/*
+ * Samsung s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include "camif-regs.h"
+
+#define camif_write(_camif, _off, _val) writel(_val, (_camif)->io_base + (_off))
+#define camif_read(_camif, _off) readl((_camif)->io_base + (_off))
+
+void camif_hw_reset(struct camif_dev *camif)
+{
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg |= CISRCFMT_ITU601_8BIT;
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+
+ /* S/W reset */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_SWRST;
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIGCTRL_IRQ_LEVEL;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_SWRST;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+}
+
+void camif_hw_clear_pending_irq(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_IRQ_CLR(vp->id);
+ camif_write(vp->camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+/*
+ * Sets video test pattern (off, color bar, horizontal or vertical gradient).
+ * External sensor pixel clock must be active for the test pattern to work.
+ */
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern)
+{
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_TESTPATTERN_MASK;
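+ /* The test pattern field occupies CIGCTRL bits 28:27, see CIGCTRL_TESTPATTERN_* */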
+ cfg |= (pattern << 27);
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb)
+{
+ static const struct v4l2_control colorfx[] = {
+ { V4L2_COLORFX_NONE, CIIMGEFF_FIN_BYPASS },
+ { V4L2_COLORFX_BW, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_SEPIA, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_NEGATIVE, CIIMGEFF_FIN_NEGATIVE },
+ { V4L2_COLORFX_ART_FREEZE, CIIMGEFF_FIN_ARTFREEZE },
+ { V4L2_COLORFX_EMBOSS, CIIMGEFF_FIN_EMBOSSING },
+ { V4L2_COLORFX_SILHOUETTE, CIIMGEFF_FIN_SILHOUETTE },
+ { V4L2_COLORFX_SET_CBCR, CIIMGEFF_FIN_ARBITRARY },
+ };
+ unsigned int i, cfg;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++)
+ if (colorfx[i].id == effect)
+ break;
+
+ if (i == ARRAY_SIZE(colorfx))
+ return;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset));
+ /* Set effect */
+ cfg &= ~CIIMGEFF_FIN_MASK;
+ cfg |= colorfx[i].value;
+ /* Set both paths */
+ if (camif->variant->ip_revision >= S3C6400_CAMIF_IP_REV) {
+ if (effect == V4L2_COLORFX_NONE)
+ cfg &= ~CIIMGEFF_IE_ENABLE_MASK;
+ else
+ cfg |= CIIMGEFF_IE_ENABLE_MASK;
+ }
+ cfg &= ~CIIMGEFF_PAT_CBCR_MASK;
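+ /* Cb/Cr pattern values used by the "arbitrary" effects (BW, sepia, set CbCr) */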
+ cfg |= cr | (cb << 13);
+ camif_write(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset), cfg);
+}
+
+static const u32 src_pixfmt_map[8][2] = {
+ { MEDIA_BUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
+ { MEDIA_BUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
+ { MEDIA_BUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
+ { MEDIA_BUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
+};
+
+/* Set camera input pixel format and resolution */
+void camif_hw_set_source_format(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ int i;
+ u32 cfg;
+
+ for (i = ARRAY_SIZE(src_pixfmt_map) - 1; i >= 0; i--) {
+ if (src_pixfmt_map[i][0] == mf->code)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ dev_err(camif->dev,
+ "Unsupported pixel code, falling back to %#08x\n",
+ src_pixfmt_map[i][0]);
+ }
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg &= ~(CISRCFMT_ORDER422_MASK | CISRCFMT_SIZE_CAM_MASK);
+ cfg |= (mf->width << 16) | mf->height;
+ cfg |= src_pixfmt_map[i][1];
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void camif_hw_set_camera_crop(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ u32 hoff2, voff2;
+ u32 cfg;
+
+ /* Note: s3c244x requirement: left = f_width - rect.width / 2 */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ cfg &= ~(CIWDOFST_OFST_MASK | CIWDOFST_WINOFSEN);
+ cfg |= (crop->left << 16) | crop->top;
+ if (crop->left != 0 || crop->top != 0)
+ cfg |= CIWDOFST_WINOFSEN;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+
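+ /* The s3c6410 variant also needs the right/bottom margins in CIWDOFST2 */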
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ hoff2 = mf->width - crop->width - crop->left;
+ voff2 = mf->height - crop->height - crop->top;
+ cfg = (hoff2 << 16) | voff2;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST2, cfg);
+ }
+}
+
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ if (vp->id == 0)
+ cfg |= (CIWDOFST_CLROVCOFIY | CIWDOFST_CLROVCOFICB |
+ CIWDOFST_CLROVCOFICR);
+ else
+ cfg |= (/* CIWDOFST_CLROVPRFIY | */ CIWDOFST_CLROVPRFICB |
+ CIWDOFST_CLROVPRFICR);
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+}
+
+/* Set video bus signals polarity */
+void camif_hw_set_camera_bus(struct camif_dev *camif)
+{
+ unsigned int flags = camif->pdata.sensor.flags;
+
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+
+ cfg &= ~(CIGCTRL_INVPOLPCLK | CIGCTRL_INVPOLVSYNC |
+ CIGCTRL_INVPOLHREF | CIGCTRL_INVPOLFIELD);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= CIGCTRL_INVPOLVSYNC;
+ /*
+ * HREF is normally high during frame active data
+ * transmission and low during horizontal synchronization
+ * period. Thus HREF active high means HSYNC active low.
+ */
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ cfg |= CIGCTRL_INVPOLHREF; /* HREF active low */
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ if (flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= CIGCTRL_INVPOLFIELD;
+ cfg |= CIGCTRL_FIELDMODE;
+ }
+
+ pr_debug("Setting CIGCTRL to: %#x\n", cfg);
+
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_output_addr(struct camif_vp *vp,
+ struct camif_addr *paddr, int i)
+{
+ struct camif_dev *camif = vp->camif;
+
+ camif_write(camif, S3C_CAMIF_REG_CIYSA(vp->id, i), paddr->y);
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV
+ || vp->id == VP_CODEC) {
+ camif_write(camif, S3C_CAMIF_REG_CICBSA(vp->id, i),
+ paddr->cb);
+ camif_write(camif, S3C_CAMIF_REG_CICRSA(vp->id, i),
+ paddr->cr);
+ }
+
+ pr_debug("dst_buf[%d]: %pad, cb: %pad, cr: %pad\n",
+ i, &paddr->y, &paddr->cb, &paddr->cr);
+}
+
+static void camif_hw_set_out_dma_size(struct camif_vp *vp)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
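+/*
+ * Calculate DMA burst lengths for a line of @width pixels and @ybpp bytes
+ * per pixel: pick the largest power-of-two burst (16..2 words) that divides
+ * the line evenly, or one whose remainder is half or a quarter of it, and
+ * use the remainder as the trailing burst.
+ */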
+static void camif_get_dma_burst(u32 width, u32 ybpp, u32 *mburst, u32 *rburst)
+{
+ unsigned int nwords = width * ybpp / 4;
+ unsigned int div, rem;
+
+ if (WARN_ON(width < 8 || (width * ybpp) & 7))
+ return;
+
+ for (div = 16; div >= 2; div /= 2) {
+ if (nwords < div)
+ continue;
+
+ rem = nwords & (div - 1);
+ if (rem == 0) {
+ *mburst = div;
+ *rburst = div;
+ break;
+ }
+ if (rem == div / 2 || rem == div / 4) {
+ *mburst = div;
+ *rburst = rem;
+ break;
+ }
+ }
+}
+
+void camif_hw_set_output_dma(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int ymburst = 0, yrburst = 0;
+ u32 cfg;
+
+ camif_hw_set_out_dma_size(vp);
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ struct camif_dma_offset *offset = &frame->dma_offset;
+ /* Set the input dma offsets. */
+ cfg = S3C_CISS_OFFS_INITIAL(offset->initial);
+ cfg |= S3C_CISS_OFFS_LINE(offset->line);
+ camif_write(camif, S3C_CAMIF_REG_CISSY(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCB(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCR(vp->id), cfg);
+ }
+
+ /* Configure DMA burst values */
+ camif_get_dma_burst(frame->rect.width, fmt->ybpp, &ymburst, &yrburst);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset));
+ cfg &= ~CICTRL_BURST_MASK;
+
+ cfg |= CICTRL_YBURST1(ymburst) | CICTRL_YBURST2(yrburst);
+ cfg |= CICTRL_CBURST1(ymburst / 2) | CICTRL_CBURST2(yrburst / 2);
+
+ camif_write(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("ymburst: %u, yrburst: %u\n", ymburst, yrburst);
+}
+
+void camif_hw_set_input_path(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id));
+ cfg &= ~MSCTRL_SEL_DMA_CAM;
+ camif_write(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id), cfg);
+}
+
+void camif_hw_set_target_format(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ pr_debug("fw: %d, fh: %d color: %d\n", frame->f_width,
+ frame->f_height, vp->out_fmt->color);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ /* We currently support only YCbCr 4:2:2 at the camera input */
+ cfg |= CITRGFMT_IN422;
+ cfg &= ~CITRGFMT_OUT422;
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ cfg |= CITRGFMT_OUT422;
+ } else {
+ cfg &= ~CITRGFMT_OUTFORMAT_MASK;
+ switch (vp->out_fmt->color) {
+ case IMG_FMT_RGB565...IMG_FMT_XRGB8888:
+ cfg |= CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case IMG_FMT_YCBCR420...IMG_FMT_YCRCB420:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ case IMG_FMT_YCBCR422P:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case IMG_FMT_YCBYCR422...IMG_FMT_CRYCBY422:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422I;
+ break;
+ }
+ }
+
+ /* Rotation is only supported by s3c64xx */
+ if (vp->rotation == 90 || vp->rotation == 270)
+ cfg |= (frame->f_height << 16) | frame->f_width;
+ else
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+
+ /* Target area, output pixel width * height */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset));
+ cfg &= ~CITAREA_MASK;
+ cfg |= (frame->f_width * frame->f_height);
+ camif_write(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset), cfg);
+}
+
+void camif_hw_set_flip(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif,
+ S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+
+ cfg &= ~CITRGFMT_FLIP_MASK;
+
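+ /* Note: horizontal flip maps to the hardware Y mirror bit and vice versa */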
+ if (vp->hflip)
+ cfg |= CITRGFMT_FLIP_Y_MIRROR;
+ if (vp->vflip)
+ cfg |= CITRGFMT_FLIP_X_MIRROR;
+
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
+static void camif_hw_set_prescaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *sc = &vp->scaler;
+ u32 cfg, shfactor, addr;
+
+ addr = S3C_CAMIF_REG_CISCPRERATIO(vp->id, vp->offset);
+
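+ /* SHFACTOR (bits 31:28) compensates for the pre-scaler shifts: 10 - (h_shift + v_shift) */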
+ shfactor = 10 - (sc->h_shift + sc->v_shift);
+ cfg = shfactor << 28;
+
+ cfg |= (sc->pre_h_ratio << 16) | sc->pre_v_ratio;
+ camif_write(camif, addr, cfg);
+
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ camif_write(camif, S3C_CAMIF_REG_CISCPREDST(vp->id, vp->offset), cfg);
+}
+
+static void camif_s3c244x_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_SCALEUP_MASK | CISCCTRL_SCALERBYPASS |
+ CISCCTRL_MAIN_RATIO_MASK | CIPRSCCTRL_RGB_FORMAT_24BIT);
+
+ if (scaler->enable) {
+ if (scaler->scaleup_h) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_H;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_H;
+ }
+ if (scaler->scaleup_v) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_V;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_V;
+ }
+ } else {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALERBYPASS;
+ }
+
+ cfg |= ((scaler->main_h_ratio & 0x1ff) << 16);
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ if (vp->id == VP_PREVIEW) {
+ if (color == IMG_FMT_XRGB8888)
+ cfg |= CIPRSCCTRL_RGB_FORMAT_24BIT;
+ cfg |= CIPRSCCTRL_SAMPLE;
+ }
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+static void camif_s3c64xx_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE
+ | CISCCTRL_SCALEUP_H | CISCCTRL_SCALEUP_V
+ | CISCCTRL_SCALERBYPASS | CISCCTRL_ONE2ONE
+ | CISCCTRL_INRGB_FMT_MASK | CISCCTRL_OUTRGB_FMT_MASK
+ | CISCCTRL_INTERLACE | CISCCTRL_EXTRGB_EXTENSION
+ | CISCCTRL_MAIN_RATIO_MASK);
+
+ cfg |= (CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE);
+
+ if (!scaler->enable) {
+ cfg |= CISCCTRL_SCALERBYPASS;
+ } else {
+ if (scaler->scaleup_h)
+ cfg |= CISCCTRL_SCALEUP_H;
+ if (scaler->scaleup_v)
+ cfg |= CISCCTRL_SCALEUP_V;
+ if (scaler->copy)
+ cfg |= CISCCTRL_ONE2ONE;
+ }
+
+ switch (color) {
+ case IMG_FMT_RGB666:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB666;
+ break;
+ case IMG_FMT_XRGB8888:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB888;
+ break;
+ }
+
+ cfg |= (scaler->main_h_ratio & 0x1ff) << 16;
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+void camif_hw_set_scaler(struct camif_vp *vp)
+{
+ unsigned int ip_rev = vp->camif->variant->ip_revision;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_s3c244x_hw_set_scaler(vp);
+ else
+ camif_s3c64xx_hw_set_scaler(vp);
+}
+
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on)
+{
+ u32 addr = S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (on)
+ cfg |= CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~CISCCTRL_SCALERSTART;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable)
+{
+ u32 addr = S3C_CAMIF_REG_CICTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (enable)
+ cfg |= CICTRL_LASTIRQ_ENABLE;
+ else
+ cfg &= ~CICTRL_LASTIRQ_ENABLE;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_enable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
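+ /* CIIMGCPT is shared by both paths; stream_count gates the global capture enable bit */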
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ camif->stream_count++;
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIIMGCPT_CPT_FREN_ENABLE(vp->id);
+
+ if (vp->scaler.enable)
+ cfg |= CIIMGCPT_IMGCPTEN_SC(vp->id);
+
+ if (camif->stream_count == 1)
+ cfg |= CIIMGCPT_IMGCPTEN;
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+}
+
+void camif_hw_disable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ cfg &= ~CIIMGCPT_IMGCPTEN_SC(vp->id);
+
+ if (WARN_ON(--(camif->stream_count) < 0))
+ camif->stream_count = 0;
+
+ if (camif->stream_count == 0)
+ cfg &= ~CIIMGCPT_IMGCPTEN;
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+}
+
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { S3C_CAMIF_REG_CISRCFMT, "CISRCFMT" },
+ { S3C_CAMIF_REG_CIWDOFST, "CIWDOFST" },
+ { S3C_CAMIF_REG_CIGCTRL, "CIGCTRL" },
+ { S3C_CAMIF_REG_CIWDOFST2, "CIWDOFST2" },
+ { S3C_CAMIF_REG_CIYSA(0, 0), "CICOYSA0" },
+ { S3C_CAMIF_REG_CICBSA(0, 0), "CICOCBSA0" },
+ { S3C_CAMIF_REG_CICRSA(0, 0), "CICOCRSA0" },
+ { S3C_CAMIF_REG_CIYSA(0, 1), "CICOYSA1" },
+ { S3C_CAMIF_REG_CICBSA(0, 1), "CICOCBSA1" },
+ { S3C_CAMIF_REG_CICRSA(0, 1), "CICOCRSA1" },
+ { S3C_CAMIF_REG_CIYSA(0, 2), "CICOYSA2" },
+ { S3C_CAMIF_REG_CICBSA(0, 2), "CICOCBSA2" },
+ { S3C_CAMIF_REG_CICRSA(0, 2), "CICOCRSA2" },
+ { S3C_CAMIF_REG_CIYSA(0, 3), "CICOYSA3" },
+ { S3C_CAMIF_REG_CICBSA(0, 3), "CICOCBSA3" },
+ { S3C_CAMIF_REG_CICRSA(0, 3), "CICOCRSA3" },
+ { S3C_CAMIF_REG_CIYSA(1, 0), "CIPRYSA0" },
+ { S3C_CAMIF_REG_CIYSA(1, 1), "CIPRYSA1" },
+ { S3C_CAMIF_REG_CIYSA(1, 2), "CIPRYSA2" },
+ { S3C_CAMIF_REG_CIYSA(1, 3), "CIPRYSA3" },
+ { S3C_CAMIF_REG_CITRGFMT(0, 0), "CICOTRGFMT" },
+ { S3C_CAMIF_REG_CITRGFMT(1, 0), "CIPRTRGFMT" },
+ { S3C_CAMIF_REG_CICTRL(0, 0), "CICOCTRL" },
+ { S3C_CAMIF_REG_CICTRL(1, 0), "CIPRCTRL" },
+ { S3C_CAMIF_REG_CISCPREDST(0, 0), "CICOSCPREDST" },
+ { S3C_CAMIF_REG_CISCPREDST(1, 0), "CIPRSCPREDST" },
+ { S3C_CAMIF_REG_CISCPRERATIO(0, 0), "CICOSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCPRERATIO(1, 0), "CIPRSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCCTRL(0, 0), "CICOSCCTRL" },
+ { S3C_CAMIF_REG_CISCCTRL(1, 0), "CIPRSCCTRL" },
+ { S3C_CAMIF_REG_CITAREA(0, 0), "CICOTAREA" },
+ { S3C_CAMIF_REG_CITAREA(1, 0), "CIPRTAREA" },
+ { S3C_CAMIF_REG_CISTATUS(0, 0), "CICOSTATUS" },
+ { S3C_CAMIF_REG_CISTATUS(1, 0), "CIPRSTATUS" },
+ { S3C_CAMIF_REG_CIIMGCPT(0), "CIIMGCPT" },
+ };
+ u32 i;
+
+ pr_info("--- %s ---\n", label);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(camif->io_base + registers[i].offset);
+ dev_info(camif->dev, "%s:\t0x%08x\n", registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/platform/s3c-camif/camif-regs.h b/drivers/media/platform/s3c-camif/camif-regs.h
new file mode 100644
index 000000000..5ad36c1c2
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.h
@@ -0,0 +1,269 @@
+/*
+ * Register definition file for s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_REGS_H_
+#define CAMIF_REGS_H_
+
+#include "camif-core.h"
+#include <media/drv-intf/s3c_camif.h>
+
+/*
+ * The id argument indicates the processing path:
+ * id = 0 - codec (FIMC C), 1 - preview (FIMC P).
+ */
+
+/* Camera input format */
+#define S3C_CAMIF_REG_CISRCFMT 0x00
+#define CISRCFMT_ITU601_8BIT (1 << 31)
+#define CISRCFMT_ITU656_8BIT (0 << 31)
+#define CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define CISRCFMT_ORDER422_CRYCBY (3 << 14)
+#define CISRCFMT_ORDER422_MASK (3 << 14)
+#define CISRCFMT_SIZE_CAM_MASK (0x1fff << 16 | 0x1fff)
+
+/* Window offset */
+#define S3C_CAMIF_REG_CIWDOFST 0x04
+#define CIWDOFST_WINOFSEN (1 << 31)
+#define CIWDOFST_CLROVCOFIY (1 << 30)
+#define CIWDOFST_CLROVRLB_PR (1 << 28)
+/* #define CIWDOFST_CLROVPRFIY (1 << 27) */
+#define CIWDOFST_CLROVCOFICB (1 << 15)
+#define CIWDOFST_CLROVCOFICR (1 << 14)
+#define CIWDOFST_CLROVPRFICB (1 << 13)
+#define CIWDOFST_CLROVPRFICR (1 << 12)
+#define CIWDOFST_OFST_MASK (0x7ff << 16 | 0x7ff)
+
+/* Window offset 2 */
+#define S3C_CAMIF_REG_CIWDOFST2 0x14
+#define CIWDOFST2_OFST2_MASK (0xfff << 16 | 0xfff)
+
+/* Global control */
+#define S3C_CAMIF_REG_CIGCTRL 0x08
+#define CIGCTRL_SWRST (1 << 31)
+#define CIGCTRL_CAMRST (1 << 30)
+#define CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define CIGCTRL_INVPOLPCLK (1 << 26)
+#define CIGCTRL_INVPOLVSYNC (1 << 25)
+#define CIGCTRL_INVPOLHREF (1 << 24)
+#define CIGCTRL_IRQ_OVFEN (1 << 22)
+#define CIGCTRL_HREF_MASK (1 << 21)
+#define CIGCTRL_IRQ_LEVEL (1 << 20)
+/* IRQ_CLR_C, IRQ_CLR_P */
+#define CIGCTRL_IRQ_CLR(id) (1 << (19 - (id)))
+#define CIGCTRL_FIELDMODE (1 << 2)
+#define CIGCTRL_INVPOLFIELD (1 << 1)
+#define CIGCTRL_CAM_INTERLACE (1 << 0)
+
+/* Y DMA output frame start address. n = 0..3. */
+#define S3C_CAMIF_REG_CIYSA(id, n) (0x18 + (id) * 0x54 + (n) * 4)
+/* Cb plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICBSA(id, n) (0x28 + (id) * 0x54 + (n) * 4)
+/* Cr plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICRSA(id, n) (0x38 + (id) * 0x54 + (n) * 4)
+
+/* CICOTRGFMT, CIPRTRGFMT - Target format */
+#define S3C_CAMIF_REG_CITRGFMT(id, _offs) (0x48 + (id) * (0x34 + (_offs)))
+#define CITRGFMT_IN422 (1 << 31) /* only for s3c24xx */
+#define CITRGFMT_OUT422 (1 << 30) /* only for s3c24xx */
+#define CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422I (2 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_RGB (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_MASK (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_TARGETHSIZE(x) ((x) << 16)
+#define CITRGFMT_FLIP_NORMAL (0 << 14)
+#define CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define CITRGFMT_FLIP_180 (3 << 14)
+#define CITRGFMT_FLIP_MASK (3 << 14)
+/* Preview path only */
+#define CITRGFMT_ROT90_PR (1 << 13)
+#define CITRGFMT_TARGETVSIZE(x) ((x) << 0)
+#define CITRGFMT_TARGETSIZE_MASK ((0x1fff << 16) | 0x1fff)
+
+/* CICOCTRL, CIPRCTRL. Output DMA control. */
+#define S3C_CAMIF_REG_CICTRL(id, _offs) (0x4c + (id) * (0x34 + (_offs)))
+#define CICTRL_BURST_MASK (0xfffff << 4)
+/* xBURSTn - 5-bits width */
+#define CICTRL_YBURST1(x) ((x) << 19)
+#define CICTRL_YBURST2(x) ((x) << 14)
+#define CICTRL_RGBBURST1(x) ((x) << 19)
+#define CICTRL_RGBBURST2(x) ((x) << 14)
+#define CICTRL_CBURST1(x) ((x) << 9)
+#define CICTRL_CBURST2(x) ((x) << 4)
+#define CICTRL_LASTIRQ_ENABLE (1 << 2)
+#define CICTRL_ORDER422_MASK (3 << 0)
+
+/* CICOSCPRERATIO, CIPRSCPRERATIO. Pre-scaler control 1. */
+#define S3C_CAMIF_REG_CISCPRERATIO(id, _offs) (0x50 + (id) * (0x34 + (_offs)))
+
+/* CICOSCPREDST, CIPRSCPREDST. Pre-scaler control 2. */
+#define S3C_CAMIF_REG_CISCPREDST(id, _offs) (0x54 + (id) * (0x34 + (_offs)))
+
+/* CICOSCCTRL, CIPRSCCTRL. Main scaler control. */
+#define S3C_CAMIF_REG_CISCCTRL(id, _offs) (0x58 + (id) * (0x34 + (_offs)))
+#define CISCCTRL_SCALERBYPASS (1 << 31)
+/* s3c244x preview path only, s3c64xx both */
+#define CIPRSCCTRL_SAMPLE (1 << 31)
+/* 0 - 16-bit RGB, 1 - 24-bit RGB */
+#define CIPRSCCTRL_RGB_FORMAT_24BIT (1 << 30) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_H (1 << 29) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_V (1 << 28) /* only for s3c244x */
+/* s3c64xx */
+#define CISCCTRL_SCALEUP_H (1 << 30)
+#define CISCCTRL_SCALEUP_V (1 << 29)
+#define CISCCTRL_SCALEUP_MASK (0x3 << 29)
+#define CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define CISCCTRL_INTERLACE (1 << 25)
+#define CISCCTRL_SCALERSTART (1 << 15)
+#define CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define CISCCTRL_ONE2ONE (1 << 9)
+#define CISCCTRL_MAIN_RATIO_MASK (0x1ff << 16 | 0x1ff)
+
+/* CICOTAREA, CIPRTAREA. Target area for DMA (Hsize x Vsize). */
+#define S3C_CAMIF_REG_CITAREA(id, _offs) (0x5c + (id) * (0x34 + (_offs)))
+#define CITAREA_MASK 0xfffffff
+
+/* Codec (id = 0) or preview (id = 1) path status. */
+#define S3C_CAMIF_REG_CISTATUS(id, _offs) (0x64 + (id) * (0x34 + (_offs)))
+#define CISTATUS_OVFIY_STATUS (1 << 31)
+#define CISTATUS_OVFICB_STATUS (1 << 30)
+#define CISTATUS_OVFICR_STATUS (1 << 29)
+#define CISTATUS_OVF_MASK (0x7 << 29)
+#define CIPRSTATUS_OVF_MASK (0x3 << 30)
+#define CISTATUS_VSYNC_STATUS (1 << 28)
+#define CISTATUS_FRAMECNT_MASK (3 << 26)
+#define CISTATUS_FRAMECNT(__reg) (((__reg) >> 26) & 0x3)
+#define CISTATUS_WINOFSTEN_STATUS (1 << 25)
+#define CISTATUS_IMGCPTEN_STATUS (1 << 22)
+#define CISTATUS_IMGCPTENSC_STATUS (1 << 21)
+#define CISTATUS_VSYNC_A_STATUS (1 << 20)
+#define CISTATUS_FRAMEEND_STATUS (1 << 19) /* 17 on s3c64xx */
+
+/* Image capture enable */
+#define S3C_CAMIF_REG_CIIMGCPT(_offs) (0xa0 + (_offs))
+#define CIIMGCPT_IMGCPTEN (1 << 31)
+#define CIIMGCPT_IMGCPTEN_SC(id) (1 << (30 - (id)))
+/* Frame control: 1 - one-shot, 0 - free run */
+#define CIIMGCPT_CPT_FREN_ENABLE(id) (1 << (25 - (id)))
+#define CIIMGCPT_CPT_FRMOD_ENABLE (0 << 18)
+#define CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Capture sequence */
+#define S3C_CAMIF_REG_CICPTSEQ 0xc4
+
+/* Image effects */
+#define S3C_CAMIF_REG_CIIMGEFF(_offs) (0xb0 + (_offs))
+#define CIIMGEFF_IE_ENABLE(id) (1 << (30 + (id)))
+#define CIIMGEFF_IE_ENABLE_MASK (3 << 30)
+/* Image effect: 1 - after scaler, 0 - before scaler */
+#define CIIMGEFF_IE_AFTER_SC (1 << 29)
+#define CIIMGEFF_FIN_MASK (7 << 26)
+#define CIIMGEFF_FIN_BYPASS (0 << 26)
+#define CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+#define CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define CIIMGEFF_PAT_CR(x) (x)
+
+/* MSCOY0SA, MSPRY0SA. Y/Cb/Cr frame start address for input DMA. */
+#define S3C_CAMIF_REG_MSY0SA(id) (0xd4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0SA(id) (0xd8 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0SA(id) (0xdc + ((id) * 0x2c))
+
+/* MSCOY0END, MSPRY0END. Y/Cb/Cr frame end address for input DMA. */
+#define S3C_CAMIF_REG_MSY0END(id) (0xe0 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0END(id) (0xe4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0END(id) (0xe8 + ((id) * 0x2c))
+
+/* MSCOYOFF, MSPRYOFF. Y/Cb/Cr offset. id: 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSYOFF(id) (0x118 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCBOFF(id) (0x11c + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCROFF(id) (0x120 + ((id) * 0x2c))
+
+/* Real input DMA data size. n = 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSWIDTH(id) (0xf8 + (id) * 0x2c)
+#define AUTOLOAD_ENABLE (1 << 31)
+#define ADDR_CH_DIS (1 << 30)
+#define MSHEIGHT(x) (((x) & 0x3ff) << 16)
+#define MSWIDTH(x) ((x) & 0x3ff)
+
+/* Input DMA control. n = 0 - codec, 1 - preview */
+#define S3C_CAMIF_REG_MSCTRL(id) (0xfc + (id) * 0x2c)
+#define MSCTRL_ORDER422_M_YCBYCR (0 << 4)
+#define MSCTRL_ORDER422_M_YCRYCB (1 << 4)
+#define MSCTRL_ORDER422_M_CBYCRY (2 << 4)
+#define MSCTRL_ORDER422_M_CRYCBY (3 << 4)
+/* 0 - camera, 1 - DMA */
+#define MSCTRL_SEL_DMA_CAM (1 << 3)
+#define MSCTRL_INFORMAT_M_YCBCR420 (0 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422 (1 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422I (2 << 1)
+#define MSCTRL_INFORMAT_M_RGB (3 << 1)
+#define MSCTRL_ENVID_M (1 << 0)
+
+/* CICOSCOSY, CIPRSCOSY. Scan line Y/Cb/Cr offset. */
+#define S3C_CAMIF_REG_CISSY(id) (0x12c + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCB(id) (0x130 + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCR(id) (0x134 + (id) * 0x0c)
+#define S3C_CISS_OFFS_INITIAL(x) ((x) << 16)
+#define S3C_CISS_OFFS_LINE(x) ((x) << 0)
+
+/* ------------------------------------------------------------------ */
+
+void camif_hw_reset(struct camif_dev *camif);
+void camif_hw_clear_pending_irq(struct camif_vp *vp);
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp);
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable);
+void camif_hw_set_input_path(struct camif_vp *vp);
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on);
+void camif_hw_enable_capture(struct camif_vp *vp);
+void camif_hw_disable_capture(struct camif_vp *vp);
+void camif_hw_set_camera_bus(struct camif_dev *camif);
+void camif_hw_set_source_format(struct camif_dev *camif);
+void camif_hw_set_camera_crop(struct camif_dev *camif);
+void camif_hw_set_scaler(struct camif_vp *vp);
+void camif_hw_set_flip(struct camif_vp *vp);
+void camif_hw_set_output_dma(struct camif_vp *vp);
+void camif_hw_set_target_format(struct camif_vp *vp);
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern);
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb);
+void camif_hw_set_output_addr(struct camif_vp *vp, struct camif_addr *paddr,
+ int index);
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label);
+
+static inline u32 camif_hw_get_status(struct camif_vp *vp)
+{
+ return readl(vp->camif->io_base + S3C_CAMIF_REG_CISTATUS(vp->id,
+ vp->offset));
+}
+
+#endif /* CAMIF_REGS_H_ */
diff --git a/drivers/media/platform/s5p-cec/Makefile b/drivers/media/platform/s5p-cec/Makefile
new file mode 100644
index 000000000..0e2cf4578
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec.o
+s5p-cec-y += s5p_cec.o exynos_hdmi_cecctrl.o
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cec.h b/drivers/media/platform/s5p-cec/exynos_hdmi_cec.h
new file mode 100644
index 000000000..7d9453505
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cec.h
@@ -0,0 +1,37 @@
+/* drivers/media/platform/s5p-cec/exynos_hdmi_cec.h
+ *
+ * Copyright (c) 2010, 2014 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Header file for interface of Samsung Exynos hdmi cec hardware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _EXYNOS_HDMI_CEC_H_
+#define _EXYNOS_HDMI_CEC_H_ __FILE__
+
+#include <linux/regmap.h>
+#include "s5p_cec.h"
+
+void s5p_cec_set_divider(struct s5p_cec_dev *cec);
+void s5p_cec_enable_rx(struct s5p_cec_dev *cec);
+void s5p_cec_mask_rx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_unmask_rx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_reset(struct s5p_cec_dev *cec);
+void s5p_cec_tx_reset(struct s5p_cec_dev *cec);
+void s5p_cec_rx_reset(struct s5p_cec_dev *cec);
+void s5p_cec_threshold(struct s5p_cec_dev *cec);
+void s5p_cec_copy_packet(struct s5p_cec_dev *cec, char *data,
+ size_t count, u8 retries);
+void s5p_cec_set_addr(struct s5p_cec_dev *cec, u32 addr);
+u32 s5p_cec_get_status(struct s5p_cec_dev *cec);
+void s5p_clr_pending_tx(struct s5p_cec_dev *cec);
+void s5p_clr_pending_rx(struct s5p_cec_dev *cec);
+void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, u32 size, u8 *buffer);
+
+#endif /* _EXYNOS_HDMI_CEC_H_ */
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
new file mode 100644
index 000000000..146ae6f25
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
@@ -0,0 +1,209 @@
+/* drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+ *
+ * Copyright (c) 2009, 2014 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * CEC control functions for the Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+
+#define S5P_HDMI_FIN 24000000
+#define CEC_DIV_RATIO 320000
+
+#define CEC_MESSAGE_BROADCAST_MASK 0x0F
+#define CEC_MESSAGE_BROADCAST 0x0F
+#define CEC_FILTER_THRESHOLD 0x15
+
+void s5p_cec_set_divider(struct s5p_cec_dev *cec)
+{
+ u32 div_ratio, div_val;
+ unsigned int reg;
+
+ div_ratio = S5P_HDMI_FIN / CEC_DIV_RATIO - 1;
+
+ if (regmap_read(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, &reg)) {
+ dev_err(cec->dev, "failed to read phy control\n");
+ return;
+ }
+
+ reg = (reg & ~(0x3FF << 16)) | (div_ratio << 16);
+
+ if (regmap_write(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, reg)) {
+ dev_err(cec->dev, "failed to write phy control\n");
+ return;
+ }
+
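+ /* Divider for the 50 us CEC tick: 320000 Hz * 0.00005 s - 1 = 15 */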
+ div_val = CEC_DIV_RATIO * 0.00005 - 1;
+
+ writeb(0x0, cec->reg + S5P_CEC_DIVISOR_3);
+ writeb(0x0, cec->reg + S5P_CEC_DIVISOR_2);
+ writeb(0x0, cec->reg + S5P_CEC_DIVISOR_1);
+ writeb(div_val, cec->reg + S5P_CEC_DIVISOR_0);
+}
+
+void s5p_cec_enable_rx(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_RX_CTRL);
+ reg |= S5P_CEC_RX_CTRL_ENABLE;
+ writeb(reg, cec->reg + S5P_CEC_RX_CTRL);
+}
+
+void s5p_cec_mask_rx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg |= S5P_CEC_IRQ_RX_DONE;
+ reg |= S5P_CEC_IRQ_RX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_unmask_rx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg &= ~S5P_CEC_IRQ_RX_DONE;
+ reg &= ~S5P_CEC_IRQ_RX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg |= S5P_CEC_IRQ_TX_DONE;
+ reg |= S5P_CEC_IRQ_TX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg &= ~S5P_CEC_IRQ_TX_DONE;
+ reg &= ~S5P_CEC_IRQ_TX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_reset(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL);
+ writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL);
+
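+ /* Clear bit 0 of the register at offset 0xc4 (not defined in regs-cec.h) */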
+ reg = readb(cec->reg + 0xc4);
+ reg &= ~0x1;
+ writeb(reg, cec->reg + 0xc4);
+}
+
+void s5p_cec_tx_reset(struct s5p_cec_dev *cec)
+{
+ writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL);
+}
+
+void s5p_cec_rx_reset(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL);
+
+ reg = readb(cec->reg + 0xc4);
+ reg &= ~0x1;
+ writeb(reg, cec->reg + 0xc4);
+}
+
+void s5p_cec_threshold(struct s5p_cec_dev *cec)
+{
+ writeb(CEC_FILTER_THRESHOLD, cec->reg + S5P_CEC_RX_FILTER_TH);
+ writeb(0, cec->reg + S5P_CEC_RX_FILTER_CTRL);
+}
+
+void s5p_cec_copy_packet(struct s5p_cec_dev *cec, char *data,
+ size_t count, u8 retries)
+{
+ int i = 0;
+ u8 reg;
+
+ while (i < count) {
+ writeb(data[i], cec->reg + (S5P_CEC_TX_BUFF0 + (i * 4)));
+ i++;
+ }
+
+ writeb(count, cec->reg + S5P_CEC_TX_BYTES);
+ reg = readb(cec->reg + S5P_CEC_TX_CTRL);
+ reg |= S5P_CEC_TX_CTRL_START;
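+ /* The retransmission count lives in TX_CTRL bits 6:4 (cf. S5P_CEC_TX_CTRL_RETRY) */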
+ reg &= ~0x70;
+ reg |= retries << 4;
+
+ if ((data[0] & CEC_MESSAGE_BROADCAST_MASK) == CEC_MESSAGE_BROADCAST) {
+ dev_dbg(cec->dev, "Broadcast");
+ reg |= S5P_CEC_TX_CTRL_BCAST;
+ } else {
+ dev_dbg(cec->dev, "No Broadcast");
+ reg &= ~S5P_CEC_TX_CTRL_BCAST;
+ }
+
+ writeb(reg, cec->reg + S5P_CEC_TX_CTRL);
+ dev_dbg(cec->dev, "cec-tx: cec count (%zu): %*ph", count,
+ (int)count, data);
+}
+
+void s5p_cec_set_addr(struct s5p_cec_dev *cec, u32 addr)
+{
+ writeb(addr & 0x0F, cec->reg + S5P_CEC_LOGIC_ADDR);
+}
+
+u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
+{
+ u32 status = 0;
+
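+ /*
+ * Pack the byte-wide status registers into one word: TX flags in
+ * bits 7:0, TX byte count in 15:8, RX flags in 23:16 and the RX byte
+ * count in 31:24 (cf. the CEC_STATUS_* definitions in s5p_cec.h).
+ */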
+ status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
+ status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
+ status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
+ status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
+ status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
+
+ dev_dbg(cec->dev, "status = 0x%x!\n", status);
+
+ return status;
+}
+
+void s5p_clr_pending_tx(struct s5p_cec_dev *cec)
+{
+ writeb(S5P_CEC_IRQ_TX_DONE | S5P_CEC_IRQ_TX_ERROR,
+ cec->reg + S5P_CEC_IRQ_CLEAR);
+}
+
+void s5p_clr_pending_rx(struct s5p_cec_dev *cec)
+{
+ writeb(S5P_CEC_IRQ_RX_DONE | S5P_CEC_IRQ_RX_ERROR,
+ cec->reg + S5P_CEC_IRQ_CLEAR);
+}
+
+void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, u32 size, u8 *buffer)
+{
+ u32 i = 0;
+ char debug[40];
+
+ while (i < size) {
+ buffer[i] = readb(cec->reg + S5P_CEC_RX_BUFF0 + (i * 4));
+ sprintf(debug + i * 2, "%02x ", buffer[i]);
+ i++;
+ }
+ dev_dbg(cec->dev, "cec-rx: cec size(%d): %s", size, debug);
+}
diff --git a/drivers/media/platform/s5p-cec/regs-cec.h b/drivers/media/platform/s5p-cec/regs-cec.h
new file mode 100644
index 000000000..b2e7e1299
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/regs-cec.h
@@ -0,0 +1,96 @@
+/* drivers/media/platform/s5p-cec/regs-cec.h
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_REGS__H
+#define __EXYNOS_REGS__H
+
+/*
+ * Register part
+ */
+#define S5P_CEC_STATUS_0 (0x0000)
+#define S5P_CEC_STATUS_1 (0x0004)
+#define S5P_CEC_STATUS_2 (0x0008)
+#define S5P_CEC_STATUS_3 (0x000C)
+#define S5P_CEC_IRQ_MASK (0x0010)
+#define S5P_CEC_IRQ_CLEAR (0x0014)
+#define S5P_CEC_LOGIC_ADDR (0x0020)
+#define S5P_CEC_DIVISOR_0 (0x0030)
+#define S5P_CEC_DIVISOR_1 (0x0034)
+#define S5P_CEC_DIVISOR_2 (0x0038)
+#define S5P_CEC_DIVISOR_3 (0x003C)
+
+#define S5P_CEC_TX_CTRL (0x0040)
+#define S5P_CEC_TX_BYTES (0x0044)
+#define S5P_CEC_TX_STAT0 (0x0060)
+#define S5P_CEC_TX_STAT1 (0x0064)
+#define S5P_CEC_TX_BUFF0 (0x0080)
+#define S5P_CEC_TX_BUFF1 (0x0084)
+#define S5P_CEC_TX_BUFF2 (0x0088)
+#define S5P_CEC_TX_BUFF3 (0x008C)
+#define S5P_CEC_TX_BUFF4 (0x0090)
+#define S5P_CEC_TX_BUFF5 (0x0094)
+#define S5P_CEC_TX_BUFF6 (0x0098)
+#define S5P_CEC_TX_BUFF7 (0x009C)
+#define S5P_CEC_TX_BUFF8 (0x00A0)
+#define S5P_CEC_TX_BUFF9 (0x00A4)
+#define S5P_CEC_TX_BUFF10 (0x00A8)
+#define S5P_CEC_TX_BUFF11 (0x00AC)
+#define S5P_CEC_TX_BUFF12 (0x00B0)
+#define S5P_CEC_TX_BUFF13 (0x00B4)
+#define S5P_CEC_TX_BUFF14 (0x00B8)
+#define S5P_CEC_TX_BUFF15 (0x00BC)
+
+#define S5P_CEC_RX_CTRL (0x00C0)
+#define S5P_CEC_RX_STAT0 (0x00E0)
+#define S5P_CEC_RX_STAT1 (0x00E4)
+#define S5P_CEC_RX_BUFF0 (0x0100)
+#define S5P_CEC_RX_BUFF1 (0x0104)
+#define S5P_CEC_RX_BUFF2 (0x0108)
+#define S5P_CEC_RX_BUFF3 (0x010C)
+#define S5P_CEC_RX_BUFF4 (0x0110)
+#define S5P_CEC_RX_BUFF5 (0x0114)
+#define S5P_CEC_RX_BUFF6 (0x0118)
+#define S5P_CEC_RX_BUFF7 (0x011C)
+#define S5P_CEC_RX_BUFF8 (0x0120)
+#define S5P_CEC_RX_BUFF9 (0x0124)
+#define S5P_CEC_RX_BUFF10 (0x0128)
+#define S5P_CEC_RX_BUFF11 (0x012C)
+#define S5P_CEC_RX_BUFF12 (0x0130)
+#define S5P_CEC_RX_BUFF13 (0x0134)
+#define S5P_CEC_RX_BUFF14 (0x0138)
+#define S5P_CEC_RX_BUFF15 (0x013C)
+
+#define S5P_CEC_RX_FILTER_CTRL (0x0180)
+#define S5P_CEC_RX_FILTER_TH (0x0184)
+
+/*
+ * Bit definition part
+ */
+#define S5P_CEC_IRQ_TX_DONE (1<<0)
+#define S5P_CEC_IRQ_TX_ERROR (1<<1)
+#define S5P_CEC_IRQ_RX_DONE (1<<4)
+#define S5P_CEC_IRQ_RX_ERROR (1<<5)
+
+#define S5P_CEC_TX_CTRL_START (1<<0)
+#define S5P_CEC_TX_CTRL_BCAST (1<<1)
+#define S5P_CEC_TX_CTRL_RETRY (0x04<<4)
+#define S5P_CEC_TX_CTRL_RESET (1<<7)
+
+#define S5P_CEC_RX_CTRL_ENABLE (1<<0)
+#define S5P_CEC_RX_CTRL_RESET (1<<7)
+
+#define S5P_CEC_LOGIC_ADDR_MASK (0xF)
+
+/* PMU Registers for PHY */
+#define EXYNOS_HDMI_PHY_CONTROL 0x700
+
+#endif /* __EXYNOS_REGS__H */
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
new file mode 100644
index 000000000..3032247c6
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -0,0 +1,312 @@
+/* drivers/media/platform/s5p-cec/s5p_cec.c
+ *
+ * Samsung S5P CEC driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This driver is based on the "cec interface driver for exynos soc" by
+ * SangPil Moon.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+#include "s5p_cec.h"
+
+#define CEC_NAME "s5p-cec"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct s5p_cec_dev *cec = cec_get_drvdata(adap);
+
+ if (enable) {
+ pm_runtime_get_sync(cec->dev);
+
+ s5p_cec_reset(cec);
+
+ s5p_cec_set_divider(cec);
+ s5p_cec_threshold(cec);
+
+ s5p_cec_unmask_tx_interrupts(cec);
+ s5p_cec_unmask_rx_interrupts(cec);
+ s5p_cec_enable_rx(cec);
+ } else {
+ s5p_cec_mask_tx_interrupts(cec);
+ s5p_cec_mask_rx_interrupts(cec);
+ pm_runtime_put(cec->dev);
+ }
+
+ return 0;
+}
+
+static int s5p_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct s5p_cec_dev *cec = cec_get_drvdata(adap);
+
+ s5p_cec_set_addr(cec, addr);
+ return 0;
+}
+
+static int s5p_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct s5p_cec_dev *cec = cec_get_drvdata(adap);
+
+ /*
+ * Unclear if 0 retries are allowed by the hardware, so have 1 as
+ * the minimum.
+ */
+ s5p_cec_copy_packet(cec, msg->msg, msg->len, max(1, attempts - 1));
+ return 0;
+}
+
+static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
+{
+ struct s5p_cec_dev *cec = priv;
+ u32 status = 0;
+
+ status = s5p_cec_get_status(cec);
+
+ dev_dbg(cec->dev, "irq received\n");
+
+ if (status & CEC_STATUS_TX_DONE) {
+ if (status & CEC_STATUS_TX_NACK) {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
+ cec->tx = STATE_NACK;
+ } else if (status & CEC_STATUS_TX_ERROR) {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
+ cec->tx = STATE_ERROR;
+ } else {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_DONE\n");
+ cec->tx = STATE_DONE;
+ }
+ s5p_clr_pending_tx(cec);
+ }
+
+ if (status & CEC_STATUS_RX_DONE) {
+ if (status & CEC_STATUS_RX_ERROR) {
+ dev_dbg(cec->dev, "CEC_STATUS_RX_ERROR set\n");
+ s5p_cec_rx_reset(cec);
+ s5p_cec_enable_rx(cec);
+ } else {
+ dev_dbg(cec->dev, "CEC_STATUS_RX_DONE set\n");
+ if (cec->rx != STATE_IDLE)
+ dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
+ cec->rx = STATE_BUSY;
+ cec->msg.len = status >> 24;
+ cec->msg.rx_status = CEC_RX_STATUS_OK;
+ s5p_cec_get_rx_buf(cec, cec->msg.len,
+ cec->msg.msg);
+ cec->rx = STATE_DONE;
+ s5p_cec_enable_rx(cec);
+ }
+ /* Clear interrupt pending bit */
+ s5p_clr_pending_rx(cec);
+ }
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct s5p_cec_dev *cec = priv;
+
+ dev_dbg(cec->dev, "irq processing thread\n");
+ switch (cec->tx) {
+ case STATE_DONE:
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ cec->tx = STATE_IDLE;
+ break;
+ case STATE_NACK:
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
+ 0, 1, 0, 0);
+ cec->tx = STATE_IDLE;
+ break;
+ case STATE_ERROR:
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
+ 0, 0, 0, 1);
+ cec->tx = STATE_IDLE;
+ break;
+ case STATE_BUSY:
+ dev_err(cec->dev, "state set to busy, this should not occur here\n");
+ break;
+ default:
+ break;
+ }
+
+ switch (cec->rx) {
+ case STATE_DONE:
+ cec_received_msg(cec->adap, &cec->msg);
+ cec->rx = STATE_IDLE;
+ break;
+ default:
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct cec_adap_ops s5p_cec_adap_ops = {
+ .adap_enable = s5p_cec_adap_enable,
+ .adap_log_addr = s5p_cec_adap_log_addr,
+ .adap_transmit = s5p_cec_adap_transmit,
+};
+
+static int s5p_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct platform_device *hdmi_dev;
+ struct resource *res;
+ struct s5p_cec_dev *cec;
+ bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd");
+ int ret;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(dev, cec->irq, s5p_cec_irq_handler,
+ s5p_cec_irq_handler_thread, 0, pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk = devm_clk_get(dev, "hdmicec");
+ if (IS_ERR(cec->clk))
+ return PTR_ERR(cec->clk);
+
+ cec->pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(cec->pmu))
+ return -EPROBE_DEFER;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->reg))
+ return PTR_ERR(cec->reg);
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (cec->notifier == NULL)
+ return -ENOMEM;
+
+ cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, CEC_NAME,
+ CEC_CAP_DEFAULTS | (needs_hpd ? CEC_CAP_NEEDS_HPD : 0), 1);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto err_delete_adapter;
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ platform_set_drvdata(pdev, cec);
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "successfully probed\n");
+ return 0;
+
+err_delete_adapter:
+ cec_delete_adapter(cec->adap);
+ return ret;
+}
+
+static int s5p_cec_remove(struct platform_device *pdev)
+{
+ struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
+{
+ struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(cec->clk);
+ return 0;
+}
+
+static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
+{
+ struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(cec->clk);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static const struct dev_pm_ops s5p_cec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id s5p_cec_match[] = {
+ {
+ .compatible = "samsung,s5p-cec",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s5p_cec_match);
+
+static struct platform_driver s5p_cec_pdrv = {
+ .probe = s5p_cec_probe,
+ .remove = s5p_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = s5p_cec_match,
+ .pm = &s5p_cec_pm_ops,
+ },
+};
+
+module_platform_driver(s5p_cec_pdrv);
+
+MODULE_AUTHOR("Kamil Debski <kamil@wypas.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung S5P CEC driver");
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
new file mode 100644
index 000000000..86ded522e
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -0,0 +1,80 @@
+/* drivers/media/platform/s5p-cec/s5p_cec.h
+ *
+ * Samsung S5P HDMI CEC driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _S5P_CEC_H_
+#define _S5P_CEC_H_ __FILE__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+#include "s5p_cec.h"
+
+#define CEC_NAME "s5p-cec"
+
+#define CEC_STATUS_TX_RUNNING (1 << 0)
+#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
+#define CEC_STATUS_TX_DONE (1 << 2)
+#define CEC_STATUS_TX_ERROR (1 << 3)
+#define CEC_STATUS_TX_NACK (1 << 4)
+#define CEC_STATUS_TX_BYTES (0xFF << 8)
+#define CEC_STATUS_RX_RUNNING (1 << 16)
+#define CEC_STATUS_RX_RECEIVING (1 << 17)
+#define CEC_STATUS_RX_DONE (1 << 18)
+#define CEC_STATUS_RX_ERROR (1 << 19)
+#define CEC_STATUS_RX_BCAST (1 << 20)
+#define CEC_STATUS_RX_BYTES (0xFF << 24)
+
+#define CEC_WORKER_TX_DONE (1 << 0)
+#define CEC_WORKER_RX_MSG (1 << 1)
+
+/* CEC Rx buffer size */
+#define CEC_RX_BUFF_SIZE 16
+/* CEC Tx buffer size */
+#define CEC_TX_BUFF_SIZE 16
+
+enum cec_state {
+ STATE_IDLE,
+ STATE_BUSY,
+ STATE_DONE,
+ STATE_NACK,
+ STATE_ERROR
+};
+
+struct cec_notifier;
+
+struct s5p_cec_dev {
+ struct cec_adapter *adap;
+ struct clk *clk;
+ struct device *dev;
+ struct mutex lock;
+ struct regmap *pmu;
+ struct cec_notifier *notifier;
+ int irq;
+ void __iomem *reg;
+
+ enum cec_state rx;
+ enum cec_state tx;
+ struct cec_msg msg;
+};
+
+#endif /* _S5P_CEC_H_ */
diff --git a/drivers/media/platform/s5p-g2d/Makefile b/drivers/media/platform/s5p-g2d/Makefile
new file mode 100644
index 000000000..2c48c416a
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/Makefile
@@ -0,0 +1,3 @@
+s5p-g2d-objs := g2d.o g2d-hw.o
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d.o
diff --git a/drivers/media/platform/s5p-g2d/g2d-hw.c b/drivers/media/platform/s5p-g2d/g2d-hw.c
new file mode 100644
index 000000000..e87bd9381
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d-hw.c
@@ -0,0 +1,117 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/io.h>
+
+#include "g2d.h"
+#include "g2d-regs.h"
+
+#define w(x, a) writel((x), d->regs + (a))
+#define r(a) readl(d->regs + (a))
+
+/* g2d_reset clears all g2d registers */
+void g2d_reset(struct g2d_dev *d)
+{
+ w(1, SOFT_RESET_REG);
+}
+
+void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f)
+{
+ u32 n;
+
+ w(0, SRC_SELECT_REG);
+ w(f->stride & 0xFFFF, SRC_STRIDE_REG);
+
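+ /* o_width/o_height are used as the left/top coordinates of the source rect */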
+ n = f->o_height & 0xFFF;
+ n <<= 16;
+ n |= f->o_width & 0xFFF;
+ w(n, SRC_LEFT_TOP_REG);
+
+ n = f->bottom & 0xFFF;
+ n <<= 16;
+ n |= f->right & 0xFFF;
+ w(n, SRC_RIGHT_BOTTOM_REG);
+
+ w(f->fmt->hw, SRC_COLOR_MODE_REG);
+}
+
+void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a)
+{
+ w(a, SRC_BASE_ADDR_REG);
+}
+
+void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f)
+{
+ u32 n;
+
+ w(0, DST_SELECT_REG);
+ w(f->stride & 0xFFFF, DST_STRIDE_REG);
+
+ n = f->o_height & 0xFFF;
+ n <<= 16;
+ n |= f->o_width & 0xFFF;
+ w(n, DST_LEFT_TOP_REG);
+
+ n = f->bottom & 0xFFF;
+ n <<= 16;
+ n |= f->right & 0xFFF;
+ w(n, DST_RIGHT_BOTTOM_REG);
+
+ w(f->fmt->hw, DST_COLOR_MODE_REG);
+}
+
+void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a)
+{
+ w(a, DST_BASE_ADDR_REG);
+}
+
+void g2d_set_rop4(struct g2d_dev *d, u32 r)
+{
+ w(r, ROP4_REG);
+}
+
+void g2d_set_flip(struct g2d_dev *d, u32 r)
+{
+ w(r, SRC_MSK_DIRECT_REG);
+}
+
+void g2d_set_v41_stretch(struct g2d_dev *d, struct g2d_frame *src,
+ struct g2d_frame *dst)
+{
+ w(DEFAULT_SCALE_MODE, SRC_SCALE_CTRL_REG);
+
+ /* inverted scaling factor in 16.16 fixed point: src is the numerator */
+ w((src->c_width << 16) / dst->c_width, SRC_XSCALE_REG);
+ w((src->c_height << 16) / dst->c_height, SRC_YSCALE_REG);
+}
+
+void g2d_set_cmd(struct g2d_dev *d, u32 c)
+{
+ w(c, BITBLT_COMMAND_REG);
+}
+
+void g2d_start(struct g2d_dev *d)
+{
+ /* Clear cache */
+ if (d->variant->hw_rev == TYPE_G2D_3X)
+ w(0x7, CACHECTL_REG);
+
+ /* Enable interrupt */
+ w(1, INTEN_REG);
+ /* Start G2D engine */
+ w(1, BITBLT_START_REG);
+}
+
+void g2d_clear_int(struct g2d_dev *d)
+{
+ w(1, INTC_PEND_REG);
+}
diff --git a/drivers/media/platform/s5p-g2d/g2d-regs.h b/drivers/media/platform/s5p-g2d/g2d-regs.h
new file mode 100644
index 000000000..9bf31ad35
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d-regs.h
@@ -0,0 +1,122 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+/* General Registers */
+#define SOFT_RESET_REG 0x0000 /* Software reset reg */
+#define INTEN_REG 0x0004 /* Interrupt Enable reg */
+#define INTC_PEND_REG 0x000C /* Interrupt Control Pending reg */
+#define FIFO_STAT_REG 0x0010 /* Command FIFO Status reg */
+#define AXI_ID_MODE_REG 0x0014 /* AXI Read ID Mode reg */
+#define CACHECTL_REG 0x0018 /* Cache & Buffer clear reg */
+#define AXI_MODE_REG 0x001C /* AXI Mode reg */
+
+/* Command Registers */
+#define BITBLT_START_REG 0x0100 /* BitBLT Start reg */
+#define BITBLT_COMMAND_REG 0x0104 /* Command reg for BitBLT */
+
+/* Parameter Setting Registers (Rotate & Direction) */
+#define ROTATE_REG 0x0200 /* Rotation reg */
+#define SRC_MSK_DIRECT_REG 0x0204 /* Src and Mask Direction reg */
+#define DST_PAT_DIRECT_REG 0x0208 /* Dest and Pattern Direction reg */
+
+/* Parameter Setting Registers (Src) */
+#define SRC_SELECT_REG 0x0300 /* Src Image Selection reg */
+#define SRC_BASE_ADDR_REG 0x0304 /* Src Image Base Address reg */
+#define SRC_STRIDE_REG 0x0308 /* Src Stride reg */
+#define SRC_COLOR_MODE_REG 0x030C /* Src Image Color Mode reg */
+#define SRC_LEFT_TOP_REG 0x0310 /* Src Left Top Coordinate reg */
+#define SRC_RIGHT_BOTTOM_REG 0x0314 /* Src Right Bottom Coordinate reg */
+#define SRC_SCALE_CTRL_REG 0x0328 /* Src Scaling type select */
+#define SRC_XSCALE_REG 0x032c /* Src X Scaling ratio */
+#define SRC_YSCALE_REG 0x0330 /* Src Y Scaling ratio */
+
+/* Parameter Setting Registers (Dest) */
+#define DST_SELECT_REG 0x0400 /* Dest Image Selection reg */
+#define DST_BASE_ADDR_REG 0x0404 /* Dest Image Base Address reg */
+#define DST_STRIDE_REG 0x0408 /* Dest Stride reg */
+#define DST_COLOR_MODE_REG 0x040C /* Dest Image Color Mode reg */
+#define DST_LEFT_TOP_REG 0x0410 /* Dest Left Top Coordinate reg */
+#define DST_RIGHT_BOTTOM_REG 0x0414 /* Dest Right Bottom Coordinate reg */
+
+/* Parameter Setting Registers (Pattern) */
+#define PAT_BASE_ADDR_REG 0x0500 /* Pattern Image Base Address reg */
+#define PAT_SIZE_REG 0x0504 /* Pattern Image Size reg */
+#define PAT_COLOR_MODE_REG 0x0508 /* Pattern Image Color Mode reg */
+#define PAT_OFFSET_REG 0x050C /* Pattern Left Top Coordinate reg */
+#define PAT_STRIDE_REG 0x0510 /* Pattern Stride reg */
+
+/* Parameter Setting Registers (Mask) */
+#define MASK_BASE_ADDR_REG 0x0520 /* Mask Base Address reg */
+#define MASK_STRIDE_REG 0x0524 /* Mask Stride reg */
+
+/* Parameter Setting Registers (Clipping Window) */
+#define CW_LT_REG 0x0600 /* LeftTop coordinates of Clip Window */
+#define CW_RB_REG 0x0604 /* RightBottom coordinates of Clip
+ Window */
+
+/* Parameter Setting Registers (ROP & Alpha Setting) */
+#define THIRD_OPERAND_REG 0x0610 /* Third Operand Selection reg */
+#define ROP4_REG 0x0614 /* Raster Operation reg */
+#define ALPHA_REG 0x0618 /* Alpha value, Fading offset value */
+
+/* Parameter Setting Registers (Color) */
+#define FG_COLOR_REG 0x0700 /* Foreground Color reg */
+#define BG_COLOR_REG 0x0704 /* Background Color reg */
+#define BS_COLOR_REG 0x0708 /* Blue Screen Color reg */
+
+/* Parameter Setting Registers (Color Key) */
+#define SRC_COLORKEY_CTRL_REG 0x0710 /* Src Colorkey control reg */
+#define SRC_COLORKEY_DR_MIN_REG 0x0714 /* Src Colorkey Decision Reference
+ Min reg */
+#define SRC_COLORKEY_DR_MAX_REG 0x0718 /* Src Colorkey Decision Reference
+ Max reg */
+#define DST_COLORKEY_CTRL_REG 0x071C /* Dest Colorkey control reg */
+#define DST_COLORKEY_DR_MIN_REG 0x0720 /* Dest Colorkey Decision Reference
+ Min reg */
+#define DST_COLORKEY_DR_MAX_REG 0x0724 /* Dest Colorkey Decision Reference
+ Max reg */
+
+/* Color mode values */
+
+#define ORDER_XRGB 0
+#define ORDER_RGBX 1
+#define ORDER_XBGR 2
+#define ORDER_BGRX 3
+
+#define MODE_XRGB_8888 0
+#define MODE_ARGB_8888 1
+#define MODE_RGB_565 2
+#define MODE_XRGB_1555 3
+#define MODE_ARGB_1555 4
+#define MODE_XRGB_4444 5
+#define MODE_ARGB_4444 6
+#define MODE_PACKED_RGB_888 7
+
+#define COLOR_MODE(o, m) (((o) << 4) | (m))
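+/*
+ * For example, COLOR_MODE(ORDER_XRGB, MODE_RGB_565) evaluates to
+ * (0 << 4) | 2 = 0x02, the value written to SRC/DST_COLOR_MODE_REG for
+ * 16-bit RGB565 buffers.
+ */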
+
+/* ROP4 operation values */
+#define ROP4_COPY 0xCCCC
+#define ROP4_INVERT 0x3333
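+/*
+ * The driver only uses ROP4_COPY (plain source copy) and ROP4_INVERT,
+ * the latter to implement V4L2_COLORFX_NEGATIVE (see g2d_s_ctrl()).
+ */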
+
+/* Hardware limits */
+#define MAX_WIDTH 8000
+#define MAX_HEIGHT 8000
+
+#define G2D_TIMEOUT 500
+
+#define DEFAULT_WIDTH 100
+#define DEFAULT_HEIGHT 100
+
+#define DEFAULT_SCALE_MODE (2 << 0)
+
+/* Command mode register values */
+#define CMD_V3_ENABLE_STRETCH (1 << 4)
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
new file mode 100644
index 000000000..4cf5b5594
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -0,0 +1,783 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "g2d.h"
+#include "g2d-regs.h"
+
+#define fh2ctx(__fh) container_of(__fh, struct g2d_ctx, fh)
+
+static struct g2d_fmt formats[] = {
+ {
+ .name = "XRGB_8888",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_8888),
+ },
+ {
+ .name = "RGB_565",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_RGB_565),
+ },
+ {
+ .name = "XRGB_1555",
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .depth = 16,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_1555),
+ },
+ {
+ .name = "XRGB_4444",
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .depth = 16,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_4444),
+ },
+ {
+ .name = "PACKED_RGB_888",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .depth = 24,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_PACKED_RGB_888),
+ },
+};
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct g2d_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .c_width = DEFAULT_WIDTH,
+ .c_height = DEFAULT_HEIGHT,
+ .o_width = 0,
+ .o_height = 0,
+ .fmt = &formats[0],
+ .right = DEFAULT_WIDTH,
+ .bottom = DEFAULT_HEIGHT,
+};
+
+static struct g2d_fmt *find_fmt(struct v4l2_format *f)
+{
+ unsigned int i;
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+
+static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->in;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->out;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int g2d_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct g2d_ctx *ctx = vb2_get_drv_priv(vq);
+ struct g2d_frame *f = get_frame(ctx, vq->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+
+ if (*nbuffers == 0)
+ *nbuffers = 1;
+
+ return 0;
+}
+
+static int g2d_buf_prepare(struct vb2_buffer *vb)
+{
+ struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct g2d_frame *f = get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ vb2_set_plane_payload(vb, 0, f->size);
+ return 0;
+}
+
+static void g2d_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static const struct vb2_ops g2d_qops = {
+ .queue_setup = g2d_queue_setup,
+ .buf_prepare = g2d_buf_prepare,
+ .buf_queue = g2d_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct g2d_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &g2d_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &g2d_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct g2d_ctx *ctx = container_of(ctrl->handler, struct g2d_ctx,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->dev->ctrl_lock, flags);
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ if (ctrl->val == V4L2_COLORFX_NEGATIVE)
+ ctx->rop = ROP4_INVERT;
+ else
+ ctx->rop = ROP4_COPY;
+ break;
+
+ case V4L2_CID_HFLIP:
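+ /*
+  * Bit 0 carries the horizontal flip, bit 1 the vertical flip; the
+  * combined value is later written to SRC_MSK_DIRECT_REG by
+  * g2d_set_flip().
+  */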
+ ctx->flip = ctx->ctrl_hflip->val | (ctx->ctrl_vflip->val << 1);
+ break;
+
+ }
+ spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops g2d_ctrl_ops = {
+ .s_ctrl = g2d_s_ctrl,
+};
+
+static int g2d_setup_ctrls(struct g2d_ctx *ctx)
+{
+ struct g2d_dev *dev = ctx->dev;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
+
+ ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std_menu(
+ &ctx->ctrl_handler,
+ &g2d_ctrl_ops,
+ V4L2_CID_COLORFX,
+ V4L2_COLORFX_NEGATIVE,
+ ~((1 << V4L2_COLORFX_NONE) | (1 << V4L2_COLORFX_NEGATIVE)),
+ V4L2_COLORFX_NONE);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+ v4l2_err(&dev->v4l2_dev, "g2d_setup_ctrls failed\n");
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ v4l2_ctrl_cluster(2, &ctx->ctrl_hflip);
+
+ return 0;
+}
+
+static int g2d_open(struct file *file)
+{
+ struct g2d_dev *dev = video_drvdata(file);
+ struct g2d_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dev = dev;
+ /* Set default formats */
+ ctx->in = def_frame;
+ ctx->out = def_frame;
+
+ if (mutex_lock_interruptible(&dev->mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->mutex);
+ kfree(ctx);
+ return ret;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ g2d_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ mutex_unlock(&dev->mutex);
+
+ v4l2_info(&dev->v4l2_dev, "instance opened\n");
+ return 0;
+}
+
+static int g2d_release(struct file *file)
+{
+ struct g2d_dev *dev = video_drvdata(file);
+ struct g2d_ctx *ctx = fh2ctx(file->private_data);
+
+ mutex_lock(&dev->mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->mutex);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ v4l2_info(&dev->v4l2_dev, "instance closed\n");
+ return 0;
+}
+
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ struct g2d_fmt *fmt;
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+ fmt = &formats[f->index];
+ f->pixelformat = fmt->fourcc;
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct g2d_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct g2d_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+ frm = get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = (frm->width * frm->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = frm->size;
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct g2d_fmt *fmt;
+ enum v4l2_field *field;
+
+ fmt = find_fmt(f);
+ if (!fmt)
+ return -EINVAL;
+
+ field = &f->fmt.pix.field;
+ if (*field == V4L2_FIELD_ANY)
+ *field = V4L2_FIELD_NONE;
+ else if (*field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < 1)
+ f->fmt.pix.width = 1;
+ if (f->fmt.pix.height < 1)
+ f->fmt.pix.height = 1;
+
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ struct vb2_queue *vq;
+ struct g2d_frame *frm;
+ struct g2d_fmt *fmt;
+ int ret = 0;
+
+ /* Adjust all values according to the hardware capabilities
+  * and the chosen format. */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&dev->v4l2_dev, "queue (%d) bust\n", f->type);
+ return -EBUSY;
+ }
+ frm = get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+ fmt = find_fmt(f);
+ if (!fmt)
+ return -EINVAL;
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ /* Reset crop settings */
+ frm->o_width = 0;
+ frm->o_height = 0;
+ frm->c_width = frm->width;
+ frm->c_height = frm->height;
+ frm->right = frm->width;
+ frm->bottom = frm->height;
+ frm->fmt = fmt;
+ frm->stride = f->fmt.pix.bytesperline;
+ return 0;
+}
+
+static int vidioc_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cr)
+{
+ struct g2d_ctx *ctx = priv;
+ struct g2d_frame *f;
+
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = f->width;
+ cr->bounds.height = f->height;
+ cr->defrect = cr->bounds;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_frame *f;
+
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ cr->c.left = f->o_width;
+ cr->c.top = f->o_height;
+ cr->c.width = f->c_width;
+ cr->c.height = f->c_height;
+ return 0;
+}
+
+static int vidioc_try_crop(struct file *file, void *prv, const struct v4l2_crop *cr)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ struct g2d_frame *f;
+
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *prv, const struct v4l2_crop *cr)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_frame *f;
+ int ret;
+
+ ret = vidioc_try_crop(file, prv, cr);
+ if (ret)
+ return ret;
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ f->c_width = cr->c.width;
+ f->c_height = cr->c.height;
+ f->o_width = cr->c.left;
+ f->o_height = cr->c.top;
+ f->bottom = f->o_height + f->c_height;
+ f->right = f->o_width + f->c_width;
+ return 0;
+}
+
+static void device_run(void *prv)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned long flags;
+ u32 cmd = 0;
+
+ dev->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ clk_enable(dev->gate);
+ g2d_reset(dev);
+
+ spin_lock_irqsave(&dev->ctrl_lock, flags);
+
+ g2d_set_src_size(dev, &ctx->in);
+ g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
+
+ g2d_set_dst_size(dev, &ctx->out);
+ g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
+
+ g2d_set_rop4(dev, ctx->rop);
+ g2d_set_flip(dev, ctx->flip);
+
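+ /*
+  * Scaling is needed when source and destination crop sizes differ:
+  * 3.x hardware only needs the stretch bit in the command register,
+  * while 4.1 hardware is programmed with explicit X/Y scaling ratios.
+  */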
+ if (ctx->in.c_width != ctx->out.c_width ||
+ ctx->in.c_height != ctx->out.c_height) {
+ if (dev->variant->hw_rev == TYPE_G2D_3X)
+ cmd |= CMD_V3_ENABLE_STRETCH;
+ else
+ g2d_set_v41_stretch(dev, &ctx->in, &ctx->out);
+ }
+
+ g2d_set_cmd(dev, cmd);
+ g2d_start(dev);
+
+ spin_unlock_irqrestore(&dev->ctrl_lock, flags);
+}
+
+static irqreturn_t g2d_isr(int irq, void *prv)
+{
+ struct g2d_dev *dev = prv;
+ struct g2d_ctx *ctx = dev->curr;
+ struct vb2_v4l2_buffer *src, *dst;
+
+ g2d_clear_int(dev);
+ clk_disable(dev->gate);
+
+ BUG_ON(ctx == NULL);
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ BUG_ON(src == NULL);
+ BUG_ON(dst == NULL);
+
+ dst->timecode = src->timecode;
+ dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |=
+ src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
+
+ dev->curr = NULL;
+ return IRQ_HANDLED;
+}
+
+static const struct v4l2_file_operations g2d_fops = {
+ .owner = THIS_MODULE,
+ .open = g2d_open,
+ .release = g2d_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_cropcap = vidioc_cropcap,
+};
+
+static const struct video_device g2d_videodev = {
+ .name = G2D_NAME,
+ .fops = &g2d_fops,
+ .ioctl_ops = &g2d_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops g2d_m2m_ops = {
+ .device_run = device_run,
+};
+
+static const struct of_device_id exynos_g2d_match[];
+
+static int g2d_probe(struct platform_device *pdev)
+{
+ struct g2d_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ const struct of_device_id *of_id;
+ int ret = 0;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->ctrl_lock);
+ mutex_init(&dev->mutex);
+ atomic_set(&dev->num_inst, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs))
+ return PTR_ERR(dev->regs);
+
+ dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
+ if (IS_ERR(dev->clk)) {
+ dev_err(&pdev->dev, "failed to get g2d clock\n");
+ return -ENXIO;
+ }
+
+ ret = clk_prepare(dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare g2d clock\n");
+ goto put_clk;
+ }
+
+ dev->gate = clk_get(&pdev->dev, "fimg2d");
+ if (IS_ERR(dev->gate)) {
+ dev_err(&pdev->dev, "failed to get g2d clock gate\n");
+ ret = -ENXIO;
+ goto unprep_clk;
+ }
+
+ ret = clk_prepare(dev->gate);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare g2d clock gate\n");
+ goto put_clk_gate;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find IRQ\n");
+ ret = -ENXIO;
+ goto unprep_clk_gate;
+ }
+
+ dev->irq = res->start;
+
+ ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to install IRQ\n");
+ goto unprep_clk_gate;
+ }
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto unprep_clk_gate;
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+ *vfd = g2d_videodev;
+ vfd->lock = &dev->mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+ video_set_drvdata(vfd, dev);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
+ vfd->num);
+ platform_set_drvdata(pdev, dev);
+ dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto unreg_video_dev;
+ }
+
+ def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+
+ of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
+ if (!of_id) {
+ ret = -ENODEV;
+ goto unreg_video_dev;
+ }
+ dev->variant = (struct g2d_variant *)of_id->data;
+
+ return 0;
+
+unreg_video_dev:
+ video_unregister_device(dev->vfd);
+rel_vdev:
+ video_device_release(vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+unprep_clk_gate:
+ clk_unprepare(dev->gate);
+put_clk_gate:
+ clk_put(dev->gate);
+unprep_clk:
+ clk_unprepare(dev->clk);
+put_clk:
+ clk_put(dev->clk);
+
+ return ret;
+}
+
+static int g2d_remove(struct platform_device *pdev)
+{
+ struct g2d_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME);
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ clk_unprepare(dev->gate);
+ clk_put(dev->gate);
+ clk_unprepare(dev->clk);
+ clk_put(dev->clk);
+ return 0;
+}
+
+static struct g2d_variant g2d_drvdata_v3x = {
+ .hw_rev = TYPE_G2D_3X, /* Revision 3.0 for S5PV210 and Exynos4210 */
+};
+
+static struct g2d_variant g2d_drvdata_v4x = {
+ .hw_rev = TYPE_G2D_4X, /* Revision 4.1 for Exynos4X12 and Exynos5 */
+};
+
+static const struct of_device_id exynos_g2d_match[] = {
+ {
+ .compatible = "samsung,s5pv210-g2d",
+ .data = &g2d_drvdata_v3x,
+ }, {
+ .compatible = "samsung,exynos4212-g2d",
+ .data = &g2d_drvdata_v4x,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
+
+static struct platform_driver g2d_pdrv = {
+ .probe = g2d_probe,
+ .remove = g2d_remove,
+ .driver = {
+ .name = G2D_NAME,
+ .of_match_table = exynos_g2d_match,
+ },
+};
+
+module_platform_driver(g2d_pdrv);
+
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_DESCRIPTION("S5P G2D 2d graphics accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
new file mode 100644
index 000000000..9ffb458a1
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -0,0 +1,89 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#define G2D_NAME "s5p-g2d"
+#define TYPE_G2D_3X 3
+#define TYPE_G2D_4X 4
+
+struct g2d_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+ struct mutex mutex;
+ spinlock_t ctrl_lock;
+ atomic_t num_inst;
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk *gate;
+ struct g2d_ctx *curr;
+ struct g2d_variant *variant;
+ int irq;
+};
+
+struct g2d_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ /* Crop size */
+ u32 c_width;
+ u32 c_height;
+ /* Offset */
+ u32 o_width;
+ u32 o_height;
+ /* Image format */
+ struct g2d_fmt *fmt;
+ /* Variables that can be calculated once and reused */
+ u32 stride;
+ u32 bottom;
+ u32 right;
+ u32 size;
+};
+
+struct g2d_ctx {
+ struct v4l2_fh fh;
+ struct g2d_dev *dev;
+ struct g2d_frame in;
+ struct g2d_frame out;
+ struct v4l2_ctrl *ctrl_hflip;
+ struct v4l2_ctrl *ctrl_vflip;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u32 rop;
+ u32 flip;
+};
+
+struct g2d_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ u32 hw;
+};
+
+struct g2d_variant {
+ unsigned short hw_rev;
+};
+
+void g2d_reset(struct g2d_dev *d);
+void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f);
+void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a);
+void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f);
+void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a);
+void g2d_start(struct g2d_dev *d);
+void g2d_clear_int(struct g2d_dev *d);
+void g2d_set_rop4(struct g2d_dev *d, u32 r);
+void g2d_set_flip(struct g2d_dev *d, u32 r);
+void g2d_set_v41_stretch(struct g2d_dev *d,
+ struct g2d_frame *src, struct g2d_frame *dst);
+void g2d_set_cmd(struct g2d_dev *d, u32 c);
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile
new file mode 100644
index 000000000..9e5f214c4
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/Makefile
@@ -0,0 +1,2 @@
+s5p-jpeg-objs := jpeg-core.o jpeg-hw-exynos3250.o jpeg-hw-exynos4.o jpeg-hw-s5p.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
new file mode 100644
index 000000000..fa7c42cf4
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -0,0 +1,3229 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.c
+ *
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "jpeg-core.h"
+#include "jpeg-hw-s5p.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-hw-exynos3250.h"
+#include "jpeg-regs.h"
+
+static struct s5p_jpeg_fmt sjpeg_formats[] = {
+ {
+ .name = "JPEG JFIF",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .flags = SJPEG_FMT_FLAG_ENC_CAPTURE |
+ SJPEG_FMT_FLAG_DEC_OUTPUT |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_FLAG_EXYNOS4,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 4,
+ .v_align = 3,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 2,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 2,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 2,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 2,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 2,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "RGB565X",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 2,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "ARGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .colplanes = 1,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "ARGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .colplanes = 1,
+ .h_align = 2,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:4:4 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV24,
+ .depth = 24,
+ .colplanes = 2,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:4:4 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV42,
+ .depth = 24,
+ .colplanes = 2,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = 16,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = 16,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .colplanes = 2,
+ .h_align = 3,
+ .v_align = 3,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .colplanes = 2,
+ .h_align = 4,
+ .v_align = 4,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .colplanes = 2,
+ .h_align = 3,
+ .v_align = 3,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .colplanes = 3,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .colplanes = 3,
+ .h_align = 4,
+ .v_align = 4,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS3250 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "Gray",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = 8,
+ .colplanes = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
+ },
+};
+#define SJPEG_NUM_FORMATS ARRAY_SIZE(sjpeg_formats)
+
+static const unsigned char qtbl_luminance[4][64] = {
+ {/*level 0 - high compression quality */
+ 20, 16, 25, 39, 50, 46, 62, 68,
+ 16, 18, 23, 38, 38, 53, 65, 68,
+ 25, 23, 31, 38, 53, 65, 68, 68,
+ 39, 38, 38, 53, 65, 68, 68, 68,
+ 50, 38, 53, 65, 68, 68, 68, 68,
+ 46, 53, 65, 68, 68, 68, 68, 68,
+ 62, 65, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ },
+ {/* level 1 */
+ 16, 11, 11, 16, 23, 27, 31, 30,
+ 11, 12, 12, 15, 20, 23, 23, 30,
+ 11, 12, 13, 16, 23, 26, 35, 47,
+ 16, 15, 16, 23, 26, 37, 47, 64,
+ 23, 20, 23, 26, 39, 51, 64, 64,
+ 27, 23, 26, 37, 51, 64, 64, 64,
+ 31, 23, 35, 47, 64, 64, 64, 64,
+ 30, 30, 47, 64, 64, 64, 64, 64
+ },
+ {/* level 2 */
+ 12, 8, 8, 12, 17, 21, 24, 23,
+ 8, 9, 9, 11, 15, 19, 18, 23,
+ 8, 9, 10, 12, 19, 20, 27, 36,
+ 12, 11, 12, 21, 20, 28, 36, 53,
+ 17, 15, 19, 20, 30, 39, 51, 59,
+ 21, 19, 20, 28, 39, 51, 59, 59,
+ 24, 18, 27, 36, 51, 59, 59, 59,
+ 23, 23, 36, 53, 59, 59, 59, 59
+ },
+ {/* level 3 - low compression quality */
+ 8, 6, 6, 8, 12, 14, 16, 17,
+ 6, 6, 6, 8, 10, 13, 12, 15,
+ 6, 6, 7, 8, 13, 14, 18, 24,
+ 8, 8, 8, 14, 13, 19, 24, 35,
+ 12, 10, 13, 13, 20, 26, 34, 39,
+ 14, 13, 14, 19, 26, 34, 39, 39,
+ 16, 12, 18, 24, 34, 39, 39, 39,
+ 17, 15, 24, 35, 39, 39, 39, 39
+ }
+};
+
+static const unsigned char qtbl_chrominance[4][64] = {
+ {/*level 0 - high compression quality */
+ 21, 25, 32, 38, 54, 68, 68, 68,
+ 25, 28, 24, 38, 54, 68, 68, 68,
+ 32, 24, 32, 43, 66, 68, 68, 68,
+ 38, 38, 43, 53, 68, 68, 68, 68,
+ 54, 54, 66, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ },
+ {/* level 1 */
+ 17, 15, 17, 21, 20, 26, 38, 48,
+ 15, 19, 18, 17, 20, 26, 35, 43,
+ 17, 18, 20, 22, 26, 30, 46, 53,
+ 21, 17, 22, 28, 30, 39, 53, 64,
+ 20, 20, 26, 30, 39, 48, 64, 64,
+ 26, 26, 30, 39, 48, 63, 64, 64,
+ 38, 35, 46, 53, 64, 64, 64, 64,
+ 48, 43, 53, 64, 64, 64, 64, 64
+ },
+ {/* level 2 */
+ 13, 11, 13, 16, 20, 20, 29, 37,
+ 11, 14, 14, 14, 16, 20, 26, 32,
+ 13, 14, 15, 17, 20, 23, 35, 40,
+ 16, 14, 17, 21, 23, 30, 40, 50,
+ 20, 16, 20, 23, 30, 37, 50, 59,
+ 20, 20, 23, 30, 37, 48, 59, 59,
+ 29, 26, 35, 40, 50, 59, 59, 59,
+ 37, 32, 40, 50, 59, 59, 59, 59
+ },
+ {/* level 3 - low compression quality */
+ 9, 8, 9, 11, 14, 17, 19, 24,
+ 8, 10, 9, 11, 14, 13, 17, 22,
+ 9, 9, 13, 14, 13, 15, 23, 26,
+ 11, 11, 14, 14, 15, 20, 26, 33,
+ 14, 14, 13, 15, 20, 24, 33, 39,
+ 17, 13, 15, 20, 24, 32, 39, 39,
+ 19, 17, 23, 26, 33, 39, 39, 39,
+ 24, 22, 26, 33, 39, 39, 39, 39
+ }
+};
+
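+/*
+ * Default Huffman tables used for encoding; these appear to be the
+ * example luminance DC/AC tables from the JPEG standard (ITU-T T.81,
+ * Annex K).
+ */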
+static const unsigned char hdctbl0[16] = {
+ 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const unsigned char hdctblg0[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb
+};
+static const unsigned char hactbl0[16] = {
+ 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d
+};
+static const unsigned char hactblg0[162] = {
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa
+};
+
+/*
+ * Fourcc downgrade schema lookup tables for 4:2:2 and 4:2:0 chroma
+ * subsampling - the fourcc at each position corresponds to the entry
+ * at the same index in fourcc_to_dwngrd_schema_id, yielding the most
+ * suitable fourcc counterpart for the downgraded subsampling property.
+ */
+static const u32 subs422_fourcc_dwngrd_schema[] = {
+ V4L2_PIX_FMT_NV16,
+ V4L2_PIX_FMT_NV61,
+};
+
+static const u32 subs420_fourcc_dwngrd_schema[] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+};
+
+/*
+ * Lookup table for translation of a fourcc to the position
+ * of its downgraded counterpart in the *fourcc_dwngrd_schema
+ * tables.
+ */
+static const u32 fourcc_to_dwngrd_schema_id[] = {
+ V4L2_PIX_FMT_NV24,
+ V4L2_PIX_FMT_NV42,
+ V4L2_PIX_FMT_NV16,
+ V4L2_PIX_FMT_NV61,
+ V4L2_PIX_FMT_YUYV,
+ V4L2_PIX_FMT_YVYU,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_GREY,
+};
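+/*
+ * Worked example based on the tables above: V4L2_PIX_FMT_NV24 is at
+ * index 0 of fourcc_to_dwngrd_schema_id, so when the parsed JPEG turns
+ * out to use 4:2:0 subsampling it is downgraded to
+ * subs420_fourcc_dwngrd_schema[0], i.e. V4L2_PIX_FMT_NV12.
+ */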
+
+static int s5p_jpeg_get_dwngrd_sch_id_by_fourcc(u32 fourcc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fourcc_to_dwngrd_schema_id); ++i) {
+ if (fourcc_to_dwngrd_schema_id[i] == fourcc)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int s5p_jpeg_adjust_fourcc_to_subsampling(
+ enum v4l2_jpeg_chroma_subsampling subs,
+ u32 in_fourcc,
+ u32 *out_fourcc,
+ struct s5p_jpeg_ctx *ctx)
+{
+ int dwngrd_sch_id;
+
+ if (ctx->subsampling != V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
+ dwngrd_sch_id =
+ s5p_jpeg_get_dwngrd_sch_id_by_fourcc(in_fourcc);
+ if (dwngrd_sch_id < 0)
+ return -EINVAL;
+ }
+
+ switch (ctx->subsampling) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+ *out_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ if (dwngrd_sch_id >
+ ARRAY_SIZE(subs420_fourcc_dwngrd_schema) - 1)
+ return -EINVAL;
+ *out_fourcc = subs420_fourcc_dwngrd_schema[dwngrd_sch_id];
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ if (dwngrd_sch_id >
+ ARRAY_SIZE(subs422_fourcc_dwngrd_schema) - 1)
+ return -EINVAL;
+ *out_fourcc = subs422_fourcc_dwngrd_schema[dwngrd_sch_id];
+ break;
+ default:
+ *out_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ }
+
+ return 0;
+}
+
+static int exynos4x12_decoded_subsampling[] = {
+ V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+};
+
+static int exynos3250_decoded_subsampling[] = {
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
+ -1,
+ -1,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_411,
+};
+
+static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
+{
+ return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
+}
+
+static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct s5p_jpeg_ctx, fh);
+}
+
+static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
+{
+ switch (ctx->jpeg->variant->version) {
+ case SJPEG_S5P:
+ WARN_ON(ctx->subsampling > 3);
+ if (ctx->subsampling > 2)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+ return ctx->subsampling;
+ case SJPEG_EXYNOS3250:
+ case SJPEG_EXYNOS5420:
+ WARN_ON(ctx->subsampling > 6);
+ if (ctx->subsampling > 3)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
+ return exynos3250_decoded_subsampling[ctx->subsampling];
+ case SJPEG_EXYNOS4:
+ WARN_ON(ctx->subsampling > 3);
+ if (ctx->subsampling > 2)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ return exynos4x12_decoded_subsampling[ctx->subsampling];
+ case SJPEG_EXYNOS5433:
+ return ctx->subsampling; /* parsed from header */
+ default:
+ WARN_ON(ctx->subsampling > 3);
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+ }
+}
+
+static inline void s5p_jpeg_set_qtbl(void __iomem *regs,
+ const unsigned char *qtbl,
+ unsigned long tab, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
+}
+
+static inline void s5p_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 0 with data for luma */
+ s5p_jpeg_set_qtbl(regs, qtbl_luminance[quality],
+ S5P_JPG_QTBL_CONTENT(0),
+ ARRAY_SIZE(qtbl_luminance[quality]));
+}
+
+static inline void s5p_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 1 with data for chroma */
+ s5p_jpeg_set_qtbl(regs, qtbl_chrominance[quality],
+ S5P_JPG_QTBL_CONTENT(1),
+ ARRAY_SIZE(qtbl_chrominance[quality]));
+}
+
+static inline void s5p_jpeg_set_htbl(void __iomem *regs,
+ const unsigned char *htbl,
+ unsigned long tab, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
+}
+
+static inline void s5p_jpeg_set_hdctbl(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ s5p_jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0),
+ ARRAY_SIZE(hdctbl0));
+}
+
+static inline void s5p_jpeg_set_hdctblg(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ s5p_jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0),
+ ARRAY_SIZE(hdctblg0));
+}
+
+static inline void s5p_jpeg_set_hactbl(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ s5p_jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0),
+ ARRAY_SIZE(hactbl0));
+}
+
+static inline void s5p_jpeg_set_hactblg(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ s5p_jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0),
+ ARRAY_SIZE(hactblg0));
+}
+
+static inline void exynos4_jpeg_set_tbl(void __iomem *regs,
+ const unsigned char *tbl,
+ unsigned long tab, int len)
+{
+ int i;
+ unsigned int dword;
+
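+ /*
+  * Pack four consecutive table bytes into one 32-bit register write,
+  * lowest-index byte in the least significant bits.
+  */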
+ for (i = 0; i < len; i += 4) {
+ dword = tbl[i] |
+ (tbl[i + 1] << 8) |
+ (tbl[i + 2] << 16) |
+ (tbl[i + 3] << 24);
+ writel(dword, regs + tab + i);
+ }
+}
+
+static inline void exynos4_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 0 with data for luma */
+ exynos4_jpeg_set_tbl(regs, qtbl_luminance[quality],
+ EXYNOS4_QTBL_CONTENT(0),
+ ARRAY_SIZE(qtbl_luminance[quality]));
+}
+
+static inline void exynos4_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 1 with data for chroma */
+ exynos4_jpeg_set_tbl(regs, qtbl_chrominance[quality],
+ EXYNOS4_QTBL_CONTENT(1),
+ ARRAY_SIZE(qtbl_chrominance[quality]));
+}
+
+static void exynos4_jpeg_set_huff_tbl(void __iomem *base)
+{
+ exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCLL,
+ ARRAY_SIZE(hdctbl0));
+ exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCCL,
+ ARRAY_SIZE(hdctbl0));
+ exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCLV,
+ ARRAY_SIZE(hdctblg0));
+ exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCCV,
+ ARRAY_SIZE(hdctblg0));
+ exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACLL,
+ ARRAY_SIZE(hactbl0));
+ exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACCL,
+ ARRAY_SIZE(hactbl0));
+ exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACLV,
+ ARRAY_SIZE(hactblg0));
+ exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACCV,
+ ARRAY_SIZE(hactblg0));
+}
+
+static inline int __exynos4_huff_tbl(int class, int id, bool lenval)
+{
+ /*
+ * class: 0 - DC, 1 - AC
+ * id: 0 - Y, 1 - Cb/Cr
+ */
+ if (class) {
+ if (id)
+ return lenval ? EXYNOS4_HUFF_TBL_HACCL :
+ EXYNOS4_HUFF_TBL_HACCV;
+ return lenval ? EXYNOS4_HUFF_TBL_HACLL : EXYNOS4_HUFF_TBL_HACLV;
+
+ }
+ /* class == 0 */
+ if (id)
+ return lenval ? EXYNOS4_HUFF_TBL_HDCCL : EXYNOS4_HUFF_TBL_HDCCV;
+
+ return lenval ? EXYNOS4_HUFF_TBL_HDCLL : EXYNOS4_HUFF_TBL_HDCLV;
+}
+
+static inline int exynos4_huff_tbl_len(int class, int id)
+{
+ return __exynos4_huff_tbl(class, id, true);
+}
+
+static inline int exynos4_huff_tbl_val(int class, int id)
+{
+ return __exynos4_huff_tbl(class, id, false);
+}
+
+static int get_byte(struct s5p_jpeg_buffer *buf);
+static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word);
+static void skip(struct s5p_jpeg_buffer *buf, long len);
+
+static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ int c, x, components;
+
+ jpeg_buffer.size = 2; /* Ls */
+ jpeg_buffer.data =
+ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
+ jpeg_buffer.curr = 0;
+
+ word = 0;
+
+ if (get_word_be(&jpeg_buffer, &word))
+ return;
+ jpeg_buffer.size = (long)word - 2;
+ jpeg_buffer.data += 2;
+ jpeg_buffer.curr = 0;
+
+ components = get_byte(&jpeg_buffer);
+ if (components == -1)
+ return;
+ while (components--) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ x = get_byte(&jpeg_buffer);
+ if (x == -1)
+ return;
+ exynos4_jpeg_select_dec_h_tbl(jpeg->regs, c,
+ (((x >> 4) & 0x1) << 1) | (x & 0x1));
+ }
+
+}
+
+static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ int c, i, n, j;
+
+ for (j = 0; j < ctx->out_q.dht.n; ++j) {
+ jpeg_buffer.size = ctx->out_q.dht.len[j];
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
+ ctx->out_q.dht.marker[j];
+ jpeg_buffer.curr = 0;
+
+ word = 0;
+ while (jpeg_buffer.curr < jpeg_buffer.size) {
+ char id, class;
+
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ id = c & 0xf;
+ class = (c >> 4) & 0xf;
+ n = 0;
+ for (i = 0; i < 16; ++i) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ word |= c << ((i % 4) * 8);
+ if ((i + 1) % 4 == 0) {
+ writel(word, jpeg->regs +
+ exynos4_huff_tbl_len(class, id) +
+ (i / 4) * 4);
+ word = 0;
+ }
+ n += c;
+ }
+ word = 0;
+ for (i = 0; i < n; ++i) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ word |= c << ((i % 4) * 8);
+ if ((i + 1) % 4 == 0) {
+ writel(word, jpeg->regs +
+ exynos4_huff_tbl_val(class, id) +
+ (i / 4) * 4);
+ word = 0;
+ }
+ }
+ if (i % 4) {
+ writel(word, jpeg->regs +
+ exynos4_huff_tbl_val(class, id) + (i / 4) * 4);
+ }
+ word = 0;
+ }
+ }
+}
+
+static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ int c, x, components;
+
+ jpeg_buffer.size = ctx->out_q.sof_len;
+ jpeg_buffer.data =
+ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
+ jpeg_buffer.curr = 0;
+
+ skip(&jpeg_buffer, 5); /* P, Y, X */
+ components = get_byte(&jpeg_buffer);
+ if (components == -1)
+ return;
+
+ exynos4_jpeg_set_dec_components(jpeg->regs, components);
+
+ while (components--) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ skip(&jpeg_buffer, 1);
+ x = get_byte(&jpeg_buffer);
+ if (x == -1)
+ return;
+ exynos4_jpeg_select_dec_q_tbl(jpeg->regs, c, x);
+ }
+}
+
+static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ int c, i, j;
+
+ for (j = 0; j < ctx->out_q.dqt.n; ++j) {
+ jpeg_buffer.size = ctx->out_q.dqt.len[j];
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
+ ctx->out_q.dqt.marker[j];
+ jpeg_buffer.curr = 0;
+
+ word = 0;
+ while (jpeg_buffer.size - jpeg_buffer.curr >= 65) {
+ char id;
+
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ id = c & 0xf;
+ /* nonzero means extended mode - not supported */
+ if ((c >> 4) & 0xf)
+ return;
+ for (i = 0; i < 64; ++i) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ word |= c << ((i % 4) * 8);
+ if ((i + 1) % 4 == 0) {
+ writel(word, jpeg->regs +
+ EXYNOS4_QTBL_CONTENT(id) + (i / 4) * 4);
+ word = 0;
+ }
+ }
+ word = 0;
+ }
+ }
+}
+
+/*
+ * ============================================================================
+ * Device file operations
+ * ============================================================================
+ */
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+ __u32 pixelformat, unsigned int fmt_type);
+static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);
+
+static int s5p_jpeg_open(struct file *file)
+{
+ struct s5p_jpeg *jpeg = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct s5p_jpeg_ctx *ctx;
+ struct s5p_jpeg_fmt *out_fmt, *cap_fmt;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&jpeg->lock)) {
+ ret = -ERESTARTSYS;
+ goto free;
+ }
+
+ v4l2_fh_init(&ctx->fh, vfd);
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->jpeg = jpeg;
+ if (vfd == jpeg->vfd_encoder) {
+ ctx->mode = S5P_JPEG_ENCODE;
+ out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_RGB565,
+ FMT_TYPE_OUTPUT);
+ cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ FMT_TYPE_CAPTURE);
+ } else {
+ ctx->mode = S5P_JPEG_DECODE;
+ out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ FMT_TYPE_OUTPUT);
+ cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_YUYV,
+ FMT_TYPE_CAPTURE);
+ ctx->scale_factor = EXYNOS3250_DEC_SCALE_FACTOR_8_8;
+ }
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error;
+ }
+
+ ctx->out_q.fmt = out_fmt;
+ ctx->cap_q.fmt = cap_fmt;
+
+ ret = s5p_jpeg_controls_create(ctx);
+ if (ret < 0)
+ goto error;
+
+ mutex_unlock(&jpeg->lock);
+ return 0;
+
+error:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&jpeg->lock);
+free:
+ kfree(ctx);
+ return ret;
+}
+
+static int s5p_jpeg_release(struct file *file)
+{
+ struct s5p_jpeg *jpeg = video_drvdata(file);
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
+
+ mutex_lock(&jpeg->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&jpeg->lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations s5p_jpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = s5p_jpeg_open,
+ .release = s5p_jpeg_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+/*
+ * ============================================================================
+ * video ioctl operations
+ * ============================================================================
+ */
+
+static int get_byte(struct s5p_jpeg_buffer *buf)
+{
+ if (buf->curr >= buf->size)
+ return -1;
+
+ return ((unsigned char *)buf->data)[buf->curr++];
+}
+
+static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word)
+{
+ unsigned int temp;
+ int byte;
+
+ byte = get_byte(buf);
+ if (byte == -1)
+ return -1;
+ temp = byte << 8;
+ byte = get_byte(buf);
+ if (byte == -1)
+ return -1;
+ *word = (unsigned int)byte | temp;
+ return 0;
+}
+
+static void skip(struct s5p_jpeg_buffer *buf, long len)
+{
+ if (len <= 0)
+ return;
+
+ while (len--)
+ get_byte(buf);
+}
+
+static bool s5p_jpeg_subsampling_decode(struct s5p_jpeg_ctx *ctx,
+ unsigned int subsampling)
+{
+ unsigned int version;
+
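+ /*
+  * The value is the sampling-factor byte of the first (luma) component
+  * from the SOF0 header: high nibble = horizontal factor, low nibble =
+  * vertical factor, e.g. 0x21 means 2x1, i.e. 4:2:2.
+  */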
+ switch (subsampling) {
+ case 0x11:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444;
+ break;
+ case 0x21:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422;
+ break;
+ case 0x22:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ break;
+ case 0x33:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+ break;
+ case 0x41:
+ /*
+ * 4:1:1 subsampling only supported by 3250, 5420, and 5433
+ * variants
+ */
+ version = ctx->jpeg->variant->version;
+ if (version != SJPEG_EXYNOS3250 &&
+ version != SJPEG_EXYNOS5420 &&
+ version != SJPEG_EXYNOS5433)
+ return false;
+
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_411;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
+ unsigned long buffer, unsigned long size,
+ struct s5p_jpeg_ctx *ctx)
+{
+ int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
+ unsigned int height = 0, width = 0, word, subsampling = 0;
+ unsigned int sos = 0, sof = 0, sof_len = 0;
+ unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER];
+ unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
+ long length;
+ struct s5p_jpeg_buffer jpeg_buffer;
+
+ jpeg_buffer.size = size;
+ jpeg_buffer.data = buffer;
+ jpeg_buffer.curr = 0;
+
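+ /*
+ * Scan the buffer for JPEG markers: a marker is an 0xff byte followed
+ * by a non-zero code (0xff fill bytes and stuffed zero bytes are
+ * skipped). SOF0, DQT, DHT and SOS positions are recorded; scanning
+ * stops once both SOF0 and SOS have been found.
+ */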
+ notfound = 1;
+ while (notfound || !sos) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return false;
+ if (c != 0xff)
+ continue;
+ do
+ c = get_byte(&jpeg_buffer);
+ while (c == 0xff);
+ if (c == -1)
+ return false;
+ if (c == 0)
+ continue;
+ length = 0;
+ switch (c) {
+ /* SOF0: baseline JPEG */
+ case SOF0:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ length = (long)word - 2;
+ if (!length)
+ return false;
+ sof = jpeg_buffer.curr; /* after 0xffc0 */
+ sof_len = length;
+ if (get_byte(&jpeg_buffer) == -1)
+ break;
+ if (get_word_be(&jpeg_buffer, &height))
+ break;
+ if (get_word_be(&jpeg_buffer, &width))
+ break;
+ components = get_byte(&jpeg_buffer);
+ if (components == -1)
+ break;
+
+ if (components == 1) {
+ subsampling = 0x33;
+ } else {
+ skip(&jpeg_buffer, 1);
+ subsampling = get_byte(&jpeg_buffer);
+ skip(&jpeg_buffer, 1);
+ }
+ if (components > 3)
+ return false;
+ skip(&jpeg_buffer, components * 2);
+ notfound = 0;
+ break;
+
+ case DQT:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ length = (long)word - 2;
+ if (!length)
+ return false;
+ if (n_dqt >= S5P_JPEG_MAX_MARKER)
+ return false;
+ dqt[n_dqt] = jpeg_buffer.curr; /* after 0xffdb */
+ dqt_len[n_dqt++] = length;
+ skip(&jpeg_buffer, length);
+ break;
+
+ case DHT:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ length = (long)word - 2;
+ if (!length)
+ return false;
+ if (n_dht >= S5P_JPEG_MAX_MARKER)
+ return false;
+ dht[n_dht] = jpeg_buffer.curr; /* after 0xffc4 */
+ dht_len[n_dht++] = length;
+ skip(&jpeg_buffer, length);
+ break;
+
+ case SOS:
+ sos = jpeg_buffer.curr - 2; /* 0xffda */
+ break;
+
+ /* skip payload-less markers */
+ case RST ... RST + 7:
+ case SOI:
+ case EOI:
+ case TEM:
+ break;
+
+ /* skip uninteresting payload markers */
+ default:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ length = (long)word - 2;
+ skip(&jpeg_buffer, length);
+ break;
+ }
+ }
+
+ if (notfound || !sos || !s5p_jpeg_subsampling_decode(ctx, subsampling))
+ return false;
+
+ result->w = width;
+ result->h = height;
+ result->sos = sos;
+ result->dht.n = n_dht;
+ while (n_dht--) {
+ result->dht.marker[n_dht] = dht[n_dht];
+ result->dht.len[n_dht] = dht_len[n_dht];
+ }
+ result->dqt.n = n_dqt;
+ while (n_dqt--) {
+ result->dqt.marker[n_dqt] = dqt[n_dqt];
+ result->dqt.len[n_dqt] = dqt_len[n_dqt];
+ }
+ result->sof = sof;
+ result->sof_len = sof_len;
+ result->components = components;
+
+ return true;
+}
+
+static int s5p_jpeg_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ strlcpy(cap->driver, S5P_JPEG_M2M_NAME,
+ sizeof(cap->driver));
+ strlcpy(cap->card, S5P_JPEG_M2M_NAME " encoder",
+ sizeof(cap->card));
+ } else {
+ strlcpy(cap->driver, S5P_JPEG_M2M_NAME,
+ sizeof(cap->driver));
+ strlcpy(cap->card, S5P_JPEG_M2M_NAME " decoder",
+ sizeof(cap->card));
+ }
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(ctx->jpeg->dev));
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int enum_fmt(struct s5p_jpeg_ctx *ctx,
+ struct s5p_jpeg_fmt *sjpeg_formats, int n,
+ struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num = 0;
+ unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
+
+ for (i = 0; i < n; ++i) {
+ if (sjpeg_formats[i].flags & type &&
+ sjpeg_formats[i].flags & fmt_ver_flag) {
+ /* Is this the index-th format of the requested type? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index
+ */
+ ++num;
+ }
+ }
+
+ /* Format not found */
+ if (i >= n)
+ return -EINVAL;
+
+ strlcpy(f->description, sjpeg_formats[i].name, sizeof(f->description));
+ f->pixelformat = sjpeg_formats[i].fourcc;
+
+ return 0;
+}
+
+static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_ENC_CAPTURE);
+
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_CAPTURE);
+}
+
+static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_ENC_OUTPUT);
+
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_OUTPUT);
+}
+
+static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return &ctx->out_q;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return &ctx->cap_q;
+
+ return NULL;
+}
+
+static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct s5p_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
+
+ vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
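+ /*
+ * For decoding, the capture format is only known once the JPEG header
+ * of a queued OUTPUT buffer has been parsed.
+ */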
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed)
+ return -EINVAL;
+ q_data = get_q_data(ct, f->type);
+ BUG_ON(q_data == NULL);
+
+ pix->width = q_data->w;
+ pix->height = q_data->h;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->bytesperline = 0;
+ if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
+ u32 bpl = q_data->w;
+
+ if (q_data->fmt->colplanes == 1)
+ bpl = (bpl * q_data->fmt->depth) >> 3;
+ pix->bytesperline = bpl;
+ }
+ pix->sizeimage = q_data->size;
+
+ return 0;
+}
+
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+ u32 pixelformat, unsigned int fmt_type)
+{
+ unsigned int k, fmt_flag;
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+ SJPEG_FMT_FLAG_ENC_OUTPUT :
+ SJPEG_FMT_FLAG_ENC_CAPTURE;
+ else
+ fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+ SJPEG_FMT_FLAG_DEC_OUTPUT :
+ SJPEG_FMT_FLAG_DEC_CAPTURE;
+
+ for (k = 0; k < ARRAY_SIZE(sjpeg_formats); k++) {
+ struct s5p_jpeg_fmt *fmt = &sjpeg_formats[k];
+
+ if (fmt->fourcc == pixelformat &&
+ fmt->flags & fmt_flag &&
+ fmt->flags & ctx->jpeg->variant->fmt_ver_flag) {
+ return fmt;
+ }
+ }
+
+ return NULL;
+}
+
+static void jpeg_bound_align_image(struct s5p_jpeg_ctx *ctx,
+ u32 *w, unsigned int wmin, unsigned int wmax,
+ unsigned int walign,
+ u32 *h, unsigned int hmin, unsigned int hmax,
+ unsigned int halign)
+{
+ int width, height, w_step, h_step;
+
+ width = *w;
+ height = *h;
+
+ w_step = 1 << walign;
+ h_step = 1 << halign;
+
+ if (ctx->jpeg->variant->hw3250_compat) {
+ /*
+ * Rightmost and bottommost pixels are cropped by the
+ * Exynos3250/compatible JPEG IP for RGB formats, for the
+ * specific width and height values respectively. This
+ * assignment will result in v4l_bound_align_image returning
+ * dimensions reduced by 1 for the aforementioned cases.
+ */
+ if (w_step == 4 && ((width & 3) == 1)) {
+ wmax = width;
+ hmax = height;
+ }
+ }
+
+ v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+
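+ /*
+ * If alignment rounded a dimension down, prefer rounding it up by one
+ * step instead, as long as the result still fits below the maximum.
+ */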
+ if (*w < width && (*w + w_step) < wmax)
+ *w += w_step;
+ if (*h < height && (*h + h_step) < hmax)
+ *h += h_step;
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
+ struct s5p_jpeg_ctx *ctx, int q_type)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+ else if (pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ /* The V4L2 specification suggests that the driver correct the format
+ * struct if any of the dimensions is unsupported
+ */
+ if (q_type == FMT_TYPE_OUTPUT)
+ jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH,
+ S5P_JPEG_MAX_WIDTH, 0,
+ &pix->height, S5P_JPEG_MIN_HEIGHT,
+ S5P_JPEG_MAX_HEIGHT, 0);
+ else
+ jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH,
+ S5P_JPEG_MAX_WIDTH, fmt->h_align,
+ &pix->height, S5P_JPEG_MIN_HEIGHT,
+ S5P_JPEG_MAX_HEIGHT, fmt->v_align);
+
+ if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
+ if (pix->sizeimage <= 0)
+ pix->sizeimage = PAGE_SIZE;
+ pix->bytesperline = 0;
+ } else {
+ u32 bpl = pix->bytesperline;
+
+ if (fmt->colplanes > 1 && bpl < pix->width)
+ bpl = pix->width; /* planar */
+
+ if (fmt->colplanes == 1 && /* packed */
+ (bpl << 3) / fmt->depth < pix->width)
+ bpl = (pix->width * fmt->depth) >> 3;
+
+ pix->bytesperline = bpl;
+ pix->sizeimage = (pix->width * pix->height * fmt->depth) >> 3;
+ }
+
+ return 0;
+}
+
+static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct s5p_jpeg_fmt *fmt;
+ int ret;
+
+ fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+ FMT_TYPE_CAPTURE);
+ if (!fmt) {
+ v4l2_err(&ctx->jpeg->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ if (!ctx->jpeg->variant->hw_ex4_compat || ctx->mode != S5P_JPEG_DECODE)
+ goto exit;
+
+ /*
+ * The exynos4x12 device requires the resulting YUV image
+ * subsampling not to be lower than the input JPEG subsampling.
+ * If this requirement is not met, downgrade the requested
+ * capture format to one with subsampling equal to that of the input JPEG.
+ */
+ if ((fmt->flags & SJPEG_FMT_NON_RGB) &&
+ (fmt->subsampling < ctx->subsampling)) {
+ ret = s5p_jpeg_adjust_fourcc_to_subsampling(ctx->subsampling,
+ fmt->fourcc,
+ &pix->pixelformat,
+ ctx);
+ if (ret < 0)
+ pix->pixelformat = V4L2_PIX_FMT_GREY;
+
+ fmt = s5p_jpeg_find_format(ctx, pix->pixelformat,
+ FMT_TYPE_CAPTURE);
+ }
+
+ /*
+ * Decompressing a JPEG file with 4:2:0 subsampling and odd
+ * width to a YUV 4:2:0 compliant format produces a raw image
+ * with a broken luma component. Adjust the capture format to
+ * RGB565 in such a case.
+ */
+ if (ctx->subsampling == V4L2_JPEG_CHROMA_SUBSAMPLING_420 &&
+ (ctx->out_q.w & 1) &&
+ (pix->pixelformat == V4L2_PIX_FMT_NV12 ||
+ pix->pixelformat == V4L2_PIX_FMT_NV21 ||
+ pix->pixelformat == V4L2_PIX_FMT_YUV420)) {
+ pix->pixelformat = V4L2_PIX_FMT_RGB565;
+ fmt = s5p_jpeg_find_format(ctx, pix->pixelformat,
+ FMT_TYPE_CAPTURE);
+ }
+
+exit:
+ return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_CAPTURE);
+}
+
+static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_fmt *fmt;
+
+ fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+ FMT_TYPE_OUTPUT);
+ if (!fmt) {
+ v4l2_err(&ctx->jpeg->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_OUTPUT);
+}
+
+static int exynos4_jpeg_get_output_buffer_size(struct s5p_jpeg_ctx *ctx,
+ struct v4l2_format *f,
+ int fmt_depth)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 pix_fmt = f->fmt.pix.pixelformat;
+ int w = pix->width, h = pix->height, wh_align;
+ int padding = 0;
+
+ if (pix_fmt == V4L2_PIX_FMT_RGB32 ||
+ pix_fmt == V4L2_PIX_FMT_RGB565 ||
+ pix_fmt == V4L2_PIX_FMT_NV24 ||
+ pix_fmt == V4L2_PIX_FMT_NV42 ||
+ pix_fmt == V4L2_PIX_FMT_NV12 ||
+ pix_fmt == V4L2_PIX_FMT_NV21 ||
+ pix_fmt == V4L2_PIX_FMT_YUV420)
+ wh_align = 4;
+ else
+ wh_align = 1;
+
+ jpeg_bound_align_image(ctx, &w, S5P_JPEG_MIN_WIDTH,
+ S5P_JPEG_MAX_WIDTH, wh_align,
+ &h, S5P_JPEG_MIN_HEIGHT,
+ S5P_JPEG_MAX_HEIGHT, wh_align);
+
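+ /*
+ * The Exynos4 variant accesses memory beyond the nominal image size
+ * during encoding (see the comment in s5p_jpeg_s_fmt()); reserve an
+ * extra page to avoid a sysmmu page fault.
+ */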
+ if (ctx->jpeg->variant->version == SJPEG_EXYNOS4)
+ padding = PAGE_SIZE;
+
+ return (w * h * fmt_depth >> 3) + padding;
+}
+
+static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
+ struct v4l2_rect *r);
+
+static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct s5p_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_ctrl *ctrl_subs;
+ struct v4l2_rect scale_rect;
+ unsigned int f_type;
+
+ vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ct, f->type);
+ BUG_ON(q_data == NULL);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
+ FMT_TYPE_OUTPUT : FMT_TYPE_CAPTURE;
+
+ q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type);
+ if (ct->mode == S5P_JPEG_ENCODE ||
+ (ct->mode == S5P_JPEG_DECODE &&
+ q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)) {
+ q_data->w = pix->width;
+ q_data->h = pix->height;
+ }
+ if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
+ /*
+ * During encoding Exynos4x12 SoCs access a wider memory area
+ * than implied by the Image_x and Image_y values written to
+ * the JPEG_IMAGE_SIZE register. Calculate a properly enlarged
+ * buffer size in such a case to avoid a sysmmu page fault.
+ */
+ if (ct->jpeg->variant->hw_ex4_compat &&
+ f_type == FMT_TYPE_OUTPUT && ct->mode == S5P_JPEG_ENCODE)
+ q_data->size = exynos4_jpeg_get_output_buffer_size(ct,
+ f,
+ q_data->fmt->depth);
+ else
+ q_data->size = q_data->w * q_data->h *
+ q_data->fmt->depth >> 3;
+ } else {
+ q_data->size = pix->sizeimage;
+ }
+
+ if (f_type == FMT_TYPE_OUTPUT) {
+ ctrl_subs = v4l2_ctrl_find(&ct->ctrl_handler,
+ V4L2_CID_JPEG_CHROMA_SUBSAMPLING);
+ if (ctrl_subs)
+ v4l2_ctrl_s_ctrl(ctrl_subs, q_data->fmt->subsampling);
+ ct->crop_altered = false;
+ }
+
+ /*
+ * For decoding, initialize crop_rect with the capture buffer dimensions,
+ * which contain the aligned dimensions of the input JPEG image, and do it
+ * only if the crop rectangle hasn't been altered by user space, e.g. with
+ * the S_SELECTION ioctl. For encoding, assign the output buffer dimensions.
+ */
+ if (!ct->crop_altered &&
+ ((ct->mode == S5P_JPEG_DECODE && f_type == FMT_TYPE_CAPTURE) ||
+ (ct->mode == S5P_JPEG_ENCODE && f_type == FMT_TYPE_OUTPUT))) {
+ ct->crop_rect.width = pix->width;
+ ct->crop_rect.height = pix->height;
+ }
+
+ /*
+ * Prevent downscaling to the YUV420 format by more than a factor
+ * of 2 on Exynos3250/compatible SoCs, as it produces a broken raw
+ * image in such cases.
+ */
+ if (ct->mode == S5P_JPEG_DECODE &&
+ f_type == FMT_TYPE_CAPTURE &&
+ ct->jpeg->variant->hw3250_compat &&
+ pix->pixelformat == V4L2_PIX_FMT_YUV420 &&
+ ct->scale_factor > 2) {
+ scale_rect.width = ct->out_q.w / 2;
+ scale_rect.height = ct->out_q.h / 2;
+ exynos3250_jpeg_try_downscale(ct, &scale_rect);
+ }
+
+ return 0;
+}
+
+static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = s5p_jpeg_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+}
+
+static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = s5p_jpeg_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+}
+
+static int s5p_jpeg_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
+ return v4l2_src_change_event_subscribe(fh, sub);
+
+ return -EINVAL;
+}
+
+static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
+ struct v4l2_rect *r)
+{
+ int w_ratio, h_ratio, scale_factor, cur_ratio, i;
+
+ w_ratio = ctx->out_q.w / r->width;
+ h_ratio = ctx->out_q.h / r->height;
+
+ scale_factor = w_ratio > h_ratio ? w_ratio : h_ratio;
+ scale_factor = clamp_val(scale_factor, 1, 8);
+
+ /* Align scale ratio to the nearest power of 2 */
+ for (i = 0; i <= 3; ++i) {
+ cur_ratio = 1 << i;
+ if (scale_factor <= cur_ratio) {
+ ctx->scale_factor = cur_ratio;
+ break;
+ }
+ }
+
+ r->width = round_down(ctx->out_q.w / ctx->scale_factor, 2);
+ r->height = round_down(ctx->out_q.h / ctx->scale_factor, 2);
+
+ ctx->crop_rect.width = r->width;
+ ctx->crop_rect.height = r->height;
+ ctx->crop_rect.left = 0;
+ ctx->crop_rect.top = 0;
+
+ ctx->crop_altered = true;
+
+ return 0;
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int exynos3250_jpeg_try_crop(struct s5p_jpeg_ctx *ctx,
+ struct v4l2_rect *r)
+{
+ struct v4l2_rect base_rect;
+ int w_step, h_step;
+
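+ /*
+ * The chroma subsampling of the capture format dictates the required
+ * width/height alignment of the crop rectangle.
+ */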
+ switch (ctx->cap_q.fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ w_step = 1;
+ h_step = 2;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ w_step = 2;
+ h_step = 2;
+ break;
+ default:
+ w_step = 1;
+ h_step = 1;
+ break;
+ }
+
+ base_rect.top = 0;
+ base_rect.left = 0;
+ base_rect.width = ctx->out_q.w;
+ base_rect.height = ctx->out_q.h;
+
+ r->width = round_down(r->width, w_step);
+ r->height = round_down(r->height, h_step);
+ r->left = round_down(r->left, 2);
+ r->top = round_down(r->top, 2);
+
+ if (!enclosed_rectangle(r, &base_rect))
+ return -EINVAL;
+
+ ctx->crop_rect.left = r->left;
+ ctx->crop_rect.top = r->top;
+ ctx->crop_rect.width = r->width;
+ ctx->crop_rect.height = r->height;
+
+ ctx->crop_altered = true;
+
+ return 0;
+}
+
+/*
+ * V4L2 controls
+ */
+
+static int s5p_jpeg_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* For JPEG blob active == default == bounds */
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.width = ctx->out_q.w;
+ s->r.height = ctx->out_q.h;
+ s->r.left = 0;
+ s->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r.width = ctx->crop_rect.width;
+ s->r.height = ctx->crop_rect.height;
+ s->r.left = ctx->crop_rect.left;
+ s->r.top = ctx->crop_rect.top;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_jpeg_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
+ struct v4l2_rect *rect = &s->r;
+ int ret = -EINVAL;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (s->target == V4L2_SEL_TGT_COMPOSE) {
+ if (ctx->mode != S5P_JPEG_DECODE)
+ return -EINVAL;
+ if (ctx->jpeg->variant->hw3250_compat)
+ ret = exynos3250_jpeg_try_downscale(ctx, rect);
+ } else if (s->target == V4L2_SEL_TGT_CROP) {
+ if (ctx->mode != S5P_JPEG_ENCODE)
+ return -EINVAL;
+ if (ctx->jpeg->variant->hw3250_compat)
+ ret = exynos3250_jpeg_try_crop(ctx, rect);
+ }
+
+ return ret;
+}
+
+static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ unsigned long flags;
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ spin_lock_irqsave(&jpeg->slock, flags);
+ ctrl->val = s5p_jpeg_to_user_subsampling(ctx);
+ spin_unlock_irqrestore(&jpeg->slock, flags);
+ break;
+ }
+
+ return 0;
+}
+
+static int s5p_jpeg_adjust_subs_ctrl(struct s5p_jpeg_ctx *ctx, int *ctrl_val)
+{
+ switch (ctx->jpeg->variant->version) {
+ case SJPEG_S5P:
+ return 0;
+ case SJPEG_EXYNOS3250:
+ case SJPEG_EXYNOS5420:
+ /*
+ * The exynos3250/compatible device can only produce a JPEG image
+ * with 4:4:4 subsampling when given an RGB32 source image.
+ */
+ if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB32)
+ *ctrl_val = 0;
+ break;
+ case SJPEG_EXYNOS4:
+ /*
+ * The exynos4x12 device requires the input raw image fourcc
+ * to be V4L2_PIX_FMT_GREY if a grayscale JPEG format
+ * is to be set.
+ */
+ if (ctx->out_q.fmt->fourcc != V4L2_PIX_FMT_GREY &&
+ *ctrl_val == V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY)
+ return -EINVAL;
+ break;
+ }
+
+ /*
+ * The exynos4x12 and exynos3250/compatible devices require the
+ * resulting JPEG subsampling not to be lower than the input raw
+ * image subsampling.
+ */
+ if (ctx->out_q.fmt->subsampling > *ctrl_val)
+ *ctrl_val = ctx->out_q.fmt->subsampling;
+
+ return 0;
+}
+
+static int s5p_jpeg_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ if (ctrl->id == V4L2_CID_JPEG_CHROMA_SUBSAMPLING)
+ ret = s5p_jpeg_adjust_subs_ctrl(ctx, &ctrl->val);
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+ return ret;
+}
+
+static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->compr_quality = ctrl->val;
+ break;
+ case V4L2_CID_JPEG_RESTART_INTERVAL:
+ ctx->restart_interval = ctrl->val;
+ break;
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ ctx->subsampling = ctrl->val;
+ break;
+ }
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = {
+ .g_volatile_ctrl = s5p_jpeg_g_volatile_ctrl,
+ .try_ctrl = s5p_jpeg_try_ctrl,
+ .s_ctrl = s5p_jpeg_s_ctrl,
+};
+
+static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
+{
+ unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ 0, 3, 1, S5P_JPEG_COMPR_QUAL_WORST);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
+ V4L2_CID_JPEG_RESTART_INTERVAL,
+ 0, 0xffff, 1, 0);
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ mask = ~0x06; /* 422, 420 */
+ }
+
+ ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
+ V4L2_CID_JPEG_CHROMA_SUBSAMPLING,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422);
+
+ if (ctx->ctrl_handler.error) {
+ ret = ctx->ctrl_handler.error;
+ goto error_free;
+ }
+
+ if (ctx->mode == S5P_JPEG_DECODE)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+
+ ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ret < 0)
+ goto error_free;
+
+ return ret;
+
+error_free:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
+ .vidioc_querycap = s5p_jpeg_querycap,
+
+ .vidioc_enum_fmt_vid_cap = s5p_jpeg_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = s5p_jpeg_enum_fmt_vid_out,
+
+ .vidioc_g_fmt_vid_cap = s5p_jpeg_g_fmt,
+ .vidioc_g_fmt_vid_out = s5p_jpeg_g_fmt,
+
+ .vidioc_try_fmt_vid_cap = s5p_jpeg_try_fmt_vid_cap,
+ .vidioc_try_fmt_vid_out = s5p_jpeg_try_fmt_vid_out,
+
+ .vidioc_s_fmt_vid_cap = s5p_jpeg_s_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out = s5p_jpeg_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = s5p_jpeg_g_selection,
+ .vidioc_s_selection = s5p_jpeg_s_selection,
+
+ .vidioc_subscribe_event = s5p_jpeg_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * ============================================================================
+ * mem2mem callbacks
+ * ============================================================================
+ */
+
+static void s5p_jpeg_device_run(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned long src_addr, dst_addr, flags;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ s5p_jpeg_reset(jpeg->regs);
+ s5p_jpeg_poweron(jpeg->regs);
+ s5p_jpeg_proc_mode(jpeg->regs, ctx->mode);
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
+ s5p_jpeg_input_raw_mode(jpeg->regs,
+ S5P_JPEG_RAW_IN_565);
+ else
+ s5p_jpeg_input_raw_mode(jpeg->regs,
+ S5P_JPEG_RAW_IN_422);
+ s5p_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
+ s5p_jpeg_dri(jpeg->regs, ctx->restart_interval);
+ s5p_jpeg_x(jpeg->regs, ctx->out_q.w);
+ s5p_jpeg_y(jpeg->regs, ctx->out_q.h);
+ s5p_jpeg_imgadr(jpeg->regs, src_addr);
+ s5p_jpeg_jpgadr(jpeg->regs, dst_addr);
+
+ /* ultimately comes from sizeimage from userspace */
+ s5p_jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
+
+ /* JPEG RGB to YCbCr conversion matrix */
+ s5p_jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
+ s5p_jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
+ s5p_jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
+ s5p_jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
+ s5p_jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
+ s5p_jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
+ s5p_jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
+ s5p_jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
+ s5p_jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
+
+ /*
+ * The JPEG IP allows storing 4 quantization tables.
+ * We fill table 0 for luma and table 1 for chroma.
+ */
+ s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+ /* use table 0 for Y */
+ s5p_jpeg_qtbl(jpeg->regs, 1, 0);
+ /* use table 1 for Cb and Cr */
+ s5p_jpeg_qtbl(jpeg->regs, 2, 1);
+ s5p_jpeg_qtbl(jpeg->regs, 3, 1);
+
+ /* Y, Cb, Cr use Huffman table 0 */
+ s5p_jpeg_htbl_ac(jpeg->regs, 1);
+ s5p_jpeg_htbl_dc(jpeg->regs, 1);
+ s5p_jpeg_htbl_ac(jpeg->regs, 2);
+ s5p_jpeg_htbl_dc(jpeg->regs, 2);
+ s5p_jpeg_htbl_ac(jpeg->regs, 3);
+ s5p_jpeg_htbl_dc(jpeg->regs, 3);
+ } else { /* S5P_JPEG_DECODE */
+ s5p_jpeg_rst_int_enable(jpeg->regs, true);
+ s5p_jpeg_data_num_int_enable(jpeg->regs, true);
+ s5p_jpeg_final_mcu_num_int_enable(jpeg->regs, true);
+ if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
+ s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
+ else
+ s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
+ s5p_jpeg_jpgadr(jpeg->regs, src_addr);
+ s5p_jpeg_imgadr(jpeg->regs, dst_addr);
+ }
+
+ s5p_jpeg_start(jpeg->regs);
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+}
+
+static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct s5p_jpeg_fmt *fmt;
+ struct vb2_v4l2_buffer *vb;
+ struct s5p_jpeg_addr jpeg_addr = {};
+ u32 pix_size, padding_bytes = 0;
+
+ jpeg_addr.cb = 0;
+ jpeg_addr.cr = 0;
+
+ pix_size = ctx->cap_q.w * ctx->cap_q.h;
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ fmt = ctx->out_q.fmt;
+ if (ctx->out_q.w % 2 && fmt->h_align > 0)
+ padding_bytes = ctx->out_q.h;
+ } else {
+ fmt = ctx->cap_q.fmt;
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ }
+
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
+
+ if (fmt->colplanes == 2) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
+ } else if (fmt->colplanes == 3) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size;
+ if (fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
+ else
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
+ }
+
+ exynos4_jpeg_set_frame_buf_address(jpeg->regs, &jpeg_addr);
+}
+
+static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *vb;
+ unsigned int jpeg_addr = 0;
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
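+ /*
+ * On Exynos5433 decoding starts at the SOS marker; the quantization
+ * and Huffman tables are programmed separately from the parsed header
+ * in exynos4_jpeg_device_run().
+ */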
+ if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
+ ctx->mode == S5P_JPEG_DECODE)
+ jpeg_addr += ctx->out_q.sos;
+ exynos4_jpeg_set_stream_buf_address(jpeg->regs, jpeg_addr);
+}
+
+static inline void exynos4_jpeg_set_img_fmt(void __iomem *base,
+ unsigned int img_fmt)
+{
+ __exynos4_jpeg_set_img_fmt(base, img_fmt, SJPEG_EXYNOS4);
+}
+
+static inline void exynos5433_jpeg_set_img_fmt(void __iomem *base,
+ unsigned int img_fmt)
+{
+ __exynos4_jpeg_set_img_fmt(base, img_fmt, SJPEG_EXYNOS5433);
+}
+
+static inline void exynos4_jpeg_set_enc_out_fmt(void __iomem *base,
+ unsigned int out_fmt)
+{
+ __exynos4_jpeg_set_enc_out_fmt(base, out_fmt, SJPEG_EXYNOS4);
+}
+
+static inline void exynos5433_jpeg_set_enc_out_fmt(void __iomem *base,
+ unsigned int out_fmt)
+{
+ __exynos4_jpeg_set_enc_out_fmt(base, out_fmt, SJPEG_EXYNOS5433);
+}
+
+static void exynos4_jpeg_device_run(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ unsigned int bitstream_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&jpeg->slock, flags);
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ exynos4_jpeg_sw_reset(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs, jpeg->variant->version);
+ exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
+
+ exynos4_jpeg_set_huff_tbl(jpeg->regs);
+
+ /*
+ * The JPEG IP allows storing 4 quantization tables.
+ * We fill table 0 for luma and table 1 for chroma.
+ */
+ exynos4_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ exynos4_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+
+ exynos4_jpeg_set_encode_tbl_select(jpeg->regs,
+ ctx->compr_quality);
+ exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
+ ctx->cap_q.h);
+
+ if (ctx->jpeg->variant->version == SJPEG_EXYNOS4) {
+ exynos4_jpeg_set_enc_out_fmt(jpeg->regs,
+ ctx->subsampling);
+ exynos4_jpeg_set_img_fmt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ } else {
+ exynos5433_jpeg_set_enc_out_fmt(jpeg->regs,
+ ctx->subsampling);
+ exynos5433_jpeg_set_img_fmt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ }
+ exynos4_jpeg_set_img_addr(ctx);
+ exynos4_jpeg_set_jpeg_addr(ctx);
+ exynos4_jpeg_set_encode_hoff_cnt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ } else {
+ exynos4_jpeg_sw_reset(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs,
+ jpeg->variant->version);
+ exynos4_jpeg_set_img_addr(ctx);
+ exynos4_jpeg_set_jpeg_addr(ctx);
+
+ if (jpeg->variant->version == SJPEG_EXYNOS5433) {
+ exynos4_jpeg_parse_huff_tbl(ctx);
+ exynos4_jpeg_parse_decode_h_tbl(ctx);
+
+ exynos4_jpeg_parse_q_tbl(ctx);
+ exynos4_jpeg_parse_decode_q_tbl(ctx);
+
+ exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
+
+ exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
+ ctx->cap_q.h);
+ exynos5433_jpeg_set_enc_out_fmt(jpeg->regs,
+ ctx->subsampling);
+ exynos5433_jpeg_set_img_fmt(jpeg->regs,
+ ctx->cap_q.fmt->fourcc);
+ bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 16);
+ } else {
+ exynos4_jpeg_set_img_fmt(jpeg->regs,
+ ctx->cap_q.fmt->fourcc);
+ bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32);
+ }
+
+ exynos4_jpeg_set_dec_bitstream_size(jpeg->regs, bitstream_size);
+ }
+
+ exynos4_jpeg_set_sys_int_enable(jpeg->regs, 1);
+ exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode);
+
+ spin_unlock_irqrestore(&jpeg->slock, flags);
+}
+
+static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct s5p_jpeg_fmt *fmt;
+ struct vb2_v4l2_buffer *vb;
+ struct s5p_jpeg_addr jpeg_addr = {};
+ u32 pix_size;
+
+ pix_size = ctx->cap_q.w * ctx->cap_q.h;
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ fmt = ctx->out_q.fmt;
+ } else {
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ fmt = ctx->cap_q.fmt;
+ }
+
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
+
+ if (fmt->colplanes == 2) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size;
+ } else if (fmt->colplanes == 3) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size;
+ if (fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
+ else
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
+ }
+
+ exynos3250_jpeg_imgadr(jpeg->regs, &jpeg_addr);
+}
+
+static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *vb;
+ unsigned int jpeg_addr = 0;
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
+ exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
+}
+
+static void exynos3250_jpeg_device_run(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ exynos3250_jpeg_reset(jpeg->regs);
+ exynos3250_jpeg_set_dma_num(jpeg->regs);
+ exynos3250_jpeg_poweron(jpeg->regs);
+ exynos3250_jpeg_clk_set(jpeg->regs);
+ exynos3250_jpeg_proc_mode(jpeg->regs, ctx->mode);
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ exynos3250_jpeg_input_raw_fmt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ exynos3250_jpeg_dri(jpeg->regs, ctx->restart_interval);
+
+ /*
+ * The JPEG IP allows storing 4 quantization tables.
+ * We fill table 0 for luma and table 1 for chroma.
+ */
+ s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+ /* use table 0 for Y */
+ exynos3250_jpeg_qtbl(jpeg->regs, 1, 0);
+ /* use table 1 for Cb and Cr */
+ exynos3250_jpeg_qtbl(jpeg->regs, 2, 1);
+ exynos3250_jpeg_qtbl(jpeg->regs, 3, 1);
+
+ /*
+ * Some SoCs require setting Huffman tables before each run
+ */
+ if (jpeg->variant->htbl_reinit) {
+ s5p_jpeg_set_hdctbl(jpeg->regs);
+ s5p_jpeg_set_hdctblg(jpeg->regs);
+ s5p_jpeg_set_hactbl(jpeg->regs);
+ s5p_jpeg_set_hactblg(jpeg->regs);
+ }
+
+ /* Y, Cb, Cr use Huffman table 0 */
+ exynos3250_jpeg_htbl_ac(jpeg->regs, 1);
+ exynos3250_jpeg_htbl_dc(jpeg->regs, 1);
+ exynos3250_jpeg_htbl_ac(jpeg->regs, 2);
+ exynos3250_jpeg_htbl_dc(jpeg->regs, 2);
+ exynos3250_jpeg_htbl_ac(jpeg->regs, 3);
+ exynos3250_jpeg_htbl_dc(jpeg->regs, 3);
+
+ exynos3250_jpeg_set_x(jpeg->regs, ctx->crop_rect.width);
+ exynos3250_jpeg_set_y(jpeg->regs, ctx->crop_rect.height);
+ exynos3250_jpeg_stride(jpeg->regs, ctx->out_q.fmt->fourcc,
+ ctx->out_q.w);
+ exynos3250_jpeg_offset(jpeg->regs, ctx->crop_rect.left,
+ ctx->crop_rect.top);
+ exynos3250_jpeg_set_img_addr(ctx);
+ exynos3250_jpeg_set_jpeg_addr(ctx);
+ exynos3250_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
+
+ /* ultimately comes from sizeimage from userspace */
+ exynos3250_jpeg_enc_stream_bound(jpeg->regs, ctx->cap_q.size);
+
+ if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565 ||
+ ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565X ||
+ ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB32)
+ exynos3250_jpeg_set_y16(jpeg->regs, true);
+ } else {
+ exynos3250_jpeg_set_img_addr(ctx);
+ exynos3250_jpeg_set_jpeg_addr(ctx);
+ exynos3250_jpeg_stride(jpeg->regs, ctx->cap_q.fmt->fourcc,
+ ctx->cap_q.w);
+ exynos3250_jpeg_offset(jpeg->regs, 0, 0);
+ exynos3250_jpeg_dec_scaling_ratio(jpeg->regs,
+ ctx->scale_factor);
+ exynos3250_jpeg_dec_stream_size(jpeg->regs, ctx->out_q.size);
+ exynos3250_jpeg_output_raw_fmt(jpeg->regs,
+ ctx->cap_q.fmt->fourcc);
+ }
+
+ exynos3250_jpeg_interrupts_enable(jpeg->regs);
+
+ /* JPEG RGB to YCbCr conversion matrix */
+ exynos3250_jpeg_coef(jpeg->regs, ctx->mode);
+
+ exynos3250_jpeg_set_timer(jpeg->regs, EXYNOS3250_IRQ_TIMEOUT);
+ jpeg->irq_status = 0;
+ exynos3250_jpeg_start(jpeg->regs);
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+}
+
+static int s5p_jpeg_job_ready(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ if (ctx->mode == S5P_JPEG_DECODE) {
+ /*
+ * We have only one input buffer and one output buffer. If there
+ * is a resolution change event, no need to continue decoding.
+ */
+ if (ctx->state == JPEGCTX_RESOLUTION_CHANGE)
+ return 0;
+
+ return ctx->hdr_parsed;
+ }
+
+ return 1;
+}
+
+static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
+ .device_run = s5p_jpeg_device_run,
+ .job_ready = s5p_jpeg_job_ready,
+};
+
+static struct v4l2_m2m_ops exynos3250_jpeg_m2m_ops = {
+ .device_run = exynos3250_jpeg_device_run,
+ .job_ready = s5p_jpeg_job_ready,
+};
+
+static struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
+ .device_run = exynos4_jpeg_device_run,
+ .job_ready = s5p_jpeg_job_ready,
+};
+
+/*
+ * ============================================================================
+ * Queue operations
+ * ============================================================================
+ */
+
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+ struct s5p_jpeg_q_data *q_data = NULL;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+ BUG_ON(q_data == NULL);
+
+ size = q_data->size;
+
+ /*
+ * The header is parsed during decoding and the parsed information is
+ * stored in the context, so we do not allow another buffer to overwrite it.
+ */
+ if (ctx->mode == S5P_JPEG_DECODE)
+ count = 1;
+
+ *nbuffers = count;
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct s5p_jpeg_q_data *q_data = NULL;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ BUG_ON(q_data == NULL);
+
+ if (vb2_plane_size(vb, 0) < q_data->size) {
+ pr_err("%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->size);
+
+ return 0;
+}
+
+static void s5p_jpeg_set_capture_queue_data(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg_q_data *q_data = &ctx->cap_q;
+
+ q_data->w = ctx->out_q.w;
+ q_data->h = ctx->out_q.h;
+
+ /*
+ * This call to jpeg_bound_align_image() takes care of width and
+ * height alignment when user space calls QBUF on the OUTPUT buffer
+ * after S_FMT on the CAPTURE buffer.
+ * Please note that on Exynos4x12 SoCs, skipping S_FMT on the capture
+ * buffer for each JPEG image can result in a hardware hangup if the
+ * subsampling is lower than that of the input JPEG.
+ */
+ jpeg_bound_align_image(ctx, &q_data->w, S5P_JPEG_MIN_WIDTH,
+ S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
+ &q_data->h, S5P_JPEG_MIN_HEIGHT,
+ S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align);
+
+ q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
+}
+
+static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (ctx->mode == S5P_JPEG_DECODE &&
+ vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ struct vb2_queue *dst_vq;
+ u32 ori_w;
+ u32 ori_h;
+
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ ori_w = ctx->out_q.w;
+ ori_h = ctx->out_q.h;
+
+ ctx->hdr_parsed = s5p_jpeg_parse_hdr(&ctx->out_q,
+ (unsigned long)vb2_plane_vaddr(vb, 0),
+ min((unsigned long)ctx->out_q.size,
+ vb2_get_plane_payload(vb, 0)), ctx);
+ if (!ctx->hdr_parsed) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ /*
+ * If there is a resolution change event, only update capture
+ * queue when it is not streaming. Otherwise, update it in
+ * STREAMOFF. See s5p_jpeg_stop_streaming for detail.
+ */
+ if (ctx->out_q.w != ori_w || ctx->out_q.h != ori_h) {
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+ if (vb2_is_streaming(dst_vq))
+ ctx->state = JPEGCTX_RESOLUTION_CHANGE;
+ else
+ s5p_jpeg_set_capture_queue_data(ctx);
+ }
+ }
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
+
+ ret = pm_runtime_get_sync(ctx->jpeg->dev);
+
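+ /*
+ * pm_runtime_get_sync() may return 1 if the device was already
+ * active; treat any non-negative value as success.
+ */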
+ return ret > 0 ? 0 : ret;
+}
+
+static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+
+ /*
+ * STREAMOFF is an acknowledgment of the resolution change event.
+ * Before STREAMOFF, we still have to return the old resolution and
+ * subsampling. Update the capture queue once the stream is off.
+ */
+ if (ctx->state == JPEGCTX_RESOLUTION_CHANGE &&
+ q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ s5p_jpeg_set_capture_queue_data(ctx);
+ ctx->state = JPEGCTX_RUNNING;
+ }
+
+ pm_runtime_put(ctx->jpeg->dev);
+}
+
+static const struct vb2_ops s5p_jpeg_qops = {
+ .queue_setup = s5p_jpeg_queue_setup,
+ .buf_prepare = s5p_jpeg_buf_prepare,
+ .buf_queue = s5p_jpeg_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = s5p_jpeg_start_streaming,
+ .stop_streaming = s5p_jpeg_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &s5p_jpeg_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpeg->lock;
+ src_vq->dev = ctx->jpeg->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &s5p_jpeg_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpeg->lock;
+ dst_vq->dev = ctx->jpeg->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * ============================================================================
+ * ISR
+ * ============================================================================
+ */
+
+static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
+{
+ struct s5p_jpeg *jpeg = dev_id;
+ struct s5p_jpeg_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned long payload_size = 0;
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ bool enc_jpeg_too_large = false;
+ bool timer_elapsed = false;
+ bool op_completed = false;
+
+ spin_lock(&jpeg->slock);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ if (curr_ctx->mode == S5P_JPEG_ENCODE)
+ enc_jpeg_too_large = s5p_jpeg_enc_stream_stat(jpeg->regs);
+ timer_elapsed = s5p_jpeg_timer_stat(jpeg->regs);
+ op_completed = s5p_jpeg_result_stat_ok(jpeg->regs);
+ if (curr_ctx->mode == S5P_JPEG_DECODE)
+ op_completed = op_completed &&
+ s5p_jpeg_stream_stat_ok(jpeg->regs);
+
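+ /*
+ * An overflowing encoded stream, an elapsed timer or an unsuccessful
+ * operation marks both buffers as erroneous; otherwise read back the
+ * size of the compressed stream.
+ */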
+ if (enc_jpeg_too_large) {
+ state = VB2_BUF_STATE_ERROR;
+ s5p_jpeg_clear_enc_stream_stat(jpeg->regs);
+ } else if (timer_elapsed) {
+ state = VB2_BUF_STATE_ERROR;
+ s5p_jpeg_clear_timer_stat(jpeg->regs);
+ } else if (!op_completed) {
+ state = VB2_BUF_STATE_ERROR;
+ } else {
+ payload_size = s5p_jpeg_compressed_size(jpeg->regs);
+ }
+
+ dst_buf->timecode = src_buf->timecode;
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->flags |=
+ src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_buf, state);
+ if (curr_ctx->mode == S5P_JPEG_ENCODE)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
+ v4l2_m2m_buf_done(dst_buf, state);
+
+ curr_ctx->subsampling = s5p_jpeg_get_subsampling_mode(jpeg->regs);
+ spin_unlock(&jpeg->slock);
+
+ s5p_jpeg_clear_int(jpeg->regs);
+
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
+{
+ unsigned int int_status;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ struct s5p_jpeg *jpeg = priv;
+ struct s5p_jpeg_ctx *curr_ctx;
+ unsigned long payload_size = 0;
+
+ spin_lock(&jpeg->slock);
+
+ exynos4_jpeg_set_sys_int_enable(jpeg->regs, 0);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ int_status = exynos4_jpeg_get_int_status(jpeg->regs);
+
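+ /*
+ * Translate the hardware interrupt status into a driver result code;
+ * only OK_ENC_OR_DEC indicates a successfully completed run.
+ */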
+ if (int_status) {
+ switch (int_status & 0x1f) {
+ case 0x1:
+ jpeg->irq_ret = ERR_PROT;
+ break;
+ case 0x2:
+ jpeg->irq_ret = OK_ENC_OR_DEC;
+ break;
+ case 0x4:
+ jpeg->irq_ret = ERR_DEC_INVALID_FORMAT;
+ break;
+ case 0x8:
+ jpeg->irq_ret = ERR_MULTI_SCAN;
+ break;
+ case 0x10:
+ jpeg->irq_ret = ERR_FRAME;
+ break;
+ default:
+ jpeg->irq_ret = ERR_UNKNOWN;
+ break;
+ }
+ } else {
+ jpeg->irq_ret = ERR_UNKNOWN;
+ }
+
+ if (jpeg->irq_ret == OK_ENC_OR_DEC) {
+ if (curr_ctx->mode == S5P_JPEG_ENCODE) {
+ payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
+ vb2_set_plane_payload(&dst_vb->vb2_buf,
+ 0, payload_size);
+ }
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ } else {
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ if (jpeg->variant->version == SJPEG_EXYNOS4)
+ curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
+
+ exynos4_jpeg_set_enc_dec_mode(jpeg->regs, S5P_JPEG_DISABLE);
+
+ spin_unlock(&jpeg->slock);
+
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id)
+{
+ struct s5p_jpeg *jpeg = dev_id;
+ struct s5p_jpeg_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned long payload_size = 0;
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ bool interrupt_timeout = false;
+ bool stream_error = false;
+ u32 irq_status;
+
+ spin_lock(&jpeg->slock);
+
+ irq_status = exynos3250_jpeg_get_timer_status(jpeg->regs);
+ if (irq_status & EXYNOS3250_TIMER_INT_STAT) {
+ exynos3250_jpeg_clear_timer_status(jpeg->regs);
+ interrupt_timeout = true;
+ dev_err(jpeg->dev, "Interrupt timeout occurred.\n");
+ }
+
+ irq_status = exynos3250_jpeg_get_int_status(jpeg->regs);
+ exynos3250_jpeg_clear_int_status(jpeg->regs, irq_status);
+
+ jpeg->irq_status |= irq_status;
+
+ if (jpeg->variant->version == SJPEG_EXYNOS5420 &&
+ irq_status & EXYNOS3250_STREAM_STAT) {
+ stream_error = true;
+ dev_err(jpeg->dev, "Syntax error or unrecoverable error occurred.\n");
+ }
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+ if (!curr_ctx)
+ goto exit_unlock;
+
+ if ((irq_status & EXYNOS3250_HEADER_STAT) &&
+ (curr_ctx->mode == S5P_JPEG_DECODE)) {
+ exynos3250_jpeg_rstart(jpeg->regs);
+ goto exit_unlock;
+ }
+
+ if (jpeg->irq_status & (EXYNOS3250_JPEG_DONE |
+ EXYNOS3250_WDMA_DONE |
+ EXYNOS3250_RDMA_DONE |
+ EXYNOS3250_RESULT_STAT))
+ payload_size = exynos3250_jpeg_compressed_size(jpeg->regs);
+ else if (interrupt_timeout || stream_error)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ goto exit_unlock;
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ dst_buf->timecode = src_buf->timecode;
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+
+ v4l2_m2m_buf_done(src_buf, state);
+ if (curr_ctx->mode == S5P_JPEG_ENCODE)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
+ v4l2_m2m_buf_done(dst_buf, state);
+
+ curr_ctx->subsampling =
+ exynos3250_jpeg_get_subsampling_mode(jpeg->regs);
+
+ spin_unlock(&jpeg->slock);
+
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
+ return IRQ_HANDLED;
+
+exit_unlock:
+ spin_unlock(&jpeg->slock);
+ return IRQ_HANDLED;
+}
+
+static void *jpeg_get_drv_data(struct device *dev);
+
+/*
+ * ============================================================================
+ * Driver basic infrastructure
+ * ============================================================================
+ */
+
+static int s5p_jpeg_probe(struct platform_device *pdev)
+{
+ struct s5p_jpeg *jpeg;
+ struct resource *res;
+ int i, ret;
+
+ /* JPEG IP abstraction struct */
+ jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
+ if (!jpeg)
+ return -ENOMEM;
+
+ jpeg->variant = jpeg_get_drv_data(&pdev->dev);
+
+ mutex_init(&jpeg->lock);
+ spin_lock_init(&jpeg->slock);
+ jpeg->dev = &pdev->dev;
+
+ /* memory-mapped registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ jpeg->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(jpeg->regs))
+ return PTR_ERR(jpeg->regs);
+
+ /* interrupt service routine registration */
+ jpeg->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, jpeg->irq, jpeg->variant->jpeg_irq,
+ 0, dev_name(&pdev->dev), jpeg);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
+ return ret;
+ }
+
+ /* clocks */
+ for (i = 0; i < jpeg->variant->num_clocks; i++) {
+ jpeg->clocks[i] = devm_clk_get(&pdev->dev,
+ jpeg->variant->clk_names[i]);
+ if (IS_ERR(jpeg->clocks[i])) {
+ dev_err(&pdev->dev, "failed to get clock: %s\n",
+ jpeg->variant->clk_names[i]);
+ return PTR_ERR(jpeg->clocks[i]);
+ }
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ return ret;
+ }
+
+ /* mem2mem device */
+ jpeg->m2m_dev = v4l2_m2m_init(jpeg->variant->m2m_ops);
+ if (IS_ERR(jpeg->m2m_dev)) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpeg->m2m_dev);
+ goto device_register_rollback;
+ }
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ /* JPEG encoder /dev/videoX node */
+ jpeg->vfd_encoder = video_device_alloc();
+ if (!jpeg->vfd_encoder) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto m2m_init_rollback;
+ }
+ snprintf(jpeg->vfd_encoder->name, sizeof(jpeg->vfd_encoder->name),
+ "%s-enc", S5P_JPEG_M2M_NAME);
+ jpeg->vfd_encoder->fops = &s5p_jpeg_fops;
+ jpeg->vfd_encoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
+ jpeg->vfd_encoder->minor = -1;
+ jpeg->vfd_encoder->release = video_device_release;
+ jpeg->vfd_encoder->lock = &jpeg->lock;
+ jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev;
+ jpeg->vfd_encoder->vfl_dir = VFL_DIR_M2M;
+
+ ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+ video_device_release(jpeg->vfd_encoder);
+ goto m2m_init_rollback;
+ }
+
+ video_set_drvdata(jpeg->vfd_encoder, jpeg);
+ v4l2_info(&jpeg->v4l2_dev,
+ "encoder device registered as /dev/video%d\n",
+ jpeg->vfd_encoder->num);
+
+ /* JPEG decoder /dev/videoX node */
+ jpeg->vfd_decoder = video_device_alloc();
+ if (!jpeg->vfd_decoder) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto enc_vdev_register_rollback;
+ }
+ snprintf(jpeg->vfd_decoder->name, sizeof(jpeg->vfd_decoder->name),
+ "%s-dec", S5P_JPEG_M2M_NAME);
+ jpeg->vfd_decoder->fops = &s5p_jpeg_fops;
+ jpeg->vfd_decoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
+ jpeg->vfd_decoder->minor = -1;
+ jpeg->vfd_decoder->release = video_device_release;
+ jpeg->vfd_decoder->lock = &jpeg->lock;
+ jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
+ jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
+
+ ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+ video_device_release(jpeg->vfd_decoder);
+ goto enc_vdev_register_rollback;
+ }
+
+ video_set_drvdata(jpeg->vfd_decoder, jpeg);
+ v4l2_info(&jpeg->v4l2_dev,
+ "decoder device registered as /dev/video%d\n",
+ jpeg->vfd_decoder->num);
+
+ /* final statements & power management */
+ platform_set_drvdata(pdev, jpeg);
+
+ pm_runtime_enable(&pdev->dev);
+
+ v4l2_info(&jpeg->v4l2_dev, "Samsung S5P JPEG codec\n");
+
+ return 0;
+
+enc_vdev_register_rollback:
+ video_unregister_device(jpeg->vfd_encoder);
+
+m2m_init_rollback:
+ v4l2_m2m_release(jpeg->m2m_dev);
+
+device_register_rollback:
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
+ return ret;
+}
+
+static int s5p_jpeg_remove(struct platform_device *pdev)
+{
+ struct s5p_jpeg *jpeg = platform_get_drvdata(pdev);
+ int i;
+
+ pm_runtime_disable(jpeg->dev);
+
+ video_unregister_device(jpeg->vfd_decoder);
+ video_unregister_device(jpeg->vfd_encoder);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ v4l2_m2m_release(jpeg->m2m_dev);
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
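+ /*
+ * If the device is not runtime suspended, its clocks are still
+ * enabled; disable them before removal.
+ */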
+ if (!pm_runtime_status_suspended(&pdev->dev)) {
+ for (i = jpeg->variant->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(jpeg->clocks[i]);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s5p_jpeg_runtime_suspend(struct device *dev)
+{
+ struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+ int i;
+
+ for (i = jpeg->variant->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(jpeg->clocks[i]);
+
+ return 0;
+}
+
+static int s5p_jpeg_runtime_resume(struct device *dev)
+{
+ struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+ unsigned long flags;
+ int i, ret;
+
+ for (i = 0; i < jpeg->variant->num_clocks; i++) {
+ ret = clk_prepare_enable(jpeg->clocks[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(jpeg->clocks[i]);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&jpeg->slock, flags);
+
+ /*
+ * The JPEG IP allows storing two Huffman tables for each component.
+ * We fill table 0 for each component and do this here only
+ * for S5PC210 and Exynos3250 SoCs. Exynos4x12 and Exynos542x SoCs
+ * require programming their Huffman tables each time the encoding
+ * process is initialized, and thus this is accomplished in the
+ * device_run callback of m2m_ops.
+ */
+ if (!jpeg->variant->htbl_reinit) {
+ s5p_jpeg_set_hdctbl(jpeg->regs);
+ s5p_jpeg_set_hdctblg(jpeg->regs);
+ s5p_jpeg_set_hactbl(jpeg->regs);
+ s5p_jpeg_set_hactblg(jpeg->regs);
+ }
+
+ spin_unlock_irqrestore(&jpeg->slock, flags);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops s5p_jpeg_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume,
+ NULL)
+};
+
+static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
+ .version = SJPEG_S5P,
+ .jpeg_irq = s5p_jpeg_irq,
+ .m2m_ops = &s5p_jpeg_m2m_ops,
+ .fmt_ver_flag = SJPEG_FMT_FLAG_S5P,
+ .clk_names = {"jpeg"},
+ .num_clocks = 1,
+};
+
+static struct s5p_jpeg_variant exynos3250_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS3250,
+ .jpeg_irq = exynos3250_jpeg_irq,
+ .m2m_ops = &exynos3250_jpeg_m2m_ops,
+ .fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250,
+ .hw3250_compat = 1,
+ .clk_names = {"jpeg", "sclk"},
+ .num_clocks = 2,
+};
+
+static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS4,
+ .jpeg_irq = exynos4_jpeg_irq,
+ .m2m_ops = &exynos4_jpeg_m2m_ops,
+ .fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS4,
+ .htbl_reinit = 1,
+ .clk_names = {"jpeg"},
+ .num_clocks = 1,
+ .hw_ex4_compat = 1,
+};
+
+static struct s5p_jpeg_variant exynos5420_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS5420,
+ .jpeg_irq = exynos3250_jpeg_irq, /* intentionally 3250 */
+ .m2m_ops = &exynos3250_jpeg_m2m_ops, /* intentionally 3250 */
+ .fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250, /* intentionally 3250 */
+ .hw3250_compat = 1,
+ .htbl_reinit = 1,
+ .clk_names = {"jpeg"},
+ .num_clocks = 1,
+};
+
+static struct s5p_jpeg_variant exynos5433_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS5433,
+ .jpeg_irq = exynos4_jpeg_irq,
+ .m2m_ops = &exynos4_jpeg_m2m_ops,
+ .fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS4,
+ .htbl_reinit = 1,
+ .clk_names = {"pclk", "aclk", "aclk_xiu", "sclk"},
+ .num_clocks = 4,
+ .hw_ex4_compat = 1,
+};
+
+static const struct of_device_id samsung_jpeg_match[] = {
+ {
+ .compatible = "samsung,s5pv210-jpeg",
+ .data = &s5p_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos3250-jpeg",
+ .data = &exynos3250_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos4210-jpeg",
+ .data = &exynos4_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos4212-jpeg",
+ .data = &exynos4_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos5420-jpeg",
+ .data = &exynos5420_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos5433-jpeg",
+ .data = &exynos5433_jpeg_drvdata,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, samsung_jpeg_match);
+
+static void *jpeg_get_drv_data(struct device *dev)
+{
+ struct s5p_jpeg_variant *driver_data = NULL;
+ const struct of_device_id *match;
+
+ if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
+ return &s5p_jpeg_drvdata;
+
+ match = of_match_node(samsung_jpeg_match, dev->of_node);
+
+ if (match)
+ driver_data = (struct s5p_jpeg_variant *)match->data;
+
+ return driver_data;
+}
+
+static struct platform_driver s5p_jpeg_driver = {
+ .probe = s5p_jpeg_probe,
+ .remove = s5p_jpeg_remove,
+ .driver = {
+ .of_match_table = of_match_ptr(samsung_jpeg_match),
+ .name = S5P_JPEG_M2M_NAME,
+ .pm = &s5p_jpeg_pm_ops,
+ },
+};
+
+module_platform_driver(s5p_jpeg_driver);
+
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("Samsung JPEG codec driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
new file mode 100644
index 000000000..a46465e10
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -0,0 +1,272 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef JPEG_CORE_H_
+#define JPEG_CORE_H_
+
+#include <linux/interrupt.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+
+#define S5P_JPEG_M2M_NAME "s5p-jpeg"
+
+#define JPEG_MAX_CLOCKS 4
+
+/* JPEG compression quality setting */
+#define S5P_JPEG_COMPR_QUAL_BEST 0
+#define S5P_JPEG_COMPR_QUAL_WORST 3
+
+/* JPEG RGB to YCbCr conversion matrix coefficients */
+#define S5P_JPEG_COEF11 0x4d
+#define S5P_JPEG_COEF12 0x97
+#define S5P_JPEG_COEF13 0x1e
+#define S5P_JPEG_COEF21 0x2c
+#define S5P_JPEG_COEF22 0x57
+#define S5P_JPEG_COEF23 0x83
+#define S5P_JPEG_COEF31 0x83
+#define S5P_JPEG_COEF32 0x6e
+#define S5P_JPEG_COEF33 0x13
+
+#define EXYNOS3250_IRQ_TIMEOUT 0x10000000
+
+/* a selection of JPEG markers */
+#define TEM 0x01
+#define SOF0 0xc0
+#define DHT 0xc4
+#define RST 0xd0
+#define SOI 0xd8
+#define EOI 0xd9
+#define SOS 0xda
+#define DQT 0xdb
+#define DHP 0xde
+
+/* Flags that indicate a format can be used for capture/output */
+#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)
+#define SJPEG_FMT_FLAG_ENC_OUTPUT (1 << 1)
+#define SJPEG_FMT_FLAG_DEC_CAPTURE (1 << 2)
+#define SJPEG_FMT_FLAG_DEC_OUTPUT (1 << 3)
+#define SJPEG_FMT_FLAG_S5P (1 << 4)
+#define SJPEG_FMT_FLAG_EXYNOS3250 (1 << 5)
+#define SJPEG_FMT_FLAG_EXYNOS4 (1 << 6)
+#define SJPEG_FMT_RGB (1 << 7)
+#define SJPEG_FMT_NON_RGB (1 << 8)
+
+#define S5P_JPEG_ENCODE 0
+#define S5P_JPEG_DECODE 1
+#define S5P_JPEG_DISABLE -1
+
+#define FMT_TYPE_OUTPUT 0
+#define FMT_TYPE_CAPTURE 1
+
+#define SJPEG_SUBSAMPLING_444 0x11
+#define SJPEG_SUBSAMPLING_422 0x21
+#define SJPEG_SUBSAMPLING_420 0x22
+
+#define S5P_JPEG_MAX_MARKER 4
+
+/* Version numbers */
+enum sjpeg_version {
+ SJPEG_S5P,
+ SJPEG_EXYNOS3250,
+ SJPEG_EXYNOS4,
+ SJPEG_EXYNOS5420,
+ SJPEG_EXYNOS5433,
+};
+
+enum exynos4_jpeg_result {
+ OK_ENC_OR_DEC,
+ ERR_PROT,
+ ERR_DEC_INVALID_FORMAT,
+ ERR_MULTI_SCAN,
+ ERR_FRAME,
+ ERR_UNKNOWN,
+};
+
+enum exynos4_jpeg_img_quality_level {
+ QUALITY_LEVEL_1 = 0, /* high */
+ QUALITY_LEVEL_2,
+ QUALITY_LEVEL_3,
+ QUALITY_LEVEL_4, /* low */
+};
+
+enum s5p_jpeg_ctx_state {
+ JPEGCTX_RUNNING = 0,
+ JPEGCTX_RESOLUTION_CHANGE,
+};
+
+/**
+ * struct s5p_jpeg - JPEG IP abstraction
+ * @lock: the mutex protecting this structure
+ * @slock: spinlock protecting the device contexts
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @vfd_encoder: video device node for encoder mem2mem mode
+ * @vfd_decoder: video device node for decoder mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @regs: JPEG IP registers mapping
+ * @irq: JPEG IP irq
+ * @clocks: JPEG IP clock(s)
+ * @dev: JPEG IP struct device
+ * @variant: driver variant to be used
+ * @irq_ret:	result of the last operation, set by the interrupt handler
+ * @irq_status:	interrupt flags set during a single encode/decode
+ *		operation
+ */
+struct s5p_jpeg {
+ struct mutex lock;
+ spinlock_t slock;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_encoder;
+ struct video_device *vfd_decoder;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ void __iomem *regs;
+ unsigned int irq;
+ enum exynos4_jpeg_result irq_ret;
+ struct clk *clocks[JPEG_MAX_CLOCKS];
+ struct device *dev;
+ struct s5p_jpeg_variant *variant;
+ u32 irq_status;
+};
+
+struct s5p_jpeg_variant {
+ unsigned int version;
+ unsigned int fmt_ver_flag;
+ unsigned int hw3250_compat:1;
+ unsigned int htbl_reinit:1;
+ unsigned int hw_ex4_compat:1;
+ struct v4l2_m2m_ops *m2m_ops;
+ irqreturn_t (*jpeg_irq)(int irq, void *priv);
+ const char *clk_names[JPEG_MAX_CLOCKS];
+ int num_clocks;
+};
+
+/**
+ * struct s5p_jpeg_fmt - driver's internal color format data
+ * @name:	format description
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @depth: number of bits per pixel
+ * @colplanes: number of color planes (1 for packed formats)
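+ * @memplanes:	number of memory planes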
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
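+ * @subsampling: chroma subsampling of the format, where applicable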
+ * @flags: flags describing format applicability
+ */
+struct s5p_jpeg_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ int colplanes;
+ int memplanes;
+ int h_align;
+ int v_align;
+ int subsampling;
+ u32 flags;
+};
+
+/**
+ * struct s5p_jpeg_marker - collection of markers from the JPEG header
+ * @marker: markers' positions relative to the buffer beginning
+ * @len: markers' payload lengths (without length field)
+ * @n: number of markers in collection
+ */
+struct s5p_jpeg_marker {
+ u32 marker[S5P_JPEG_MAX_MARKER];
+ u32 len[S5P_JPEG_MAX_MARKER];
+ u32 n;
+};
+
+/**
+ * struct s5p_jpeg_q_data - parameters of one queue
+ * @fmt: driver-specific format of this queue
+ * @w: image width
+ * @h: image height
+ * @sos: SOS marker's position relative to the buffer beginning
+ * @dht: DHT markers' positions relative to the buffer beginning
+ * @dqt: DQT markers' positions relative to the buffer beginning
+ * @sof: SOF0 marker's position relative to the buffer beginning
+ * @sof_len: SOF0 marker's payload length (without length field itself)
+ * @components: number of image components
+ * @size: image buffer size in bytes
+ */
+struct s5p_jpeg_q_data {
+ struct s5p_jpeg_fmt *fmt;
+ u32 w;
+ u32 h;
+ u32 sos;
+ struct s5p_jpeg_marker dht;
+ struct s5p_jpeg_marker dqt;
+ u32 sof;
+ u32 sof_len;
+ u32 components;
+ u32 size;
+};
+
+/**
+ * struct s5p_jpeg_ctx - the device context data
+ * @jpeg: JPEG IP device for this context
+ * @mode: compression (encode) operation or decompression (decode)
+ * @compr_quality: destination image quality in compression (encode) mode
+ * @restart_interval: JPEG restart interval for JPEG encoding
+ * @subsampling: subsampling of a raw format or a JPEG
+ * @out_q: source (output) queue information
+ * @cap_q: destination (capture) queue information
+ * @scale_factor: scale factor for JPEG decoding
+ * @crop_rect: a rectangle representing crop area of the output buffer
+ * @fh: V4L2 file handle
+ * @hdr_parsed: set if header has been parsed during decompression
+ * @crop_altered: set if crop rectangle has been altered by the user space
+ * @ctrl_handler: controls handler
+ * @state: state of the context
+ */
+struct s5p_jpeg_ctx {
+ struct s5p_jpeg *jpeg;
+ unsigned int mode;
+ unsigned short compr_quality;
+ unsigned short restart_interval;
+ unsigned short subsampling;
+ struct s5p_jpeg_q_data out_q;
+ struct s5p_jpeg_q_data cap_q;
+ unsigned int scale_factor;
+ struct v4l2_rect crop_rect;
+ struct v4l2_fh fh;
+ bool hdr_parsed;
+ bool crop_altered;
+ struct v4l2_ctrl_handler ctrl_handler;
+ enum s5p_jpeg_ctx_state state;
+};
+
+/**
+ * struct s5p_jpeg_buffer - description of memory containing input JPEG data
+ * @size: buffer size
+ * @curr: current position in the buffer
+ * @data: pointer to the data
+ */
+struct s5p_jpeg_buffer {
+ unsigned long size;
+ unsigned long curr;
+ unsigned long data;
+};
+
+/**
+ * struct s5p_jpeg_addr - JPEG converter physical address set for DMA
+ * @y: luminance plane physical address
+ * @cb: Cb plane physical address
+ * @cr: Cr plane physical address
+ */
+struct s5p_jpeg_addr {
+ u32 y;
+ u32 cb;
+ u32 cr;
+};
+
+#endif /* JPEG_CORE_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
new file mode 100644
index 000000000..0861842b2
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
@@ -0,0 +1,489 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+
+#include "jpeg-core.h"
+#include "jpeg-regs.h"
+#include "jpeg-hw-exynos3250.h"
+
+void exynos3250_jpeg_reset(void __iomem *regs)
+{
+ u32 reg = 1;
+ int count = 1000;
+
+ writel(1, regs + EXYNOS3250_SW_RESET);
+ /* no other way but polling for when JPEG IP becomes operational */
+ while (reg != 0 && --count > 0) {
+ udelay(1);
+ cpu_relax();
+ reg = readl(regs + EXYNOS3250_SW_RESET);
+ }
+
+ reg = 0;
+ count = 1000;
+
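+	/* confirm the IP accepts register writes again by polling JPGDRI */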
+ while (reg != 1 && --count > 0) {
+ writel(1, regs + EXYNOS3250_JPGDRI);
+ udelay(1);
+ cpu_relax();
+ reg = readl(regs + EXYNOS3250_JPGDRI);
+ }
+
+ writel(0, regs + EXYNOS3250_JPGDRI);
+}
+
+void exynos3250_jpeg_poweron(void __iomem *regs)
+{
+ writel(EXYNOS3250_POWER_ON, regs + EXYNOS3250_JPGCLKCON);
+}
+
+void exynos3250_jpeg_set_dma_num(void __iomem *regs)
+{
+ writel(((EXYNOS3250_DMA_MO_COUNT << EXYNOS3250_WDMA_ISSUE_NUM_SHIFT) &
+ EXYNOS3250_WDMA_ISSUE_NUM_MASK) |
+ ((EXYNOS3250_DMA_MO_COUNT << EXYNOS3250_RDMA_ISSUE_NUM_SHIFT) &
+ EXYNOS3250_RDMA_ISSUE_NUM_MASK) |
+ ((EXYNOS3250_DMA_MO_COUNT << EXYNOS3250_ISSUE_GATHER_NUM_SHIFT) &
+ EXYNOS3250_ISSUE_GATHER_NUM_MASK),
+ regs + EXYNOS3250_DMA_ISSUE_NUM);
+}
+
+void exynos3250_jpeg_clk_set(void __iomem *base)
+{
+ u32 reg;
+
+ reg = readl(base + EXYNOS3250_JPGCMOD) & ~EXYNOS3250_HALF_EN_MASK;
+
+ writel(reg | EXYNOS3250_HALF_EN, base + EXYNOS3250_JPGCMOD);
+}
+
+void exynos3250_jpeg_input_raw_fmt(void __iomem *regs, unsigned int fmt)
+{
+ u32 reg;
+
+ reg = readl(regs + EXYNOS3250_JPGCMOD) &
+ EXYNOS3250_MODE_Y16_MASK;
+
+ switch (fmt) {
+ case V4L2_PIX_FMT_RGB32:
+ reg |= EXYNOS3250_MODE_SEL_ARGB8888;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ reg |= EXYNOS3250_MODE_SEL_ARGB8888 | EXYNOS3250_SRC_SWAP_RGB;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ reg |= EXYNOS3250_MODE_SEL_RGB565;
+ break;
+ case V4L2_PIX_FMT_RGB565X:
+ reg |= EXYNOS3250_MODE_SEL_RGB565 | EXYNOS3250_SRC_SWAP_RGB;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ reg |= EXYNOS3250_MODE_SEL_422_1P_LUM_CHR;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ reg |= EXYNOS3250_MODE_SEL_422_1P_LUM_CHR |
+ EXYNOS3250_SRC_SWAP_UV;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ reg |= EXYNOS3250_MODE_SEL_422_1P_CHR_LUM;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ reg |= EXYNOS3250_MODE_SEL_422_1P_CHR_LUM |
+ EXYNOS3250_SRC_SWAP_UV;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ reg |= EXYNOS3250_MODE_SEL_420_2P | EXYNOS3250_SRC_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ reg |= EXYNOS3250_MODE_SEL_420_2P | EXYNOS3250_SRC_NV21;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ reg |= EXYNOS3250_MODE_SEL_420_3P;
+ break;
+ default:
+ break;
+
+ }
+
+ writel(reg, regs + EXYNOS3250_JPGCMOD);
+}
+
+void exynos3250_jpeg_set_y16(void __iomem *regs, bool y16)
+{
+ u32 reg;
+
+ reg = readl(regs + EXYNOS3250_JPGCMOD);
+ if (y16)
+ reg |= EXYNOS3250_MODE_Y16;
+ else
+ reg &= ~EXYNOS3250_MODE_Y16_MASK;
+ writel(reg, regs + EXYNOS3250_JPGCMOD);
+}
+
+void exynos3250_jpeg_proc_mode(void __iomem *regs, unsigned int mode)
+{
+ u32 reg, m;
+
+ if (mode == S5P_JPEG_ENCODE)
+ m = EXYNOS3250_PROC_MODE_COMPR;
+ else
+ m = EXYNOS3250_PROC_MODE_DECOMPR;
+ reg = readl(regs + EXYNOS3250_JPGMOD);
+ reg &= ~EXYNOS3250_PROC_MODE_MASK;
+ reg |= m;
+ writel(reg, regs + EXYNOS3250_JPGMOD);
+}
+
+void exynos3250_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
+{
+ u32 reg, m = 0;
+
+ switch (mode) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_444:
+ m = EXYNOS3250_SUBSAMPLING_MODE_444;
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ m = EXYNOS3250_SUBSAMPLING_MODE_422;
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ m = EXYNOS3250_SUBSAMPLING_MODE_420;
+ break;
+ }
+
+ reg = readl(regs + EXYNOS3250_JPGMOD);
+ reg &= ~EXYNOS3250_SUBSAMPLING_MODE_MASK;
+ reg |= m;
+ writel(reg, regs + EXYNOS3250_JPGMOD);
+}
+
+unsigned int exynos3250_jpeg_get_subsampling_mode(void __iomem *regs)
+{
+ return readl(regs + EXYNOS3250_JPGMOD) &
+ EXYNOS3250_SUBSAMPLING_MODE_MASK;
+}
+
+void exynos3250_jpeg_dri(void __iomem *regs, unsigned int dri)
+{
+ u32 reg;
+
+ reg = dri & EXYNOS3250_JPGDRI_MASK;
+ writel(reg, regs + EXYNOS3250_JPGDRI);
+}
+
+void exynos3250_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
+{
+ unsigned long reg;
+
+ reg = readl(regs + EXYNOS3250_QHTBL);
+ reg &= ~EXYNOS3250_QT_NUM_MASK(t);
+ reg |= (n << EXYNOS3250_QT_NUM_SHIFT(t)) &
+ EXYNOS3250_QT_NUM_MASK(t);
+ writel(reg, regs + EXYNOS3250_QHTBL);
+}
+
+void exynos3250_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
+{
+ unsigned long reg;
+
+ reg = readl(regs + EXYNOS3250_QHTBL);
+ reg &= ~EXYNOS3250_HT_NUM_AC_MASK(t);
+ /* this driver uses table 0 for all color components */
+ reg |= (0 << EXYNOS3250_HT_NUM_AC_SHIFT(t)) &
+ EXYNOS3250_HT_NUM_AC_MASK(t);
+ writel(reg, regs + EXYNOS3250_QHTBL);
+}
+
+void exynos3250_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
+{
+ unsigned long reg;
+
+ reg = readl(regs + EXYNOS3250_QHTBL);
+ reg &= ~EXYNOS3250_HT_NUM_DC_MASK(t);
+ /* this driver uses table 0 for all color components */
+ reg |= (0 << EXYNOS3250_HT_NUM_DC_SHIFT(t)) &
+ EXYNOS3250_HT_NUM_DC_MASK(t);
+ writel(reg, regs + EXYNOS3250_QHTBL);
+}
+
+void exynos3250_jpeg_set_y(void __iomem *regs, unsigned int y)
+{
+ u32 reg;
+
+ reg = y & EXYNOS3250_JPGY_MASK;
+ writel(reg, regs + EXYNOS3250_JPGY);
+}
+
+void exynos3250_jpeg_set_x(void __iomem *regs, unsigned int x)
+{
+ u32 reg;
+
+ reg = x & EXYNOS3250_JPGX_MASK;
+ writel(reg, regs + EXYNOS3250_JPGX);
+}
+
+#if 0 /* Currently unused */
+unsigned int exynos3250_jpeg_get_y(void __iomem *regs)
+{
+ return readl(regs + EXYNOS3250_JPGY);
+}
+
+unsigned int exynos3250_jpeg_get_x(void __iomem *regs)
+{
+ return readl(regs + EXYNOS3250_JPGX);
+}
+#endif
+
+void exynos3250_jpeg_interrupts_enable(void __iomem *regs)
+{
+ u32 reg;
+
+ reg = readl(regs + EXYNOS3250_JPGINTSE);
+ reg |= (EXYNOS3250_JPEG_DONE_EN |
+ EXYNOS3250_WDMA_DONE_EN |
+ EXYNOS3250_RDMA_DONE_EN |
+ EXYNOS3250_ENC_STREAM_INT_EN |
+ EXYNOS3250_CORE_DONE_EN |
+ EXYNOS3250_ERR_INT_EN |
+ EXYNOS3250_HEAD_INT_EN);
+ writel(reg, regs + EXYNOS3250_JPGINTSE);
+}
+
+void exynos3250_jpeg_enc_stream_bound(void __iomem *regs, unsigned int size)
+{
+ u32 reg;
+
+ reg = size & EXYNOS3250_ENC_STREAM_BOUND_MASK;
+ writel(reg, regs + EXYNOS3250_ENC_STREAM_BOUND);
+}
+
+void exynos3250_jpeg_output_raw_fmt(void __iomem *regs, unsigned int fmt)
+{
+ u32 reg;
+
+ switch (fmt) {
+ case V4L2_PIX_FMT_RGB32:
+ reg = EXYNOS3250_OUT_FMT_ARGB8888;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ reg = EXYNOS3250_OUT_FMT_ARGB8888 | EXYNOS3250_OUT_SWAP_RGB;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ reg = EXYNOS3250_OUT_FMT_RGB565;
+ break;
+ case V4L2_PIX_FMT_RGB565X:
+ reg = EXYNOS3250_OUT_FMT_RGB565 | EXYNOS3250_OUT_SWAP_RGB;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ reg = EXYNOS3250_OUT_FMT_422_1P_LUM_CHR;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ reg = EXYNOS3250_OUT_FMT_422_1P_LUM_CHR |
+ EXYNOS3250_OUT_SWAP_UV;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ reg = EXYNOS3250_OUT_FMT_422_1P_CHR_LUM;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ reg = EXYNOS3250_OUT_FMT_422_1P_CHR_LUM |
+ EXYNOS3250_OUT_SWAP_UV;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ reg = EXYNOS3250_OUT_FMT_420_2P | EXYNOS3250_OUT_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ reg = EXYNOS3250_OUT_FMT_420_2P | EXYNOS3250_OUT_NV21;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ reg = EXYNOS3250_OUT_FMT_420_3P;
+ break;
+ default:
+ reg = 0;
+ break;
+ }
+
+ writel(reg, regs + EXYNOS3250_OUTFORM);
+}
+
+void exynos3250_jpeg_jpgadr(void __iomem *regs, unsigned int addr)
+{
+ writel(addr, regs + EXYNOS3250_JPG_JPGADR);
+}
+
+void exynos3250_jpeg_imgadr(void __iomem *regs, struct s5p_jpeg_addr *img_addr)
+{
+ writel(img_addr->y, regs + EXYNOS3250_LUMA_BASE);
+ writel(img_addr->cb, regs + EXYNOS3250_CHROMA_BASE);
+ writel(img_addr->cr, regs + EXYNOS3250_CHROMA_CR_BASE);
+}
+
+void exynos3250_jpeg_stride(void __iomem *regs, unsigned int img_fmt,
+ unsigned int width)
+{
+ u32 reg_luma = 0, reg_cr = 0, reg_cb = 0;
+
+ switch (img_fmt) {
+ case V4L2_PIX_FMT_RGB32:
+ reg_luma = 4 * width;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ reg_luma = 2 * width;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ reg_luma = width;
+ reg_cb = reg_luma;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ reg_luma = width;
+ reg_cb = reg_cr = reg_luma / 2;
+ break;
+ default:
+ break;
+ }
+
+ writel(reg_luma, regs + EXYNOS3250_LUMA_STRIDE);
+ writel(reg_cb, regs + EXYNOS3250_CHROMA_STRIDE);
+ writel(reg_cr, regs + EXYNOS3250_CHROMA_CR_STRIDE);
+}
+
+void exynos3250_jpeg_offset(void __iomem *regs, unsigned int x_offset,
+ unsigned int y_offset)
+{
+ u32 reg;
+
+ reg = (y_offset << EXYNOS3250_LUMA_YY_OFFSET_SHIFT) &
+ EXYNOS3250_LUMA_YY_OFFSET_MASK;
+ reg |= (x_offset << EXYNOS3250_LUMA_YX_OFFSET_SHIFT) &
+ EXYNOS3250_LUMA_YX_OFFSET_MASK;
+
+ writel(reg, regs + EXYNOS3250_LUMA_XY_OFFSET);
+
+ reg = (y_offset << EXYNOS3250_CHROMA_YY_OFFSET_SHIFT) &
+ EXYNOS3250_CHROMA_YY_OFFSET_MASK;
+ reg |= (x_offset << EXYNOS3250_CHROMA_YX_OFFSET_SHIFT) &
+ EXYNOS3250_CHROMA_YX_OFFSET_MASK;
+
+ writel(reg, regs + EXYNOS3250_CHROMA_XY_OFFSET);
+
+ reg = (y_offset << EXYNOS3250_CHROMA_CR_YY_OFFSET_SHIFT) &
+ EXYNOS3250_CHROMA_CR_YY_OFFSET_MASK;
+ reg |= (x_offset << EXYNOS3250_CHROMA_CR_YX_OFFSET_SHIFT) &
+ EXYNOS3250_CHROMA_CR_YX_OFFSET_MASK;
+
+ writel(reg, regs + EXYNOS3250_CHROMA_CR_XY_OFFSET);
+}
+
+void exynos3250_jpeg_coef(void __iomem *base, unsigned int mode)
+{
+ if (mode == S5P_JPEG_ENCODE) {
+ writel(EXYNOS3250_JPEG_ENC_COEF1,
+ base + EXYNOS3250_JPG_COEF(1));
+ writel(EXYNOS3250_JPEG_ENC_COEF2,
+ base + EXYNOS3250_JPG_COEF(2));
+ writel(EXYNOS3250_JPEG_ENC_COEF3,
+ base + EXYNOS3250_JPG_COEF(3));
+ } else {
+ writel(EXYNOS3250_JPEG_DEC_COEF1,
+ base + EXYNOS3250_JPG_COEF(1));
+ writel(EXYNOS3250_JPEG_DEC_COEF2,
+ base + EXYNOS3250_JPG_COEF(2));
+ writel(EXYNOS3250_JPEG_DEC_COEF3,
+ base + EXYNOS3250_JPG_COEF(3));
+ }
+}
+
+void exynos3250_jpeg_start(void __iomem *regs)
+{
+ writel(1, regs + EXYNOS3250_JSTART);
+}
+
+void exynos3250_jpeg_rstart(void __iomem *regs)
+{
+ writel(1, regs + EXYNOS3250_JRSTART);
+}
+
+unsigned int exynos3250_jpeg_get_int_status(void __iomem *regs)
+{
+ return readl(regs + EXYNOS3250_JPGINTST);
+}
+
+void exynos3250_jpeg_clear_int_status(void __iomem *regs,
+ unsigned int value)
+{
+ writel(value, regs + EXYNOS3250_JPGINTST);
+}
+
+unsigned int exynos3250_jpeg_operating(void __iomem *regs)
+{
+ return readl(regs + S5P_JPGOPR) & EXYNOS3250_JPGOPR_MASK;
+}
+
+unsigned int exynos3250_jpeg_compressed_size(void __iomem *regs)
+{
+ return readl(regs + EXYNOS3250_JPGCNT) & EXYNOS3250_JPGCNT_MASK;
+}
+
+void exynos3250_jpeg_dec_stream_size(void __iomem *regs,
+ unsigned int size)
+{
+ writel(size & EXYNOS3250_DEC_STREAM_MASK,
+ regs + EXYNOS3250_DEC_STREAM_SIZE);
+}
+
+void exynos3250_jpeg_dec_scaling_ratio(void __iomem *regs,
+ unsigned int sratio)
+{
+ switch (sratio) {
+ case 1:
+ default:
+ sratio = EXYNOS3250_DEC_SCALE_FACTOR_8_8;
+ break;
+ case 2:
+ sratio = EXYNOS3250_DEC_SCALE_FACTOR_4_8;
+ break;
+ case 4:
+ sratio = EXYNOS3250_DEC_SCALE_FACTOR_2_8;
+ break;
+ case 8:
+ sratio = EXYNOS3250_DEC_SCALE_FACTOR_1_8;
+ break;
+ }
+
+ writel(sratio & EXYNOS3250_DEC_SCALE_FACTOR_MASK,
+ regs + EXYNOS3250_DEC_SCALING_RATIO);
+}
+
+void exynos3250_jpeg_set_timer(void __iomem *regs, unsigned int time_value)
+{
+ time_value &= EXYNOS3250_TIMER_INIT_MASK;
+
+ writel(EXYNOS3250_TIMER_INT_STAT | time_value,
+ regs + EXYNOS3250_TIMER_SE);
+}
+
+unsigned int exynos3250_jpeg_get_timer_status(void __iomem *regs)
+{
+ return readl(regs + EXYNOS3250_TIMER_ST);
+}
+
+void exynos3250_jpeg_clear_timer_status(void __iomem *regs)
+{
+ writel(EXYNOS3250_TIMER_INT_STAT, regs + EXYNOS3250_TIMER_ST);
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h
new file mode 100644
index 000000000..b6e3be8b5
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h
@@ -0,0 +1,60 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef JPEG_HW_EXYNOS3250_H_
+#define JPEG_HW_EXYNOS3250_H_
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+
+#include "jpeg-regs.h"
+
+void exynos3250_jpeg_reset(void __iomem *regs);
+void exynos3250_jpeg_poweron(void __iomem *regs);
+void exynos3250_jpeg_set_dma_num(void __iomem *regs);
+void exynos3250_jpeg_clk_set(void __iomem *base);
+void exynos3250_jpeg_input_raw_fmt(void __iomem *regs, unsigned int fmt);
+void exynos3250_jpeg_output_raw_fmt(void __iomem *regs, unsigned int fmt);
+void exynos3250_jpeg_set_y16(void __iomem *regs, bool y16);
+void exynos3250_jpeg_proc_mode(void __iomem *regs, unsigned int mode);
+void exynos3250_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode);
+unsigned int exynos3250_jpeg_get_subsampling_mode(void __iomem *regs);
+void exynos3250_jpeg_dri(void __iomem *regs, unsigned int dri);
+void exynos3250_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n);
+void exynos3250_jpeg_htbl_ac(void __iomem *regs, unsigned int t);
+void exynos3250_jpeg_htbl_dc(void __iomem *regs, unsigned int t);
+void exynos3250_jpeg_set_y(void __iomem *regs, unsigned int y);
+void exynos3250_jpeg_set_x(void __iomem *regs, unsigned int x);
+void exynos3250_jpeg_interrupts_enable(void __iomem *regs);
+void exynos3250_jpeg_enc_stream_bound(void __iomem *regs, unsigned int size);
+void exynos3250_jpeg_outform_raw(void __iomem *regs, unsigned long format);
+void exynos3250_jpeg_jpgadr(void __iomem *regs, unsigned int addr);
+void exynos3250_jpeg_imgadr(void __iomem *regs, struct s5p_jpeg_addr *img_addr);
+void exynos3250_jpeg_stride(void __iomem *regs, unsigned int img_fmt,
+ unsigned int width);
+void exynos3250_jpeg_offset(void __iomem *regs, unsigned int x_offset,
+ unsigned int y_offset);
+void exynos3250_jpeg_coef(void __iomem *base, unsigned int mode);
+void exynos3250_jpeg_start(void __iomem *regs);
+void exynos3250_jpeg_rstart(void __iomem *regs);
+unsigned int exynos3250_jpeg_get_int_status(void __iomem *regs);
+void exynos3250_jpeg_clear_int_status(void __iomem *regs,
+ unsigned int value);
+unsigned int exynos3250_jpeg_operating(void __iomem *regs);
+unsigned int exynos3250_jpeg_compressed_size(void __iomem *regs);
+void exynos3250_jpeg_dec_stream_size(void __iomem *regs, unsigned int size);
+void exynos3250_jpeg_dec_scaling_ratio(void __iomem *regs, unsigned int sratio);
+void exynos3250_jpeg_set_timer(void __iomem *regs, unsigned int time_value);
+unsigned int exynos3250_jpeg_get_timer_status(void __iomem *regs);
+void exynos3250_jpeg_set_timer_status(void __iomem *regs);
+void exynos3250_jpeg_clear_timer_status(void __iomem *regs);
+
+#endif /* JPEG_HW_EXYNOS3250_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
new file mode 100644
index 000000000..c72789bae
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -0,0 +1,324 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Register interface file for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "jpeg-core.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-regs.h"
+
+void exynos4_jpeg_sw_reset(void __iomem *base)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+ writel(reg & ~(EXYNOS4_DEC_MODE | EXYNOS4_ENC_MODE),
+ base + EXYNOS4_JPEG_CNTL_REG);
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+ writel(reg & ~EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+
+ udelay(100);
+
+ writel(reg | EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+ /* set exynos4_jpeg mod register */
+ if (mode == S5P_JPEG_DECODE) {
+ writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+ EXYNOS4_DEC_MODE,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ } else if (mode == S5P_JPEG_ENCODE) {/* encode */
+ writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+ EXYNOS4_ENC_MODE,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ } else { /* disable both */
+ writel(reg & EXYNOS4_ENC_DEC_MODE_MASK,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ }
+}
+
+void __exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt,
+ unsigned int version)
+{
+ unsigned int reg;
+ unsigned int exynos4_swap_chroma_cbcr;
+ unsigned int exynos4_swap_chroma_crcb;
+
+ if (version == SJPEG_EXYNOS4) {
+ exynos4_swap_chroma_cbcr = EXYNOS4_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_crcb = EXYNOS4_SWAP_CHROMA_CRCB;
+ } else {
+ exynos4_swap_chroma_cbcr = EXYNOS5433_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_crcb = EXYNOS5433_SWAP_CHROMA_CRCB;
+ }
+
+ reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+ EXYNOS4_ENC_IN_FMT_MASK; /* clear except enc format */
+
+ switch (img_fmt) {
+ case V4L2_PIX_FMT_GREY:
+ reg = reg | EXYNOS4_ENC_GRAY_IMG | EXYNOS4_GRAY_IMG_IP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ reg = reg | EXYNOS4_ENC_RGB_IMG |
+ EXYNOS4_RGB_IP_RGB_32BIT_IMG;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ reg = reg | EXYNOS4_ENC_RGB_IMG |
+ EXYNOS4_RGB_IP_RGB_16BIT_IMG;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+ EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+ exynos4_swap_chroma_cbcr;
+ break;
+ case V4L2_PIX_FMT_NV42:
+ reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+ EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+ exynos4_swap_chroma_crcb;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+ exynos4_swap_chroma_cbcr;
+ break;
+
+ case V4L2_PIX_FMT_YVYU:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+ exynos4_swap_chroma_crcb;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+ exynos4_swap_chroma_cbcr;
+ break;
+ case V4L2_PIX_FMT_NV61:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+ exynos4_swap_chroma_crcb;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+ exynos4_swap_chroma_cbcr;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+ exynos4_swap_chroma_crcb;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_3P_IMG |
+ exynos4_swap_chroma_cbcr;
+ break;
+ default:
+ break;
+
+ }
+
+ writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void __exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt,
+ unsigned int version)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+ ~(version == SJPEG_EXYNOS4 ? EXYNOS4_ENC_FMT_MASK :
+ EXYNOS5433_ENC_FMT_MASK); /* clear enc format */
+
+ switch (out_fmt) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+ reg = reg | EXYNOS4_ENC_FMT_GRAY;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_444:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_444;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_422;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_420;
+ break;
+
+ default:
+ break;
+ }
+
+ writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version)
+{
+ unsigned int reg;
+
+ if (version == SJPEG_EXYNOS4) {
+ reg = readl(base + EXYNOS4_INT_EN_REG) & ~EXYNOS4_INT_EN_MASK;
+ writel(reg | EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+ } else {
+ reg = readl(base + EXYNOS4_INT_EN_REG) &
+ ~EXYNOS5433_INT_EN_MASK;
+ writel(reg | EXYNOS5433_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+ }
+}
+
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
+{
+ return readl(base + EXYNOS4_INT_STATUS_REG);
+}
+
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
+{
+ return readl(base + EXYNOS4_FIFO_STATUS_REG);
+}
+
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~EXYNOS4_HUF_TBL_EN;
+
+ if (value == 1)
+ writel(reg | EXYNOS4_HUF_TBL_EN,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ else
+ writel(reg & ~EXYNOS4_HUF_TBL_EN,
+ base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~(EXYNOS4_SYS_INT_EN);
+
+ if (value == 1)
+ writel(reg | EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+ else
+ writel(reg & ~EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+ unsigned int address)
+{
+ writel(address, base + EXYNOS4_OUT_MEM_BASE_REG);
+}
+
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+ unsigned int x_value, unsigned int y_value)
+{
+ writel(0x0, base + EXYNOS4_JPEG_IMG_SIZE_REG); /* clear */
+ writel(EXYNOS4_X_SIZE(x_value) | EXYNOS4_Y_SIZE(y_value),
+ base + EXYNOS4_JPEG_IMG_SIZE_REG);
+}
+
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+ struct s5p_jpeg_addr *exynos4_jpeg_addr)
+{
+ writel(exynos4_jpeg_addr->y, base + EXYNOS4_IMG_BA_PLANE_1_REG);
+ writel(exynos4_jpeg_addr->cb, base + EXYNOS4_IMG_BA_PLANE_2_REG);
+ writel(exynos4_jpeg_addr->cr, base + EXYNOS4_IMG_BA_PLANE_3_REG);
+}
+
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+ enum exynos4_jpeg_img_quality_level level)
+{
+ unsigned int reg;
+
+ reg = EXYNOS4_Q_TBL_COMP1_0 | EXYNOS4_Q_TBL_COMP2_1 |
+ EXYNOS4_Q_TBL_COMP3_1 |
+ EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 |
+ EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 |
+ EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1;
+
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_set_dec_components(void __iomem *base, int n)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_TBL_SEL_REG);
+
+ reg |= EXYNOS4_NF(n);
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_select_dec_q_tbl(void __iomem *base, char c, char x)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_TBL_SEL_REG);
+
+ reg |= EXYNOS4_Q_TBL_COMP(c, x);
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_select_dec_h_tbl(void __iomem *base, char c, char x)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_TBL_SEL_REG);
+
+ reg |= EXYNOS4_HUFF_TBL_COMP(c, x);
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
+{
+ if (fmt == V4L2_PIX_FMT_GREY)
+ writel(0xd2, base + EXYNOS4_HUFF_CNT_REG);
+ else
+ writel(0x1a2, base + EXYNOS4_HUFF_CNT_REG);
+}
+
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
+{
+ return readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
+}
+
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
+{
+ writel(size, base + EXYNOS4_BITSTREAM_SIZE_REG);
+}
+
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height)
+{
+ *width = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) &
+ EXYNOS4_DECODED_SIZE_MASK);
+ *height = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) >> 16) &
+ EXYNOS4_DECODED_SIZE_MASK;
+}
+
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base)
+{
+ return readl(base + EXYNOS4_DECODE_IMG_FMT_REG) &
+ EXYNOS4_JPEG_DECODED_IMG_FMT_MASK;
+}
+
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size)
+{
+ writel(size, base + EXYNOS4_INT_TIMER_COUNT_REG);
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
new file mode 100644
index 000000000..cf6ec055d
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Header file of the register interface for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef JPEG_HW_EXYNOS4_H_
+#define JPEG_HW_EXYNOS4_H_
+
+void exynos4_jpeg_sw_reset(void __iomem *base);
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode);
+void __exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt,
+ unsigned int version);
+void __exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt,
+ unsigned int version);
+void exynos4_jpeg_set_enc_tbl(void __iomem *base);
+void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version);
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base);
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+ unsigned int address);
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+ unsigned int x_value, unsigned int y_value);
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+ struct s5p_jpeg_addr *jpeg_addr);
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+ enum exynos4_jpeg_img_quality_level level);
+void exynos4_jpeg_set_dec_components(void __iomem *base, int n);
+void exynos4_jpeg_select_dec_q_tbl(void __iomem *base, char c, char x);
+void exynos4_jpeg_select_dec_h_tbl(void __iomem *base, char c, char x);
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt);
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size);
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base);
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height);
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base);
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base);
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size);
+
+#endif /* JPEG_HW_EXYNOS4_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
new file mode 100644
index 000000000..b5f20e722
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -0,0 +1,309 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+
+#include "jpeg-core.h"
+#include "jpeg-regs.h"
+#include "jpeg-hw-s5p.h"
+
+void s5p_jpeg_reset(void __iomem *regs)
+{
+ unsigned long reg;
+
+ writel(1, regs + S5P_JPG_SW_RESET);
+ reg = readl(regs + S5P_JPG_SW_RESET);
+ /* no other way but polling for when JPEG IP becomes operational */
+ while (reg != 0) {
+ cpu_relax();
+ reg = readl(regs + S5P_JPG_SW_RESET);
+ }
+}
+
+void s5p_jpeg_poweron(void __iomem *regs)
+{
+ writel(S5P_POWER_ON, regs + S5P_JPGCLKCON);
+}
+
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
+{
+ unsigned long reg, m;
+
+ m = S5P_MOD_SEL_565;
+ if (mode == S5P_JPEG_RAW_IN_565)
+ m = S5P_MOD_SEL_565;
+ else if (mode == S5P_JPEG_RAW_IN_422)
+ m = S5P_MOD_SEL_422;
+
+ reg = readl(regs + S5P_JPGCMOD);
+ reg &= ~S5P_MOD_SEL_MASK;
+ reg |= m;
+ writel(reg, regs + S5P_JPGCMOD);
+}
+
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode)
+{
+ unsigned long reg, m;
+
+	if (mode == S5P_JPEG_ENCODE)
+		m = S5P_PROC_MODE_COMPR;
+	else
+		m = S5P_PROC_MODE_DECOMPR;
+ reg = readl(regs + S5P_JPGMOD);
+ reg &= ~S5P_PROC_MODE_MASK;
+ reg |= m;
+ writel(reg, regs + S5P_JPGMOD);
+}
+
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
+{
+ unsigned long reg, m;
+
+ if (mode == V4L2_JPEG_CHROMA_SUBSAMPLING_420)
+ m = S5P_SUBSAMPLING_MODE_420;
+ else
+ m = S5P_SUBSAMPLING_MODE_422;
+
+ reg = readl(regs + S5P_JPGMOD);
+ reg &= ~S5P_SUBSAMPLING_MODE_MASK;
+ reg |= m;
+ writel(reg, regs + S5P_JPGMOD);
+}
+
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs)
+{
+ return readl(regs + S5P_JPGMOD) & S5P_SUBSAMPLING_MODE_MASK;
+}
+
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGDRI_U);
+ reg &= ~0xff;
+ reg |= (dri >> 8) & 0xff;
+ writel(reg, regs + S5P_JPGDRI_U);
+
+ reg = readl(regs + S5P_JPGDRI_L);
+ reg &= ~0xff;
+ reg |= dri & 0xff;
+ writel(reg, regs + S5P_JPGDRI_L);
+}
+
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_QTBL);
+ reg &= ~S5P_QT_NUMt_MASK(t);
+ reg |= (n << S5P_QT_NUMt_SHIFT(t)) & S5P_QT_NUMt_MASK(t);
+ writel(reg, regs + S5P_JPG_QTBL);
+}
+
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_HTBL);
+ reg &= ~S5P_HT_NUMt_AC_MASK(t);
+ /* this driver uses table 0 for all color components */
+ reg |= (0 << S5P_HT_NUMt_AC_SHIFT(t)) & S5P_HT_NUMt_AC_MASK(t);
+ writel(reg, regs + S5P_JPG_HTBL);
+}
+
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_HTBL);
+ reg &= ~S5P_HT_NUMt_DC_MASK(t);
+ /* this driver uses table 0 for all color components */
+ reg |= (0 << S5P_HT_NUMt_DC_SHIFT(t)) & S5P_HT_NUMt_DC_MASK(t);
+ writel(reg, regs + S5P_JPG_HTBL);
+}
+
+void s5p_jpeg_y(void __iomem *regs, unsigned int y)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGY_U);
+ reg &= ~0xff;
+ reg |= (y >> 8) & 0xff;
+ writel(reg, regs + S5P_JPGY_U);
+
+ reg = readl(regs + S5P_JPGY_L);
+ reg &= ~0xff;
+ reg |= y & 0xff;
+ writel(reg, regs + S5P_JPGY_L);
+}
+
+void s5p_jpeg_x(void __iomem *regs, unsigned int x)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGX_U);
+ reg &= ~0xff;
+ reg |= (x >> 8) & 0xff;
+ writel(reg, regs + S5P_JPGX_U);
+
+ reg = readl(regs + S5P_JPGX_L);
+ reg &= ~0xff;
+ reg |= x & 0xff;
+ writel(reg, regs + S5P_JPGX_L);
+}
+
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGINTSE);
+ reg &= ~S5P_RSTm_INT_EN_MASK;
+ if (enable)
+ reg |= S5P_RSTm_INT_EN;
+ writel(reg, regs + S5P_JPGINTSE);
+}
+
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGINTSE);
+ reg &= ~S5P_DATA_NUM_INT_EN_MASK;
+ if (enable)
+ reg |= S5P_DATA_NUM_INT_EN;
+ writel(reg, regs + S5P_JPGINTSE);
+}
+
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGINTSE);
+ reg &= ~S5P_FINAL_MCU_NUM_INT_EN_MASK;
+ if (enbl)
+ reg |= S5P_FINAL_MCU_NUM_INT_EN;
+ writel(reg, regs + S5P_JPGINTSE);
+}
+
+int s5p_jpeg_timer_stat(void __iomem *regs)
+{
+ return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
+ >> S5P_TIMER_INT_STAT_SHIFT);
+}
+
+void s5p_jpeg_clear_timer_stat(void __iomem *regs)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_TIMER_SE);
+ reg &= ~S5P_TIMER_INT_STAT_MASK;
+ writel(reg, regs + S5P_JPG_TIMER_SE);
+}
+
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
+ reg &= ~S5P_ENC_STREAM_BOUND_MASK;
+ reg |= S5P_ENC_STREAM_INT_EN;
+ reg |= size & S5P_ENC_STREAM_BOUND_MASK;
+ writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
+}
+
+int s5p_jpeg_enc_stream_stat(void __iomem *regs)
+{
+ return (int)(readl(regs + S5P_JPG_ENC_STREAM_INTST) &
+ S5P_ENC_STREAM_INT_STAT_MASK);
+}
+
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
+ reg &= ~S5P_ENC_STREAM_INT_MASK;
+ writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
+}
+
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format)
+{
+ unsigned long reg, f;
+
+ f = S5P_DEC_OUT_FORMAT_422;
+ if (format == S5P_JPEG_RAW_OUT_422)
+ f = S5P_DEC_OUT_FORMAT_422;
+ else if (format == S5P_JPEG_RAW_OUT_420)
+ f = S5P_DEC_OUT_FORMAT_420;
+ reg = readl(regs + S5P_JPG_OUTFORM);
+ reg &= ~S5P_DEC_OUT_FORMAT_MASK;
+ reg |= f;
+ writel(reg, regs + S5P_JPG_OUTFORM);
+}
+
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr)
+{
+ writel(addr, regs + S5P_JPG_JPGADR);
+}
+
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr)
+{
+ writel(addr, regs + S5P_JPG_IMGADR);
+}
+
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
+ unsigned int j, unsigned int coef)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_COEF(i));
+ reg &= ~S5P_COEFn_MASK(j);
+ reg |= (coef << S5P_COEFn_SHIFT(j)) & S5P_COEFn_MASK(j);
+ writel(reg, regs + S5P_JPG_COEF(i));
+}
+
+void s5p_jpeg_start(void __iomem *regs)
+{
+ writel(1, regs + S5P_JSTART);
+}
+
+int s5p_jpeg_result_stat_ok(void __iomem *regs)
+{
+ return (int)((readl(regs + S5P_JPGINTST) & S5P_RESULT_STAT_MASK)
+ >> S5P_RESULT_STAT_SHIFT);
+}
+
+int s5p_jpeg_stream_stat_ok(void __iomem *regs)
+{
+ return !(int)((readl(regs + S5P_JPGINTST) & S5P_STREAM_STAT_MASK)
+ >> S5P_STREAM_STAT_SHIFT);
+}
+
+void s5p_jpeg_clear_int(void __iomem *regs)
+{
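+	/* acknowledge the interrupt: read back status and release the IP */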
+ readl(regs + S5P_JPGINTST);
+ writel(S5P_INT_RELEASE, regs + S5P_JPGCOM);
+ readl(regs + S5P_JPGOPR);
+}
+
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs)
+{
+ unsigned long jpeg_size = 0;
+
+ jpeg_size |= (readl(regs + S5P_JPGCNT_U) & 0xff) << 16;
+ jpeg_size |= (readl(regs + S5P_JPGCNT_M) & 0xff) << 8;
+ jpeg_size |= (readl(regs + S5P_JPGCNT_L) & 0xff);
+
+ return (unsigned int)jpeg_size;
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
new file mode 100644
index 000000000..f208fa3ed
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
@@ -0,0 +1,60 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef JPEG_HW_S5P_H_
+#define JPEG_HW_S5P_H_
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+
+#include "jpeg-regs.h"
+
+#define S5P_JPEG_MIN_WIDTH 32
+#define S5P_JPEG_MIN_HEIGHT 32
+#define S5P_JPEG_MAX_WIDTH 8192
+#define S5P_JPEG_MAX_HEIGHT 8192
+#define S5P_JPEG_RAW_IN_565 0
+#define S5P_JPEG_RAW_IN_422 1
+#define S5P_JPEG_RAW_OUT_422 0
+#define S5P_JPEG_RAW_OUT_420 1
+
+void s5p_jpeg_reset(void __iomem *regs);
+void s5p_jpeg_poweron(void __iomem *regs);
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode);
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs);
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri);
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n);
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t);
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t);
+void s5p_jpeg_y(void __iomem *regs, unsigned int y);
+void s5p_jpeg_x(void __iomem *regs, unsigned int x);
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl);
+int s5p_jpeg_timer_stat(void __iomem *regs);
+void s5p_jpeg_clear_timer_stat(void __iomem *regs);
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size);
+int s5p_jpeg_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format);
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
+ unsigned int j, unsigned int coef);
+void s5p_jpeg_start(void __iomem *regs);
+int s5p_jpeg_result_stat_ok(void __iomem *regs);
+int s5p_jpeg_stream_stat_ok(void __iomem *regs);
+void s5p_jpeg_clear_int(void __iomem *regs);
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs);
+
+#endif /* JPEG_HW_S5P_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
new file mode 100644
index 000000000..df790b101
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -0,0 +1,649 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+ *
+ * Register definition file for Samsung JPEG codec driver
+ *
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef JPEG_REGS_H_
+#define JPEG_REGS_H_
+
+/* Register and bit definitions for S5PC210 */
+
+/* JPEG mode register */
+#define S5P_JPGMOD 0x00
+#define S5P_PROC_MODE_MASK (0x1 << 3)
+#define S5P_PROC_MODE_DECOMPR (0x1 << 3)
+#define S5P_PROC_MODE_COMPR (0x0 << 3)
+#define S5P_SUBSAMPLING_MODE_MASK 0x7
+#define S5P_SUBSAMPLING_MODE_444 (0x0 << 0)
+#define S5P_SUBSAMPLING_MODE_422 (0x1 << 0)
+#define S5P_SUBSAMPLING_MODE_420 (0x2 << 0)
+#define S5P_SUBSAMPLING_MODE_GRAY (0x3 << 0)
+
+/* JPEG operation status register */
+#define S5P_JPGOPR 0x04
+
+/* Quantization tables */
+#define S5P_JPG_QTBL 0x08
+#define S5P_QT_NUMt_SHIFT(t) (((t) - 1) << 1)
+#define S5P_QT_NUMt_MASK(t) (0x3 << S5P_QT_NUMt_SHIFT(t))
+
+/* Huffman tables */
+#define S5P_JPG_HTBL 0x0c
+#define S5P_HT_NUMt_AC_SHIFT(t) (((t) << 1) - 1)
+#define S5P_HT_NUMt_AC_MASK(t) (0x1 << S5P_HT_NUMt_AC_SHIFT(t))
+
+#define S5P_HT_NUMt_DC_SHIFT(t) (((t) - 1) << 1)
+#define S5P_HT_NUMt_DC_MASK(t) (0x1 << S5P_HT_NUMt_DC_SHIFT(t))
+
+/* JPEG restart interval register upper byte */
+#define S5P_JPGDRI_U 0x10
+
+/* JPEG restart interval register lower byte */
+#define S5P_JPGDRI_L 0x14
+
+/* JPEG vertical resolution register upper byte */
+#define S5P_JPGY_U 0x18
+
+/* JPEG vertical resolution register lower byte */
+#define S5P_JPGY_L 0x1c
+
+/* JPEG horizontal resolution register upper byte */
+#define S5P_JPGX_U 0x20
+
+/* JPEG horizontal resolution register lower byte */
+#define S5P_JPGX_L 0x24
+
+/* JPEG byte count register upper byte */
+#define S5P_JPGCNT_U 0x28
+
+/* JPEG byte count register middle byte */
+#define S5P_JPGCNT_M 0x2c
+
+/* JPEG byte count register lower byte */
+#define S5P_JPGCNT_L 0x30
+
+/* JPEG interrupt setting register */
+#define S5P_JPGINTSE 0x34
+#define S5P_RSTm_INT_EN_MASK (0x1 << 7)
+#define S5P_RSTm_INT_EN (0x1 << 7)
+#define S5P_DATA_NUM_INT_EN_MASK (0x1 << 6)
+#define S5P_DATA_NUM_INT_EN (0x1 << 6)
+#define S5P_FINAL_MCU_NUM_INT_EN_MASK (0x1 << 5)
+#define S5P_FINAL_MCU_NUM_INT_EN (0x1 << 5)
+
+/* JPEG interrupt status register */
+#define S5P_JPGINTST 0x38
+#define S5P_RESULT_STAT_SHIFT 6
+#define S5P_RESULT_STAT_MASK (0x1 << S5P_RESULT_STAT_SHIFT)
+#define S5P_STREAM_STAT_SHIFT 5
+#define S5P_STREAM_STAT_MASK (0x1 << S5P_STREAM_STAT_SHIFT)
+
+/* JPEG command register */
+#define S5P_JPGCOM 0x4c
+#define S5P_INT_RELEASE (0x1 << 2)
+
+/* Raw image data r/w address register */
+#define S5P_JPG_IMGADR 0x50
+
+/* JPEG file r/w address register */
+#define S5P_JPG_JPGADR 0x58
+
+/* Coefficient for RGB-to-YCbCr converter register */
+#define S5P_JPG_COEF(n) (0x5c + (((n) - 1) << 2))
+#define S5P_COEFn_SHIFT(j) ((3 - (j)) << 3)
+#define S5P_COEFn_MASK(j) (0xff << S5P_COEFn_SHIFT(j))
+
+/* JPEG color mode register */
+#define S5P_JPGCMOD 0x68
+#define S5P_MOD_SEL_MASK (0x7 << 5)
+#define S5P_MOD_SEL_422 (0x1 << 5)
+#define S5P_MOD_SEL_565 (0x2 << 5)
+#define S5P_MODE_Y16_MASK (0x1 << 1)
+#define S5P_MODE_Y16 (0x1 << 1)
+
+/* JPEG clock control register */
+#define S5P_JPGCLKCON 0x6c
+#define S5P_CLK_DOWN_READY (0x1 << 1)
+#define S5P_POWER_ON (0x1 << 0)
+
+/* JPEG start register */
+#define S5P_JSTART 0x70
+
+/* JPEG SW reset register */
+#define S5P_JPG_SW_RESET 0x78
+
+/* JPEG timer setting register */
+#define S5P_JPG_TIMER_SE 0x7c
+#define S5P_TIMER_INT_EN_MASK (0x1 << 31)
+#define S5P_TIMER_INT_EN (0x1 << 31)
+#define S5P_TIMER_INIT_MASK 0x7fffffff
+
+/* JPEG timer status register */
+#define S5P_JPG_TIMER_ST 0x80
+#define S5P_TIMER_INT_STAT_SHIFT 31
+#define S5P_TIMER_INT_STAT_MASK (0x1 << S5P_TIMER_INT_STAT_SHIFT)
+#define S5P_TIMER_CNT_SHIFT 0
+#define S5P_TIMER_CNT_MASK 0x7fffffff
+
+/* JPEG decompression output format register */
+#define S5P_JPG_OUTFORM 0x88
+#define S5P_DEC_OUT_FORMAT_MASK (0x1 << 0)
+#define S5P_DEC_OUT_FORMAT_422 (0x0 << 0)
+#define S5P_DEC_OUT_FORMAT_420 (0x1 << 0)
+
+/* JPEG version register */
+#define S5P_JPG_VERSION 0x8c
+
+/* JPEG compressed stream size interrupt setting register */
+#define S5P_JPG_ENC_STREAM_INTSE 0x98
+#define S5P_ENC_STREAM_INT_MASK (0x1 << 24)
+#define S5P_ENC_STREAM_INT_EN (0x1 << 24)
+#define S5P_ENC_STREAM_BOUND_MASK 0xffffff
+
+/* JPEG compressed stream size interrupt status register */
+#define S5P_JPG_ENC_STREAM_INTST 0x9c
+#define S5P_ENC_STREAM_INT_STAT_MASK 0x1
+
+/* JPEG quantizer table register */
+#define S5P_JPG_QTBL_CONTENT(n) (0x400 + (n) * 0x100)
+
+/* JPEG DC Huffman table register */
+#define S5P_JPG_HDCTBL(n) (0x800 + (n) * 0x400)
+
+/* JPEG DC Huffman table register */
+#define S5P_JPG_HDCTBLG(n) (0x840 + (n) * 0x400)
+
+/* JPEG AC Huffman table register */
+#define S5P_JPG_HACTBL(n) (0x880 + (n) * 0x400)
+
+/* JPEG AC Huffman table register */
+#define S5P_JPG_HACTBLG(n) (0x8c0 + (n) * 0x400)
+
+
+/* Register and bit definitions for Exynos 4x12 */
+
+/* JPEG Codec Control Registers */
+#define EXYNOS4_JPEG_CNTL_REG 0x00
+#define EXYNOS4_INT_EN_REG 0x04
+#define EXYNOS4_INT_TIMER_COUNT_REG 0x08
+#define EXYNOS4_INT_STATUS_REG 0x0c
+#define EXYNOS4_OUT_MEM_BASE_REG 0x10
+#define EXYNOS4_JPEG_IMG_SIZE_REG 0x14
+#define EXYNOS4_IMG_BA_PLANE_1_REG 0x18
+#define EXYNOS4_IMG_SO_PLANE_1_REG 0x1c
+#define EXYNOS4_IMG_PO_PLANE_1_REG 0x20
+#define EXYNOS4_IMG_BA_PLANE_2_REG 0x24
+#define EXYNOS4_IMG_SO_PLANE_2_REG 0x28
+#define EXYNOS4_IMG_PO_PLANE_2_REG 0x2c
+#define EXYNOS4_IMG_BA_PLANE_3_REG 0x30
+#define EXYNOS4_IMG_SO_PLANE_3_REG 0x34
+#define EXYNOS4_IMG_PO_PLANE_3_REG 0x38
+
+#define EXYNOS4_TBL_SEL_REG 0x3c
+
+#define EXYNOS4_IMG_FMT_REG 0x40
+
+#define EXYNOS4_BITSTREAM_SIZE_REG 0x44
+#define EXYNOS4_PADDING_REG 0x48
+#define EXYNOS4_HUFF_CNT_REG 0x4c
+#define EXYNOS4_FIFO_STATUS_REG 0x50
+#define EXYNOS4_DECODE_XY_SIZE_REG 0x54
+#define EXYNOS4_DECODE_IMG_FMT_REG 0x58
+
+#define EXYNOS4_QUAN_TBL_ENTRY_REG 0x100
+#define EXYNOS4_HUFF_TBL_ENTRY_REG 0x200
+
+
+/****************************************************************/
+/* Bit definition part */
+/****************************************************************/
+
+/* JPEG CNTL Register bit */
+#define EXYNOS4_ENC_DEC_MODE_MASK (0xfffffffc << 0)
+#define EXYNOS4_DEC_MODE (1 << 0)
+#define EXYNOS4_ENC_MODE (1 << 1)
+#define EXYNOS4_AUTO_RST_MARKER (1 << 2)
+#define EXYNOS4_RST_INTERVAL_SHIFT 3
+#define EXYNOS4_RST_INTERVAL(x) (((x) & 0xffff) \
+ << EXYNOS4_RST_INTERVAL_SHIFT)
+#define EXYNOS4_HUF_TBL_EN (1 << 19)
+#define EXYNOS4_HOR_SCALING_SHIFT 20
+#define EXYNOS4_HOR_SCALING_MASK (3 << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_HOR_SCALING(x) (((x) & 0x3) \
+ << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING_SHIFT 22
+#define EXYNOS4_VER_SCALING_MASK (3 << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING(x) (((x) & 0x3) \
+ << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_PADDING (1 << 27)
+#define EXYNOS4_SYS_INT_EN (1 << 28)
+#define EXYNOS4_SOFT_RESET_HI (1 << 29)
+
+/* JPEG INT Register bit */
+#define EXYNOS4_INT_EN_MASK (0x1f << 0)
+#define EXYNOS5433_INT_EN_MASK (0x1ff << 0)
+#define EXYNOS4_PROT_ERR_INT_EN (1 << 0)
+#define EXYNOS4_IMG_COMPLETION_INT_EN (1 << 1)
+#define EXYNOS4_DEC_INVALID_FORMAT_EN (1 << 2)
+#define EXYNOS4_MULTI_SCAN_ERROR_EN (1 << 3)
+#define EXYNOS4_FRAME_ERR_EN (1 << 4)
+#define EXYNOS4_INT_EN_ALL (0x1f << 0)
+#define EXYNOS5433_INT_EN_ALL (0x1b6 << 0)
+
+#define EXYNOS4_MOD_REG_PROC_ENC (0 << 3)
+#define EXYNOS4_MOD_REG_PROC_DEC (1 << 3)
+
+#define EXYNOS4_MOD_REG_SUBSAMPLE_444 (0 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_422 (1 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_420 (2 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_GRAY (3 << 0)
+
+
+/* JPEG IMAGE SIZE Register bit */
+#define EXYNOS4_X_SIZE_SHIFT 0
+#define EXYNOS4_X_SIZE_MASK (0xffff << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_X_SIZE(x) (((x) & 0xffff) << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE_SHIFT 16
+#define EXYNOS4_Y_SIZE_MASK (0xffff << EXYNOS4_Y_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE(x) (((x) & 0xffff) << EXYNOS4_Y_SIZE_SHIFT)
+
+/* JPEG IMAGE FORMAT Register bit */
+#define EXYNOS4_ENC_IN_FMT_MASK 0xffff0000
+#define EXYNOS4_ENC_GRAY_IMG (0 << 0)
+#define EXYNOS4_ENC_RGB_IMG (1 << 0)
+#define EXYNOS4_ENC_YUV_444_IMG (2 << 0)
+#define EXYNOS4_ENC_YUV_422_IMG (3 << 0)
+#define EXYNOS4_ENC_YUV_440_IMG (4 << 0)
+
+#define EXYNOS4_DEC_GRAY_IMG (0 << 0)
+#define EXYNOS4_DEC_RGB_IMG (1 << 0)
+#define EXYNOS4_DEC_YUV_444_IMG (2 << 0)
+#define EXYNOS4_DEC_YUV_422_IMG (3 << 0)
+#define EXYNOS4_DEC_YUV_420_IMG (4 << 0)
+
+#define EXYNOS4_GRAY_IMG_IP_SHIFT 3
+#define EXYNOS4_GRAY_IMG_IP_MASK (7 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+#define EXYNOS4_GRAY_IMG_IP (4 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+
+#define EXYNOS4_RGB_IP_SHIFT 6
+#define EXYNOS4_RGB_IP_MASK (7 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_16BIT_IMG (4 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_32BIT_IMG (5 << EXYNOS4_RGB_IP_SHIFT)
+
+#define EXYNOS4_YUV_444_IP_SHIFT 9
+#define EXYNOS4_YUV_444_IP_MASK (7 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_2P_IMG (4 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_3P_IMG (5 << EXYNOS4_YUV_444_IP_SHIFT)
+
+#define EXYNOS4_YUV_422_IP_SHIFT 12
+#define EXYNOS4_YUV_422_IP_MASK (7 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_1P_IMG (4 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_2P_IMG (5 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_3P_IMG (6 << EXYNOS4_YUV_422_IP_SHIFT)
+
+#define EXYNOS4_YUV_420_IP_SHIFT 15
+#define EXYNOS4_YUV_420_IP_MASK (7 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_2P_IMG (4 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_3P_IMG (5 << EXYNOS4_YUV_420_IP_SHIFT)
+
+#define EXYNOS4_ENC_FMT_SHIFT 24
+#define EXYNOS4_ENC_FMT_MASK (3 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS5433_ENC_FMT_MASK (7 << EXYNOS4_ENC_FMT_SHIFT)
+
+#define EXYNOS4_ENC_FMT_GRAY (0 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_444 (1 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_422 (2 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_420 (3 << EXYNOS4_ENC_FMT_SHIFT)
+
+#define EXYNOS4_JPEG_DECODED_IMG_FMT_MASK 0x03
+
+#define EXYNOS4_SWAP_CHROMA_CRCB (1 << 26)
+#define EXYNOS4_SWAP_CHROMA_CBCR (0 << 26)
+#define EXYNOS5433_SWAP_CHROMA_CRCB (1 << 27)
+#define EXYNOS5433_SWAP_CHROMA_CBCR (0 << 27)
+
+/* JPEG HUFF count Register bit */
+#define EXYNOS4_HUFF_COUNT_MASK 0xffff
+
+/* JPEG Decoded_img_x_y_size Register bit */
+#define EXYNOS4_DECODED_SIZE_MASK 0x0000ffff
+
+/* JPEG Decoded image format Register bit */
+#define EXYNOS4_DECODED_IMG_FMT_MASK 0x3
+
+/* JPEG TBL SEL Register bit */
+#define EXYNOS4_Q_TBL_COMP(c, n) ((n) << (((c) - 1) << 1))
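+/*
+ * Each component c (1..3) gets a 2-bit quantization table selector at bit
+ * position (c - 1) * 2, so e.g. EXYNOS4_Q_TBL_COMP(2, 1) == (1 << 2) picks
+ * table 1 for component 2.  The named variants below are the expansions of
+ * this macro for components 1..3 and tables 0..3.
+ */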
+
+#define EXYNOS4_Q_TBL_COMP1_0 EXYNOS4_Q_TBL_COMP(1, 0)
+#define EXYNOS4_Q_TBL_COMP1_1 EXYNOS4_Q_TBL_COMP(1, 1)
+#define EXYNOS4_Q_TBL_COMP1_2 EXYNOS4_Q_TBL_COMP(1, 2)
+#define EXYNOS4_Q_TBL_COMP1_3 EXYNOS4_Q_TBL_COMP(1, 3)
+
+#define EXYNOS4_Q_TBL_COMP2_0 EXYNOS4_Q_TBL_COMP(2, 0)
+#define EXYNOS4_Q_TBL_COMP2_1 EXYNOS4_Q_TBL_COMP(2, 1)
+#define EXYNOS4_Q_TBL_COMP2_2 EXYNOS4_Q_TBL_COMP(2, 2)
+#define EXYNOS4_Q_TBL_COMP2_3 EXYNOS4_Q_TBL_COMP(2, 3)
+
+#define EXYNOS4_Q_TBL_COMP3_0 EXYNOS4_Q_TBL_COMP(3, 0)
+#define EXYNOS4_Q_TBL_COMP3_1 EXYNOS4_Q_TBL_COMP(3, 1)
+#define EXYNOS4_Q_TBL_COMP3_2 EXYNOS4_Q_TBL_COMP(3, 2)
+#define EXYNOS4_Q_TBL_COMP3_3 EXYNOS4_Q_TBL_COMP(3, 3)
+
+#define EXYNOS4_HUFF_TBL_COMP(c, n) ((n) << ((((c) - 1) << 1) + 6))
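+/*
+ * Same layout as the quantization table selectors, shifted up by 6 bits:
+ * component c uses the 2-bit field at bit (c - 1) * 2 + 6, where bit 0 of n
+ * selects the DC table and bit 1 the AC table (see the _AC_x_DC_y names
+ * below).  E.g. EXYNOS4_HUFF_TBL_COMP(1, 2) == (2 << 6) == COMP1_AC_1_DC_0.
+ */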
+
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(1, 0)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(1, 1)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(1, 2)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(1, 3)
+
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(2, 0)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(2, 1)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(2, 2)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(2, 3)
+
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(3, 0)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(3, 1)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(3, 2)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(3, 3)
+
+#define EXYNOS4_NF_SHIFT 16
+#define EXYNOS4_NF_MASK 0xff
+#define EXYNOS4_NF(x) \
+ (((x) & EXYNOS4_NF_MASK) << EXYNOS4_NF_SHIFT)
+
+/* JPEG quantizer table register */
+#define EXYNOS4_QTBL_CONTENT(n) (0x100 + (n) * 0x40)
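+/*
+ * The quantization tables are mapped as consecutive 0x40-byte windows
+ * starting at 0x100 (four of them, matching the 2-bit selectors above):
+ * EXYNOS4_QTBL_CONTENT(0) == 0x100, EXYNOS4_QTBL_CONTENT(3) == 0x1c0.
+ */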
+
+/* JPEG DC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLL 0x200
+
+/* JPEG DC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLV 0x210
+
+/* JPEG DC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCL 0x220
+
+/* JPEG DC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCV 0x230
+
+/* JPEG AC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLL 0x240
+
+/* JPEG AC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLV 0x250
+
+/* JPEG AC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCL 0x300
+
+/* JPEG AC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCV 0x310
+
+/* Register and bit definitions for Exynos 3250 */
+
+/* JPEG mode register */
+#define EXYNOS3250_JPGMOD 0x00
+#define EXYNOS3250_PROC_MODE_MASK (0x1 << 3)
+#define EXYNOS3250_PROC_MODE_DECOMPR (0x1 << 3)
+#define EXYNOS3250_PROC_MODE_COMPR (0x0 << 3)
+#define EXYNOS3250_SUBSAMPLING_MODE_MASK (0x7 << 0)
+#define EXYNOS3250_SUBSAMPLING_MODE_444 (0x0 << 0)
+#define EXYNOS3250_SUBSAMPLING_MODE_422 (0x1 << 0)
+#define EXYNOS3250_SUBSAMPLING_MODE_420 (0x2 << 0)
+#define EXYNOS3250_SUBSAMPLING_MODE_411 (0x6 << 0)
+#define EXYNOS3250_SUBSAMPLING_MODE_GRAY (0x3 << 0)
+
+/* JPEG operation status register */
+#define EXYNOS3250_JPGOPR 0x04
+#define EXYNOS3250_JPGOPR_MASK 0x01
+
+/* Quantization and Huffman tables register */
+#define EXYNOS3250_QHTBL 0x08
+#define EXYNOS3250_QT_NUM_SHIFT(t) ((((t) - 1) << 1) + 8)
+#define EXYNOS3250_QT_NUM_MASK(t) (0x3 << EXYNOS3250_QT_NUM_SHIFT(t))
+
+/* Huffman tables */
+#define EXYNOS3250_HT_NUM_AC_SHIFT(t) (((t) << 1) - 1)
+#define EXYNOS3250_HT_NUM_AC_MASK(t) (0x1 << EXYNOS3250_HT_NUM_AC_SHIFT(t))
+
+#define EXYNOS3250_HT_NUM_DC_SHIFT(t) (((t) - 1) << 1)
+#define EXYNOS3250_HT_NUM_DC_MASK(t) (0x1 << EXYNOS3250_HT_NUM_DC_SHIFT(t))
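+/*
+ * Per-component table selectors: component t (1-based) has its 2-bit
+ * quantization table number at bit (t - 1) * 2 + 8, while the 1-bit DC and
+ * AC Huffman table numbers sit interleaved in the low bits (DC at bit
+ * (t - 1) * 2, AC at bit t * 2 - 1).
+ */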
+
+/* JPEG restart interval register */
+#define EXYNOS3250_JPGDRI 0x0c
+#define EXYNOS3250_JPGDRI_MASK 0xffff
+
+/* JPEG vertical resolution register */
+#define EXYNOS3250_JPGY 0x10
+#define EXYNOS3250_JPGY_MASK 0xffff
+
+/* JPEG horizontal resolution register */
+#define EXYNOS3250_JPGX 0x14
+#define EXYNOS3250_JPGX_MASK 0xffff
+
+/* JPEG byte count register */
+#define EXYNOS3250_JPGCNT 0x18
+#define EXYNOS3250_JPGCNT_MASK 0xffffff
+
+/* JPEG interrupt mask register */
+#define EXYNOS3250_JPGINTSE 0x1c
+#define EXYNOS3250_JPEG_DONE_EN (1 << 11)
+#define EXYNOS3250_WDMA_DONE_EN (1 << 10)
+#define EXYNOS3250_RDMA_DONE_EN (1 << 9)
+#define EXYNOS3250_ENC_STREAM_INT_EN (1 << 8)
+#define EXYNOS3250_CORE_DONE_EN (1 << 5)
+#define EXYNOS3250_ERR_INT_EN (1 << 4)
+#define EXYNOS3250_HEAD_INT_EN (1 << 3)
+
+/* JPEG interrupt status register */
+#define EXYNOS3250_JPGINTST 0x20
+#define EXYNOS3250_JPEG_DONE (1 << 11)
+#define EXYNOS3250_WDMA_DONE (1 << 10)
+#define EXYNOS3250_RDMA_DONE (1 << 9)
+#define EXYNOS3250_ENC_STREAM_STAT (1 << 8)
+#define EXYNOS3250_RESULT_STAT (1 << 5)
+#define EXYNOS3250_STREAM_STAT (1 << 4)
+#define EXYNOS3250_HEADER_STAT (1 << 3)
+
+/*
+ * Base address of the luma component DMA buffer
+ * of the raw input or output image.
+ */
+#define EXYNOS3250_LUMA_BASE 0x100
+#define EXYNOS3250_SRC_TILE_EN_MASK 0x100
+
+/* Stride of source or destination luma raw image buffer */
+#define EXYNOS3250_LUMA_STRIDE 0x104
+
+/* Horizontal/vertical offset of active region in luma raw image buffer */
+#define EXYNOS3250_LUMA_XY_OFFSET 0x108
+#define EXYNOS3250_LUMA_YY_OFFSET_SHIFT 18
+#define EXYNOS3250_LUMA_YY_OFFSET_MASK (0x1fff << EXYNOS3250_LUMA_YY_OFFSET_SHIFT)
+#define EXYNOS3250_LUMA_YX_OFFSET_SHIFT 2
+#define EXYNOS3250_LUMA_YX_OFFSET_MASK (0x1fff << EXYNOS3250_LUMA_YX_OFFSET_SHIFT)
+
+/*
+ * Base address of the chroma(Cb) component DMA buffer
+ * of the raw input or output image.
+ */
+#define EXYNOS3250_CHROMA_BASE 0x10c
+
+/* Stride of source or destination chroma(Cb) raw image buffer */
+#define EXYNOS3250_CHROMA_STRIDE 0x110
+
+/* Horizontal/vertical offset of active region in chroma(Cb) raw image buffer */
+#define EXYNOS3250_CHROMA_XY_OFFSET 0x114
+#define EXYNOS3250_CHROMA_YY_OFFSET_SHIFT 18
+#define EXYNOS3250_CHROMA_YY_OFFSET_MASK (0x1fff << EXYNOS3250_CHROMA_YY_OFFSET_SHIFT)
+#define EXYNOS3250_CHROMA_YX_OFFSET_SHIFT 2
+#define EXYNOS3250_CHROMA_YX_OFFSET_MASK (0x1fff << EXYNOS3250_CHROMA_YX_OFFSET_SHIFT)
+
+/*
+ * Base address of the chroma(Cr) component DMA buffer
+ * of the raw input or output image.
+ */
+#define EXYNOS3250_CHROMA_CR_BASE 0x118
+
+/* Stride of source or destination chroma(Cr) raw image buffer */
+#define EXYNOS3250_CHROMA_CR_STRIDE 0x11c
+
+/* Horizontal/vertical offset of active region in chroma(Cr) raw image buffer */
+#define EXYNOS3250_CHROMA_CR_XY_OFFSET 0x120
+#define EXYNOS3250_CHROMA_CR_YY_OFFSET_SHIFT 18
+#define EXYNOS3250_CHROMA_CR_YY_OFFSET_MASK (0x1fff << EXYNOS3250_CHROMA_CR_YY_OFFSET_SHIFT)
+#define EXYNOS3250_CHROMA_CR_YX_OFFSET_SHIFT 2
+#define EXYNOS3250_CHROMA_CR_YX_OFFSET_MASK (0x1fff << EXYNOS3250_CHROMA_CR_YX_OFFSET_SHIFT)
+
+/* Raw image data r/w address register */
+#define EXYNOS3250_JPG_IMGADR 0x50
+
+/* Source or destination JPEG file DMA buffer address */
+#define EXYNOS3250_JPG_JPGADR 0x124
+
+/* Coefficients for RGB-to-YCbCr converter register */
+#define EXYNOS3250_JPG_COEF(n) (0x128 + (((n) - 1) << 2))
+#define EXYNOS3250_COEF_SHIFT(j) ((3 - (j)) << 3)
+#define EXYNOS3250_COEF_MASK(j) (0xff << EXYNOS3250_COEF_SHIFT(j))
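+/*
+ * Each EXYNOS3250_JPG_COEF(n) register holds 8-bit coefficient fields,
+ * most significant first: EXYNOS3250_COEF_SHIFT(1) == 16, (2) == 8,
+ * (3) == 0.
+ */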
+
+/* Raw input format setting */
+#define EXYNOS3250_JPGCMOD 0x134
+#define EXYNOS3250_SRC_TILE_EN (0x1 << 10)
+#define EXYNOS3250_SRC_NV_MASK (0x1 << 9)
+#define EXYNOS3250_SRC_NV12 (0x0 << 9)
+#define EXYNOS3250_SRC_NV21 (0x1 << 9)
+#define EXYNOS3250_SRC_BIG_ENDIAN_MASK (0x1 << 8)
+#define EXYNOS3250_SRC_BIG_ENDIAN (0x1 << 8)
+#define EXYNOS3250_MODE_SEL_MASK (0x7 << 5)
+#define EXYNOS3250_MODE_SEL_420_2P (0x0 << 5)
+#define EXYNOS3250_MODE_SEL_422_1P_LUM_CHR (0x1 << 5)
+#define EXYNOS3250_MODE_SEL_RGB565 (0x2 << 5)
+#define EXYNOS3250_MODE_SEL_422_1P_CHR_LUM (0x3 << 5)
+#define EXYNOS3250_MODE_SEL_ARGB8888 (0x4 << 5)
+#define EXYNOS3250_MODE_SEL_420_3P (0x5 << 5)
+#define EXYNOS3250_SRC_SWAP_RGB (0x1 << 3)
+#define EXYNOS3250_SRC_SWAP_UV (0x1 << 2)
+#define EXYNOS3250_MODE_Y16_MASK (0x1 << 1)
+#define EXYNOS3250_MODE_Y16 (0x1 << 1)
+#define EXYNOS3250_HALF_EN_MASK (0x1 << 0)
+#define EXYNOS3250_HALF_EN (0x1 << 0)
+
+/* Power on/off and clock down control */
+#define EXYNOS3250_JPGCLKCON 0x138
+#define EXYNOS3250_CLK_DOWN_READY (0x1 << 1)
+#define EXYNOS3250_POWER_ON (0x1 << 0)
+
+/* Start compression or decompression */
+#define EXYNOS3250_JSTART 0x13c
+
+/* Restart decompression after header analysis */
+#define EXYNOS3250_JRSTART 0x140
+
+/* JPEG SW reset register */
+#define EXYNOS3250_SW_RESET 0x144
+
+/* JPEG timer setting register */
+#define EXYNOS3250_TIMER_SE 0x148
+#define EXYNOS3250_TIMER_INT_EN_SHIFT 31
+#define EXYNOS3250_TIMER_INT_EN (1 << EXYNOS3250_TIMER_INT_EN_SHIFT)
+#define EXYNOS3250_TIMER_INIT_MASK 0x7fffffff
+
+/* JPEG timer status register */
+#define EXYNOS3250_TIMER_ST 0x14c
+#define EXYNOS3250_TIMER_INT_STAT_SHIFT 31
+#define EXYNOS3250_TIMER_INT_STAT (1 << EXYNOS3250_TIMER_INT_STAT_SHIFT)
+#define EXYNOS3250_TIMER_CNT_SHIFT 0
+#define EXYNOS3250_TIMER_CNT_MASK 0x7fffffff
+
+/* Command status register */
+#define EXYNOS3250_COMSTAT 0x150
+#define EXYNOS3250_CUR_PROC_MODE (0x1 << 1)
+#define EXYNOS3250_CUR_COM_MODE (0x1 << 0)
+
+/* JPEG decompression output format register */
+#define EXYNOS3250_OUTFORM 0x154
+#define EXYNOS3250_OUT_ALPHA_MASK (0xff << 24)
+#define EXYNOS3250_OUT_TILE_EN (0x1 << 10)
+#define EXYNOS3250_OUT_NV_MASK (0x1 << 9)
+#define EXYNOS3250_OUT_NV12 (0x0 << 9)
+#define EXYNOS3250_OUT_NV21 (0x1 << 9)
+#define EXYNOS3250_OUT_BIG_ENDIAN_MASK (0x1 << 8)
+#define EXYNOS3250_OUT_BIG_ENDIAN (0x1 << 8)
+#define EXYNOS3250_OUT_SWAP_RGB (0x1 << 7)
+#define EXYNOS3250_OUT_SWAP_UV (0x1 << 6)
+#define EXYNOS3250_OUT_FMT_MASK (0x7 << 0)
+#define EXYNOS3250_OUT_FMT_420_2P (0x0 << 0)
+#define EXYNOS3250_OUT_FMT_422_1P_LUM_CHR (0x1 << 0)
+#define EXYNOS3250_OUT_FMT_422_1P_CHR_LUM (0x3 << 0)
+#define EXYNOS3250_OUT_FMT_420_3P (0x4 << 0)
+#define EXYNOS3250_OUT_FMT_RGB565 (0x5 << 0)
+#define EXYNOS3250_OUT_FMT_ARGB8888 (0x6 << 0)
+
+/* Input JPEG stream byte size for decompression */
+#define EXYNOS3250_DEC_STREAM_SIZE 0x158
+#define EXYNOS3250_DEC_STREAM_MASK 0x1fffffff
+
+/* The upper bound of the byte size of output compressed stream */
+#define EXYNOS3250_ENC_STREAM_BOUND 0x15c
+#define EXYNOS3250_ENC_STREAM_BOUND_MASK 0xffffc0
+
+/* Scale-down ratio when decoding */
+#define EXYNOS3250_DEC_SCALING_RATIO 0x160
+#define EXYNOS3250_DEC_SCALE_FACTOR_MASK 0x3
+#define EXYNOS3250_DEC_SCALE_FACTOR_8_8 0x0
+#define EXYNOS3250_DEC_SCALE_FACTOR_4_8 0x1
+#define EXYNOS3250_DEC_SCALE_FACTOR_2_8 0x2
+#define EXYNOS3250_DEC_SCALE_FACTOR_1_8 0x3
+
+/* Error check */
+#define EXYNOS3250_CRC_RESULT 0x164
+
+/* RDMA and WDMA operation status register */
+#define EXYNOS3250_DMA_OPER_STATUS 0x168
+#define EXYNOS3250_WDMA_OPER_STATUS (0x1 << 1)
+#define EXYNOS3250_RDMA_OPER_STATUS (0x1 << 0)
+
+/* DMA issue gathering number and issue number settings */
+#define EXYNOS3250_DMA_ISSUE_NUM 0x16c
+#define EXYNOS3250_WDMA_ISSUE_NUM_SHIFT 16
+#define EXYNOS3250_WDMA_ISSUE_NUM_MASK (0x7 << EXYNOS3250_WDMA_ISSUE_NUM_SHIFT)
+#define EXYNOS3250_RDMA_ISSUE_NUM_SHIFT 8
+#define EXYNOS3250_RDMA_ISSUE_NUM_MASK (0x7 << EXYNOS3250_RDMA_ISSUE_NUM_SHIFT)
+#define EXYNOS3250_ISSUE_GATHER_NUM_SHIFT 0
+#define EXYNOS3250_ISSUE_GATHER_NUM_MASK (0x7 << EXYNOS3250_ISSUE_GATHER_NUM_SHIFT)
+#define EXYNOS3250_DMA_MO_COUNT 0x7
+
+/* Version register */
+#define EXYNOS3250_VERSION 0x1fc
+
+/* RGB <-> YUV conversion coefficients */
+#define EXYNOS3250_JPEG_ENC_COEF1 0x01352e1e
+#define EXYNOS3250_JPEG_ENC_COEF2 0x00b0ae83
+#define EXYNOS3250_JPEG_ENC_COEF3 0x020cdc13
+
+#define EXYNOS3250_JPEG_DEC_COEF1 0x04a80199
+#define EXYNOS3250_JPEG_DEC_COEF2 0x04a9a064
+#define EXYNOS3250_JPEG_DEC_COEF3 0x04a80102
+
+#endif /* JPEG_REGS_H_ */
+
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile
new file mode 100644
index 000000000..0b324af2a
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
+s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
+s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
+s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
+s5p-mfc-y += s5p_mfc_opr.o s5p_mfc_opr_v5.o s5p_mfc_opr_v6.o
+s5p-mfc-y += s5p_mfc_cmd.o s5p_mfc_cmd_v5.o s5p_mfc_cmd_v6.o
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v10.h b/drivers/media/platform/s5p-mfc/regs-mfc-v10.h
new file mode 100644
index 000000000..fadd9139b
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v10.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung MFC V10.x Interface (FIMV) driver
+ *
+ */
+
+#ifndef _REGS_MFC_V10_H
+#define _REGS_MFC_V10_H
+
+#include <linux/sizes.h>
+#include "regs-mfc-v8.h"
+
+/* MFCv10 register definitions */
+#define S5P_FIMV_MFC_CLOCK_OFF_V10 0x7120
+#define S5P_FIMV_MFC_STATE_V10 0x7124
+#define S5P_FIMV_D_STATIC_BUFFER_ADDR_V10 0xF570
+#define S5P_FIMV_D_STATIC_BUFFER_SIZE_V10 0xF574
+#define S5P_FIMV_E_NUM_T_LAYER_V10 0xFBAC
+#define S5P_FIMV_E_HIERARCHICAL_QP_LAYER0_V10 0xFBB0
+#define S5P_FIMV_E_HIERARCHICAL_QP_LAYER1_V10 0xFBB4
+#define S5P_FIMV_E_HIERARCHICAL_QP_LAYER2_V10 0xFBB8
+#define S5P_FIMV_E_HIERARCHICAL_QP_LAYER3_V10 0xFBBC
+#define S5P_FIMV_E_HIERARCHICAL_QP_LAYER4_V10 0xFBC0
+#define S5P_FIMV_E_HIERARCHICAL_QP_LAYER5_V10 0xFBC4
+#define S5P_FIMV_E_HIERARCHICAL_QP_LAYER6_V10 0xFBC8
+#define S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER0_V10 0xFD18
+#define S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER1_V10 0xFD1C
+#define S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER2_V10 0xFD20
+#define S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER3_V10 0xFD24
+#define S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER4_V10 0xFD28
+#define S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER5_V10 0xFD2C
+#define S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER6_V10 0xFD30
+#define S5P_FIMV_E_HEVC_OPTIONS_V10 0xFDD4
+#define S5P_FIMV_E_HEVC_REFRESH_PERIOD_V10 0xFDD8
+#define S5P_FIMV_E_HEVC_CHROMA_QP_OFFSET_V10 0xFDDC
+#define S5P_FIMV_E_HEVC_LF_BETA_OFFSET_DIV2_V10 0xFDE0
+#define S5P_FIMV_E_HEVC_LF_TC_OFFSET_DIV2_V10 0xFDE4
+#define S5P_FIMV_E_HEVC_NAL_CONTROL_V10 0xFDE8
+
+/* MFCv10 Context buffer sizes */
+#define MFC_CTX_BUF_SIZE_V10 (30 * SZ_1K)
+#define MFC_H264_DEC_CTX_BUF_SIZE_V10 (2 * SZ_1M)
+#define MFC_OTHER_DEC_CTX_BUF_SIZE_V10 (20 * SZ_1K)
+#define MFC_H264_ENC_CTX_BUF_SIZE_V10 (100 * SZ_1K)
+#define MFC_HEVC_ENC_CTX_BUF_SIZE_V10 (30 * SZ_1K)
+#define MFC_OTHER_ENC_CTX_BUF_SIZE_V10 (15 * SZ_1K)
+
+/* MFCv10 variant defines */
+#define MAX_FW_SIZE_V10 (SZ_1M)
+#define MAX_CPB_SIZE_V10 (3 * SZ_1M)
+#define MFC_VERSION_V10 0xA0
+#define MFC_NUM_PORTS_V10 1
+
+/* MFCv10 codec defines */
+#define S5P_FIMV_CODEC_HEVC_DEC 17
+#define S5P_FIMV_CODEC_VP9_DEC 18
+#define S5P_FIMV_CODEC_HEVC_ENC 26
+
+/* Decoder buffer size for MFC v10 */
+#define DEC_VP9_STATIC_BUFFER_SIZE 20480
+
+/* Encoder buffer size for MFC v10.0 */
+#define ENC_V100_BASE_SIZE(x, y) \
+ (((x + 3) * (y + 3) * 8) \
+ + ((y * 64) + 1280) * DIV_ROUND_UP(x, 8))
+
+#define ENC_V100_H264_ME_SIZE(x, y) \
+ (ENC_V100_BASE_SIZE(x, y) \
+ + (DIV_ROUND_UP(x * y, 64) * 32))
+
+#define ENC_V100_MPEG4_ME_SIZE(x, y) \
+ (ENC_V100_BASE_SIZE(x, y) \
+ + (DIV_ROUND_UP(x * y, 128) * 16))
+
+#define ENC_V100_VP8_ME_SIZE(x, y) \
+ ENC_V100_BASE_SIZE(x, y)
+
+#define ENC_V100_HEVC_ME_SIZE(x, y) \
+ (((x + 3) * (y + 3) * 32) \
+ + ((y * 128) + 1280) * DIV_ROUND_UP(x, 4))
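+/*
+ * The x/y arguments are presumably the frame dimensions in macroblocks.
+ * As a worked example, a 1920x1080 H.264 encode (120x68 MBs) gives:
+ *   ENC_V100_BASE_SIZE(120, 68)    = 123 * 71 * 8 + (68 * 64 + 1280) * 15
+ *                                  = 154344 bytes
+ *   ENC_V100_H264_ME_SIZE(120, 68) = 154344 + DIV_ROUND_UP(8160, 64) * 32
+ *                                  = 158440 bytes
+ */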
+
+#endif /*_REGS_MFC_V10_H*/
+
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
new file mode 100644
index 000000000..c0166ee9a
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -0,0 +1,411 @@
+/*
+ * Register definition file for Samsung MFC V6.x Interface (FIMV) driver
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGS_FIMV_V6_H
+#define _REGS_FIMV_V6_H
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
+#define S5P_FIMV_REG_SIZE_V6 (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
+#define S5P_FIMV_REG_COUNT_V6 ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
+
+/* Number of bits that the buffer address should be shifted for particular
+ * MFC buffers. */
+#define S5P_FIMV_MEM_OFFSET_V6 0
+
+#define S5P_FIMV_START_ADDR_V6 0x0000
+#define S5P_FIMV_END_ADDR_V6 0xfd80
+
+#define S5P_FIMV_REG_CLEAR_BEGIN_V6 0xf000
+#define S5P_FIMV_REG_CLEAR_COUNT_V6 1024
+
+/* Codec Common Registers */
+#define S5P_FIMV_RISC_ON_V6 0x0000
+#define S5P_FIMV_RISC2HOST_INT_V6 0x003C
+#define S5P_FIMV_HOST2RISC_INT_V6 0x0044
+#define S5P_FIMV_RISC_BASE_ADDRESS_V6 0x0054
+
+#define S5P_FIMV_MFC_RESET_V6 0x1070
+
+#define S5P_FIMV_HOST2RISC_CMD_V6 0x1100
+#define S5P_FIMV_H2R_CMD_EMPTY_V6 0
+#define S5P_FIMV_H2R_CMD_SYS_INIT_V6 1
+#define S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6 2
+#define S5P_FIMV_CH_SEQ_HEADER_V6 3
+#define S5P_FIMV_CH_INIT_BUFS_V6 4
+#define S5P_FIMV_CH_FRAME_START_V6 5
+#define S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6 6
+#define S5P_FIMV_H2R_CMD_SLEEP_V6 7
+#define S5P_FIMV_H2R_CMD_WAKEUP_V6 8
+#define S5P_FIMV_CH_LAST_FRAME_V6 9
+#define S5P_FIMV_H2R_CMD_FLUSH_V6 10
+/* RMVME: REALLOC used? */
+#define S5P_FIMV_CH_FRAME_START_REALLOC_V6 5
+
+#define S5P_FIMV_RISC2HOST_CMD_V6 0x1104
+#define S5P_FIMV_R2H_CMD_EMPTY_V6 0
+#define S5P_FIMV_R2H_CMD_SYS_INIT_RET_V6 1
+#define S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET_V6 2
+#define S5P_FIMV_R2H_CMD_SEQ_DONE_RET_V6 3
+#define S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET_V6 4
+
+#define S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET_V6 6
+#define S5P_FIMV_R2H_CMD_SLEEP_RET_V6 7
+#define S5P_FIMV_R2H_CMD_WAKEUP_RET_V6 8
+#define S5P_FIMV_R2H_CMD_COMPLETE_SEQ_RET_V6 9
+#define S5P_FIMV_R2H_CMD_DPB_FLUSH_RET_V6 10
+#define S5P_FIMV_R2H_CMD_NAL_ABORT_RET_V6 11
+#define S5P_FIMV_R2H_CMD_FW_STATUS_RET_V6 12
+#define S5P_FIMV_R2H_CMD_FRAME_DONE_RET_V6 13
+#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET_V6 14
+#define S5P_FIMV_R2H_CMD_SLICE_DONE_RET_V6 15
+#define S5P_FIMV_R2H_CMD_ENC_BUFFER_FUL_RET_V6 16
+#define S5P_FIMV_R2H_CMD_ERR_RET_V6 32
+
+#define S5P_FIMV_MFC_BUS_RESET_CTRL 0x7110
+#define S5P_FIMV_FW_VERSION_V6 0xf000
+
+#define S5P_FIMV_INSTANCE_ID_V6 0xf008
+#define S5P_FIMV_CODEC_TYPE_V6 0xf00c
+#define S5P_FIMV_CONTEXT_MEM_ADDR_V6 0xf014
+#define S5P_FIMV_CONTEXT_MEM_SIZE_V6 0xf018
+#define S5P_FIMV_PIXEL_FORMAT_V6 0xf020
+
+#define S5P_FIMV_METADATA_ENABLE_V6 0xf024
+#define S5P_FIMV_DBG_BUFFER_ADDR_V6 0xf030
+#define S5P_FIMV_DBG_BUFFER_SIZE_V6 0xf034
+#define S5P_FIMV_RET_INSTANCE_ID_V6 0xf070
+
+#define S5P_FIMV_ERROR_CODE_V6 0xf074
+#define S5P_FIMV_ERR_WARNINGS_START_V6 160
+#define S5P_FIMV_ERR_DEC_MASK_V6 0xffff
+#define S5P_FIMV_ERR_DEC_SHIFT_V6 0
+#define S5P_FIMV_ERR_DSPL_MASK_V6 0xffff0000
+#define S5P_FIMV_ERR_DSPL_SHIFT_V6 16
+
+#define S5P_FIMV_DBG_BUFFER_OUTPUT_SIZE_V6 0xf078
+#define S5P_FIMV_METADATA_STATUS_V6 0xf07C
+#define S5P_FIMV_METADATA_ADDR_MB_INFO_V6 0xf080
+#define S5P_FIMV_METADATA_SIZE_MB_INFO_V6 0xf084
+
+/* Decoder Registers */
+#define S5P_FIMV_D_CRC_CTRL_V6 0xf0b0
+#define S5P_FIMV_D_DEC_OPTIONS_V6 0xf0b4
+#define S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6 4
+#define S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6 3
+#define S5P_FIMV_D_OPT_LF_CTRL_SHIFT_V6 1
+#define S5P_FIMV_D_OPT_LF_CTRL_MASK_V6 0x3
+#define S5P_FIMV_D_OPT_TILE_MODE_SHIFT_V6 0
+
+#define S5P_FIMV_D_DISPLAY_DELAY_V6 0xf0b8
+
+#define S5P_FIMV_D_SET_FRAME_WIDTH_V6 0xf0bc
+#define S5P_FIMV_D_SET_FRAME_HEIGHT_V6 0xf0c0
+
+#define S5P_FIMV_D_SEI_ENABLE_V6 0xf0c4
+
+/* Buffer setting registers */
+#define S5P_FIMV_D_MIN_NUM_DPB_V6 0xf0f0
+#define S5P_FIMV_D_MIN_LUMA_DPB_SIZE_V6 0xf0f4
+#define S5P_FIMV_D_MIN_CHROMA_DPB_SIZE_V6 0xf0f8
+#define S5P_FIMV_D_MVC_NUM_VIEWS_V6 0xf0fc
+#define S5P_FIMV_D_MIN_NUM_MV_V6 0xf100
+#define S5P_FIMV_D_NUM_DPB_V6 0xf130
+#define S5P_FIMV_D_LUMA_DPB_SIZE_V6 0xf134
+#define S5P_FIMV_D_CHROMA_DPB_SIZE_V6 0xf138
+#define S5P_FIMV_D_MV_BUFFER_SIZE_V6 0xf13c
+
+#define S5P_FIMV_D_LUMA_DPB_V6 0xf140
+#define S5P_FIMV_D_CHROMA_DPB_V6 0xf240
+#define S5P_FIMV_D_MV_BUFFER_V6 0xf340
+
+#define S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V6 0xf440
+#define S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V6 0xf444
+#define S5P_FIMV_D_METADATA_BUFFER_ADDR_V6 0xf448
+#define S5P_FIMV_D_METADATA_BUFFER_SIZE_V6 0xf44c
+#define S5P_FIMV_D_NUM_MV_V6 0xf478
+#define S5P_FIMV_D_CPB_BUFFER_ADDR_V6 0xf4b0
+#define S5P_FIMV_D_CPB_BUFFER_SIZE_V6 0xf4b4
+
+#define S5P_FIMV_D_AVAILABLE_DPB_FLAG_UPPER_V6 0xf4b8
+#define S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V6 0xf4bc
+#define S5P_FIMV_D_CPB_BUFFER_OFFSET_V6 0xf4c0
+#define S5P_FIMV_D_SLICE_IF_ENABLE_V6 0xf4c4
+#define S5P_FIMV_D_PICTURE_TAG_V6 0xf4c8
+#define S5P_FIMV_D_STREAM_DATA_SIZE_V6 0xf4d0
+#define S5P_FIMV_D_INIT_BUFFER_OPTIONS_V6 0xf47c
+
+/* Display information register */
+#define S5P_FIMV_D_DISPLAY_FRAME_WIDTH_V6 0xf500
+#define S5P_FIMV_D_DISPLAY_FRAME_HEIGHT_V6 0xf504
+
+/* Display status */
+#define S5P_FIMV_D_DISPLAY_STATUS_V6 0xf508
+
+#define S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6 0xf50c
+#define S5P_FIMV_D_DISPLAY_CHROMA_ADDR_V6 0xf510
+
+#define S5P_FIMV_D_DISPLAY_FRAME_TYPE_V6 0xf514
+
+#define S5P_FIMV_D_DISPLAY_CROP_INFO1_V6 0xf518
+#define S5P_FIMV_D_DISPLAY_CROP_INFO2_V6 0xf51c
+#define S5P_FIMV_D_DISPLAY_PICTURE_PROFILE_V6 0xf520
+#define S5P_FIMV_D_DISPLAY_LUMA_CRC_TOP_V6 0xf524
+#define S5P_FIMV_D_DISPLAY_CHROMA_CRC_TOP_V6 0xf528
+#define S5P_FIMV_D_DISPLAY_LUMA_CRC_BOT_V6 0xf52c
+#define S5P_FIMV_D_DISPLAY_CHROMA_CRC_BOT_V6 0xf530
+#define S5P_FIMV_D_DISPLAY_ASPECT_RATIO_V6 0xf534
+#define S5P_FIMV_D_DISPLAY_EXTENDED_AR_V6 0xf538
+
+/* Decoded picture information register */
+#define S5P_FIMV_D_DECODED_FRAME_WIDTH_V6 0xf53c
+#define S5P_FIMV_D_DECODED_FRAME_HEIGHT_V6 0xf540
+#define S5P_FIMV_D_DECODED_STATUS_V6 0xf544
+#define S5P_FIMV_DEC_CRC_GEN_MASK_V6 0x1
+#define S5P_FIMV_DEC_CRC_GEN_SHIFT_V6 6
+
+#define S5P_FIMV_D_DECODED_LUMA_ADDR_V6 0xf548
+#define S5P_FIMV_D_DECODED_CHROMA_ADDR_V6 0xf54c
+
+#define S5P_FIMV_D_DECODED_FRAME_TYPE_V6 0xf550
+#define S5P_FIMV_DECODE_FRAME_MASK_V6 7
+
+#define S5P_FIMV_D_DECODED_CROP_INFO1_V6 0xf554
+#define S5P_FIMV_D_DECODED_CROP_INFO2_V6 0xf558
+#define S5P_FIMV_D_DECODED_PICTURE_PROFILE_V6 0xf55c
+#define S5P_FIMV_D_DECODED_NAL_SIZE_V6 0xf560
+#define S5P_FIMV_D_DECODED_LUMA_CRC_TOP_V6 0xf564
+#define S5P_FIMV_D_DECODED_CHROMA_CRC_TOP_V6 0xf568
+#define S5P_FIMV_D_DECODED_LUMA_CRC_BOT_V6 0xf56c
+#define S5P_FIMV_D_DECODED_CHROMA_CRC_BOT_V6 0xf570
+
+/* Returned value register for specific setting */
+#define S5P_FIMV_D_RET_PICTURE_TAG_TOP_V6 0xf574
+#define S5P_FIMV_D_RET_PICTURE_TAG_BOT_V6 0xf578
+#define S5P_FIMV_D_RET_PICTURE_TIME_TOP_V6 0xf57c
+#define S5P_FIMV_D_RET_PICTURE_TIME_BOT_V6 0xf580
+#define S5P_FIMV_D_CHROMA_FORMAT_V6 0xf588
+#define S5P_FIMV_D_MPEG4_INFO_V6 0xf58c
+#define S5P_FIMV_D_H264_INFO_V6 0xf590
+
+#define S5P_FIMV_D_METADATA_ADDR_CONCEALED_MB_V6 0xf594
+#define S5P_FIMV_D_METADATA_SIZE_CONCEALED_MB_V6 0xf598
+#define S5P_FIMV_D_METADATA_ADDR_VC1_PARAM_V6 0xf59c
+#define S5P_FIMV_D_METADATA_SIZE_VC1_PARAM_V6 0xf5a0
+#define S5P_FIMV_D_METADATA_ADDR_SEI_NAL_V6 0xf5a4
+#define S5P_FIMV_D_METADATA_SIZE_SEI_NAL_V6 0xf5a8
+#define S5P_FIMV_D_METADATA_ADDR_VUI_V6 0xf5ac
+#define S5P_FIMV_D_METADATA_SIZE_VUI_V6 0xf5b0
+
+#define S5P_FIMV_D_MVC_VIEW_ID_V6 0xf5b4
+
+/* SEI related information */
+#define S5P_FIMV_D_FRAME_PACK_SEI_AVAIL_V6 0xf5f0
+#define S5P_FIMV_D_FRAME_PACK_ARRGMENT_ID_V6 0xf5f4
+#define S5P_FIMV_D_FRAME_PACK_SEI_INFO_V6 0xf5f8
+#define S5P_FIMV_D_FRAME_PACK_GRID_POS_V6 0xf5fc
+
+/* Encoder Registers */
+#define S5P_FIMV_E_FRAME_WIDTH_V6 0xf770
+#define S5P_FIMV_E_FRAME_HEIGHT_V6 0xf774
+#define S5P_FIMV_E_CROPPED_FRAME_WIDTH_V6 0xf778
+#define S5P_FIMV_E_CROPPED_FRAME_HEIGHT_V6 0xf77c
+#define S5P_FIMV_E_FRAME_CROP_OFFSET_V6 0xf780
+#define S5P_FIMV_E_ENC_OPTIONS_V6 0xf784
+#define S5P_FIMV_E_PICTURE_PROFILE_V6 0xf788
+#define S5P_FIMV_E_FIXED_PICTURE_QP_V6 0xf790
+
+#define S5P_FIMV_E_RC_CONFIG_V6 0xf794
+#define S5P_FIMV_E_RC_QP_BOUND_V6 0xf798
+#define S5P_FIMV_E_RC_RPARAM_V6 0xf79c
+#define S5P_FIMV_E_MB_RC_CONFIG_V6 0xf7a0
+#define S5P_FIMV_E_PADDING_CTRL_V6 0xf7a4
+#define S5P_FIMV_E_MV_HOR_RANGE_V6 0xf7ac
+#define S5P_FIMV_E_MV_VER_RANGE_V6 0xf7b0
+#define S5P_FIMV_E_MV_RANGE_V6_MASK 0x3fff
+
+#define S5P_FIMV_E_VBV_BUFFER_SIZE_V6 0xf84c
+#define S5P_FIMV_E_VBV_INIT_DELAY_V6 0xf850
+#define S5P_FIMV_E_NUM_DPB_V6 0xf890
+#define S5P_FIMV_E_LUMA_DPB_V6 0xf8c0
+#define S5P_FIMV_E_CHROMA_DPB_V6 0xf904
+#define S5P_FIMV_E_ME_BUFFER_V6 0xf948
+
+#define S5P_FIMV_E_SCRATCH_BUFFER_ADDR_V6 0xf98c
+#define S5P_FIMV_E_SCRATCH_BUFFER_SIZE_V6 0xf990
+#define S5P_FIMV_E_TMV_BUFFER0_V6 0xf994
+#define S5P_FIMV_E_TMV_BUFFER1_V6 0xf998
+#define S5P_FIMV_E_SOURCE_LUMA_ADDR_V6 0xf9f0
+#define S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6 0xf9f4
+#define S5P_FIMV_E_STREAM_BUFFER_ADDR_V6 0xf9f8
+#define S5P_FIMV_E_STREAM_BUFFER_SIZE_V6 0xf9fc
+#define S5P_FIMV_E_ROI_BUFFER_ADDR_V6 0xfA00
+
+#define S5P_FIMV_E_PARAM_CHANGE_V6 0xfa04
+#define S5P_FIMV_E_IR_SIZE_V6 0xfa08
+#define S5P_FIMV_E_GOP_CONFIG_V6 0xfa0c
+#define S5P_FIMV_E_MSLICE_MODE_V6 0xfa10
+#define S5P_FIMV_E_MSLICE_SIZE_MB_V6 0xfa14
+#define S5P_FIMV_E_MSLICE_SIZE_BITS_V6 0xfa18
+#define S5P_FIMV_E_FRAME_INSERTION_V6 0xfa1c
+
+#define S5P_FIMV_E_RC_FRAME_RATE_V6 0xfa20
+#define S5P_FIMV_E_RC_BIT_RATE_V6 0xfa24
+#define S5P_FIMV_E_RC_QP_OFFSET_V6 0xfa28
+#define S5P_FIMV_E_RC_ROI_CTRL_V6 0xfa2c
+#define S5P_FIMV_E_PICTURE_TAG_V6 0xfa30
+#define S5P_FIMV_E_BIT_COUNT_ENABLE_V6 0xfa34
+#define S5P_FIMV_E_MAX_BIT_COUNT_V6 0xfa38
+#define S5P_FIMV_E_MIN_BIT_COUNT_V6 0xfa3c
+
+#define S5P_FIMV_E_METADATA_BUFFER_ADDR_V6 0xfa40
+#define S5P_FIMV_E_METADATA_BUFFER_SIZE_V6 0xfa44
+#define S5P_FIMV_E_STREAM_SIZE_V6 0xfa80
+#define S5P_FIMV_E_SLICE_TYPE_V6 0xfa84
+#define S5P_FIMV_E_PICTURE_COUNT_V6 0xfa88
+#define S5P_FIMV_E_RET_PICTURE_TAG_V6 0xfa8c
+#define S5P_FIMV_E_STREAM_BUFFER_WRITE_POINTER_V6 0xfa90
+
+#define S5P_FIMV_E_ENCODED_SOURCE_LUMA_ADDR_V6 0xfa94
+#define S5P_FIMV_E_ENCODED_SOURCE_CHROMA_ADDR_V6 0xfa98
+#define S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6 0xfa9c
+#define S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6 0xfaa0
+#define S5P_FIMV_E_METADATA_ADDR_ENC_SLICE_V6 0xfaa4
+#define S5P_FIMV_E_METADATA_SIZE_ENC_SLICE_V6 0xfaa8
+
+#define S5P_FIMV_E_MPEG4_OPTIONS_V6 0xfb10
+#define S5P_FIMV_E_MPEG4_HEC_PERIOD_V6 0xfb14
+#define S5P_FIMV_E_ASPECT_RATIO_V6 0xfb50
+#define S5P_FIMV_E_EXTENDED_SAR_V6 0xfb54
+
+#define S5P_FIMV_E_H264_OPTIONS_V6 0xfb58
+#define S5P_FIMV_E_H264_LF_ALPHA_OFFSET_V6 0xfb5c
+#define S5P_FIMV_E_H264_LF_BETA_OFFSET_V6 0xfb60
+#define S5P_FIMV_E_H264_I_PERIOD_V6 0xfb64
+
+#define S5P_FIMV_E_H264_FMO_SLICE_GRP_MAP_TYPE_V6 0xfb68
+#define S5P_FIMV_E_H264_FMO_NUM_SLICE_GRP_MINUS1_V6 0xfb6c
+#define S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_DIR_V6 0xfb70
+#define S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_RATE_MINUS1_V6 0xfb74
+#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_0_V6 0xfb78
+#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_1_V6 0xfb7c
+#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_2_V6 0xfb80
+#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_3_V6 0xfb84
+
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_0_V6 0xfb88
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_1_V6 0xfb8c
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_2_V6 0xfb90
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_3_V6 0xfb94
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_4_V6 0xfb98
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_5_V6 0xfb9c
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_6_V6 0xfba0
+#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_7_V6 0xfba4
+
+#define S5P_FIMV_E_H264_CHROMA_QP_OFFSET_V6 0xfba8
+#define S5P_FIMV_E_H264_NUM_T_LAYER_V6 0xfbac
+
+#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER0_V6 0xfbb0
+#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER1_V6 0xfbb4
+#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER2_V6 0xfbb8
+#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER3_V6 0xfbbc
+#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER4_V6 0xfbc0
+#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER5_V6 0xfbc4
+#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER6_V6 0xfbc8
+
+#define S5P_FIMV_E_H264_FRAME_PACKING_SEI_INFO_V6 0xfc4c
+#define S5P_FIMV_ENC_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE_V6 0
+#define S5P_FIMV_ENC_FP_ARRANGEMENT_TYPE_TOP_BOTTOM_V6 1
+#define S5P_FIMV_ENC_FP_ARRANGEMENT_TYPE_TEMPORAL_V6 2
+
+#define S5P_FIMV_E_MVC_FRAME_QP_VIEW1_V6 0xfd40
+#define S5P_FIMV_E_MVC_RC_FRAME_RATE_VIEW1_V6 0xfd44
+#define S5P_FIMV_E_MVC_RC_BIT_RATE_VIEW1_V6 0xfd48
+#define S5P_FIMV_E_MVC_RC_QBOUND_VIEW1_V6 0xfd4c
+#define S5P_FIMV_E_MVC_RC_RPARA_VIEW1_V6 0xfd50
+#define S5P_FIMV_E_MVC_INTER_VIEW_PREDICTION_ON_V6 0xfd80
+
+/* Codec numbers */
+#define S5P_FIMV_CODEC_NONE_V6 -1
+
+
+#define S5P_FIMV_CODEC_H264_DEC_V6 0
+#define S5P_FIMV_CODEC_H264_MVC_DEC_V6 1
+
+#define S5P_FIMV_CODEC_MPEG4_DEC_V6 3
+#define S5P_FIMV_CODEC_FIMV1_DEC_V6 4
+#define S5P_FIMV_CODEC_FIMV2_DEC_V6 5
+#define S5P_FIMV_CODEC_FIMV3_DEC_V6 6
+#define S5P_FIMV_CODEC_FIMV4_DEC_V6 7
+#define S5P_FIMV_CODEC_H263_DEC_V6 8
+#define S5P_FIMV_CODEC_VC1RCV_DEC_V6 9
+#define S5P_FIMV_CODEC_VC1_DEC_V6 10
+/* FIXME: Add 11~12 */
+#define S5P_FIMV_CODEC_MPEG2_DEC_V6 13
+#define S5P_FIMV_CODEC_VP8_DEC_V6 14
+/* FIXME: Add 15~16 */
+#define S5P_FIMV_CODEC_H264_ENC_V6 20
+#define S5P_FIMV_CODEC_H264_MVC_ENC_V6 21
+
+#define S5P_FIMV_CODEC_MPEG4_ENC_V6 23
+#define S5P_FIMV_CODEC_H263_ENC_V6 24
+
+#define S5P_FIMV_NV12M_HALIGN_V6 16
+#define S5P_FIMV_NV12MT_HALIGN_V6 16
+#define S5P_FIMV_NV12MT_VALIGN_V6 16
+
+#define S5P_FIMV_TMV_BUFFER_ALIGN_V6 16
+#define S5P_FIMV_LUMA_DPB_BUFFER_ALIGN_V6 256
+#define S5P_FIMV_CHROMA_DPB_BUFFER_ALIGN_V6 256
+#define S5P_FIMV_ME_BUFFER_ALIGN_V6 256
+#define S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6 256
+
+#define S5P_FIMV_LUMA_MB_TO_PIXEL_V6 256
+#define S5P_FIMV_CHROMA_MB_TO_PIXEL_V6 128
+#define S5P_FIMV_NUM_TMV_BUFFERS_V6 2
+
+#define S5P_FIMV_MAX_FRAME_SIZE_V6 (2 * SZ_1M)
+#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6 16
+#define S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6 16
+
+/* Buffer size requirements defined by hardware */
+#define S5P_FIMV_TMV_BUFFER_SIZE_V6(w, h) (((w) + 1) * ((h) + 3) * 8)
+#define S5P_FIMV_ME_BUFFER_SIZE_V6(imw, imh, mbw, mbh) \
+ (((((imw + 127) / 64) * 16) * DIV_ROUND_UP(imh, 64) * 256) + \
+ (DIV_ROUND_UP((mbw) * (mbh), 32) * 16))
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V6(w, h) (((w) * 192) + 64)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h) \
+ ((w) * 144 + 8192 * (h) + 49216 + 1048576)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(w, h) \
+ (2096 * ((w) + (h) + 1))
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h) \
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(w, h) \
+ ((w) * 32 + (h) * 128 + (((w) + 1) / 2) * 64 + 2112)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(w, h) \
+ (((w) * 64) + (((w) + 1) * 16) + (4096 * 16))
+#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_ENC_V6(w, h) \
+ (((w) * 16) + (((w) + 1) * 16))
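+/*
+ * The w/h arguments appear to be the frame dimensions in macroblocks;
+ * e.g. the H.264 decoder scratch size for a frame 120 MBs wide is
+ * 120 * 192 + 64 = 23104 bytes.
+ */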
+
+/* MFC Context buffer sizes */
+#define MFC_CTX_BUF_SIZE_V6 (28 * SZ_1K) /* 28KB */
+#define MFC_H264_DEC_CTX_BUF_SIZE_V6 (2 * SZ_1M) /* 2MB */
+#define MFC_OTHER_DEC_CTX_BUF_SIZE_V6 (20 * SZ_1K) /* 20KB */
+#define MFC_H264_ENC_CTX_BUF_SIZE_V6 (100 * SZ_1K) /* 100KB */
+#define MFC_OTHER_ENC_CTX_BUF_SIZE_V6 (12 * SZ_1K) /* 12KB */
+
+/* MFCv6 variant defines */
+#define MAX_FW_SIZE_V6 (SZ_512K) /* 512KB */
+#define MAX_CPB_SIZE_V6 (3 * SZ_1M) /* 3MB */
+#define MFC_VERSION_V6 0x61
+#define MFC_NUM_PORTS_V6 1
+
+#endif /* _REGS_FIMV_V6_H */
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v7.h b/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
new file mode 100644
index 000000000..9f220769d
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
@@ -0,0 +1,60 @@
+/*
+ * Register definition file for Samsung MFC V7.x Interface (FIMV) driver
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGS_MFC_V7_H
+#define _REGS_MFC_V7_H
+
+#include "regs-mfc-v6.h"
+
+/* Additional features of v7 */
+#define S5P_FIMV_CODEC_VP8_ENC_V7 25
+
+/* Additional registers for v7 */
+#define S5P_FIMV_E_SOURCE_FIRST_ADDR_V7 0xf9e0
+#define S5P_FIMV_E_SOURCE_SECOND_ADDR_V7 0xf9e4
+#define S5P_FIMV_E_SOURCE_THIRD_ADDR_V7 0xf9e8
+#define S5P_FIMV_E_SOURCE_FIRST_STRIDE_V7 0xf9ec
+#define S5P_FIMV_E_SOURCE_SECOND_STRIDE_V7 0xf9f0
+#define S5P_FIMV_E_SOURCE_THIRD_STRIDE_V7 0xf9f4
+
+#define S5P_FIMV_E_ENCODED_SOURCE_FIRST_ADDR_V7 0xfa70
+#define S5P_FIMV_E_ENCODED_SOURCE_SECOND_ADDR_V7 0xfa74
+
+#define S5P_FIMV_E_VP8_OPTIONS_V7 0xfdb0
+#define S5P_FIMV_E_VP8_FILTER_OPTIONS_V7 0xfdb4
+#define S5P_FIMV_E_VP8_GOLDEN_FRAME_OPTION_V7 0xfdb8
+#define S5P_FIMV_E_VP8_NUM_T_LAYER_V7 0xfdc4
+
+/* MFCv7 variant defines */
+#define MAX_FW_SIZE_V7 (SZ_512K) /* 512KB */
+#define MAX_CPB_SIZE_V7 (3 * SZ_1M) /* 3MB */
+#define MFC_VERSION_V7 0x72
+#define MFC_NUM_PORTS_V7 1
+
+#define MFC_LUMA_PAD_BYTES_V7 256
+#define MFC_CHROMA_PAD_BYTES_V7 128
+
+/* MFCv7 Context buffer sizes */
+#define MFC_CTX_BUF_SIZE_V7 (30 * SZ_1K) /* 30KB */
+#define MFC_H264_DEC_CTX_BUF_SIZE_V7 (2 * SZ_1M) /* 2MB */
+#define MFC_OTHER_DEC_CTX_BUF_SIZE_V7 (20 * SZ_1K) /* 20KB */
+#define MFC_H264_ENC_CTX_BUF_SIZE_V7 (100 * SZ_1K) /* 100KB */
+#define MFC_OTHER_ENC_CTX_BUF_SIZE_V7 (10 * SZ_1K) /* 10KB */
+
+/* Buffer size defines */
+#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V7(w, h) \
+ (SZ_1M + ((w) * 144) + (8192 * (h)) + 49216)
+
+#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V7(w, h) \
+ (((w) * 48) + 8192 + ((((w) + 1) / 2) * 128) + 144 + \
+ ((((((w) * 16) * ((h) * 16)) * 3) / 2) * 4))
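+/*
+ * Reading the arithmetic with w/h presumably in macroblocks: the final
+ * term is four times the size of a 4:2:0 frame in bytes,
+ * (w * 16) * (h * 16) * 3 / 2 * 4, added to the smaller w-dependent terms.
+ */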
+
+#endif /*_REGS_MFC_V7_H*/
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
new file mode 100644
index 000000000..bd639ae71
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
@@ -0,0 +1,126 @@
+/*
+ * Register definition file for Samsung MFC V8.x Interface (FIMV) driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGS_MFC_V8_H
+#define _REGS_MFC_V8_H
+
+#include <linux/sizes.h>
+#include "regs-mfc-v7.h"
+
+/* Additional registers for v8 */
+#define S5P_FIMV_D_MVC_NUM_VIEWS_V8 0xf104
+#define S5P_FIMV_D_MIN_SCRATCH_BUFFER_SIZE_V8 0xf108
+#define S5P_FIMV_D_FIRST_PLANE_DPB_SIZE_V8 0xf144
+#define S5P_FIMV_D_SECOND_PLANE_DPB_SIZE_V8 0xf148
+#define S5P_FIMV_D_MV_BUFFER_SIZE_V8 0xf150
+
+#define S5P_FIMV_D_FIRST_PLANE_DPB_STRIDE_SIZE_V8 0xf138
+#define S5P_FIMV_D_SECOND_PLANE_DPB_STRIDE_SIZE_V8 0xf13c
+
+#define S5P_FIMV_D_FIRST_PLANE_DPB_V8 0xf160
+#define S5P_FIMV_D_SECOND_PLANE_DPB_V8 0xf260
+#define S5P_FIMV_D_MV_BUFFER_V8 0xf460
+
+#define S5P_FIMV_D_NUM_MV_V8 0xf134
+#define S5P_FIMV_D_INIT_BUFFER_OPTIONS_V8 0xf154
+
+#define S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V8 0xf560
+#define S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V8 0xf564
+
+#define S5P_FIMV_D_CPB_BUFFER_ADDR_V8 0xf5b0
+#define S5P_FIMV_D_CPB_BUFFER_SIZE_V8 0xf5b4
+#define S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V8 0xf5bc
+#define S5P_FIMV_D_CPB_BUFFER_OFFSET_V8 0xf5c0
+#define S5P_FIMV_D_SLICE_IF_ENABLE_V8 0xf5c4
+#define S5P_FIMV_D_STREAM_DATA_SIZE_V8 0xf5d0
+
+/* Display information register */
+#define S5P_FIMV_D_DISPLAY_FRAME_WIDTH_V8 0xf600
+#define S5P_FIMV_D_DISPLAY_FRAME_HEIGHT_V8 0xf604
+
+/* Display status */
+#define S5P_FIMV_D_DISPLAY_STATUS_V8 0xf608
+
+#define S5P_FIMV_D_DISPLAY_FIRST_PLANE_ADDR_V8 0xf60c
+#define S5P_FIMV_D_DISPLAY_SECOND_PLANE_ADDR_V8 0xf610
+
+#define S5P_FIMV_D_DISPLAY_FRAME_TYPE_V8 0xf618
+#define S5P_FIMV_D_DISPLAY_CROP_INFO1_V8 0xf61c
+#define S5P_FIMV_D_DISPLAY_CROP_INFO2_V8 0xf620
+#define S5P_FIMV_D_DISPLAY_PICTURE_PROFILE_V8 0xf624
+
+/* Decoded picture information register */
+#define S5P_FIMV_D_DECODED_STATUS_V8 0xf644
+#define S5P_FIMV_D_DECODED_FIRST_PLANE_ADDR_V8 0xf648
+#define S5P_FIMV_D_DECODED_SECOND_PLANE_ADDR_V8 0xf64c
+#define S5P_FIMV_D_DECODED_THIRD_PLANE_ADDR_V8 0xf650
+#define S5P_FIMV_D_DECODED_FRAME_TYPE_V8 0xf654
+#define S5P_FIMV_D_DECODED_NAL_SIZE_V8 0xf664
+
+/* Returned value register for specific setting */
+#define S5P_FIMV_D_RET_PICTURE_TAG_TOP_V8 0xf674
+#define S5P_FIMV_D_RET_PICTURE_TAG_BOT_V8 0xf678
+#define S5P_FIMV_D_MVC_VIEW_ID_V8 0xf6d8
+
+/* SEI related information */
+#define S5P_FIMV_D_FRAME_PACK_SEI_AVAIL_V8 0xf6dc
+
+/* Encoder Registers */
+#define S5P_FIMV_E_FIXED_PICTURE_QP_V8 0xf794
+#define S5P_FIMV_E_RC_CONFIG_V8 0xf798
+#define S5P_FIMV_E_RC_QP_BOUND_V8 0xf79c
+#define S5P_FIMV_E_RC_RPARAM_V8 0xf7a4
+#define S5P_FIMV_E_MB_RC_CONFIG_V8 0xf7a8
+#define S5P_FIMV_E_PADDING_CTRL_V8 0xf7ac
+#define S5P_FIMV_E_MV_HOR_RANGE_V8 0xf7b4
+#define S5P_FIMV_E_MV_VER_RANGE_V8 0xf7b8
+
+#define S5P_FIMV_E_VBV_BUFFER_SIZE_V8 0xf78c
+#define S5P_FIMV_E_VBV_INIT_DELAY_V8 0xf790
+#define S5P_FIMV_E_MIN_SCRATCH_BUFFER_SIZE_V8 0xf894
+
+#define S5P_FIMV_E_ASPECT_RATIO_V8 0xfb4c
+#define S5P_FIMV_E_EXTENDED_SAR_V8 0xfb50
+#define S5P_FIMV_E_H264_OPTIONS_V8 0xfb54
+
+/* MFCv8 Context buffer sizes */
+#define MFC_CTX_BUF_SIZE_V8 (36 * SZ_1K) /* 36KB */
+#define MFC_H264_DEC_CTX_BUF_SIZE_V8 (2 * SZ_1M) /* 2MB */
+#define MFC_OTHER_DEC_CTX_BUF_SIZE_V8 (20 * SZ_1K) /* 20KB */
+#define MFC_H264_ENC_CTX_BUF_SIZE_V8 (100 * SZ_1K) /* 100KB */
+#define MFC_OTHER_ENC_CTX_BUF_SIZE_V8 (10 * SZ_1K) /* 10KB */
+
+/* Buffer size defines */
+#define S5P_FIMV_TMV_BUFFER_SIZE_V8(w, h) (((w) + 1) * ((h) + 1) * 8)
+
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V8(w, h) (((w) * 704) + 2176)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V8(w, h) \
+ (((w) * 576 + (h) * 128) + 4128)
+
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V8(w, h) \
+ (((w) * 592) + 2336)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V8(w, h) \
+ (((w) * 576) + 10512 + \
+ ((((((w) * 16) * ((h) * 16)) * 3) / 2) * 4))
+#define S5P_FIMV_ME_BUFFER_SIZE_V8(imw, imh, mbw, mbh) \
+ ((DIV_ROUND_UP((mbw * 16), 64) * DIV_ROUND_UP((mbh * 16), 64) * 256) \
+ + (DIV_ROUND_UP((mbw) * (mbh), 32) * 16))
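+/*
+ * The first term allocates 256 bytes per 64x64-pixel tile of the frame
+ * (mbw/mbh are macroblock counts, hence the * 16), the second 16 bytes per
+ * 32 macroblocks; e.g. 1920x1088 (120x68 MBs) gives 30 * 17 * 256 +
+ * DIV_ROUND_UP(8160, 32) * 16 = 134640 bytes.
+ */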
+
+/* Buffer alignment defines */
+#define S5P_FIMV_D_ALIGN_PLANE_SIZE_V8 64
+
+/* MFCv8 variant defines */
+#define MAX_FW_SIZE_V8 (SZ_512K) /* 512KB */
+#define MAX_CPB_SIZE_V8 (3 * SZ_1M) /* 3MB */
+#define MFC_VERSION_V8 0x80
+#define MFC_NUM_PORTS_V8 1
+
+#endif /*_REGS_MFC_V8_H*/
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
new file mode 100644
index 000000000..57b7e0be0
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -0,0 +1,462 @@
+/*
+ * Register definition file for Samsung MFC V5.1 Interface (FIMV) driver
+ *
+ * Kamil Debski, Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _REGS_FIMV_H
+#define _REGS_FIMV_H
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
+#define S5P_FIMV_REG_SIZE (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
+#define S5P_FIMV_REG_COUNT ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
+
+/* Number of bits that the buffer address should be shifted for particular
+ * MFC buffers. */
+#define S5P_FIMV_START_ADDR 0x0000
+#define S5P_FIMV_END_ADDR 0xe008
+
+#define S5P_FIMV_SW_RESET 0x0000
+#define S5P_FIMV_RISC_HOST_INT 0x0008
+
+/* Command from HOST to RISC */
+#define S5P_FIMV_HOST2RISC_CMD 0x0030
+#define S5P_FIMV_HOST2RISC_ARG1 0x0034
+#define S5P_FIMV_HOST2RISC_ARG2 0x0038
+#define S5P_FIMV_HOST2RISC_ARG3 0x003c
+#define S5P_FIMV_HOST2RISC_ARG4 0x0040
+
+/* Command from RISC to HOST */
+#define S5P_FIMV_RISC2HOST_CMD 0x0044
+#define S5P_FIMV_RISC2HOST_CMD_MASK 0x1FFFF
+#define S5P_FIMV_RISC2HOST_ARG1 0x0048
+#define S5P_FIMV_RISC2HOST_ARG2 0x004c
+#define S5P_FIMV_RISC2HOST_ARG3 0x0050
+#define S5P_FIMV_RISC2HOST_ARG4 0x0054
+
+#define S5P_FIMV_FW_VERSION 0x0058
+#define S5P_FIMV_SYS_MEM_SZ 0x005c
+#define S5P_FIMV_FW_STATUS 0x0080
+
+/* Memory controller register */
+#define S5P_FIMV_MC_DRAMBASE_ADR_A 0x0508
+#define S5P_FIMV_MC_DRAMBASE_ADR_B 0x050c
+#define S5P_FIMV_MC_STATUS 0x0510
+
+/* Common register */
+#define S5P_FIMV_COMMON_BASE_A 0x0600
+#define S5P_FIMV_COMMON_BASE_B 0x0700
+
+/* Decoder */
+#define S5P_FIMV_DEC_CHROMA_ADR (S5P_FIMV_COMMON_BASE_A)
+#define S5P_FIMV_DEC_LUMA_ADR (S5P_FIMV_COMMON_BASE_B)
+
+/* H.264 decoding */
+#define S5P_FIMV_H264_VERT_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+ /* vertical neighbor motion vector */
+#define S5P_FIMV_H264_NB_IP_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+ /* neighbor pixels for intra pred */
+#define S5P_FIMV_H264_MV_ADR (S5P_FIMV_COMMON_BASE_B + 0x80)
+ /* H264 motion vector */
+
+/* MPEG4 decoding */
+#define S5P_FIMV_MPEG4_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+ /* neighbor AC/DC coeff. */
+#define S5P_FIMV_MPEG4_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+ /* upper neighbor motion vector */
+#define S5P_FIMV_MPEG4_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+ /* subseq. anchor motion vector */
+#define S5P_FIMV_MPEG4_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+ /* overlap transform line */
+#define S5P_FIMV_MPEG4_SP_ADR (S5P_FIMV_COMMON_BASE_A + 0xa8)
+ /* syntax parser */
+
+/* H.263 decoding */
+#define S5P_FIMV_H263_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_H263_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_H263_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_H263_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+
+/* VC-1 decoding */
+#define S5P_FIMV_VC1_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_VC1_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_VC1_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_VC1_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+#define S5P_FIMV_VC1_BITPLANE3_ADR (S5P_FIMV_COMMON_BASE_A + 0x9c)
+ /* bitplane3 */
+#define S5P_FIMV_VC1_BITPLANE2_ADR (S5P_FIMV_COMMON_BASE_A + 0xa0)
+ /* bitplane2 */
+#define S5P_FIMV_VC1_BITPLANE1_ADR (S5P_FIMV_COMMON_BASE_A + 0xa4)
+ /* bitplane1 */
+
+/* Encoder */
+#define S5P_FIMV_ENC_REF0_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x1c)
+#define S5P_FIMV_ENC_REF1_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x20)
+ /* reconstructed luma */
+#define S5P_FIMV_ENC_REF0_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B)
+#define S5P_FIMV_ENC_REF1_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x04)
+ /* reconstructed chroma */
+#define S5P_FIMV_ENC_REF2_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x10)
+#define S5P_FIMV_ENC_REF2_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x08)
+#define S5P_FIMV_ENC_REF3_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x14)
+#define S5P_FIMV_ENC_REF3_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x0c)
+
+/* H.264 encoding */
+#define S5P_FIMV_H264_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_H264_NBOR_INFO_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* entropy engine's neighbor info. */
+#define S5P_FIMV_H264_UP_INTRA_MD_ADR (S5P_FIMV_COMMON_BASE_A + 0x08)
+ /* upper intra MD */
+#define S5P_FIMV_H264_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
+ /* direct cozero flag */
+#define S5P_FIMV_H264_UP_INTRA_PRED_ADR (S5P_FIMV_COMMON_BASE_B + 0x40)
+ /* upper intra PRED */
+
+/* H.263 encoding */
+#define S5P_FIMV_H263_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_H263_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* upper Q coeff. */
+
+/* MPEG4 encoding */
+#define S5P_FIMV_MPEG4_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_MPEG4_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* upper Q coeff. */
+#define S5P_FIMV_MPEG4_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
+ /* direct cozero flag */
+
+#define S5P_FIMV_ENC_REF_B_LUMA_ADR 0x062c /* ref B Luma addr */
+#define S5P_FIMV_ENC_REF_B_CHROMA_ADR 0x0630 /* ref B Chroma addr */
+
+#define S5P_FIMV_ENC_CUR_LUMA_ADR 0x0718 /* current Luma addr */
+#define S5P_FIMV_ENC_CUR_CHROMA_ADR 0x071C /* current Chroma addr */
+
+/* Codec common register */
+#define S5P_FIMV_ENC_HSIZE_PX 0x0818 /* frame width at encoder */
+#define S5P_FIMV_ENC_VSIZE_PX 0x081c /* frame height at encoder */
+#define S5P_FIMV_ENC_PROFILE 0x0830 /* profile register */
+#define S5P_FIMV_ENC_PROFILE_H264_MAIN 0
+#define S5P_FIMV_ENC_PROFILE_H264_HIGH 1
+#define S5P_FIMV_ENC_PROFILE_H264_BASELINE 2
+#define S5P_FIMV_ENC_PROFILE_H264_CONSTRAINED_BASELINE 3
+#define S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE 0
+#define S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE 1
+#define S5P_FIMV_ENC_PIC_STRUCT 0x083c /* picture field/frame flag */
+#define S5P_FIMV_ENC_LF_CTRL 0x0848 /* loop filter control */
+#define S5P_FIMV_ENC_ALPHA_OFF 0x084c /* loop filter alpha offset */
+#define S5P_FIMV_ENC_BETA_OFF 0x0850 /* loop filter beta offset */
+#define S5P_FIMV_MR_BUSIF_CTRL 0x0854 /* hidden, bus interface ctrl */
+#define S5P_FIMV_ENC_PXL_CACHE_CTRL 0x0a00 /* pixel cache control */
+
+/* Channel & stream interface register */
+#define S5P_FIMV_SI_RTN_CHID 0x2000 /* Return CH inst ID register */
+#define S5P_FIMV_SI_CH0_INST_ID 0x2040 /* codec instance ID */
+#define S5P_FIMV_SI_CH1_INST_ID 0x2080 /* codec instance ID */
+/* Decoder */
+#define S5P_FIMV_SI_VRESOL 0x2004 /* vertical res of decoder */
+#define S5P_FIMV_SI_HRESOL 0x2008 /* horizontal res of decoder */
+#define S5P_FIMV_SI_BUF_NUMBER 0x200c /* number of frames in the
+ decoded pic */
+#define S5P_FIMV_SI_DISPLAY_Y_ADR 0x2010 /* luma addr of displayed pic */
+#define S5P_FIMV_SI_DISPLAY_C_ADR 0x2014 /* chroma addr of displayed pic */
+
+#define S5P_FIMV_SI_CONSUMED_BYTES 0x2018 /* Consumed number of bytes to
+ decode a frame */
+#define S5P_FIMV_SI_DISPLAY_STATUS 0x201c /* status of decoded picture */
+
+#define S5P_FIMV_SI_DECODE_Y_ADR 0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR 0x2028 /* chroma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS 0x202c /* status of decoded picture */
+
+#define S5P_FIMV_SI_CH0_SB_ST_ADR 0x2044 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH0_SB_FRM_SIZE 0x2048 /* size of stream buf */
+#define S5P_FIMV_SI_CH0_DESC_ADR 0x204c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH0_CPB_SIZE 0x2058 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH0_DESC_SIZE 0x205c /* max size of descriptor buf */
+
+#define S5P_FIMV_SI_CH1_SB_ST_ADR 0x2084 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH1_SB_FRM_SIZE 0x2088 /* size of stream buf */
+#define S5P_FIMV_SI_CH1_DESC_ADR 0x208c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH1_CPB_SIZE 0x2098 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH1_DESC_SIZE 0x209c /* max size of descriptor buf */
+
+#define S5P_FIMV_CRC_LUMA0 0x2030 /* luma crc data per frame
+ (top field) */
+#define S5P_FIMV_CRC_CHROMA0 0x2034 /* chroma crc data per frame
+ (top field) */
+#define S5P_FIMV_CRC_LUMA1 0x2038 /* luma crc data per bottom
+ field */
+#define S5P_FIMV_CRC_CHROMA1 0x203c /* chroma crc data per bottom
+ field */
+
+/* Display status */
+#define S5P_FIMV_DEC_STATUS_DECODING_ONLY 0
+#define S5P_FIMV_DEC_STATUS_DECODING_DISPLAY 1
+#define S5P_FIMV_DEC_STATUS_DISPLAY_ONLY 2
+#define S5P_FIMV_DEC_STATUS_DECODING_EMPTY 3
+#define S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK 7
+#define S5P_FIMV_DEC_STATUS_PROGRESSIVE (0<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE (1<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE_MASK (1<<3)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_TWO (0<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_FOUR (1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_MASK (1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_GENERATED (1<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_NOT_GENERATED (0<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_MASK (1<<5)
+
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_MASK (3<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_INC (1<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_DEC (2<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT 4
+
+/* Decode frame address */
+#define S5P_FIMV_DECODE_Y_ADR 0x2024
+#define S5P_FIMV_DECODE_C_ADR 0x2028
+
+/* Decoded frame type */
+#define S5P_FIMV_DECODE_FRAME_TYPE 0x2020
+#define S5P_FIMV_DECODE_FRAME_MASK 7
+
+#define S5P_FIMV_DECODE_FRAME_SKIPPED 0
+#define S5P_FIMV_DECODE_FRAME_I_FRAME 1
+#define S5P_FIMV_DECODE_FRAME_P_FRAME 2
+#define S5P_FIMV_DECODE_FRAME_B_FRAME 3
+#define S5P_FIMV_DECODE_FRAME_OTHER_FRAME 4
+
+/* Sizes of buffers required for decoding */
+#define S5P_FIMV_DEC_NB_IP_SIZE (32 * 1024)
+#define S5P_FIMV_DEC_VERT_NB_MV_SIZE (16 * 1024)
+#define S5P_FIMV_DEC_NB_DCAC_SIZE (16 * 1024)
+#define S5P_FIMV_DEC_UPNB_MV_SIZE (68 * 1024)
+#define S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE (136 * 1024)
+#define S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE (32 * 1024)
+#define S5P_FIMV_DEC_VC1_BITPLANE_SIZE (2 * 1024)
+#define S5P_FIMV_DEC_STX_PARSER_SIZE (68 * 1024)
+
+#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
+#define S5P_FIMV_ENC_BUF_ALIGN (8 * 1024)
+#define S5P_FIMV_NV12M_HALIGN 16
+#define S5P_FIMV_NV12M_LVALIGN 16
+#define S5P_FIMV_NV12M_CVALIGN 8
+#define S5P_FIMV_NV12MT_HALIGN 128
+#define S5P_FIMV_NV12MT_VALIGN 32
+#define S5P_FIMV_NV12M_SALIGN 2048
+#define S5P_FIMV_NV12MT_SALIGN 8192
+
+/* Sizes of buffers required for encoding */
+#define S5P_FIMV_ENC_UPMV_SIZE 0x10000
+#define S5P_FIMV_ENC_COLFLG_SIZE 0x10000
+#define S5P_FIMV_ENC_INTRAMD_SIZE 0x10000
+#define S5P_FIMV_ENC_INTRAPRED_SIZE 0x4000
+#define S5P_FIMV_ENC_NBORINFO_SIZE 0x10000
+#define S5P_FIMV_ENC_ACDCCOEF_SIZE 0x10000
+
+/* Encoder */
+#define S5P_FIMV_ENC_SI_STRM_SIZE 0x2004 /* stream size */
+#define S5P_FIMV_ENC_SI_PIC_CNT 0x2008 /* picture count */
+#define S5P_FIMV_ENC_SI_WRITE_PTR 0x200c /* write pointer */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE 0x2010 /* slice type(I/P/B/IDR) */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_NON_CODED 0
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_I 1
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_P 2
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_B 3
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_SKIPPED 4
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_OTHERS 5
+#define S5P_FIMV_ENCODED_Y_ADDR 0x2014 /* the addr of the encoded
+ luma pic */
+#define S5P_FIMV_ENCODED_C_ADDR 0x2018 /* the addr of the encoded
+ chroma pic */
+
+#define S5P_FIMV_ENC_SI_CH0_SB_ADR 0x2044 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_SB_SIZE 0x204c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR 0x2050 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH0_CUR_C_ADR 0x2054 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH0_FRAME_INS 0x2058 /* frame insertion */
+
+#define S5P_FIMV_ENC_SI_CH1_SB_ADR 0x2084 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_SB_SIZE 0x208c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_CUR_Y_ADR 0x2090 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH1_CUR_C_ADR 0x2094 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH1_FRAME_INS 0x2098 /* frame insertion */
+
+#define S5P_FIMV_ENC_PIC_TYPE_CTRL 0xc504 /* pic type level control */
+#define S5P_FIMV_ENC_B_RECON_WRITE_ON 0xc508 /* B frame recon write ctrl */
+#define S5P_FIMV_ENC_MSLICE_CTRL 0xc50c /* multi slice control */
+#define S5P_FIMV_ENC_MSLICE_MB 0xc510 /* number of MBs in one slice */
+#define S5P_FIMV_ENC_MSLICE_BIT 0xc514 /* bit count for one slice */
+#define S5P_FIMV_ENC_CIR_CTRL 0xc518 /* number of intra refresh MB */
+#define S5P_FIMV_ENC_MAP_FOR_CUR 0xc51c /* linear or tiled mode */
+#define S5P_FIMV_ENC_PADDING_CTRL 0xc520 /* padding control */
+
+#define S5P_FIMV_ENC_RC_CONFIG 0xc5a0 /* RC config */
+#define S5P_FIMV_ENC_RC_BIT_RATE 0xc5a8 /* bit rate */
+#define S5P_FIMV_ENC_RC_QBOUND 0xc5ac /* max/min QP */
+#define S5P_FIMV_ENC_RC_RPARA 0xc5b0 /* rate control reaction coeff */
+#define S5P_FIMV_ENC_RC_MB_CTRL 0xc5b4 /* MB adaptive scaling */
+
+/* Encoder for H264 only */
+#define S5P_FIMV_ENC_H264_ENTROPY_MODE 0xd004 /* CAVLC or CABAC */
+#define S5P_FIMV_ENC_H264_ALPHA_OFF 0xd008 /* loop filter alpha offset */
+#define S5P_FIMV_ENC_H264_BETA_OFF 0xd00c /* loop filter beta offset */
+#define S5P_FIMV_ENC_H264_NUM_OF_REF 0xd010 /* number of reference for P/B */
+#define S5P_FIMV_ENC_H264_TRANS_FLAG 0xd034 /* 8x8 transform flag in PPS &
+ high profile */
+
+#define S5P_FIMV_ENC_RC_FRAME_RATE 0xd0d0 /* frame rate */
+
+/* Encoder for MPEG4 only */
+#define S5P_FIMV_ENC_MPEG4_QUART_PXL 0xe008 /* qpel interpolation ctrl */
+
+/* Additional */
+#define S5P_FIMV_SI_CH0_DPB_CONF_CTRL 0x2068 /* DPB Config Control Register */
+#define S5P_FIMV_SLICE_INT_MASK 1
+#define S5P_FIMV_SLICE_INT_SHIFT 31
+#define S5P_FIMV_DDELAY_ENA_SHIFT 30
+#define S5P_FIMV_DDELAY_VAL_MASK 0xff
+#define S5P_FIMV_DDELAY_VAL_SHIFT 16
+#define S5P_FIMV_DPB_COUNT_MASK 0xffff
+#define S5P_FIMV_DPB_FLUSH_MASK 1
+#define S5P_FIMV_DPB_FLUSH_SHIFT 14
+
+
+#define S5P_FIMV_SI_CH0_RELEASE_BUF 0x2060 /* DPB release buffer register */
+#define S5P_FIMV_SI_CH0_HOST_WR_ADR 0x2064 /* address of shared memory */
+
+/* Codec numbers */
+#define S5P_FIMV_CODEC_NONE -1
+
+#define S5P_FIMV_CODEC_H264_DEC 0
+#define S5P_FIMV_CODEC_VC1_DEC 1
+#define S5P_FIMV_CODEC_MPEG4_DEC 2
+#define S5P_FIMV_CODEC_MPEG2_DEC 3
+#define S5P_FIMV_CODEC_H263_DEC 4
+#define S5P_FIMV_CODEC_VC1RCV_DEC 5
+
+#define S5P_FIMV_CODEC_H264_ENC 16
+#define S5P_FIMV_CODEC_MPEG4_ENC 17
+#define S5P_FIMV_CODEC_H263_ENC 18
+
+/* Channel Control Register */
+#define S5P_FIMV_CH_SEQ_HEADER 1
+#define S5P_FIMV_CH_FRAME_START 2
+#define S5P_FIMV_CH_LAST_FRAME 3
+#define S5P_FIMV_CH_INIT_BUFS 4
+#define S5P_FIMV_CH_FRAME_START_REALLOC 5
+#define S5P_FIMV_CH_MASK 7
+#define S5P_FIMV_CH_SHIFT 16
+
+
+/* Host to RISC command */
+#define S5P_FIMV_H2R_CMD_EMPTY 0
+#define S5P_FIMV_H2R_CMD_OPEN_INSTANCE 1
+#define S5P_FIMV_H2R_CMD_CLOSE_INSTANCE 2
+#define S5P_FIMV_H2R_CMD_SYS_INIT 3
+#define S5P_FIMV_H2R_CMD_FLUSH 4
+#define S5P_FIMV_H2R_CMD_SLEEP 5
+#define S5P_FIMV_H2R_CMD_WAKEUP 6
+
+#define S5P_FIMV_R2H_CMD_EMPTY 0
+#define S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET 1
+#define S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET 2
+#define S5P_FIMV_R2H_CMD_RSV_RET 3
+#define S5P_FIMV_R2H_CMD_SEQ_DONE_RET 4
+#define S5P_FIMV_R2H_CMD_FRAME_DONE_RET 5
+#define S5P_FIMV_R2H_CMD_SLICE_DONE_RET 6
+#define S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET 7
+#define S5P_FIMV_R2H_CMD_SYS_INIT_RET 8
+#define S5P_FIMV_R2H_CMD_FW_STATUS_RET 9
+#define S5P_FIMV_R2H_CMD_SLEEP_RET 10
+#define S5P_FIMV_R2H_CMD_WAKEUP_RET 11
+#define S5P_FIMV_R2H_CMD_FLUSH_RET 12
+#define S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET 15
+#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
+#define S5P_FIMV_R2H_CMD_ERR_RET 32
+
+/* Dummy definition for MFCv6 compatibility */
+#define S5P_FIMV_CODEC_H264_MVC_DEC -1
+#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1
+#define S5P_FIMV_MFC_RESET -1
+#define S5P_FIMV_RISC_ON -1
+#define S5P_FIMV_RISC_BASE_ADDRESS -1
+#define S5P_FIMV_CODEC_VP8_DEC -1
+#define S5P_FIMV_REG_CLEAR_BEGIN 0
+#define S5P_FIMV_REG_CLEAR_COUNT 0
+
+/* Error handling defines */
+#define S5P_FIMV_ERR_NO_VALID_SEQ_HDR 67
+#define S5P_FIMV_ERR_INCOMPLETE_FRAME 124
+#define S5P_FIMV_ERR_TIMEOUT 140
+#define S5P_FIMV_ERR_WARNINGS_START 145
+#define S5P_FIMV_ERR_DEC_MASK 0xFFFF
+#define S5P_FIMV_ERR_DEC_SHIFT 0
+#define S5P_FIMV_ERR_DSPL_MASK 0xFFFF0000
+#define S5P_FIMV_ERR_DSPL_SHIFT 16
+
+/* Shared memory registers' offsets */
+
+/* An offset of the start position in the stream when
+ * the start position is not aligned */
+#define S5P_FIMV_SHARED_CROP_INFO_H 0x0020
+#define S5P_FIMV_SHARED_CROP_LEFT_MASK 0xFFFF
+#define S5P_FIMV_SHARED_CROP_LEFT_SHIFT 0
+#define S5P_FIMV_SHARED_CROP_RIGHT_MASK 0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_RIGHT_SHIFT 16
+#define S5P_FIMV_SHARED_CROP_INFO_V 0x0024
+#define S5P_FIMV_SHARED_CROP_TOP_MASK 0xFFFF
+#define S5P_FIMV_SHARED_CROP_TOP_SHIFT 0
+#define S5P_FIMV_SHARED_CROP_BOTTOM_MASK 0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT 16
+#define S5P_FIMV_SHARED_SET_FRAME_TAG 0x0004
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_TOP 0x0008
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_BOT 0x000C
+#define S5P_FIMV_SHARED_START_BYTE_NUM 0x0018
+#define S5P_FIMV_SHARED_RC_VOP_TIMING 0x0030
+#define S5P_FIMV_SHARED_LUMA_DPB_SIZE 0x0064
+#define S5P_FIMV_SHARED_CHROMA_DPB_SIZE 0x0068
+#define S5P_FIMV_SHARED_MV_SIZE 0x006C
+#define S5P_FIMV_SHARED_PIC_TIME_TOP 0x0010
+#define S5P_FIMV_SHARED_PIC_TIME_BOTTOM 0x0014
+#define S5P_FIMV_SHARED_EXT_ENC_CONTROL 0x0028
+#define S5P_FIMV_SHARED_P_B_FRAME_QP 0x0070
+#define S5P_FIMV_SHARED_ASPECT_RATIO_IDC 0x0074
+#define S5P_FIMV_SHARED_EXTENDED_SAR 0x0078
+#define S5P_FIMV_SHARED_H264_I_PERIOD 0x009C
+#define S5P_FIMV_SHARED_RC_CONTROL_CONFIG 0x00A0
+#define S5P_FIMV_SHARED_DISP_FRAME_TYPE_SHIFT 2
+
+/* Offset used by the hardware to store addresses */
+#define MFC_OFFSET_SHIFT 11
+
+#define FIRMWARE_ALIGN (128 * SZ_1K) /* 128KB */
+#define MFC_H264_CTX_BUF_SIZE (600 * SZ_1K) /* 600KB per H264 instance */
+#define MFC_CTX_BUF_SIZE (10 * SZ_1K) /* 10KB per instance */
+#define DESC_BUF_SIZE (128 * SZ_1K) /* 128KB for DESC buffer */
+#define SHARED_BUF_SIZE (8 * SZ_1K) /* 8KB for shared buffer */
+
+#define DEF_CPB_SIZE (256 * SZ_1K) /* 256KB */
+#define MAX_CPB_SIZE (4 * SZ_1M) /* 4MB */
+#define MAX_FW_SIZE (384 * SZ_1K)
+
+#define MFC_VERSION 0x51
+#define MFC_NUM_PORTS 2
+
+#define S5P_FIMV_SHARED_FRAME_PACK_SEI_AVAIL 0x16C
+#define S5P_FIMV_SHARED_FRAME_PACK_ARRGMENT_ID 0x170
+#define S5P_FIMV_SHARED_FRAME_PACK_SEI_INFO 0x174
+#define S5P_FIMV_SHARED_FRAME_PACK_GRID_POS 0x178
+
+/* Values for resolution change in display status */
+#define S5P_FIMV_RES_INCREASE 1
+#define S5P_FIMV_RES_DECREASE 2
+
+#endif /* _REGS_FIMV_H */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
new file mode 100644
index 000000000..0fc101bc5
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -0,0 +1,1677 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.1
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-event.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <media/videobuf2-v4l2.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_iommu.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_pm.h"
+
+#define S5P_MFC_DEC_NAME "s5p-mfc-dec"
+#define S5P_MFC_ENC_NAME "s5p-mfc-enc"
+
+int mfc_debug_level;
+module_param_named(debug, mfc_debug_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
+
+static char *mfc_mem_size;
+module_param_named(mem, mfc_mem_size, charp, 0644);
+MODULE_PARM_DESC(mem, "Preallocated memory size for the firmware and context buffers");
+
+/* Helper functions for interrupt processing */
+
+/* Remove from hw execution round robin */
+void clear_work_bit(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ spin_lock(&dev->condlock);
+ __clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+}
+
+/* Add to hw execution round robin */
+void set_work_bit(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ spin_lock(&dev->condlock);
+ __set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+}
+
+/* Remove from hw execution round robin */
+void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->condlock, flags);
+ __clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+}
+
+/* Add to hw execution round robin */
+void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->condlock, flags);
+ __set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+}
+
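+/* Pick the next context to run on the hardware (simple round robin);
+ * returns -EAGAIN if no context has work pending */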
+int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
+{
+ unsigned long flags;
+ int ctx;
+
+ spin_lock_irqsave(&dev->condlock, flags);
+ ctx = dev->curr_ctx;
+ do {
+ ctx = (ctx + 1) % MFC_NUM_CONTEXTS;
+ if (ctx == dev->curr_ctx) {
+ if (!test_bit(ctx, &dev->ctx_work_bits))
+ ctx = -EAGAIN;
+ break;
+ }
+ } while (!test_bit(ctx, &dev->ctx_work_bits));
+ spin_unlock_irqrestore(&dev->condlock, flags);
+
+ return ctx;
+}
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
+ unsigned int err)
+{
+ ctx->int_cond = 1;
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ wake_up(&ctx->queue);
+}
+
+/* Wake up device wait_queue */
+static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
+ unsigned int err)
+{
+ dev->int_cond = 1;
+ dev->int_type = reason;
+ dev->int_err = err;
+ wake_up(&dev->queue);
+}
+
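+/* Return all buffers queued on the given list to vb2 in the error state */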
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
+{
+ struct s5p_mfc_buf *b;
+ int i;
+
+ while (!list_empty(lh)) {
+ b = list_entry(lh->next, struct s5p_mfc_buf, list);
+ for (i = 0; i < b->b->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
+ vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
+ list_del(&b->list);
+ }
+}
+
+static void s5p_mfc_watchdog(struct timer_list *t)
+{
+ struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
+
+ if (test_bit(0, &dev->hw_lock))
+ atomic_inc(&dev->watchdog_cnt);
+ if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
+ /* This means that hw is busy and no interrupts were
+ * generated by hw for N consecutive runs of this
+ * watchdog timer. This usually indicates a serious hw
+ * error. Now it is time to kill all instances and
+ * reset the MFC. */
+ mfc_err("Time out during waiting for HW\n");
+ schedule_work(&dev->watchdog_work);
+ }
+ dev->watchdog_timer.expires = jiffies +
+ msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+ add_timer(&dev->watchdog_timer);
+}
+
+static void s5p_mfc_watchdog_worker(struct work_struct *work)
+{
+ struct s5p_mfc_dev *dev;
+ struct s5p_mfc_ctx *ctx;
+ unsigned long flags;
+ int mutex_locked;
+ int i, ret;
+
+ dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
+
+ mfc_err("Driver timeout error handling\n");
+ /* Lock the mutex that protects open and release.
+ * This is necessary as they may load and unload firmware. */
+ mutex_locked = mutex_trylock(&dev->mfc_mutex);
+ if (!mutex_locked)
+ mfc_err("Error: some instance may be closing/opening\n");
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ s5p_mfc_clock_off();
+
+ for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
+ ctx = dev->ctx[i];
+ if (!ctx)
+ continue;
+ ctx->state = MFCINST_ERROR;
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ clear_work_bit(ctx);
+ wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
+ }
+ clear_bit(0, &dev->hw_lock);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* De-init MFC */
+ s5p_mfc_deinit_hw(dev);
+
+ /* Double check if there is at least one instance running.
+ * If no instance is in memory then no firmware should be present */
+ if (dev->num_inst > 0) {
+ ret = s5p_mfc_load_firmware(dev);
+ if (ret) {
+ mfc_err("Failed to reload FW\n");
+ goto unlock;
+ }
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_init_hw(dev);
+ s5p_mfc_clock_off();
+ if (ret)
+ mfc_err("Failed to reinit FW\n");
+ }
+unlock:
+ if (mutex_locked)
+ mutex_unlock(&dev->mfc_mutex);
+}
+
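+/* The decoder has no more frames to output: return all queued destination
+ * buffers, marking them with the EOS and LAST flags */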
+static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_buf *dst_buf;
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ ctx->state = MFCINST_FINISHED;
+ ctx->sequence++;
+ while (!list_empty(&ctx->dst_queue)) {
+ dst_buf = list_entry(ctx->dst_queue.next,
+ struct s5p_mfc_buf, list);
+ mfc_debug(2, "Cleaning up buffer: %d\n",
+ dst_buf->b->vb2_buf.index);
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1, 0);
+ list_del(&dst_buf->list);
+ dst_buf->flags |= MFC_BUF_FLAG_EOS;
+ ctx->dst_queue_cnt--;
+ dst_buf->b->sequence = (ctx->sequence++);
+
+ if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
+ s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
+ dst_buf->b->field = V4L2_FIELD_NONE;
+ else
+ dst_buf->b->field = V4L2_FIELD_INTERLACED;
+ dst_buf->b->flags |= V4L2_BUF_FLAG_LAST;
+
+ ctx->dec_dst_flag &= ~(1 << dst_buf->b->vb2_buf.index);
+ vb2_buffer_done(&dst_buf->b->vb2_buf, VB2_BUF_STATE_DONE);
+ }
+}
+
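+/* Propagate the timestamp, timecode and frame type flags from the consumed
+ * source buffer to the destination buffer that holds the decoded frame */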
+static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_buf, *src_buf;
+ u32 dec_y_addr;
+ unsigned int frame_type;
+
+ /* Make sure we actually have a new frame before continuing. */
+ frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
+ if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
+ return;
+ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
+
+ /* Copy timestamp / timecode from decoded src to dst and set
+ appropriate flags. */
+ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
+ if (addr == dec_y_addr) {
+ dst_buf->b->timecode = src_buf->b->timecode;
+ dst_buf->b->vb2_buf.timestamp =
+ src_buf->b->vb2_buf.timestamp;
+ dst_buf->b->flags &=
+ ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->b->flags |=
+ src_buf->b->flags
+ & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ switch (frame_type) {
+ case S5P_FIMV_DECODE_FRAME_I_FRAME:
+ dst_buf->b->flags |=
+ V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case S5P_FIMV_DECODE_FRAME_P_FRAME:
+ dst_buf->b->flags |=
+ V4L2_BUF_FLAG_PFRAME;
+ break;
+ case S5P_FIMV_DECODE_FRAME_B_FRAME:
+ dst_buf->b->flags |=
+ V4L2_BUF_FLAG_BFRAME;
+ break;
+ default:
+ /* Don't know how to handle
+ S5P_FIMV_DECODE_FRAME_OTHER_FRAME. */
+ mfc_debug(2, "Unexpected frame type: %d\n",
+ frame_type);
+ }
+ break;
+ }
+ }
+}
+
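+/* A new frame is ready for display: find the matching destination buffer
+ * and return it to userspace */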
+static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_buf;
+ u32 dspl_y_addr;
+ unsigned int frame_type;
+
+ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
+ if (IS_MFCV6_PLUS(dev))
+ frame_type = s5p_mfc_hw_call(dev->mfc_ops,
+ get_disp_frame_type, ctx);
+ else
+ frame_type = s5p_mfc_hw_call(dev->mfc_ops,
+ get_dec_frame_type, dev);
+
+ /* If frame is same as previous then skip and do not dequeue */
+ if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
+ if (!ctx->after_packed_pb)
+ ctx->sequence++;
+ ctx->after_packed_pb = 0;
+ return;
+ }
+ ctx->sequence++;
+ /* The MFC returns the address of the buffer, now we have to
+ * check which videobuf it corresponds to */
+ list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
+ /* Check if this is the buffer we're looking for */
+ if (addr == dspl_y_addr) {
+ list_del(&dst_buf->list);
+ ctx->dst_queue_cnt--;
+ dst_buf->b->sequence = ctx->sequence;
+ if (s5p_mfc_hw_call(dev->mfc_ops,
+ get_pic_type_top, ctx) ==
+ s5p_mfc_hw_call(dev->mfc_ops,
+ get_pic_type_bot, ctx))
+ dst_buf->b->field = V4L2_FIELD_NONE;
+ else
+ dst_buf->b->field =
+ V4L2_FIELD_INTERLACED;
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0,
+ ctx->luma_size);
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1,
+ ctx->chroma_size);
+ clear_bit(dst_buf->b->vb2_buf.index,
+ &ctx->dec_dst_flag);
+
+ vb2_buffer_done(&dst_buf->b->vb2_buf, err ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ break;
+ }
+ }
+}
+
+/* Handle frame decoding interrupt */
+static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dst_frame_status;
+ unsigned int dec_frame_status;
+ struct s5p_mfc_buf *src_buf;
+ unsigned int res_change;
+
+ dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
+ & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
+ dec_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dec_status, dev)
+ & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
+ res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
+ & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK)
+ >> S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT;
+ mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
+ if (ctx->state == MFCINST_RES_CHANGE_INIT)
+ ctx->state = MFCINST_RES_CHANGE_FLUSH;
+ if (res_change == S5P_FIMV_RES_INCREASE ||
+ res_change == S5P_FIMV_RES_DECREASE) {
+ ctx->state = MFCINST_RES_CHANGE_INIT;
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ wake_up_ctx(ctx, reason, err);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+ s5p_mfc_clock_off();
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ return;
+ }
+ if (ctx->dpb_flush_flag)
+ ctx->dpb_flush_flag = 0;
+
+ /* All frames remaining in the buffer have been extracted */
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
+ if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ s5p_mfc_handle_frame_all_extracted(ctx);
+ ctx->state = MFCINST_RES_CHANGE_END;
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+
+ goto leave_handle_frame;
+ } else {
+ s5p_mfc_handle_frame_all_extracted(ctx);
+ }
+ }
+
+ if (dec_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY)
+ s5p_mfc_handle_frame_copy_time(ctx);
+
+ /* A frame has been decoded and is in the buffer */
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
+ dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
+ s5p_mfc_handle_frame_new(ctx, err);
+ } else {
+ mfc_debug(2, "No frame decode\n");
+ }
+ /* Mark source buffer as complete */
+ if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
+ && !list_empty(&ctx->src_queue)) {
+ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
+ get_consumed_stream, dev);
+ if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
+ ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
+ ctx->consumed_stream + STUFF_BYTE <
+ src_buf->b->vb2_buf.planes[0].bytesused) {
+ /* Run MFC again on the same buffer */
+ mfc_debug(2, "Running again the same buffer\n");
+ ctx->after_packed_pb = 1;
+ } else {
+ mfc_debug(2, "MFC needs next buffer\n");
+ ctx->consumed_stream = 0;
+ if (src_buf->flags & MFC_BUF_FLAG_EOS)
+ ctx->state = MFCINST_FINISHING;
+ list_del(&src_buf->list);
+ ctx->src_queue_cnt--;
+ if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
+ vb2_buffer_done(&src_buf->b->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ else
+ vb2_buffer_done(&src_buf->b->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ }
+ }
+leave_handle_frame:
+ if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
+ || ctx->dst_queue_cnt < ctx->pb_count)
+ clear_work_bit(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ wake_up_ctx(ctx, reason, err);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+ s5p_mfc_clock_off();
+ /* If suspending, wake up the device and do not try_run again */
+ if (test_bit(0, &dev->enter_suspend))
+ wake_up_dev(dev, reason, err);
+ else
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+}
+
+/* Error handling for interrupt */
+static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
+{
+ mfc_err("Interrupt Error: %08x\n", err);
+
+ if (ctx) {
+ /* Error recovery is dependent on the state of context */
+ switch (ctx->state) {
+ case MFCINST_RES_CHANGE_INIT:
+ case MFCINST_RES_CHANGE_FLUSH:
+ case MFCINST_RES_CHANGE_END:
+ case MFCINST_FINISHING:
+ case MFCINST_FINISHED:
+ case MFCINST_RUNNING:
+ /* It is highly probable that an error occurred
+ * while decoding a frame */
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ /* Mark all dst buffers as having an error */
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ /* Mark all src buffers as having an error */
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ wake_up_ctx(ctx, reason, err);
+ break;
+ default:
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ wake_up_ctx(ctx, reason, err);
+ break;
+ }
+ }
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_clock_off();
+ wake_up_dev(dev, reason, err);
+}
+
+/* Header parsing interrupt handling */
+static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev;
+
+ if (!ctx)
+ return;
+ dev = ctx->dev;
+ if (ctx->c_ops->post_seq_start) {
+ if (ctx->c_ops->post_seq_start(ctx))
+ mfc_err("post_seq_start() failed\n");
+ } else {
+ ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width,
+ dev);
+ ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height,
+ dev);
+
+ s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
+
+ ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
+ dev);
+ ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
+ dev);
+ if (FW_HAS_E_MIN_SCRATCH_BUF(dev))
+ ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
+ get_min_scratch_buf_size, dev);
+ if (ctx->img_width == 0 || ctx->img_height == 0)
+ ctx->state = MFCINST_ERROR;
+ else
+ ctx->state = MFCINST_HEAD_PARSED;
+
+ if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
+ ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) &&
+ !list_empty(&ctx->src_queue)) {
+ struct s5p_mfc_buf *src_buf;
+ src_buf = list_entry(ctx->src_queue.next,
+ struct s5p_mfc_buf, list);
+ if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
+ dev) <
+ src_buf->b->vb2_buf.planes[0].bytesused)
+ ctx->head_processed = 0;
+ else
+ ctx->head_processed = 1;
+ } else {
+ ctx->head_processed = 1;
+ }
+ }
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ clear_work_bit(ctx);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+ s5p_mfc_clock_off();
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ wake_up_ctx(ctx, reason, err);
+}
+
+/* Buffer initialization interrupt handling */
+static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_buf *src_buf;
+ struct s5p_mfc_dev *dev;
+
+ if (!ctx)
+ return;
+ dev = ctx->dev;
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ ctx->int_cond = 1;
+ clear_work_bit(ctx);
+ if (err == 0) {
+ ctx->state = MFCINST_RUNNING;
+ if (!ctx->dpb_flush_flag && ctx->head_processed) {
+ if (!list_empty(&ctx->src_queue)) {
+ src_buf = list_entry(ctx->src_queue.next,
+ struct s5p_mfc_buf, list);
+ list_del(&src_buf->list);
+ ctx->src_queue_cnt--;
+ vb2_buffer_done(&src_buf->b->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ }
+ } else {
+ ctx->dpb_flush_flag = 0;
+ }
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+
+ s5p_mfc_clock_off();
+
+ wake_up(&ctx->queue);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ } else {
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+
+ s5p_mfc_clock_off();
+
+ wake_up(&ctx->queue);
+ }
+}
+
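+/* Encoding has finished: mark the instance as finished and return the next
+ * destination buffer to userspace */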
+static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *mb_entry;
+
+ mfc_debug(2, "Stream completed\n");
+
+ ctx->state = MFCINST_FINISHED;
+
+ if (!list_empty(&ctx->dst_queue)) {
+ mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
+ list);
+ list_del(&mb_entry->list);
+ ctx->dst_queue_cnt--;
+ vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0);
+ vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ clear_work_bit(ctx);
+
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+
+ s5p_mfc_clock_off();
+ wake_up(&ctx->queue);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+}
+
+/* Interrupt processing */
+static irqreturn_t s5p_mfc_irq(int irq, void *priv)
+{
+ struct s5p_mfc_dev *dev = priv;
+ struct s5p_mfc_ctx *ctx;
+ unsigned int reason;
+ unsigned int err;
+
+ mfc_debug_enter();
+ /* Reset the timeout watchdog */
+ atomic_set(&dev->watchdog_cnt, 0);
+ spin_lock(&dev->irqlock);
+ ctx = dev->ctx[dev->curr_ctx];
+ /* Get the reason of interrupt and the error code */
+ reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
+ err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
+ mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
+ switch (reason) {
+ case S5P_MFC_R2H_CMD_ERR_RET:
+ /* An error has occurred */
+ if (ctx->state == MFCINST_RUNNING &&
+ (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
+ dev->warn_start ||
+ err == S5P_FIMV_ERR_NO_VALID_SEQ_HDR ||
+ err == S5P_FIMV_ERR_INCOMPLETE_FRAME ||
+ err == S5P_FIMV_ERR_TIMEOUT))
+ s5p_mfc_handle_frame(ctx, reason, err);
+ else
+ s5p_mfc_handle_error(dev, ctx, reason, err);
+ clear_bit(0, &dev->enter_suspend);
+ break;
+
+ case S5P_MFC_R2H_CMD_SLICE_DONE_RET:
+ case S5P_MFC_R2H_CMD_FIELD_DONE_RET:
+ case S5P_MFC_R2H_CMD_FRAME_DONE_RET:
+ if (ctx->c_ops->post_frame_start) {
+ if (ctx->c_ops->post_frame_start(ctx))
+ mfc_err("post_frame_start() failed\n");
+
+ if (ctx->state == MFCINST_FINISHING &&
+ list_empty(&ctx->ref_queue)) {
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_handle_stream_complete(ctx);
+ break;
+ }
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+ s5p_mfc_clock_off();
+ wake_up_ctx(ctx, reason, err);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ } else {
+ s5p_mfc_handle_frame(ctx, reason, err);
+ }
+ break;
+
+ case S5P_MFC_R2H_CMD_SEQ_DONE_RET:
+ s5p_mfc_handle_seq_done(ctx, reason, err);
+ break;
+
+ case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
+ ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
+ ctx->state = MFCINST_GOT_INST;
+ goto irq_cleanup_hw;
+
+ case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
+ ctx->inst_no = MFC_NO_INSTANCE_SET;
+ ctx->state = MFCINST_FREE;
+ goto irq_cleanup_hw;
+
+ case S5P_MFC_R2H_CMD_SYS_INIT_RET:
+ case S5P_MFC_R2H_CMD_FW_STATUS_RET:
+ case S5P_MFC_R2H_CMD_SLEEP_RET:
+ case S5P_MFC_R2H_CMD_WAKEUP_RET:
+ if (ctx)
+ clear_work_bit(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ clear_bit(0, &dev->hw_lock);
+ clear_bit(0, &dev->enter_suspend);
+ wake_up_dev(dev, reason, err);
+ break;
+
+ case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
+ s5p_mfc_handle_init_buffers(ctx, reason, err);
+ break;
+
+ case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ s5p_mfc_handle_stream_complete(ctx);
+ break;
+
+ case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
+ ctx->state = MFCINST_RUNNING;
+ goto irq_cleanup_hw;
+
+ default:
+ mfc_debug(2, "Unknown int reason\n");
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ }
+ spin_unlock(&dev->irqlock);
+ mfc_debug_leave();
+ return IRQ_HANDLED;
+irq_cleanup_hw:
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ ctx->int_cond = 1;
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ mfc_err("Failed to unlock hw\n");
+
+ s5p_mfc_clock_off();
+ clear_work_bit(ctx);
+ wake_up(&ctx->queue);
+
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ spin_unlock(&dev->irqlock);
+ mfc_debug(2, "Exit via irq_cleanup_hw\n");
+ return IRQ_HANDLED;
+}
+
+/* Open an MFC node */
+static int s5p_mfc_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = NULL;
+ struct vb2_queue *q;
+ int ret = 0;
+
+ mfc_debug_enter();
+ if (mutex_lock_interruptible(&dev->mfc_mutex))
+ return -ERESTARTSYS;
+ dev->num_inst++; /* It is guarded by mfc_mutex in vfd */
+ /* Allocate memory for context */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ init_waitqueue_head(&ctx->queue);
+ v4l2_fh_init(&ctx->fh, vdev);
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->dev = dev;
+ INIT_LIST_HEAD(&ctx->src_queue);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->src_queue_cnt = 0;
+ ctx->dst_queue_cnt = 0;
+ /* Get context number */
+ ctx->num = 0;
+ while (dev->ctx[ctx->num]) {
+ ctx->num++;
+ if (ctx->num >= MFC_NUM_CONTEXTS) {
+ mfc_debug(2, "Too many open contexts\n");
+ ret = -EBUSY;
+ goto err_no_ctx;
+ }
+ }
+ /* Mark context as idle */
+ clear_work_bit_irqsave(ctx);
+ dev->ctx[ctx->num] = ctx;
+ if (vdev == dev->vfd_dec) {
+ ctx->type = MFCINST_DECODER;
+ ctx->c_ops = get_dec_codec_ops();
+ s5p_mfc_dec_init(ctx);
+ /* Setup ctrl handler */
+ ret = s5p_mfc_dec_ctrls_setup(ctx);
+ if (ret) {
+ mfc_err("Failed to setup mfc controls\n");
+ goto err_ctrls_setup;
+ }
+ } else if (vdev == dev->vfd_enc) {
+ ctx->type = MFCINST_ENCODER;
+ ctx->c_ops = get_enc_codec_ops();
+ /* only for encoder */
+ INIT_LIST_HEAD(&ctx->ref_queue);
+ ctx->ref_queue_cnt = 0;
+ s5p_mfc_enc_init(ctx);
+ /* Setup ctrl handler */
+ ret = s5p_mfc_enc_ctrls_setup(ctx);
+ if (ret) {
+ mfc_err("Failed to setup mfc controls\n");
+ goto err_ctrls_setup;
+ }
+ } else {
+ ret = -ENOENT;
+ goto err_bad_node;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ ctx->inst_no = MFC_NO_INSTANCE_SET;
+ /* Load firmware if this is the first instance */
+ if (dev->num_inst == 1) {
+ dev->watchdog_timer.expires = jiffies +
+ msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+ add_timer(&dev->watchdog_timer);
+ ret = s5p_mfc_power_on();
+ if (ret < 0) {
+ mfc_err("power on failed\n");
+ goto err_pwr_enable;
+ }
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_load_firmware(dev);
+ if (ret) {
+ s5p_mfc_clock_off();
+ goto err_load_fw;
+ }
+ /* Init the FW */
+ ret = s5p_mfc_init_hw(dev);
+ s5p_mfc_clock_off();
+ if (ret)
+ goto err_init_hw;
+ }
+ /* Init videobuf2 queue for CAPTURE */
+ q = &ctx->vq_dst;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->drv_priv = &ctx->fh;
+ q->lock = &dev->mfc_mutex;
+ if (vdev == dev->vfd_dec) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+ } else if (vdev == dev->vfd_enc) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+ ret = -ENOENT;
+ goto err_queue_init;
+ }
+ /*
+ * We'll do mostly sequential access, so sacrifice TLB efficiency for
+ * faster allocation.
+ */
+ q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ ret = vb2_queue_init(q);
+ if (ret) {
+ mfc_err("Failed to initialize videobuf2 queue(capture)\n");
+ goto err_queue_init;
+ }
+ /* Init videobuf2 queue for OUTPUT */
+ q = &ctx->vq_src;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ q->drv_priv = &ctx->fh;
+ q->lock = &dev->mfc_mutex;
+ if (vdev == dev->vfd_dec) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+ } else if (vdev == dev->vfd_enc) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+ ret = -ENOENT;
+ goto err_queue_init;
+ }
+ /* One way to indicate end-of-stream for MFC is to set the
+ * bytesused == 0. However by default videobuf2 handles bytesused
+ * equal to 0 as a special case and changes its value to the size
+ * of the buffer. Set the allow_zero_bytesused flag so that videobuf2
+ * will keep the value of bytesused intact.
+ */
+ q->allow_zero_bytesused = 1;
+
+ /*
+ * We'll do mostly sequential access, so sacrifice TLB efficiency for
+ * faster allocation.
+ */
+ q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ ret = vb2_queue_init(q);
+ if (ret) {
+ mfc_err("Failed to initialize videobuf2 queue(output)\n");
+ goto err_queue_init;
+ }
+ mutex_unlock(&dev->mfc_mutex);
+ mfc_debug_leave();
+ return ret;
+ /* Deinit when failure occurred */
+err_queue_init:
+ if (dev->num_inst == 1)
+ s5p_mfc_deinit_hw(dev);
+err_init_hw:
+err_load_fw:
+err_pwr_enable:
+ if (dev->num_inst == 1) {
+ if (s5p_mfc_power_off() < 0)
+ mfc_err("power off failed\n");
+ del_timer_sync(&dev->watchdog_timer);
+ }
+err_ctrls_setup:
+ s5p_mfc_dec_ctrls_delete(ctx);
+err_bad_node:
+ dev->ctx[ctx->num] = NULL;
+err_no_ctx:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+err_alloc:
+ dev->num_inst--;
+ mutex_unlock(&dev->mfc_mutex);
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Release MFC context */
+static int s5p_mfc_release(struct file *file)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ /* if dev is null, do cleanup that doesn't need dev */
+ mfc_debug_enter();
+ if (dev)
+ mutex_lock(&dev->mfc_mutex);
+ vb2_queue_release(&ctx->vq_src);
+ vb2_queue_release(&ctx->vq_dst);
+ if (dev) {
+ s5p_mfc_clock_on();
+
+ /* Mark context as idle */
+ clear_work_bit_irqsave(ctx);
+ /*
+ * If instance was initialised and not yet freed,
+ * return instance and free resources
+ */
+ if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
+ mfc_debug(2, "Has to free instance\n");
+ s5p_mfc_close_mfc_inst(dev, ctx);
+ }
+ /* hardware locking scheme */
+ if (dev->curr_ctx == ctx->num)
+ clear_bit(0, &dev->hw_lock);
+ dev->num_inst--;
+ if (dev->num_inst == 0) {
+ mfc_debug(2, "Last instance\n");
+ s5p_mfc_deinit_hw(dev);
+ del_timer_sync(&dev->watchdog_timer);
+ s5p_mfc_clock_off();
+ if (s5p_mfc_power_off() < 0)
+ mfc_err("Power off failed\n");
+ } else {
+ mfc_debug(2, "Shutting down clock\n");
+ s5p_mfc_clock_off();
+ }
+ }
+ if (dev)
+ dev->ctx[ctx->num] = NULL;
+ s5p_mfc_dec_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ /* vdev is gone if dev is null */
+ if (dev)
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mfc_debug_leave();
+ if (dev)
+ mutex_unlock(&dev->mfc_mutex);
+
+ return 0;
+}
+
+/* Poll */
+static __poll_t s5p_mfc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct vb2_queue *src_q, *dst_q;
+ struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
+ __poll_t rc = 0;
+ unsigned long flags;
+
+ mutex_lock(&dev->mfc_mutex);
+ src_q = &ctx->vq_src;
+ dst_q = &ctx->vq_dst;
+ /*
+ * There has to be at least one buffer queued on one of the queued_lists,
+ * which means it is either already in the driver or waiting for the
+ * driver to claim it and start processing.
+ */
+ if ((!src_q->streaming || list_empty(&src_q->queued_list))
+ && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
+ rc = EPOLLERR;
+ goto end;
+ }
+ mutex_unlock(&dev->mfc_mutex);
+ poll_wait(file, &ctx->fh.wait, wait);
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+ mutex_lock(&dev->mfc_mutex);
+ if (v4l2_event_pending(&ctx->fh))
+ rc |= EPOLLPRI;
+ spin_lock_irqsave(&src_q->done_lock, flags);
+ if (!list_empty(&src_q->done_list))
+ src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
+ || src_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= EPOLLOUT | EPOLLWRNORM;
+ spin_unlock_irqrestore(&src_q->done_lock, flags);
+ spin_lock_irqsave(&dst_q->done_lock, flags);
+ if (!list_empty(&dst_q->done_list))
+ dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
+ || dst_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
+end:
+ mutex_unlock(&dev->mfc_mutex);
+ return rc;
+}
+
+/* Mmap */
+static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ int ret;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ mfc_debug(2, "mmaping source\n");
+ ret = vb2_mmap(&ctx->vq_src, vma);
+ } else { /* capture */
+ mfc_debug(2, "mmaping destination\n");
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ ret = vb2_mmap(&ctx->vq_dst, vma);
+ }
+ return ret;
+}
+
+/* v4l2 ops */
+static const struct v4l2_file_operations s5p_mfc_fops = {
+ .owner = THIS_MODULE,
+ .open = s5p_mfc_open,
+ .release = s5p_mfc_release,
+ .poll = s5p_mfc_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s5p_mfc_mmap,
+};
+
+/* DMA memory related helper functions */
+static void s5p_mfc_memdev_release(struct device *dev)
+{
+ of_reserved_mem_device_release(dev);
+}
+
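+/* Create a virtual child device bound to the idx-th reserved memory region */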
+static struct device *s5p_mfc_alloc_memdev(struct device *dev,
+ const char *name, unsigned int idx)
+{
+ struct device *child;
+ int ret;
+
+ child = devm_kzalloc(dev, sizeof(*child), GFP_KERNEL);
+ if (!child)
+ return NULL;
+
+ device_initialize(child);
+ dev_set_name(child, "%s:%s", dev_name(dev), name);
+ child->parent = dev;
+ child->coherent_dma_mask = dev->coherent_dma_mask;
+ child->dma_mask = dev->dma_mask;
+ child->release = s5p_mfc_memdev_release;
+
+ if (device_add(child) == 0) {
+ ret = of_reserved_mem_device_init_by_idx(child, dev->of_node,
+ idx);
+ if (ret == 0)
+ return child;
+ device_del(child);
+ }
+
+ put_device(child);
+ return NULL;
+}
+
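+/* Two-port memory layout: create one virtual device per memory bank and
+ * derive the DMA base addresses used by the hardware */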
+static int s5p_mfc_configure_2port_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+ void *bank2_virt;
+ dma_addr_t bank2_dma_addr;
+ unsigned long align_size = 1 << MFC_BASE_ALIGN_ORDER;
+ int ret;
+
+ /*
+ * Create and initialize virtual devices for accessing
+ * reserved memory regions.
+ */
+ mfc_dev->mem_dev[BANK_L_CTX] = s5p_mfc_alloc_memdev(dev, "left",
+ BANK_L_CTX);
+ if (!mfc_dev->mem_dev[BANK_L_CTX])
+ return -ENODEV;
+ mfc_dev->mem_dev[BANK_R_CTX] = s5p_mfc_alloc_memdev(dev, "right",
+ BANK_R_CTX);
+ if (!mfc_dev->mem_dev[BANK_R_CTX]) {
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
+ return -ENODEV;
+ }
+
+ /* Allocate memory for firmware and initialize both banks addresses */
+ ret = s5p_mfc_alloc_firmware(mfc_dev);
+ if (ret) {
+ device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
+ return ret;
+ }
+
+ mfc_dev->dma_base[BANK_L_CTX] = mfc_dev->fw_buf.dma;
+
+ bank2_virt = dma_alloc_coherent(mfc_dev->mem_dev[BANK_R_CTX],
+ align_size, &bank2_dma_addr, GFP_KERNEL);
+ if (!bank2_virt) {
+ mfc_err("Allocating bank2 base failed\n");
+ s5p_mfc_release_firmware(mfc_dev);
+ device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
+ return -ENOMEM;
+ }
+
+ /* Valid buffers passed to MFC encoder with LAST_FRAME command
+ * should not have the address of bank2 - MFC will treat it as a null frame.
+ * To avoid such a situation we set the bank2 address below the pool address.
+ */
+ mfc_dev->dma_base[BANK_R_CTX] = bank2_dma_addr - align_size;
+
+ dma_free_coherent(mfc_dev->mem_dev[BANK_R_CTX], align_size, bank2_virt,
+ bank2_dma_addr);
+
+ vb2_dma_contig_set_max_seg_size(mfc_dev->mem_dev[BANK_L_CTX],
+ DMA_BIT_MASK(32));
+ vb2_dma_contig_set_max_seg_size(mfc_dev->mem_dev[BANK_R_CTX],
+ DMA_BIT_MASK(32));
+
+ return 0;
+}
+
+static void s5p_mfc_unconfigure_2port_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
+ device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
+ vb2_dma_contig_clear_max_seg_size(mfc_dev->mem_dev[BANK_L_CTX]);
+ vb2_dma_contig_clear_max_seg_size(mfc_dev->mem_dev[BANK_R_CTX]);
+}
+
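+/* Common memory layout: preallocate a single DMA-coherent pool, shared by
+ * the firmware and all context buffers and managed with a bitmap */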
+static int s5p_mfc_configure_common_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+ unsigned long mem_size = SZ_4M;
+ unsigned int bitmap_size;
+
+ if (IS_ENABLED(CONFIG_DMA_CMA) || exynos_is_iommu_available(dev))
+ mem_size = SZ_8M;
+
+ if (mfc_mem_size)
+ mem_size = memparse(mfc_mem_size, NULL);
+
+ bitmap_size = BITS_TO_LONGS(mem_size >> PAGE_SHIFT) * sizeof(long);
+
+ mfc_dev->mem_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mfc_dev->mem_bitmap)
+ return -ENOMEM;
+
+ mfc_dev->mem_virt = dma_alloc_coherent(dev, mem_size,
+ &mfc_dev->mem_base, GFP_KERNEL);
+ if (!mfc_dev->mem_virt) {
+ kfree(mfc_dev->mem_bitmap);
+ dev_err(dev, "failed to preallocate %ld MiB for the firmware and context buffers\n",
+ (mem_size / SZ_1M));
+ return -ENOMEM;
+ }
+ mfc_dev->mem_size = mem_size;
+ mfc_dev->dma_base[BANK_L_CTX] = mfc_dev->mem_base;
+ mfc_dev->dma_base[BANK_R_CTX] = mfc_dev->mem_base;
+
+ /*
+ * MFC hardware cannot handle 0 as a base address, so mark first 128K
+ * as used (to keep required base alignment) and adjust base address
+ */
+ if (mfc_dev->mem_base == (dma_addr_t)0) {
+ unsigned int offset = 1 << MFC_BASE_ALIGN_ORDER;
+
+ bitmap_set(mfc_dev->mem_bitmap, 0, offset >> PAGE_SHIFT);
+ mfc_dev->dma_base[BANK_L_CTX] += offset;
+ mfc_dev->dma_base[BANK_R_CTX] += offset;
+ }
+
+ /* Firmware allocation cannot fail in this case */
+ s5p_mfc_alloc_firmware(mfc_dev);
+
+ mfc_dev->mem_dev[BANK_L_CTX] = mfc_dev->mem_dev[BANK_R_CTX] = dev;
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ dev_info(dev, "preallocated %ld MiB buffer for the firmware and context buffers\n",
+ (mem_size / SZ_1M));
+
+ return 0;
+}
+
+static void s5p_mfc_unconfigure_common_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+
+ dma_free_coherent(dev, mfc_dev->mem_size, mfc_dev->mem_virt,
+ mfc_dev->mem_base);
+ kfree(mfc_dev->mem_bitmap);
+ vb2_dma_contig_clear_max_seg_size(dev);
+}
+
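+/* Select the memory layout: a common pool when an IOMMU is available or the
+ * hardware has a single port, two separate banks otherwise */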
+static int s5p_mfc_configure_dma_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+
+ if (exynos_is_iommu_available(dev) || !IS_TWOPORT(mfc_dev))
+ return s5p_mfc_configure_common_memory(mfc_dev);
+ else
+ return s5p_mfc_configure_2port_memory(mfc_dev);
+}
+
+static void s5p_mfc_unconfigure_dma_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+
+ s5p_mfc_release_firmware(mfc_dev);
+ if (exynos_is_iommu_available(dev) || !IS_TWOPORT(mfc_dev))
+ s5p_mfc_unconfigure_common_memory(mfc_dev);
+ else
+ s5p_mfc_unconfigure_2port_memory(mfc_dev);
+}
+
+/* MFC probe function */
+static int s5p_mfc_probe(struct platform_device *pdev)
+{
+ struct s5p_mfc_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret;
+
+ pr_debug("%s++\n", __func__);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->irqlock);
+ spin_lock_init(&dev->condlock);
+ dev->plat_dev = pdev;
+ if (!dev->plat_dev) {
+ mfc_err("No platform data specified\n");
+ return -ENODEV;
+ }
+
+ dev->variant = of_device_get_match_data(&pdev->dev);
+ if (!dev->variant) {
+ dev_err(&pdev->dev, "Failed to get device MFC hardware variant information\n");
+ return -ENOENT;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs_base))
+ return PTR_ERR(dev->regs_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return -ENOENT;
+ }
+ dev->irq = res->start;
+ ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
+ return ret;
+ }
+
+ ret = s5p_mfc_configure_dma_memory(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to configure DMA memory\n");
+ return ret;
+ }
+
+ ret = s5p_mfc_init_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get mfc clock source\n");
+ goto err_dma;
+ }
+
+ /*
+ * Loading the firmware fails if the fs isn't mounted yet. Try loading anyway;
+ * _open() will load it if it fails now. Ignore failure.
+ */
+ s5p_mfc_load_firmware(dev);
+
+ mutex_init(&dev->mfc_mutex);
+ init_waitqueue_head(&dev->queue);
+ dev->hw_lock = 0;
+ INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
+ atomic_set(&dev->watchdog_cnt, 0);
+ timer_setup(&dev->watchdog_timer, s5p_mfc_watchdog, 0);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto err_v4l2_dev_reg;
+
+ /* decoder */
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_dec_alloc;
+ }
+ vfd->fops = &s5p_mfc_fops;
+ vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
+ vfd->release = video_device_release;
+ vfd->lock = &dev->mfc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
+ dev->vfd_dec = vfd;
+ video_set_drvdata(vfd, dev);
+
+ /* encoder */
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_enc_alloc;
+ }
+ vfd->fops = &s5p_mfc_fops;
+ vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
+ vfd->release = video_device_release;
+ vfd->lock = &dev->mfc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
+ dev->vfd_enc = vfd;
+ video_set_drvdata(vfd, dev);
+ platform_set_drvdata(pdev, dev);
+
+ /* Initialize HW ops and commands based on MFC version */
+ s5p_mfc_init_hw_ops(dev);
+ s5p_mfc_init_hw_cmds(dev);
+ s5p_mfc_init_regs(dev);
+
+ /* Register decoder and encoder */
+ ret = video_register_device(dev->vfd_dec, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "decoder registered as /dev/video%d\n", dev->vfd_dec->num);
+
+ ret = video_register_device(dev->vfd_enc, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_enc_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "encoder registered as /dev/video%d\n", dev->vfd_enc->num);
+
+ pr_debug("%s--\n", __func__);
+ return 0;
+
+/* Deinit MFC if probe had failed */
+err_enc_reg:
+ video_unregister_device(dev->vfd_dec);
+err_dec_reg:
+ video_device_release(dev->vfd_enc);
+err_enc_alloc:
+ video_device_release(dev->vfd_dec);
+err_dec_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2_dev_reg:
+ s5p_mfc_final_pm(dev);
+err_dma:
+ s5p_mfc_unconfigure_dma_memory(dev);
+
+ pr_debug("%s-- with error\n", __func__);
+ return ret;
+
+}
+
+/* Remove the driver */
+static int s5p_mfc_remove(struct platform_device *pdev)
+{
+ struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
+ struct s5p_mfc_ctx *ctx;
+ int i;
+
+ v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
+
+ /*
+ * Clear the ctx dev pointer to avoid races between s5p_mfc_remove()
+ * and s5p_mfc_release(), and to prevent s5p_mfc_release() from
+ * accessing ctx->dev after s5p_mfc_remove() has run during unbind.
+ */
+ mutex_lock(&dev->mfc_mutex);
+ for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
+ ctx = dev->ctx[i];
+ if (!ctx)
+ continue;
+ /* clear ctx->dev */
+ ctx->dev = NULL;
+ }
+ mutex_unlock(&dev->mfc_mutex);
+
+ del_timer_sync(&dev->watchdog_timer);
+ flush_work(&dev->watchdog_work);
+
+ video_unregister_device(dev->vfd_enc);
+ video_unregister_device(dev->vfd_dec);
+ video_device_release(dev->vfd_enc);
+ video_device_release(dev->vfd_dec);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ s5p_mfc_unconfigure_dma_memory(dev);
+
+ s5p_mfc_final_pm(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int s5p_mfc_suspend(struct device *dev)
+{
+ struct s5p_mfc_dev *m_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (m_dev->num_inst == 0)
+ return 0;
+
+ if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
+ mfc_err("Error: going to suspend for a second time\n");
+ return -EIO;
+ }
+
+ /* Check if we're processing and, if so, wait until the hardware is done. */
+ while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
+ /* Try and lock the HW */
+ /* Wait on the interrupt waitqueue */
+ ret = wait_event_interruptible_timeout(m_dev->queue,
+ m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
+ if (ret == 0) {
+ mfc_err("Waiting for hardware to finish timed out\n");
+ clear_bit(0, &m_dev->enter_suspend);
+ return -EIO;
+ }
+ }
+
+ ret = s5p_mfc_sleep(m_dev);
+ if (ret) {
+ clear_bit(0, &m_dev->enter_suspend);
+ clear_bit(0, &m_dev->hw_lock);
+ }
+ return ret;
+}
+
+static int s5p_mfc_resume(struct device *dev)
+{
+ struct s5p_mfc_dev *m_dev = dev_get_drvdata(dev);
+
+ if (m_dev->num_inst == 0)
+ return 0;
+ return s5p_mfc_wakeup(m_dev);
+}
+#endif
+
+/* Power management */
+static const struct dev_pm_ops s5p_mfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
+};
+
+static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
+ .h264_ctx = MFC_H264_CTX_BUF_SIZE,
+ .non_h264_ctx = MFC_CTX_BUF_SIZE,
+ .dsc = DESC_BUF_SIZE,
+ .shm = SHARED_BUF_SIZE,
+};
+
+static struct s5p_mfc_buf_size buf_size_v5 = {
+ .fw = MAX_FW_SIZE,
+ .cpb = MAX_CPB_SIZE,
+ .priv = &mfc_buf_size_v5,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v5 = {
+ .version = MFC_VERSION,
+ .version_bit = MFC_V5_BIT,
+ .port_num = MFC_NUM_PORTS,
+ .buf_size = &buf_size_v5,
+ .fw_name[0] = "s5p-mfc.fw",
+ .clk_names = {"mfc", "sclk_mfc"},
+ .num_clocks = 2,
+ .use_clock_gating = true,
+};
+
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
+ .dev_ctx = MFC_CTX_BUF_SIZE_V6,
+ .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V6,
+ .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
+ .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V6,
+ .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
+};
+
+static struct s5p_mfc_buf_size buf_size_v6 = {
+ .fw = MAX_FW_SIZE_V6,
+ .cpb = MAX_CPB_SIZE_V6,
+ .priv = &mfc_buf_size_v6,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v6 = {
+ .version = MFC_VERSION_V6,
+ .version_bit = MFC_V6_BIT,
+ .port_num = MFC_NUM_PORTS_V6,
+ .buf_size = &buf_size_v6,
+ .fw_name[0] = "s5p-mfc-v6.fw",
+ /*
+ * v6-v2 firmware contains bug fixes and interface change
+ * for init buffer command
+ */
+ .fw_name[1] = "s5p-mfc-v6-v2.fw",
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
+};
+
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
+ .dev_ctx = MFC_CTX_BUF_SIZE_V7,
+ .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V7,
+ .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
+ .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V7,
+ .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
+};
+
+static struct s5p_mfc_buf_size buf_size_v7 = {
+ .fw = MAX_FW_SIZE_V7,
+ .cpb = MAX_CPB_SIZE_V7,
+ .priv = &mfc_buf_size_v7,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v7 = {
+ .version = MFC_VERSION_V7,
+ .version_bit = MFC_V7_BIT,
+ .port_num = MFC_NUM_PORTS_V7,
+ .buf_size = &buf_size_v7,
+ .fw_name[0] = "s5p-mfc-v7.fw",
+ .clk_names = {"mfc", "sclk_mfc"},
+ .num_clocks = 2,
+};
+
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
+ .dev_ctx = MFC_CTX_BUF_SIZE_V8,
+ .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V8,
+ .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V8,
+ .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V8,
+ .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V8,
+};
+
+static struct s5p_mfc_buf_size buf_size_v8 = {
+ .fw = MAX_FW_SIZE_V8,
+ .cpb = MAX_CPB_SIZE_V8,
+ .priv = &mfc_buf_size_v8,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v8 = {
+ .version = MFC_VERSION_V8,
+ .version_bit = MFC_V8_BIT,
+ .port_num = MFC_NUM_PORTS_V8,
+ .buf_size = &buf_size_v8,
+ .fw_name[0] = "s5p-mfc-v8.fw",
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
+ .version = MFC_VERSION_V8,
+ .version_bit = MFC_V8_BIT,
+ .port_num = MFC_NUM_PORTS_V8,
+ .buf_size = &buf_size_v8,
+ .fw_name[0] = "s5p-mfc-v8.fw",
+ .clk_names = {"pclk", "aclk", "aclk_xiu"},
+ .num_clocks = 3,
+};
+
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v10 = {
+ .dev_ctx = MFC_CTX_BUF_SIZE_V10,
+ .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V10,
+ .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V10,
+ .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V10,
+ .hevc_enc_ctx = MFC_HEVC_ENC_CTX_BUF_SIZE_V10,
+ .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V10,
+};
+
+static struct s5p_mfc_buf_size buf_size_v10 = {
+ .fw = MAX_FW_SIZE_V10,
+ .cpb = MAX_CPB_SIZE_V10,
+ .priv = &mfc_buf_size_v10,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v10 = {
+ .version = MFC_VERSION_V10,
+ .version_bit = MFC_V10_BIT,
+ .port_num = MFC_NUM_PORTS_V10,
+ .buf_size = &buf_size_v10,
+ .fw_name[0] = "s5p-mfc-v10.fw",
+};
+
+static const struct of_device_id exynos_mfc_match[] = {
+ {
+ .compatible = "samsung,mfc-v5",
+ .data = &mfc_drvdata_v5,
+ }, {
+ .compatible = "samsung,mfc-v6",
+ .data = &mfc_drvdata_v6,
+ }, {
+ .compatible = "samsung,mfc-v7",
+ .data = &mfc_drvdata_v7,
+ }, {
+ .compatible = "samsung,mfc-v8",
+ .data = &mfc_drvdata_v8,
+ }, {
+ .compatible = "samsung,exynos5433-mfc",
+ .data = &mfc_drvdata_v8_5433,
+ }, {
+ .compatible = "samsung,mfc-v10",
+ .data = &mfc_drvdata_v10,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_mfc_match);
+
+static struct platform_driver s5p_mfc_driver = {
+ .probe = s5p_mfc_probe,
+ .remove = s5p_mfc_remove,
+ .driver = {
+ .name = S5P_MFC_NAME,
+ .pm = &s5p_mfc_pm_ops,
+ .of_match_table = exynos_mfc_match,
+ },
+};
+
+module_platform_driver(s5p_mfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
new file mode 100644
index 000000000..242c033cf
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_cmd_v5.h"
+#include "s5p_mfc_cmd_v6.h"
+
+static struct s5p_mfc_hw_cmds *s5p_mfc_cmds;
+
+void s5p_mfc_init_hw_cmds(struct s5p_mfc_dev *dev)
+{
+ if (IS_MFCV6_PLUS(dev))
+ s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v6();
+ else
+ s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v5();
+
+ dev->mfc_cmds = s5p_mfc_cmds;
+}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
new file mode 100644
index 000000000..282e6c780
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
@@ -0,0 +1,35 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CMD_H_
+#define S5P_MFC_CMD_H_
+
+#include "s5p_mfc_common.h"
+
+#define MAX_H2R_ARG 4
+
+struct s5p_mfc_cmd_args {
+ unsigned int arg[MAX_H2R_ARG];
+};
+
+struct s5p_mfc_hw_cmds {
+ int (*cmd_host2risc)(struct s5p_mfc_dev *dev, int cmd,
+ struct s5p_mfc_cmd_args *args);
+ int (*sys_init_cmd)(struct s5p_mfc_dev *dev);
+ int (*sleep_cmd)(struct s5p_mfc_dev *dev);
+ int (*wakeup_cmd)(struct s5p_mfc_dev *dev);
+ int (*open_inst_cmd)(struct s5p_mfc_ctx *ctx);
+ int (*close_inst_cmd)(struct s5p_mfc_ctx *ctx);
+};
+
+void s5p_mfc_init_hw_cmds(struct s5p_mfc_dev *dev);
+#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
new file mode 100644
index 000000000..4c80bb424
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -0,0 +1,167 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_cmd_v5.h"
+
+/* This function is used to send a command to the MFC */
+static int s5p_mfc_cmd_host2risc_v5(struct s5p_mfc_dev *dev, int cmd,
+ struct s5p_mfc_cmd_args *args)
+{
+ int cur_cmd;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while waiting for hardware\n");
+ return -EIO;
+ }
+ cur_cmd = mfc_read(dev, S5P_FIMV_HOST2RISC_CMD);
+ } while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY);
+ mfc_write(dev, args->arg[0], S5P_FIMV_HOST2RISC_ARG1);
+ mfc_write(dev, args->arg[1], S5P_FIMV_HOST2RISC_ARG2);
+ mfc_write(dev, args->arg[2], S5P_FIMV_HOST2RISC_ARG3);
+ mfc_write(dev, args->arg[3], S5P_FIMV_HOST2RISC_ARG4);
+ /* Issue the command */
+ mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD);
+ return 0;
+}
+
+/* Initialize the MFC */
+static int s5p_mfc_sys_init_cmd_v5(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = dev->fw_buf.size;
+ return s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_SYS_INIT,
+ &h2r_args);
+}
+
+/* Suspend the MFC hardware */
+static int s5p_mfc_sleep_cmd_v5(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_SLEEP, &h2r_args);
+}
+
+/* Wake up the MFC hardware */
+static int s5p_mfc_wakeup_cmd_v5(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_WAKEUP,
+ &h2r_args);
+}
+
+
+static int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int ret;
+
+ /* Preparing decoding - getting instance number */
+ mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode);
+ dev->curr_ctx = ctx->num;
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ switch (ctx->codec_mode) {
+ case S5P_MFC_CODEC_H264_DEC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_H264_DEC;
+ break;
+ case S5P_MFC_CODEC_VC1_DEC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_VC1_DEC;
+ break;
+ case S5P_MFC_CODEC_MPEG4_DEC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_MPEG4_DEC;
+ break;
+ case S5P_MFC_CODEC_MPEG2_DEC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_MPEG2_DEC;
+ break;
+ case S5P_MFC_CODEC_H263_DEC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_H263_DEC;
+ break;
+ case S5P_MFC_CODEC_VC1RCV_DEC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_VC1RCV_DEC;
+ break;
+ case S5P_MFC_CODEC_H264_ENC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_H264_ENC;
+ break;
+ case S5P_MFC_CODEC_MPEG4_ENC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_MPEG4_ENC;
+ break;
+ case S5P_MFC_CODEC_H263_ENC:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_H263_ENC;
+ break;
+ default:
+ h2r_args.arg[0] = S5P_FIMV_CODEC_NONE;
+ }
+ h2r_args.arg[1] = 0; /* no crc & no pixelcache */
+ h2r_args.arg[2] = ctx->ctx.ofs;
+ h2r_args.arg[3] = ctx->ctx.size;
+ ret = s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE,
+ &h2r_args);
+ if (ret) {
+ mfc_err("Failed to create a new instance\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+static int s5p_mfc_close_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int ret;
+
+ if (ctx->state == MFCINST_FREE) {
+ mfc_err("Instance already returned\n");
+ ctx->state = MFCINST_ERROR;
+ return -EINVAL;
+ }
+ /* Closing decoding instance */
+ mfc_debug(2, "Returning instance number %d\n", ctx->inst_no);
+ dev->curr_ctx = ctx->num;
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = ctx->inst_no;
+ ret = s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_CLOSE_INSTANCE,
+ &h2r_args);
+ if (ret) {
+ mfc_err("Failed to return an instance\n");
+ ctx->state = MFCINST_ERROR;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Initialize cmd function pointers for MFC v5 */
+static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v5 = {
+ .cmd_host2risc = s5p_mfc_cmd_host2risc_v5,
+ .sys_init_cmd = s5p_mfc_sys_init_cmd_v5,
+ .sleep_cmd = s5p_mfc_sleep_cmd_v5,
+ .wakeup_cmd = s5p_mfc_wakeup_cmd_v5,
+ .open_inst_cmd = s5p_mfc_open_inst_cmd_v5,
+ .close_inst_cmd = s5p_mfc_close_inst_cmd_v5,
+};
+
+struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void)
+{
+ return &s5p_mfc_cmds_v5;
+}
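
Both back-ends populate the same struct s5p_mfc_hw_cmds, so the rest of the driver can stay version-agnostic and dispatch through the ops table. A minimal sketch of that selection follows, assuming the IS_MFCV6_PLUS() and s5p_mfc_hw_call() helpers defined in s5p_mfc_common.h later in this patch; example_select_cmds() is a hypothetical name and the block is illustrative only, not part of the driver sources above.

/* Hypothetical helper: pick the cmd ops for the detected MFC revision. */
static int example_select_cmds(struct s5p_mfc_dev *dev)
{
	if (IS_MFCV6_PLUS(dev))
		dev->mfc_cmds = s5p_mfc_init_hw_cmds_v6();
	else
		dev->mfc_cmds = s5p_mfc_init_hw_cmds_v5();

	/* From here on, commands are issued through the ops table. */
	return s5p_mfc_hw_call(dev->mfc_cmds, sys_init_cmd, dev);
}
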
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
new file mode 100644
index 000000000..6928a5514
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
@@ -0,0 +1,20 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CMD_V5_H_
+#define S5P_MFC_CMD_V5_H_
+
+#include "s5p_mfc_common.h"
+
+struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void);
+
+#endif /* S5P_MFC_CMD_V5_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
new file mode 100644
index 000000000..7521fceb6
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -0,0 +1,173 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "s5p_mfc_common.h"
+
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_cmd_v6.h"
+
+static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
+ struct s5p_mfc_cmd_args *args)
+{
+ mfc_debug(2, "Issue the command: %d\n", cmd);
+
+ /* Reset RISC2HOST command */
+ mfc_write(dev, 0x0, S5P_FIMV_RISC2HOST_CMD_V6);
+
+ /* Issue the command */
+ mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD_V6);
+ mfc_write(dev, 0x1, S5P_FIMV_HOST2RISC_INT_V6);
+
+ return 0;
+}
+
+static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+ struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
+
+ ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_dev_context_buffer, dev);
+ if (ret)
+ return ret;
+
+ mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
+ mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6,
+ &h2r_args);
+}
+
+static int s5p_mfc_sleep_cmd_v6(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6,
+ &h2r_args);
+}
+
+static int s5p_mfc_wakeup_cmd_v6(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6,
+ &h2r_args);
+}
+
+/* Open a new instance and get its number */
+static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int codec_type;
+
+ mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode);
+ dev->curr_ctx = ctx->num;
+ switch (ctx->codec_mode) {
+ case S5P_MFC_CODEC_H264_DEC:
+ codec_type = S5P_FIMV_CODEC_H264_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_H264_MVC_DEC:
+ codec_type = S5P_FIMV_CODEC_H264_MVC_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_VC1_DEC:
+ codec_type = S5P_FIMV_CODEC_VC1_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_MPEG4_DEC:
+ codec_type = S5P_FIMV_CODEC_MPEG4_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_MPEG2_DEC:
+ codec_type = S5P_FIMV_CODEC_MPEG2_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_H263_DEC:
+ codec_type = S5P_FIMV_CODEC_H263_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_VC1RCV_DEC:
+ codec_type = S5P_FIMV_CODEC_VC1RCV_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_VP8_DEC:
+ codec_type = S5P_FIMV_CODEC_VP8_DEC_V6;
+ break;
+ case S5P_MFC_CODEC_HEVC_DEC:
+ codec_type = S5P_FIMV_CODEC_HEVC_DEC;
+ break;
+ case S5P_MFC_CODEC_VP9_DEC:
+ codec_type = S5P_FIMV_CODEC_VP9_DEC;
+ break;
+ case S5P_MFC_CODEC_H264_ENC:
+ codec_type = S5P_FIMV_CODEC_H264_ENC_V6;
+ break;
+ case S5P_MFC_CODEC_H264_MVC_ENC:
+ codec_type = S5P_FIMV_CODEC_H264_MVC_ENC_V6;
+ break;
+ case S5P_MFC_CODEC_MPEG4_ENC:
+ codec_type = S5P_FIMV_CODEC_MPEG4_ENC_V6;
+ break;
+ case S5P_MFC_CODEC_H263_ENC:
+ codec_type = S5P_FIMV_CODEC_H263_ENC_V6;
+ break;
+ case S5P_MFC_CODEC_VP8_ENC:
+ codec_type = S5P_FIMV_CODEC_VP8_ENC_V7;
+ break;
+ case S5P_MFC_CODEC_HEVC_ENC:
+ codec_type = S5P_FIMV_CODEC_HEVC_ENC;
+ break;
+ default:
+ codec_type = S5P_FIMV_CODEC_NONE_V6;
+ }
+ mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6);
+ mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
+ mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
+ mfc_write(dev, 0, S5P_FIMV_D_CRC_CTRL_V6); /* no crc */
+
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6,
+ &h2r_args);
+}
+
+/* Close instance */
+static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int ret = 0;
+
+ dev->curr_ctx = ctx->num;
+ if (ctx->state != MFCINST_FREE) {
+ mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
+ ret = s5p_mfc_cmd_host2risc_v6(dev,
+ S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6,
+ &h2r_args);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Initialize cmd function pointers for MFC v6 */
+static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
+ .cmd_host2risc = s5p_mfc_cmd_host2risc_v6,
+ .sys_init_cmd = s5p_mfc_sys_init_cmd_v6,
+ .sleep_cmd = s5p_mfc_sleep_cmd_v6,
+ .wakeup_cmd = s5p_mfc_wakeup_cmd_v6,
+ .open_inst_cmd = s5p_mfc_open_inst_cmd_v6,
+ .close_inst_cmd = s5p_mfc_close_inst_cmd_v6,
+};
+
+struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void)
+{
+ return &s5p_mfc_cmds_v6;
+}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
new file mode 100644
index 000000000..b7a8e5783
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
@@ -0,0 +1,20 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CMD_V6_H_
+#define S5P_MFC_CMD_V6_H_
+
+#include "s5p_mfc_common.h"
+
+struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void);
+
+#endif /* S5P_MFC_CMD_V6_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
new file mode 100644
index 000000000..20442a9b9
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -0,0 +1,787 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.0
+ *
+ * This file contains definitions of enums and structs used by the codec
+ * driver.
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#ifndef S5P_MFC_COMMON_H_
+#define S5P_MFC_COMMON_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include "regs-mfc.h"
+#include "regs-mfc-v10.h"
+
+#define S5P_MFC_NAME "s5p-mfc"
+
+/* Definitions related to MFC memory */
+
+/* Offset base used to differentiate between CAPTURE and OUTPUT
+ * while mmapping */
+#define DST_QUEUE_OFF_BASE (1 << 30)
+
+#define BANK_L_CTX 0
+#define BANK_R_CTX 1
+#define BANK_CTX_NUM 2
+
+#define MFC_BANK1_ALIGN_ORDER 13
+#define MFC_BANK2_ALIGN_ORDER 13
+#define MFC_BASE_ALIGN_ORDER 17
+
+#define MFC_FW_MAX_VERSIONS 2
+
+#include <media/videobuf2-dma-contig.h>
+
+/* MFC definitions */
+#define MFC_MAX_EXTRA_DPB 5
+#define MFC_MAX_BUFFERS 32
+#define MFC_NUM_CONTEXTS 4
+/* Interrupt timeout */
+#define MFC_INT_TIMEOUT 2000
+/* Busy wait timeout */
+#define MFC_BW_TIMEOUT 500
+/* Watchdog interval */
+#define MFC_WATCHDOG_INTERVAL 1000
+/* After how many executions watchdog should assume lock up */
+#define MFC_WATCHDOG_CNT 10
+#define MFC_NO_INSTANCE_SET -1
+#define MFC_ENC_CAP_PLANE_COUNT 1
+#define MFC_ENC_OUT_PLANE_COUNT 2
+#define STUFF_BYTE 4
+#define MFC_MAX_CTRLS 128
+
+#define S5P_MFC_CODEC_NONE -1
+#define S5P_MFC_CODEC_H264_DEC 0
+#define S5P_MFC_CODEC_H264_MVC_DEC 1
+#define S5P_MFC_CODEC_VC1_DEC 2
+#define S5P_MFC_CODEC_MPEG4_DEC 3
+#define S5P_MFC_CODEC_MPEG2_DEC 4
+#define S5P_MFC_CODEC_H263_DEC 5
+#define S5P_MFC_CODEC_VC1RCV_DEC 6
+#define S5P_MFC_CODEC_VP8_DEC 7
+#define S5P_MFC_CODEC_HEVC_DEC 17
+#define S5P_MFC_CODEC_VP9_DEC 18
+
+#define S5P_MFC_CODEC_H264_ENC 20
+#define S5P_MFC_CODEC_H264_MVC_ENC 21
+#define S5P_MFC_CODEC_MPEG4_ENC 22
+#define S5P_MFC_CODEC_H263_ENC 23
+#define S5P_MFC_CODEC_VP8_ENC 24
+#define S5P_MFC_CODEC_HEVC_ENC 26
+
+#define S5P_MFC_R2H_CMD_EMPTY 0
+#define S5P_MFC_R2H_CMD_SYS_INIT_RET 1
+#define S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET 2
+#define S5P_MFC_R2H_CMD_SEQ_DONE_RET 3
+#define S5P_MFC_R2H_CMD_INIT_BUFFERS_RET 4
+#define S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET 6
+#define S5P_MFC_R2H_CMD_SLEEP_RET 7
+#define S5P_MFC_R2H_CMD_WAKEUP_RET 8
+#define S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET 9
+#define S5P_MFC_R2H_CMD_DPB_FLUSH_RET 10
+#define S5P_MFC_R2H_CMD_NAL_ABORT_RET 11
+#define S5P_MFC_R2H_CMD_FW_STATUS_RET 12
+#define S5P_MFC_R2H_CMD_FRAME_DONE_RET 13
+#define S5P_MFC_R2H_CMD_FIELD_DONE_RET 14
+#define S5P_MFC_R2H_CMD_SLICE_DONE_RET 15
+#define S5P_MFC_R2H_CMD_ENC_BUFFER_FUL_RET 16
+#define S5P_MFC_R2H_CMD_ERR_RET 32
+
+#define MFC_MAX_CLOCKS 4
+
+#define mfc_read(dev, offset) readl(dev->regs_base + (offset))
+#define mfc_write(dev, data, offset) writel((data), dev->regs_base + \
+ (offset))
+
+/**
+ * enum s5p_mfc_fmt_type - type of the pixelformat
+ */
+enum s5p_mfc_fmt_type {
+ MFC_FMT_DEC,
+ MFC_FMT_ENC,
+ MFC_FMT_RAW,
+};
+
+/**
+ * enum s5p_mfc_inst_type - The type of an MFC instance.
+ */
+enum s5p_mfc_inst_type {
+ MFCINST_INVALID,
+ MFCINST_DECODER,
+ MFCINST_ENCODER,
+};
+
+/**
+ * enum s5p_mfc_inst_state - The state of an MFC instance.
+ */
+enum s5p_mfc_inst_state {
+ MFCINST_FREE = 0,
+ MFCINST_INIT = 100,
+ MFCINST_GOT_INST,
+ MFCINST_HEAD_PARSED,
+ MFCINST_HEAD_PRODUCED,
+ MFCINST_BUFS_SET,
+ MFCINST_RUNNING,
+ MFCINST_FINISHING,
+ MFCINST_FINISHED,
+ MFCINST_RETURN_INST,
+ MFCINST_ERROR,
+ MFCINST_ABORT,
+ MFCINST_FLUSH,
+ MFCINST_RES_CHANGE_INIT,
+ MFCINST_RES_CHANGE_FLUSH,
+ MFCINST_RES_CHANGE_END,
+};
+
+/**
+ * enum s5p_mfc_queue_state - The state of buffer queue.
+ */
+enum s5p_mfc_queue_state {
+ QUEUE_FREE,
+ QUEUE_BUFS_REQUESTED,
+ QUEUE_BUFS_QUERIED,
+ QUEUE_BUFS_MMAPED,
+};
+
+/**
+ * enum s5p_mfc_decode_arg - type of frame decoding
+ */
+enum s5p_mfc_decode_arg {
+ MFC_DEC_FRAME,
+ MFC_DEC_LAST_FRAME,
+ MFC_DEC_RES_CHANGE,
+};
+
+enum s5p_mfc_fw_ver {
+ MFC_FW_V1,
+ MFC_FW_V2,
+};
+
+#define MFC_BUF_FLAG_USED (1 << 0)
+#define MFC_BUF_FLAG_EOS (1 << 1)
+
+struct s5p_mfc_ctx;
+
+/**
+ * struct s5p_mfc_buf - MFC buffer
+ */
+struct s5p_mfc_buf {
+ struct vb2_v4l2_buffer *b;
+ struct list_head list;
+ union {
+ struct {
+ size_t luma;
+ size_t chroma;
+ } raw;
+ size_t stream;
+ } cookie;
+ int flags;
+};
+
+/**
+ * struct s5p_mfc_pm - power management data structure
+ */
+struct s5p_mfc_pm {
+ struct clk *clock_gate;
+ const char * const *clk_names;
+ struct clk *clocks[MFC_MAX_CLOCKS];
+ int num_clocks;
+ bool use_clock_gating;
+
+ struct device *device;
+};
+
+struct s5p_mfc_buf_size_v5 {
+ unsigned int h264_ctx;
+ unsigned int non_h264_ctx;
+ unsigned int dsc;
+ unsigned int shm;
+};
+
+struct s5p_mfc_buf_size_v6 {
+ unsigned int dev_ctx;
+ unsigned int h264_dec_ctx;
+ unsigned int other_dec_ctx;
+ unsigned int h264_enc_ctx;
+ unsigned int hevc_enc_ctx;
+ unsigned int other_enc_ctx;
+};
+
+struct s5p_mfc_buf_size {
+ unsigned int fw;
+ unsigned int cpb;
+ void *priv;
+};
+
+struct s5p_mfc_variant {
+ unsigned int version;
+ unsigned int port_num;
+ u32 version_bit;
+ struct s5p_mfc_buf_size *buf_size;
+ char *fw_name[MFC_FW_MAX_VERSIONS];
+ const char *clk_names[MFC_MAX_CLOCKS];
+ int num_clocks;
+ bool use_clock_gating;
+};
+
+/**
+ * struct s5p_mfc_priv_buf - represents an internally used buffer
+ * @ofs: offset of each buffer, will be used for MFC
+ * @virt: kernel virtual address, only valid when the
+ * buffer is accessed by the driver
+ * @dma: DMA address, only valid when kernel DMA API used
+ * @size: size of the buffer
+ * @ctx: memory context (bank) used for this allocation
+ */
+struct s5p_mfc_priv_buf {
+ unsigned long ofs;
+ void *virt;
+ dma_addr_t dma;
+ size_t size;
+ unsigned int ctx;
+};
+
+/**
+ * struct s5p_mfc_dev - The struct containing driver internal parameters.
+ *
+ * @v4l2_dev: v4l2_device
+ * @vfd_dec: video device for decoding
+ * @vfd_enc: video device for encoding
+ * @plat_dev: platform device
+ * @mem_dev: child devices of the memory banks
+ * @regs_base: base address of the MFC hw registers
+ * @irq: irq resource
+ * @dec_ctrl_handler: control framework handler for decoding
+ * @enc_ctrl_handler: control framework handler for encoding
+ * @pm: power management control
+ * @variant: MFC hardware variant information
+ * @num_inst: counter of active MFC instances
+ * @irqlock: lock for operations on videobuf2 queues
+ * @condlock: lock for changing/checking if a context is ready to be
+ * processed
+ * @mfc_mutex: lock for video_device
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of last interrupt
+ * @int_err: error number for last interrupt
+ * @queue: waitqueue for waiting for completion of device commands
+ * @fw_buf: the firmware buffer (size, virtual and DMA addresses)
+ * @dma_base: address of the beginning of memory banks
+ * @hw_lock: used for hardware locking
+ * @ctx: array of driver contexts
+ * @curr_ctx: number of the currently running context
+ * @ctx_work_bits: used to mark which contexts are waiting for hardware
+ * @watchdog_cnt: counter for the watchdog
+ * @watchdog_workqueue: workqueue for the watchdog
+ * @watchdog_work: worker for the watchdog
+ * @enter_suspend: flag set when entering suspend
+ * @ctx_buf: common context memory (MFCv6)
+ * @warn_start: hardware error code from which warnings start
+ * @mfc_ops: ops structure holding HW operation function pointers
+ * @mfc_cmds: cmd structure holding HW commands function pointers
+ * @mfc_regs: structure holding MFC registers
+ * @fw_ver: loaded firmware sub-version
+ * @fw_get_done: flag set when request_firmware() has completed and the
+ * firmware has been copied into fw_buf
+ * @risc_on: flag indicating whether the RISC is on or off
+ *
+ */
+struct s5p_mfc_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_dec;
+ struct video_device *vfd_enc;
+ struct platform_device *plat_dev;
+ struct device *mem_dev[BANK_CTX_NUM];
+ void __iomem *regs_base;
+ int irq;
+ struct v4l2_ctrl_handler dec_ctrl_handler;
+ struct v4l2_ctrl_handler enc_ctrl_handler;
+ struct s5p_mfc_pm pm;
+ const struct s5p_mfc_variant *variant;
+ int num_inst;
+ spinlock_t irqlock; /* lock when operating on context */
+ spinlock_t condlock; /* lock when changing/checking if a context is
+ ready to be processed */
+ struct mutex mfc_mutex; /* video_device lock */
+ int int_cond;
+ int int_type;
+ unsigned int int_err;
+ wait_queue_head_t queue;
+ struct s5p_mfc_priv_buf fw_buf;
+ size_t mem_size;
+ dma_addr_t mem_base;
+ unsigned long *mem_bitmap;
+ void *mem_virt;
+ dma_addr_t dma_base[BANK_CTX_NUM];
+ unsigned long hw_lock;
+ struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
+ int curr_ctx;
+ unsigned long ctx_work_bits;
+ atomic_t watchdog_cnt;
+ struct timer_list watchdog_timer;
+ struct workqueue_struct *watchdog_workqueue;
+ struct work_struct watchdog_work;
+ unsigned long enter_suspend;
+
+ struct s5p_mfc_priv_buf ctx_buf;
+ int warn_start;
+ struct s5p_mfc_hw_ops *mfc_ops;
+ struct s5p_mfc_hw_cmds *mfc_cmds;
+ const struct s5p_mfc_regs *mfc_regs;
+ enum s5p_mfc_fw_ver fw_ver;
+ bool fw_get_done;
+ bool risc_on; /* indicates if RISC is on or off */
+};
+
+/**
+ * struct s5p_mfc_h264_enc_params - encoding parameters for h264
+ */
+struct s5p_mfc_h264_enc_params {
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_loop_filter_mode loop_filter_mode;
+ s8 loop_filter_alpha;
+ s8 loop_filter_beta;
+ enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
+ u8 max_ref_pic;
+ u8 num_ref_pic_4p;
+ int _8x8_transform;
+ int rc_mb_dark;
+ int rc_mb_smooth;
+ int rc_mb_static;
+ int rc_mb_activity;
+ int vui_sar;
+ u8 vui_sar_idc;
+ u16 vui_ext_sar_width;
+ u16 vui_ext_sar_height;
+ int open_gop;
+ u16 open_gop_size;
+ u8 rc_frame_qp;
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_p_frame_qp;
+ u8 rc_b_frame_qp;
+ enum v4l2_mpeg_video_h264_level level_v4l2;
+ int level;
+ u16 cpb_size;
+ int interlace;
+ u8 hier_qp;
+ u8 hier_qp_type;
+ u8 hier_qp_layer;
+ u8 hier_qp_layer_qp[7];
+ u8 sei_frame_packing;
+ u8 sei_fp_curr_frame_0;
+ u8 sei_fp_arrangement_type;
+
+ u8 fmo;
+ u8 fmo_map_type;
+ u8 fmo_slice_grp;
+ u8 fmo_chg_dir;
+ u32 fmo_chg_rate;
+ u32 fmo_run_len[4];
+ u8 aso;
+ u32 aso_slice_order[8];
+};
+
+/**
+ * struct s5p_mfc_mpeg4_enc_params - encoding parameters for h263 and mpeg4
+ */
+struct s5p_mfc_mpeg4_enc_params {
+ /* MPEG4 Only */
+ enum v4l2_mpeg_video_mpeg4_profile profile;
+ int quarter_pixel;
+ /* Common for MPEG4, H263 */
+ u16 vop_time_res;
+ u16 vop_frm_delta;
+ u8 rc_frame_qp;
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_p_frame_qp;
+ u8 rc_b_frame_qp;
+ enum v4l2_mpeg_video_mpeg4_level level_v4l2;
+ int level;
+};
+
+/**
+ * struct s5p_mfc_vp8_enc_params - encoding parameters for vp8
+ */
+struct s5p_mfc_vp8_enc_params {
+ u8 imd_4x4;
+ enum v4l2_vp8_num_partitions num_partitions;
+ enum v4l2_vp8_num_ref_frames num_ref;
+ u8 filter_level;
+ u8 filter_sharpness;
+ u32 golden_frame_ref_period;
+ enum v4l2_vp8_golden_frame_sel golden_frame_sel;
+ u8 hier_layer;
+ u8 hier_layer_qp[3];
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_frame_qp;
+ u8 rc_p_frame_qp;
+ u8 profile;
+};
+
+struct s5p_mfc_hevc_enc_params {
+ enum v4l2_mpeg_video_hevc_profile profile;
+ int level;
+ enum v4l2_mpeg_video_h264_level level_v4l2;
+ u8 tier;
+ u32 rc_framerate;
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_lcu_dark;
+ u8 rc_lcu_smooth;
+ u8 rc_lcu_static;
+ u8 rc_lcu_activity;
+ u8 rc_frame_qp;
+ u8 rc_p_frame_qp;
+ u8 rc_b_frame_qp;
+ u8 max_partition_depth;
+ u8 num_refs_for_p;
+ u8 refreshtype;
+ u16 refreshperiod;
+ s32 lf_beta_offset_div2;
+ s32 lf_tc_offset_div2;
+ u8 loopfilter;
+ u8 loopfilter_disable;
+ u8 loopfilter_across;
+ u8 nal_control_length_filed;
+ u8 nal_control_user_ref;
+ u8 nal_control_store_ref;
+ u8 const_intra_period_enable;
+ u8 lossless_cu_enable;
+ u8 wavefront_enable;
+ u8 enable_ltr;
+ u8 hier_qp_enable;
+ enum v4l2_mpeg_video_hevc_hier_coding_type hier_qp_type;
+ u8 num_hier_layer;
+ u8 hier_qp_layer[7];
+ u32 hier_bit_layer[7];
+ u8 sign_data_hiding;
+ u8 general_pb_enable;
+ u8 temporal_id_enable;
+ u8 strong_intra_smooth;
+ u8 intra_pu_split_disable;
+ u8 tmv_prediction_disable;
+ u8 max_num_merge_mv;
+ u8 eco_mode_enable;
+ u8 encoding_nostartcode_enable;
+ u8 size_of_length_field;
+ u8 prepend_sps_pps_to_idr;
+};
+
+/**
+ * struct s5p_mfc_enc_params - general encoding parameters
+ */
+struct s5p_mfc_enc_params {
+ u16 width;
+ u16 height;
+ u32 mv_h_range;
+ u32 mv_v_range;
+
+ u16 gop_size;
+ enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+ u16 slice_mb;
+ u32 slice_bit;
+ u16 intra_refresh_mb;
+ int pad;
+ u8 pad_luma;
+ u8 pad_cb;
+ u8 pad_cr;
+ int rc_frame;
+ int rc_mb;
+ u32 rc_bitrate;
+ u16 rc_reaction_coeff;
+ u16 vbv_size;
+ u32 vbv_delay;
+
+ enum v4l2_mpeg_video_header_mode seq_hdr_mode;
+ enum v4l2_mpeg_mfc51_video_frame_skip_mode frame_skip_mode;
+ int fixed_target_bit;
+
+ u8 num_b_frame;
+ u32 rc_framerate_num;
+ u32 rc_framerate_denom;
+
+ struct {
+ struct s5p_mfc_h264_enc_params h264;
+ struct s5p_mfc_mpeg4_enc_params mpeg4;
+ struct s5p_mfc_vp8_enc_params vp8;
+ struct s5p_mfc_hevc_enc_params hevc;
+ } codec;
+
+};
+
+/**
+ * struct s5p_mfc_codec_ops - codec ops, used by encoding
+ */
+struct s5p_mfc_codec_ops {
+ /* initialization routines */
+ int (*pre_seq_start) (struct s5p_mfc_ctx *ctx);
+ int (*post_seq_start) (struct s5p_mfc_ctx *ctx);
+ /* execution routines */
+ int (*pre_frame_start) (struct s5p_mfc_ctx *ctx);
+ int (*post_frame_start) (struct s5p_mfc_ctx *ctx);
+};
+
+#define call_cop(c, op, args...) \
+ (((c)->c_ops->op) ? \
+ ((c)->c_ops->op(args)) : 0)
+
+/**
+ * struct s5p_mfc_ctx - This struct contains the instance context
+ *
+ * @dev: pointer to the s5p_mfc_dev of the device
+ * @fh: struct v4l2_fh
+ * @num: number of the context that this structure describes
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of the last interrupt
+ * @int_err: error number received from MFC hw in the interrupt
+ * @queue: waitqueue that can be used to wait for this context to
+ * finish
+ * @src_fmt: source pixelformat information
+ * @dst_fmt: destination pixelformat information
+ * @vq_src: vb2 queue for source buffers
+ * @vq_dst: vb2 queue for destination buffers
+ * @src_queue: driver internal queue for source buffers
+ * @dst_queue: driver internal queue for destination buffers
+ * @src_queue_cnt: number of buffers queued on the source internal queue
+ * @dst_queue_cnt: number of buffers queued on the dest internal queue
+ * @type: type of the instance - decoder or encoder
+ * @state: state of the context
+ * @inst_no: number of hw instance associated with the context
+ * @img_width: width of the image that is decoded or encoded
+ * @img_height: height of the image that is decoded or encoded
+ * @buf_width: width of the buffer for processed image
+ * @buf_height: height of the buffer for processed image
+ * @luma_size: size of a luma plane
+ * @chroma_size: size of a chroma plane
+ * @mv_size: size of a motion vectors buffer
+ * @consumed_stream: number of bytes that have been used so far from the
+ * decoding buffer
+ * @dpb_flush_flag: flag used to indicate that DPB buffers are being
+ * flushed
+ * @head_processed: flag indicating whether the header data has been
+ * processed completely
+ * @bank1: handle to memory allocated for temporary buffers from
+ * memory bank 1
+ * @bank2: handle to memory allocated for temporary buffers from
+ * memory bank 2
+ * @capture_state: state of the capture buffers queue
+ * @output_state: state of the output buffers queue
+ * @src_bufs: information on allocated source buffers
+ * @dst_bufs: information on allocated destination buffers
+ * @sequence: counter for the sequence number for v4l2
+ * @dec_dst_flag: flags for buffers queued in the hardware
+ * @dec_src_buf_size: size of the buffer for source buffers in decoding
+ * @codec_mode: number of codec mode used by MFC hw
+ * @slice_interface: slice interface flag
+ * @loop_filter_mpeg4: loop filter for MPEG4 flag
+ * @display_delay: value of the display delay for H264
+ * @display_delay_enable: display delay for H264 enable flag
+ * @after_packed_pb: flag used to track buffer when stream is in
+ * Packed PB format
+ * @sei_fp_parse: enable/disable parsing of frame packing SEI information
+ * @pb_count: count of the DPB buffers required by MFC hw
+ * @total_dpb_count: count of DPB buffers with additional buffers
+ * requested by the application
+ * @ctx: context buffer information
+ * @dsc: descriptor buffer information
+ * @shm: shared memory buffer information
+ * @mv_count: number of MV buffers allocated for decoding
+ * @enc_params: encoding parameters for MFC
+ * @enc_dst_buf_size: size of the buffers for encoder output
+ * @luma_dpb_size: dpb buffer size for luma
+ * @chroma_dpb_size: dpb buffer size for chroma
+ * @me_buffer_size: size of the motion estimation buffer
+ * @tmv_buffer_size: size of temporal predictor motion vector buffer
+ * @force_frame_type: used to force the type of the next encoded frame
+ * @ref_queue: list of the reference buffers for encoding
+ * @ref_queue_cnt: number of the buffers in the reference list
+ * @c_ops: ops for encoding
+ * @ctrls: array of controls, used when adding controls to the
+ * v4l2 control framework
+ * @ctrl_handler: handler for v4l2 framework
+ */
+struct s5p_mfc_ctx {
+ struct s5p_mfc_dev *dev;
+ struct v4l2_fh fh;
+
+ int num;
+
+ int int_cond;
+ int int_type;
+ unsigned int int_err;
+ wait_queue_head_t queue;
+
+ struct s5p_mfc_fmt *src_fmt;
+ struct s5p_mfc_fmt *dst_fmt;
+
+ struct vb2_queue vq_src;
+ struct vb2_queue vq_dst;
+
+ struct list_head src_queue;
+ struct list_head dst_queue;
+
+ unsigned int src_queue_cnt;
+ unsigned int dst_queue_cnt;
+
+ enum s5p_mfc_inst_type type;
+ enum s5p_mfc_inst_state state;
+ int inst_no;
+
+ /* Image parameters */
+ int img_width;
+ int img_height;
+ int buf_width;
+ int buf_height;
+
+ int luma_size;
+ int chroma_size;
+ int mv_size;
+
+ unsigned long consumed_stream;
+
+ unsigned int dpb_flush_flag;
+ unsigned int head_processed;
+
+ struct s5p_mfc_priv_buf bank1;
+ struct s5p_mfc_priv_buf bank2;
+
+ enum s5p_mfc_queue_state capture_state;
+ enum s5p_mfc_queue_state output_state;
+
+ struct s5p_mfc_buf src_bufs[MFC_MAX_BUFFERS];
+ int src_bufs_cnt;
+ struct s5p_mfc_buf dst_bufs[MFC_MAX_BUFFERS];
+ int dst_bufs_cnt;
+
+ unsigned int sequence;
+ unsigned long dec_dst_flag;
+ size_t dec_src_buf_size;
+
+ /* Control values */
+ int codec_mode;
+ int slice_interface;
+ int loop_filter_mpeg4;
+ int display_delay;
+ int display_delay_enable;
+ int after_packed_pb;
+ int sei_fp_parse;
+
+ int pb_count;
+ int total_dpb_count;
+ int mv_count;
+ /* Buffers */
+ struct s5p_mfc_priv_buf ctx;
+ struct s5p_mfc_priv_buf dsc;
+ struct s5p_mfc_priv_buf shm;
+
+ struct s5p_mfc_enc_params enc_params;
+
+ size_t enc_dst_buf_size;
+ size_t luma_dpb_size;
+ size_t chroma_dpb_size;
+ size_t me_buffer_size;
+ size_t tmv_buffer_size;
+
+ enum v4l2_mpeg_mfc51_video_force_frame_type force_frame_type;
+
+ struct list_head ref_queue;
+ unsigned int ref_queue_cnt;
+
+ enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+ union {
+ unsigned int mb;
+ unsigned int bits;
+ } slice_size;
+
+ const struct s5p_mfc_codec_ops *c_ops;
+
+ struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS];
+ struct v4l2_ctrl_handler ctrl_handler;
+ unsigned int frame_tag;
+ size_t scratch_buf_size;
+};
+
+/*
+ * struct s5p_mfc_fmt - structure used to store information about pixelformats
+ * used by the MFC
+ */
+struct s5p_mfc_fmt {
+ char *name;
+ u32 fourcc;
+ u32 codec_mode;
+ enum s5p_mfc_fmt_type type;
+ u32 num_planes;
+ u32 versions;
+};
+
+/**
+ * struct mfc_control - structure used to store information about MFC controls;
+ * it is used to initialize the control framework.
+ */
+struct mfc_control {
+ __u32 id;
+ enum v4l2_ctrl_type type;
+ __u8 name[32]; /* Control name */
+ __s32 minimum; /* Note signedness */
+ __s32 maximum;
+ __s32 step;
+ __u32 menu_skip_mask;
+ __s32 default_value;
+ __u32 flags;
+ __u32 reserved[2];
+ __u8 is_volatile;
+};
+
+/* Macro for making hardware specific calls */
+#define s5p_mfc_hw_call(f, op, args...) \
+ ((f && f->op) ? f->op(args) : (typeof(f->op(args)))(-ENODEV))
+
+#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
+
+void clear_work_bit(struct s5p_mfc_ctx *ctx);
+void set_work_bit(struct s5p_mfc_ctx *ctx);
+void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx);
+void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev);
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
+
+#define HAS_PORTNUM(dev) (dev ? (dev->variant ? \
+ (dev->variant->port_num ? 1 : 0) : 0) : 0)
+#define IS_TWOPORT(dev) (dev->variant->port_num == 2 ? 1 : 0)
+#define IS_MFCV6_PLUS(dev) (dev->variant->version >= 0x60 ? 1 : 0)
+#define IS_MFCV7_PLUS(dev) (dev->variant->version >= 0x70 ? 1 : 0)
+#define IS_MFCV8_PLUS(dev) (dev->variant->version >= 0x80 ? 1 : 0)
+#define IS_MFCV10(dev) (dev->variant->version >= 0xA0 ? 1 : 0)
+#define FW_HAS_E_MIN_SCRATCH_BUF(dev) (IS_MFCV10(dev))
+
+#define MFC_V5_BIT BIT(0)
+#define MFC_V6_BIT BIT(1)
+#define MFC_V7_BIT BIT(2)
+#define MFC_V8_BIT BIT(3)
+#define MFC_V10_BIT BIT(5)
+
+#define MFC_V5PLUS_BITS (MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | \
+ MFC_V8_BIT | MFC_V10_BIT)
+#define MFC_V6PLUS_BITS (MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT | \
+ MFC_V10_BIT)
+#define MFC_V7PLUS_BITS (MFC_V7_BIT | MFC_V8_BIT | MFC_V10_BIT)
+
+#endif /* S5P_MFC_COMMON_H_ */
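
The s5p_mfc_hw_call() macro above guards every hardware callback: when the ops table or the named op is NULL it evaluates to -ENODEV (cast to the op's return type) instead of dereferencing a NULL pointer. A short sketch of what that means for a caller; the wrapper name is hypothetical and the block is illustrative only, not part of the header above.

/* Hypothetical wrapper showing the -ENODEV fallback of s5p_mfc_hw_call(). */
static int example_try_sleep(struct s5p_mfc_dev *dev)
{
	int ret = s5p_mfc_hw_call(dev->mfc_cmds, sleep_cmd, dev);

	if (ret == -ENODEV)
		mfc_err("sleep command not provided by this variant\n");
	return ret;
}
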
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
new file mode 100644
index 000000000..ee7b15b33
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -0,0 +1,486 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_ctrl.h"
+
+/* Allocate memory for firmware */
+int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_priv_buf *fw_buf = &dev->fw_buf;
+ int err;
+
+ fw_buf->size = dev->variant->buf_size->fw;
+
+ if (fw_buf->virt) {
+ mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
+ return -ENOMEM;
+ }
+
+ err = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &dev->fw_buf);
+ if (err) {
+ mfc_err("Allocating bitprocessor buffer failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Load firmware */
+int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev)
+{
+ struct firmware *fw_blob;
+ int i, err = -EINVAL;
+
+ /* Firmware has to be present as a separate file or compiled
+ * into the kernel. */
+ mfc_debug_enter();
+
+ if (dev->fw_get_done)
+ return 0;
+
+ for (i = MFC_FW_MAX_VERSIONS - 1; i >= 0; i--) {
+ if (!dev->variant->fw_name[i])
+ continue;
+ err = request_firmware((const struct firmware **)&fw_blob,
+ dev->variant->fw_name[i], &dev->plat_dev->dev);
+ if (!err) {
+ dev->fw_ver = (enum s5p_mfc_fw_ver) i;
+ break;
+ }
+ }
+
+ if (err != 0) {
+ mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
+ return -EINVAL;
+ }
+ if (fw_blob->size > dev->fw_buf.size) {
+ mfc_err("MFC firmware is too big to be loaded\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ memcpy(dev->fw_buf.virt, fw_blob->data, fw_blob->size);
+ wmb();
+ dev->fw_get_done = true;
+ release_firmware(fw_blob);
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Release firmware memory */
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
+{
+ /* Before calling this function one has to make sure
+ * that MFC is no longer processing */
+ s5p_mfc_release_priv_buf(dev, &dev->fw_buf);
+ dev->fw_get_done = false;
+ return 0;
+}
+
+static int s5p_mfc_bus_reset(struct s5p_mfc_dev *dev)
+{
+ unsigned int status;
+ unsigned long timeout;
+
+ /* Reset */
+ mfc_write(dev, 0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* Check bus status */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while resetting MFC.\n");
+ return -EIO;
+ }
+ status = mfc_read(dev, S5P_FIMV_MFC_BUS_RESET_CTRL);
+ } while ((status & 0x2) == 0);
+ return 0;
+}
+
+/* Reset the device */
+int s5p_mfc_reset(struct s5p_mfc_dev *dev)
+{
+ unsigned int mc_status;
+ unsigned long timeout;
+ int i;
+
+ mfc_debug_enter();
+
+ if (IS_MFCV6_PLUS(dev)) {
+ /* Zero Initialization of MFC registers */
+ mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD_V6);
+ mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD_V6);
+ mfc_write(dev, 0, S5P_FIMV_FW_VERSION_V6);
+
+ for (i = 0; i < S5P_FIMV_REG_CLEAR_COUNT_V6; i++)
+ mfc_write(dev, 0, S5P_FIMV_REG_CLEAR_BEGIN_V6 + (i*4));
+
+ /* check bus reset control before reset */
+ if (dev->risc_on)
+ if (s5p_mfc_bus_reset(dev))
+ return -EIO;
+ /* Reset
+ * set RISC_ON to 0 during power_on & wake_up.
+ * V6 needs RISC_ON set to 0 during reset also.
+ */
+ if ((!dev->risc_on) || (!IS_MFCV7_PLUS(dev)))
+ mfc_write(dev, 0, S5P_FIMV_RISC_ON_V6);
+
+ mfc_write(dev, 0x1FFF, S5P_FIMV_MFC_RESET_V6);
+ mfc_write(dev, 0, S5P_FIMV_MFC_RESET_V6);
+ } else {
+ /* Stop procedure */
+ /* reset RISC */
+ mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
+ /* All reset except for MC */
+ mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
+ mdelay(10);
+
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* Check MC status */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while resetting MFC\n");
+ return -EIO;
+ }
+
+ mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS);
+
+ } while (mc_status & 0x3);
+
+ mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
+ mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
+ }
+
+ mfc_debug_leave();
+ return 0;
+}
+
+static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
+{
+ if (IS_MFCV6_PLUS(dev)) {
+ mfc_write(dev, dev->dma_base[BANK_L_CTX],
+ S5P_FIMV_RISC_BASE_ADDRESS_V6);
+ mfc_debug(2, "Base Address : %pad\n",
+ &dev->dma_base[BANK_L_CTX]);
+ } else {
+ mfc_write(dev, dev->dma_base[BANK_L_CTX],
+ S5P_FIMV_MC_DRAMBASE_ADR_A);
+ mfc_write(dev, dev->dma_base[BANK_R_CTX],
+ S5P_FIMV_MC_DRAMBASE_ADR_B);
+ mfc_debug(2, "Bank1: %pad, Bank2: %pad\n",
+ &dev->dma_base[BANK_L_CTX],
+ &dev->dma_base[BANK_R_CTX]);
+ }
+}
+
+static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
+{
+ if (IS_MFCV6_PLUS(dev)) {
+ /* Zero initialization should be done before RESET.
+ * Nothing to do here. */
+ } else {
+ mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID);
+ mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID);
+ mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+ mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD);
+ }
+}
+
+/* Initialize hardware */
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
+{
+ unsigned int ver;
+ int ret;
+
+ mfc_debug_enter();
+ if (!dev->fw_buf.virt) {
+ mfc_err("Firmware memory is not allocated.\n");
+ return -EINVAL;
+ }
+
+ /* 0. MFC reset */
+ mfc_debug(2, "MFC reset..\n");
+ s5p_mfc_clock_on();
+ dev->risc_on = 0;
+ ret = s5p_mfc_reset(dev);
+ if (ret) {
+ mfc_err("Failed to reset MFC - timeout\n");
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ mfc_debug(2, "Done MFC reset..\n");
+ /* 1. Set DRAM base Addr */
+ s5p_mfc_init_memctrl(dev);
+ /* 2. Initialize registers of channel I/F */
+ s5p_mfc_clear_cmds(dev);
+ /* 3. Release reset signal to the RISC */
+ s5p_mfc_clean_dev_int_flags(dev);
+ if (IS_MFCV6_PLUS(dev)) {
+ dev->risc_on = 1;
+ mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
+ } else
+ mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+
+ if (IS_MFCV10(dev))
+ mfc_write(dev, 0x0, S5P_FIMV_MFC_CLOCK_OFF_V10);
+
+ mfc_debug(2, "Will now wait for completion of firmware transfer\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_FW_STATUS_RET)) {
+ mfc_err("Failed to load firmware\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ s5p_mfc_clean_dev_int_flags(dev);
+ /* 4. Initialize firmware */
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, sys_init_cmd, dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ mfc_debug(2, "Ok, now will wait for completion of hardware init\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_SYS_INIT_RET)) {
+ mfc_err("Failed to init hardware\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_MFC_R2H_CMD_SYS_INIT_RET) {
+ /* Failure. */
+ mfc_err("Failed to init firmware - error: %d int: %d\n",
+ dev->int_err, dev->int_type);
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ if (IS_MFCV6_PLUS(dev))
+ ver = mfc_read(dev, S5P_FIMV_FW_VERSION_V6);
+ else
+ ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
+
+ mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
+ (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
+ s5p_mfc_clock_off();
+ mfc_debug_leave();
+ return 0;
+}
+
+
+/* Deinitialize hardware */
+void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev)
+{
+ s5p_mfc_clock_on();
+
+ s5p_mfc_reset(dev);
+ s5p_mfc_hw_call(dev->mfc_ops, release_dev_context_buffer, dev);
+
+ s5p_mfc_clock_off();
+}
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ mfc_debug_enter();
+ s5p_mfc_clock_on();
+ s5p_mfc_clean_dev_int_flags(dev);
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, sleep_cmd, dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_SLEEP_RET)) {
+ mfc_err("Failed to sleep\n");
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ s5p_mfc_clock_off();
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_MFC_R2H_CMD_SLEEP_RET) {
+ /* Failure. */
+ mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err,
+ dev->int_type);
+ return -EIO;
+ }
+ mfc_debug_leave();
+ return ret;
+}
+
+static int s5p_mfc_v8_wait_wakeup(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ /* Release reset signal to the RISC */
+ dev->risc_on = 1;
+ mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
+
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_FW_STATUS_RET)) {
+ mfc_err("Failed to reset MFCV8\n");
+ return -EIO;
+ }
+ mfc_debug(2, "Write command to wakeup MFCV8\n");
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFCV8 - timeout\n");
+ return ret;
+ }
+
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
+ mfc_err("Failed to wakeup MFC\n");
+ return -EIO;
+ }
+ return ret;
+}
+
+static int s5p_mfc_wait_wakeup(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ /* Send MFC wakeup command */
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ return ret;
+ }
+
+ /* Release reset signal to the RISC */
+ if (IS_MFCV6_PLUS(dev)) {
+ dev->risc_on = 1;
+ mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
+ } else {
+ mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+ }
+
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
+ mfc_err("Failed to wakeup MFC\n");
+ return -EIO;
+ }
+ return ret;
+}
+
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ mfc_debug_enter();
+ /* 0. MFC reset */
+ mfc_debug(2, "MFC reset..\n");
+ s5p_mfc_clock_on();
+ dev->risc_on = 0;
+ ret = s5p_mfc_reset(dev);
+ if (ret) {
+ mfc_err("Failed to reset MFC - timeout\n");
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ mfc_debug(2, "Done MFC reset..\n");
+ /* 1. Set DRAM base Addr */
+ s5p_mfc_init_memctrl(dev);
+ /* 2. Initialize registers of channel I/F */
+ s5p_mfc_clear_cmds(dev);
+ s5p_mfc_clean_dev_int_flags(dev);
+ /* 3. Send MFC wakeup command and wait for completion*/
+ if (IS_MFCV8_PLUS(dev))
+ ret = s5p_mfc_v8_wait_wakeup(dev);
+ else
+ ret = s5p_mfc_wait_wakeup(dev);
+
+ s5p_mfc_clock_off();
+ if (ret)
+ return ret;
+
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_MFC_R2H_CMD_WAKEUP_RET) {
+ /* Failure. */
+ mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err,
+ dev->int_type);
+ return -EIO;
+ }
+ mfc_debug_leave();
+ return 0;
+}
+
+int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
+{
+ int ret = 0;
+
+ ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_instance_buffer, ctx);
+ if (ret) {
+ mfc_err("Failed allocating instance buffer\n");
+ goto err;
+ }
+
+ if (ctx->type == MFCINST_DECODER) {
+ ret = s5p_mfc_hw_call(dev->mfc_ops,
+ alloc_dec_temp_buffers, ctx);
+ if (ret) {
+ mfc_err("Failed allocating temporary buffers\n");
+ goto err_free_inst_buf;
+ }
+ }
+
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
+ /* Error or timeout */
+ mfc_err("Error getting instance from hardware\n");
+ ret = -EIO;
+ goto err_free_desc_buf;
+ }
+
+ mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
+ return ret;
+
+err_free_desc_buf:
+ if (ctx->type == MFCINST_DECODER)
+ s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer, ctx);
+err_free_inst_buf:
+ s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx);
+err:
+ return ret;
+}
+
+void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
+{
+ ctx->state = MFCINST_RETURN_INST;
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ /* Wait until instance is returned or timeout occurred */
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0))
+ mfc_err("Err returning instance\n");
+
+ /* Free resources */
+ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx);
+ if (ctx->type == MFCINST_DECODER)
+ s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer, ctx);
+
+ ctx->inst_no = MFC_NO_INSTANCE_SET;
+ ctx->state = MFCINST_FREE;
+}
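
s5p_mfc_sleep() and s5p_mfc_wakeup() form the firmware suspend/resume pair; the system PM callbacks in the main driver file (not part of this hunk) are the expected callers. The sketch below only illustrates that pairing: the function names and the num_inst check are assumptions, not the driver's actual PM code.

/* Hypothetical PM glue, for illustration only. */
static int example_suspend(struct s5p_mfc_dev *dev)
{
	/* Put the firmware to sleep only if any instance is open. */
	return dev->num_inst ? s5p_mfc_sleep(dev) : 0;
}

static int example_resume(struct s5p_mfc_dev *dev)
{
	/* s5p_mfc_wakeup() resets, reprograms the bases and restarts the RISC. */
	return dev->num_inst ? s5p_mfc_wakeup(dev) : 0;
}
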
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
new file mode 100644
index 000000000..45c807bf1
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
@@ -0,0 +1,33 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CTRL_H
+#define S5P_MFC_CTRL_H
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
+void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev);
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_reset(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx);
+void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_CTRL_H */
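
These entry points imply a fixed bring-up order: the firmware buffer must be allocated and the blob loaded before s5p_mfc_init_hw() boots the RISC, and only then can an instance be opened. A condensed sketch of that order follows, with a hypothetical function name and error unwinding abbreviated; illustrative only, not part of the header above.

/* Hypothetical bring-up sequence, error unwinding omitted for brevity. */
static int example_bringup(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
{
	int ret;

	ret = s5p_mfc_alloc_firmware(dev);	/* reserve the firmware buffer */
	if (ret)
		return ret;
	ret = s5p_mfc_load_firmware(dev);	/* copy the blob into it */
	if (ret)
		return ret;
	ret = s5p_mfc_init_hw(dev);		/* reset, boot the RISC, SYS_INIT */
	if (ret)
		return ret;
	return s5p_mfc_open_mfc_inst(dev, ctx);	/* per-context OPEN_INSTANCE */
}
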
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
new file mode 100644
index 000000000..1936a5b86
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -0,0 +1,54 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains debug macros
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_DEBUG_H_
+#define S5P_MFC_DEBUG_H_
+
+#define DEBUG
+
+#ifdef DEBUG
+extern int mfc_debug_level;
+
+#define mfc_debug(level, fmt, args...) \
+ do { \
+ if (mfc_debug_level >= level) \
+ printk(KERN_DEBUG "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+#else
+#define mfc_debug(level, fmt, args...)
+#endif
+
+#define mfc_debug_enter() mfc_debug(5, "enter\n")
+#define mfc_debug_leave() mfc_debug(5, "leave\n")
+
+#define mfc_err(fmt, args...) \
+ do { \
+ printk(KERN_ERR "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mfc_err_limited(fmt, args...) \
+ do { \
+ printk_ratelimited(KERN_ERR "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mfc_info(fmt, args...) \
+ do { \
+ printk(KERN_INFO "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#endif /* S5P_MFC_DEBUG_H_ */
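
mfc_debug() is always compiled in here (DEBUG is defined unconditionally) but a message is printed only when its level is less than or equal to the mfc_debug_level module parameter. A tiny illustration, assuming mfc_debug_level has been set to 3; not part of the header above.

/* With mfc_debug_level == 3: */
mfc_debug(2, "printed: level 2 <= 3\n");	/* emitted at KERN_DEBUG */
mfc_debug(5, "suppressed: level 5 > 3\n");	/* filtered out */
mfc_debug_enter();	/* expands to mfc_debug(5, "enter\n"), also filtered */
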
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
new file mode 100644
index 000000000..6a3cc4f86
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -0,0 +1,1196 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+
+static struct s5p_mfc_fmt formats[] = {
+ {
+ .name = "4:2:0 2 Planes 16x16 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT_16X16,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V6_BIT | MFC_V7_BIT,
+ },
+ {
+ .name = "4:2:0 2 Planes 64x32 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V5_BIT,
+ },
+ {
+ .name = "4:2:0 2 Planes Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V6PLUS_BITS,
+ },
+ {
+ .name = "4:2:0 2 Planes Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V6PLUS_BITS,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .codec_mode = S5P_MFC_CODEC_H264_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "H264/MVC Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264_MVC,
+ .codec_mode = S5P_MFC_CODEC_H264_MVC_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V6PLUS_BITS,
+ },
+ {
+ .name = "H263 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H263,
+ .codec_mode = S5P_MFC_CODEC_H263_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "MPEG1 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG1,
+ .codec_mode = S5P_MFC_CODEC_MPEG2_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "MPEG2 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG2,
+ .codec_mode = S5P_MFC_CODEC_MPEG2_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .codec_mode = S5P_MFC_CODEC_MPEG4_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "XviD Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_XVID,
+ .codec_mode = S5P_MFC_CODEC_MPEG4_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "VC1 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .codec_mode = S5P_MFC_CODEC_VC1_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "VC1 RCV Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
+ .codec_mode = S5P_MFC_CODEC_VC1RCV_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "VP8 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .codec_mode = S5P_MFC_CODEC_VP8_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V6PLUS_BITS,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HEVC,
+ .codec_mode = S5P_FIMV_CODEC_HEVC_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V10_BIT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .codec_mode = S5P_FIMV_CODEC_VP9_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ .versions = MFC_V10_BIT,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Find selected format description */
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == t)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct mfc_control controls[] = {
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H264 Display Delay",
+ .minimum = 0,
+ .maximum = 16383,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Display Delay Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mpeg4 Loop Filter Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Slice Interface Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Minimum number of cap bufs",
+ .minimum = 1,
+ .maximum = 32,
+ .step = 1,
+ .default_value = 1,
+ .is_volatile = 1,
+ },
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+
+/* Check whether a context should be run on hardware */
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+ /* Context is to parse header */
+ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_GOT_INST)
+ return 1;
+ /* Context is to decode a frame */
+ if (ctx->src_queue_cnt >= 1 &&
+ ctx->state == MFCINST_RUNNING &&
+ ctx->dst_queue_cnt >= ctx->pb_count)
+ return 1;
+ /* Context is to return last frame */
+ if (ctx->state == MFCINST_FINISHING &&
+ ctx->dst_queue_cnt >= ctx->pb_count)
+ return 1;
+ /* Context is to set buffers */
+ if (ctx->src_queue_cnt >= 1 &&
+ ctx->state == MFCINST_HEAD_PARSED &&
+ ctx->capture_state == QUEUE_BUFS_MMAPED)
+ return 1;
+ /* Resolution change */
+ if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
+ ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
+ ctx->dst_queue_cnt >= ctx->pb_count)
+ return 1;
+ if (ctx->state == MFCINST_RES_CHANGE_END &&
+ ctx->src_queue_cnt >= 1)
+ return 1;
+ mfc_debug(2, "ctx is not ready\n");
+ return 0;
+}
+
+static const struct s5p_mfc_codec_ops decoder_codec_ops = {
+ .pre_seq_start = NULL,
+ .post_seq_start = NULL,
+ .pre_frame_start = NULL,
+ .post_frame_start = NULL,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+
+ strlcpy(cap->driver, S5P_MFC_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, dev->vfd_dec->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&dev->plat_dev->dev));
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/* Enumerate format */
+static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
+ bool out)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (out && formats[i].type != MFC_FMT_DEC)
+ continue;
+ else if (!out && formats[i].type != MFC_FMT_RAW)
+ continue;
+ else if ((dev->variant->version_bit & formats[i].versions) == 0)
+ continue;
+
+ if (j == f->index)
+ break;
+ ++j;
+ }
+ if (i == ARRAY_SIZE(formats))
+ return -EINVAL;
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, f, false);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, f, true);
+}
+
+/* Get format */
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ mfc_debug_enter();
+ pix_mp = &f->fmt.pix_mp;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ (ctx->state == MFCINST_GOT_INST || ctx->state ==
+ MFCINST_RES_CHANGE_END)) {
+ /* If the MFC is parsing the header,
+ * wait until it is finished */
+ s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET,
+ 0);
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ /* This is run on CAPTURE (decode output) */
+ /* Width and height are set to the dimensions
+ * of the movie, the buffer is bigger and
+ * further processing stages should crop to this
+ * rectangle. */
+ pix_mp->width = ctx->buf_width;
+ pix_mp->height = ctx->buf_height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->num_planes = 2;
+ /* Set pixelformat to the format in which MFC
+ * outputs the decoded frame */
+ pix_mp->pixelformat = ctx->dst_fmt->fourcc;
+ pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* This is run on OUTPUT.
+ * The buffer contains a compressed image,
+ * so width and height have no meaning */
+ pix_mp->width = 0;
+ pix_mp->height = 0;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size;
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size;
+ pix_mp->pixelformat = ctx->src_fmt->fourcc;
+ pix_mp->num_planes = ctx->src_fmt->num_planes;
+ } else {
+ mfc_err("Format could not be read\n");
+ mfc_debug(2, "%s-- with error\n", __func__);
+ return -EINVAL;
+ }
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Try format */
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_fmt *fmt;
+
+ mfc_debug(2, "Type is %d\n", f->type);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt = find_format(f, MFC_FMT_DEC);
+ if (!fmt) {
+ mfc_err("Unsupported format for source.\n");
+ return -EINVAL;
+ }
+ if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+ mfc_err("Unknown codec\n");
+ return -EINVAL;
+ }
+ if ((dev->variant->version_bit & fmt->versions) == 0) {
+ mfc_err("Unsupported format by this MFC version.\n");
+ return -EINVAL;
+ }
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt = find_format(f, MFC_FMT_RAW);
+ if (!fmt) {
+ mfc_err("Unsupported format for destination.\n");
+ return -EINVAL;
+ }
+ if ((dev->variant->version_bit & fmt->versions) == 0) {
+ mfc_err("Unsupported format by this MFC version.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Set format */
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
+
+ mfc_debug_enter();
+ ret = vidioc_try_fmt(file, priv, f);
+ pix_mp = &f->fmt.pix_mp;
+ if (ret)
+ return ret;
+ if (vb2_is_streaming(&ctx->vq_src) || vb2_is_streaming(&ctx->vq_dst)) {
+ v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
+ ret = 0;
+ goto out;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_DEC);
+ ctx->codec_mode = ctx->src_fmt->codec_mode;
+ mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+ pix_mp->height = 0;
+ pix_mp->width = 0;
+ if (pix_mp->plane_fmt[0].sizeimage == 0)
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+ DEF_CPB_SIZE;
+ else if (pix_mp->plane_fmt[0].sizeimage > buf_size->cpb)
+ ctx->dec_src_buf_size = buf_size->cpb;
+ else
+ ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ ctx->state = MFCINST_INIT;
+ ret = 0;
+ goto out;
+ } else {
+ mfc_err("Wrong type error for S_FMT : %d", f->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ mfc_debug_leave();
+ return ret;
+}
+
+static int reqbufs_output(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ int ret = 0;
+
+ s5p_mfc_clock_on();
+
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ if (ret)
+ goto out;
+ ctx->src_bufs_cnt = 0;
+ ctx->output_state = QUEUE_FREE;
+ } else if (ctx->output_state == QUEUE_FREE) {
+ /* Can only request buffers when we have a valid format set. */
+ WARN_ON(ctx->src_bufs_cnt != 0);
+ if (ctx->state != MFCINST_INIT) {
+ mfc_err("Reqbufs called in an invalid state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mfc_debug(2, "Allocating %d buffers for OUTPUT queue\n",
+ reqbufs->count);
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ if (ret)
+ goto out;
+
+ ret = s5p_mfc_open_mfc_inst(dev, ctx);
+ if (ret) {
+ reqbufs->count = 0;
+ vb2_reqbufs(&ctx->vq_src, reqbufs);
+ goto out;
+ }
+
+ ctx->output_state = QUEUE_BUFS_REQUESTED;
+ } else {
+ mfc_err("Buffers have already been requested\n");
+ ret = -EINVAL;
+ }
+out:
+ s5p_mfc_clock_off();
+ if (ret)
+ mfc_err("Failed allocating buffers for OUTPUT queue\n");
+ return ret;
+}
+
+static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ int ret = 0;
+
+ s5p_mfc_clock_on();
+
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ if (ret)
+ goto out;
+ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
+ ctx->dst_bufs_cnt = 0;
+ } else if (ctx->capture_state == QUEUE_FREE) {
+ WARN_ON(ctx->dst_bufs_cnt != 0);
+ mfc_debug(2, "Allocating %d buffers for CAPTURE queue\n",
+ reqbufs->count);
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ if (ret)
+ goto out;
+
+ ctx->capture_state = QUEUE_BUFS_REQUESTED;
+ ctx->total_dpb_count = reqbufs->count;
+
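+ /* The CAPTURE buffers act as the decoder's DPB; allocate the
+ * remaining codec-internal buffers before decoding can start. */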
+ ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_codec_buffers, ctx);
+ if (ret) {
+ mfc_err("Failed to allocate decoding buffers\n");
+ reqbufs->count = 0;
+ vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ ret = -ENOMEM;
+ ctx->capture_state = QUEUE_FREE;
+ goto out;
+ }
+
+ WARN_ON(ctx->dst_bufs_cnt != ctx->total_dpb_count);
+ ctx->capture_state = QUEUE_BUFS_MMAPED;
+
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_INIT_BUFFERS_RET,
+ 0);
+ } else {
+ mfc_err("Buffers have already been requested\n");
+ ret = -EINVAL;
+ }
+out:
+ s5p_mfc_clock_off();
+ if (ret)
+ mfc_err("Failed allocating buffers for CAPTURE queue\n");
+ return ret;
+}
+
+/* Request buffers */
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (reqbufs->memory != V4L2_MEMORY_MMAP) {
+ mfc_debug(2, "Only V4L2_MEMORY_MMAP is supported\n");
+ return -EINVAL;
+ }
+
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ return reqbufs_output(dev, ctx, reqbufs);
+ } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ return reqbufs_capture(dev, ctx, reqbufs);
+ } else {
+ mfc_err("Invalid type requested\n");
+ return -EINVAL;
+ }
+}
+
+/* Query buffer */
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+ int i;
+
+ if (buf->memory != V4L2_MEMORY_MMAP) {
+ mfc_err("Only mmaped buffers can be used\n");
+ return -EINVAL;
+ }
+ mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type);
+ if (ctx->state == MFCINST_GOT_INST &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_src, buf);
+ } else if (ctx->state == MFCINST_RUNNING &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_dst, buf);
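+ /* Shift CAPTURE plane offsets by DST_QUEUE_OFF_BASE so that
+ * mmap() can tell them apart from OUTPUT buffers on the same node. */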
+ for (i = 0; i < buf->length; i++)
+ buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE;
+ } else {
+ mfc_err("vidioc_querybuf called in an inappropriate state\n");
+ ret = -EINVAL;
+ }
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on QBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_qbuf(&ctx->vq_src, buf);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_qbuf(&ctx->vq_dst, buf);
+ return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ const struct v4l2_event ev = {
+ .type = V4L2_EVENT_EOS
+ };
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err_limited("Call on DQBUF after unrecoverable error\n");
+ return -EIO;
+ }
+
+ switch (buf->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ ret = vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ if (ctx->state == MFCINST_FINISHED &&
+ (ctx->dst_bufs[buf->index].flags & MFC_BUF_FLAG_EOS))
+ v4l2_event_queue_fh(&ctx->fh, &ev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = -EINVAL;
+
+ mfc_debug_enter();
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ ret = vb2_streamon(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ret = vb2_streamon(&ctx->vq_dst, type);
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Stream off, which is equivalent to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamoff(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamoff(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+/* Set controls - v4l2 control framework */
+static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
+ ctx->display_delay = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
+ ctx->display_delay_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ ctx->loop_filter_mpeg4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ ctx->slice_interface = ctrl->val;
+ break;
+ default:
+ mfc_err("Invalid control 0x%08x\n", ctrl->id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->pb_count;
+ break;
+ } else if (ctx->state != MFCINST_INIT &&
+ ctx->state != MFCINST_RES_CHANGE_END) {
+ v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+ return -EINVAL;
+ }
+ /* Should wait for the header to be parsed */
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->pb_count;
+ } else {
+ v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
+}
+
+
+static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
+ .s_ctrl = s5p_mfc_dec_s_ctrl,
+ .g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl,
+};
+
+/* Get cropping information */
+static int vidioc_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *cr)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ u32 left, right, top, bottom;
+
+ if (ctx->state != MFCINST_HEAD_PARSED &&
+ ctx->state != MFCINST_RUNNING &&
+ ctx->state != MFCINST_FINISHING &&
+ ctx->state != MFCINST_FINISHED) {
+ mfc_err("Can not get crop information\n");
+ return -EINVAL;
+ }
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
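+ /* The hardware packs the left/right and top/bottom crop offsets
+ * into single words, hence the shift/mask pairs below. */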
+ left = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_h, ctx);
+ right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
+ left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK;
+ top = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_v, ctx);
+ bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT;
+ top = top & S5P_FIMV_SHARED_CROP_TOP_MASK;
+ cr->c.left = left;
+ cr->c.top = top;
+ cr->c.width = ctx->img_width - left - right;
+ cr->c.height = ctx->img_height - top - bottom;
+ mfc_debug(2, "Cropping info [h264]: l=%d t=%d w=%d h=%d (r=%d b=%d fw=%d fh=%d\n",
+ left, top, cr->c.width, cr->c.height, right, bottom,
+ ctx->buf_width, ctx->buf_height);
+ } else {
+ cr->c.left = 0;
+ cr->c.top = 0;
+ cr->c.width = ctx->img_width;
+ cr->c.height = ctx->img_height;
+ mfc_debug(2, "Cropping info: w=%d h=%d fw=%d fh=%d\n",
+ cr->c.width, cr->c.height, ctx->buf_width,
+ ctx->buf_height);
+ }
+ return 0;
+}
+
+static int vidioc_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *cmd)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *buf;
+ unsigned long flags;
+
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ if (cmd->flags != 0)
+ return -EINVAL;
+
+ if (!vb2_is_streaming(&ctx->vq_src))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (list_empty(&ctx->src_queue)) {
+ mfc_err("EOS: empty src queue, entering finishing state");
+ ctx->state = MFCINST_FINISHING;
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ } else {
+ mfc_err("EOS: marking last buffer of stream");
+ buf = list_entry(ctx->src_queue.prev,
+ struct s5p_mfc_buf, list);
+ if (buf->flags & MFC_BUF_FLAG_USED)
+ ctx->state = MFCINST_FINISHING;
+ else
+ buf->flags |= MFC_BUF_FLAG_EOS;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+
+/* v4l2_ioctl_ops */
+static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_decoder_cmd = vidioc_decoder_cmd,
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq,
+ unsigned int *buf_count,
+ unsigned int *plane_count, unsigned int psize[],
+ struct device *alloc_devs[])
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ /* Video output for decoding (source)
+ * this can be set after getting an instance */
+ if (ctx->state == MFCINST_INIT &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* A single plane is required for input */
+ *plane_count = 1;
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ /* Video capture for decoding (destination)
+ * this can be set after the header was parsed */
+ } else if (ctx->state == MFCINST_HEAD_PARSED &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* Decoded frames use 2 planes - one for Y and one for CbCr */
+ *plane_count = 2;
+ /* Setup buffer count */
+ if (*buf_count < ctx->pb_count)
+ *buf_count = ctx->pb_count;
+ if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB)
+ *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ } else {
+ mfc_err("State seems invalid. State = %d, vq->type = %d\n",
+ ctx->state, vq->type);
+ return -EINVAL;
+ }
+ mfc_debug(2, "Buffer count=%d, plane count=%d\n",
+ *buf_count, *plane_count);
+ if (ctx->state == MFCINST_HEAD_PARSED &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ psize[0] = ctx->luma_size;
+ psize[1] = ctx->chroma_size;
+
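+ /* Pre-v6 MFC expects the luma and chroma planes in different
+ * memory banks; v6+ can take both planes from the same bank. */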
+ if (IS_MFCV6_PLUS(dev))
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
+ else
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_R_CTX];
+ alloc_devs[1] = ctx->dev->mem_dev[BANK_L_CTX];
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ ctx->state == MFCINST_INIT) {
+ psize[0] = ctx->dec_src_buf_size;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
+ } else {
+ mfc_err("This video node is dedicated to decoding. Decoding not initialized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ unsigned int i;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->capture_state == QUEUE_BUFS_MMAPED)
+ return 0;
+ for (i = 0; i < ctx->dst_fmt->num_planes; i++) {
+ if (IS_ERR_OR_NULL(ERR_PTR(
+ vb2_dma_contig_plane_dma_addr(vb, i)))) {
+ mfc_err("Plane mem not allocated\n");
+ return -EINVAL;
+ }
+ }
+ if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+ vb2_plane_size(vb, 1) < ctx->chroma_size) {
+ mfc_err("Plane buffer (CAPTURE) is too small\n");
+ return -EINVAL;
+ }
+ i = vb->index;
+ ctx->dst_bufs[i].b = vbuf;
+ ctx->dst_bufs[i].cookie.raw.luma =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ ctx->dst_bufs[i].cookie.raw.chroma =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ ctx->dst_bufs_cnt++;
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (IS_ERR_OR_NULL(ERR_PTR(
+ vb2_dma_contig_plane_dma_addr(vb, 0)))) {
+ mfc_err("Plane memory not allocated\n");
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) {
+ mfc_err("Plane buffer (OUTPUT) is too small\n");
+ return -EINVAL;
+ }
+
+ i = vb->index;
+ ctx->src_bufs[i].b = vbuf;
+ ctx->src_bufs[i].cookie.stream =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ ctx->src_bufs_cnt++;
+ } else {
+ mfc_err("s5p_mfc_buf_init: unknown queue type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_FINISHED)
+ ctx->state = MFCINST_RUNNING;
+ /* If the context is ready, schedule it to run */
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ return 0;
+}
+
+static void s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+ unsigned long flags;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ int aborted = 0;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if ((ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_RUNNING) &&
+ dev->curr_ctx == ctx->num && dev->hw_lock) {
+ ctx->state = MFCINST_ABORT;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0);
+ aborted = 1;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->dst_queue_cnt = 0;
+ ctx->dpb_flush_flag = 1;
+ ctx->dec_dst_flag = 0;
+ if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) {
+ ctx->state = MFCINST_FLUSH;
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0))
+ mfc_err("Err flushing buffers\n");
+ spin_lock_irqsave(&dev->irqlock, flags);
+ }
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ INIT_LIST_HEAD(&ctx->src_queue);
+ ctx->src_queue_cnt = 0;
+ }
+ if (aborted)
+ ctx->state = MFCINST_RUNNING;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *mfc_buf;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_buf = &ctx->src_bufs[vb->index];
+ mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ list_add_tail(&mfc_buf->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ mfc_buf = &ctx->dst_bufs[vb->index];
+ mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
+ /* Mark destination as available for use by MFC */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ set_bit(vb->index, &ctx->dec_dst_flag);
+ list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+ ctx->dst_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ mfc_err("Unsupported buffer type (%d)\n", vq->type);
+ }
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+}
+
+static struct vb2_ops s5p_mfc_dec_qops = {
+ .queue_setup = s5p_mfc_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_init = s5p_mfc_buf_init,
+ .start_streaming = s5p_mfc_start_streaming,
+ .stop_streaming = s5p_mfc_stop_streaming,
+ .buf_queue = s5p_mfc_buf_queue,
+};
+
+const struct s5p_mfc_codec_ops *get_dec_codec_ops(void)
+{
+ return &decoder_codec_ops;
+}
+
+struct vb2_ops *get_dec_queue_ops(void)
+{
+ return &s5p_mfc_dec_qops;
+}
+
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void)
+{
+ return &s5p_mfc_dec_ioctl_ops;
+}
+
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2WHICH(x) == V4L2_CTRL_CLASS_MPEG) \
+ && V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_ctrl_config cfg;
+ int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+ if (ctx->ctrl_handler.error) {
+ mfc_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_handler.error;
+ }
+
+ for (i = 0; i < NUM_CTRLS; i++) {
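+ /* Driver-private MFC5.1 controls are not known to the control
+ * framework, so register them as custom controls; the rest go
+ * through v4l2_ctrl_new_std(). */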
+ if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
+ cfg.ops = &s5p_mfc_dec_ctrl_ops;
+ cfg.id = controls[i].id;
+ cfg.min = controls[i].minimum;
+ cfg.max = controls[i].maximum;
+ cfg.def = controls[i].default_value;
+ cfg.name = controls[i].name;
+ cfg.type = controls[i].type;
+
+ cfg.step = controls[i].step;
+ cfg.menu_skip_mask = 0;
+
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &cfg, NULL);
+ } else {
+ ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &s5p_mfc_dec_ctrl_ops,
+ controls[i].id, controls[i].minimum,
+ controls[i].maximum, controls[i].step,
+ controls[i].default_value);
+ }
+ if (ctx->ctrl_handler.error) {
+ mfc_err("Adding control (%d) failed\n", i);
+ return ctx->ctrl_handler.error;
+ }
+ if (controls[i].is_volatile && ctx->ctrls[i])
+ ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+ return 0;
+}
+
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+ int i;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ for (i = 0; i < NUM_CTRLS; i++)
+ ctx->ctrls[i] = NULL;
+}
+
+void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_format f;
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ ctx->src_fmt = find_format(&f, MFC_FMT_DEC);
+ if (IS_MFCV8_PLUS(ctx->dev))
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
+ else if (IS_MFCV6_PLUS(ctx->dev))
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT_16X16;
+ else
+ f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT;
+ ctx->dst_fmt = find_format(&f, MFC_FMT_RAW);
+ mfc_debug(2, "Default src_fmt is %p, dest_fmt is %p\n",
+ ctx->src_fmt, ctx->dst_fmt);
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
new file mode 100644
index 000000000..886628b15
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
@@ -0,0 +1,24 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_DEC_H_
+#define S5P_MFC_DEC_H_
+
+const struct s5p_mfc_codec_ops *get_dec_codec_ops(void);
+struct vb2_ops *get_dec_queue_ops(void);
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_dec_def_fmt(bool src);
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_DEC_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
new file mode 100644
index 000000000..3ad4f5073
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -0,0 +1,2713 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Jeongtae Park <jtp.park@samsung.com>
+ * Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-event.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-v4l2.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+
+#define DEF_SRC_FMT_ENC V4L2_PIX_FMT_NV12M
+#define DEF_DST_FMT_ENC V4L2_PIX_FMT_H264
+
+static struct s5p_mfc_fmt formats[] = {
+ {
+ .name = "4:2:0 2 Planes 16x16 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT_16X16,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V6_BIT | MFC_V7_BIT,
+ },
+ {
+ .name = "4:2:0 2 Planes 64x32 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V5_BIT,
+ },
+ {
+ .name = "4:2:0 2 Planes Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "4:2:0 2 Planes Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ .versions = MFC_V6PLUS_BITS,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .codec_mode = S5P_MFC_CODEC_H264_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .codec_mode = S5P_MFC_CODEC_MPEG4_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "H263 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H263,
+ .codec_mode = S5P_MFC_CODEC_H263_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ .versions = MFC_V5PLUS_BITS,
+ },
+ {
+ .name = "VP8 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .codec_mode = S5P_MFC_CODEC_VP8_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ .versions = MFC_V7PLUS_BITS,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HEVC,
+ .codec_mode = S5P_FIMV_CODEC_HEVC_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ .versions = MFC_V10_BIT,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == t)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct mfc_control controls[] = {
+ {
+ .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 12,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ .default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1900,
+ .maximum = (1 << 30) - 1,
+ .step = 1,
+ .default_value = 1900,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Padding Control Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Padding Color YUV Value",
+ .minimum = 0,
+ .maximum = (1 << 25) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = (1 << 30) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Rate Control Reaction Coeff.",
+ .minimum = 1,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Force frame type",
+ .minimum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+ .maximum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED,
+ .default_value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME,
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .minimum = 0,
+ .maximum = 0,
+ .step = 0,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VBV_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Horizontal MV Search Range",
+ .minimum = 16,
+ .maximum = 128,
+ .step = 16,
+ .default_value = 32,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Vertical MV Search Range",
+ .minimum = 16,
+ .maximum = 128,
+ .step = 16,
+ .default_value = 32,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .maximum = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .default_value = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Frame Skip Enable",
+ .minimum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .maximum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .menu_skip_mask = 0,
+ .default_value = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Fixed Target Bit Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ ),
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+ .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+ .maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ .default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .maximum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+ .default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "The Number of Ref. Pic for P",
+ .minimum = 1,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 51,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 I-Frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 Minimum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 Maximum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 31,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 P frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 B frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 I-Frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 Minimum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 Maximum QP value",
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 51,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 P frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 B frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Dark Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Smooth Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Static Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Activity Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
+ .maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
+ .default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+ .maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
+ .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_QPEL,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .maximum = V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS,
+ .default_value = V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES,
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .maximum = V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME,
+ .default_value = V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 7,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV,
+ .maximum = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD,
+ .default_value = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 127,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 11,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 10,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 10,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ .default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "HEVC I Frame QP Value",
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "HEVC P Frame QP Value",
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .maximum = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE,
+ .step = 1,
+ .default_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .maximum = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step = 1,
+ .default_value = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_TIER,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEVC_TIER_MAIN,
+ .maximum = V4L2_MPEG_VIDEO_HEVC_TIER_HIGH,
+ .step = 1,
+ .default_value = V4L2_MPEG_VIDEO_HEVC_TIER_MAIN,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE,
+ .maximum = V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR,
+ .step = 1,
+ .default_value = V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED,
+ .maximum = V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+ .step = 1,
+ .default_value = V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B,
+ .maximum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P,
+ .step = 1,
+ .default_value = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEVC_SIZE_0,
+ .maximum = V4L2_MPEG_VIDEO_HEVC_SIZE_4,
+ .step = 1,
+ .default_value = V4L2_MPEG_VIDEO_HEVC_SIZE_0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Minimum number of output bufs",
+ .minimum = 1,
+ .maximum = 32,
+ .step = 1,
+ .default_value = 1,
+ .is_volatile = 1,
+ },
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+static const char * const *mfc51_get_menu(u32 id)
+{
+ static const char * const mfc51_video_frame_skip[] = {
+ "Disabled",
+ "Level Limit",
+ "VBV/CPB Limit",
+ NULL,
+ };
+ static const char * const mfc51_video_force_frame[] = {
+ "Disabled",
+ "I Frame",
+ "Not Coded",
+ NULL,
+ };
+ switch (id) {
+ case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+ return mfc51_video_frame_skip;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ return mfc51_video_force_frame;
+ }
+ return NULL;
+}
+
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+ mfc_debug(2, "src=%d, dst=%d, state=%d\n",
+ ctx->src_queue_cnt, ctx->dst_queue_cnt, ctx->state);
+ /* context is ready to make header */
+ if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1)
+ return 1;
+ /* context is ready to encode a frame */
+ if ((ctx->state == MFCINST_RUNNING ||
+ ctx->state == MFCINST_HEAD_PRODUCED) &&
+ ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+ return 1;
+ /* context is ready to encode remaining frames */
+ if (ctx->state == MFCINST_FINISHING &&
+ ctx->dst_queue_cnt >= 1)
+ return 1;
+ mfc_debug(2, "ctx is not ready\n");
+ return 0;
+}
+
+static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_buf *mb_entry;
+
+ /* move buffers in ref queue to src queue */
+ while (!list_empty(&ctx->ref_queue)) {
+ mb_entry = list_entry((&ctx->ref_queue)->next,
+ struct s5p_mfc_buf, list);
+ list_del(&mb_entry->list);
+ ctx->ref_queue_cnt--;
+ list_add_tail(&mb_entry->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ }
+ mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+ ctx->src_queue_cnt, ctx->ref_queue_cnt);
+ INIT_LIST_HEAD(&ctx->ref_queue);
+ ctx->ref_queue_cnt = 0;
+}
+
+static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long dst_addr;
+ unsigned int dst_size;
+
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
+ s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
+ dst_size);
+ return 0;
+}
+
+static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned int enc_pb_count;
+
+ if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
+ if (!list_empty(&ctx->dst_queue)) {
+ dst_mb = list_entry(ctx->dst_queue.next,
+ struct s5p_mfc_buf, list);
+ list_del(&dst_mb->list);
+ ctx->dst_queue_cnt--;
+ vb2_set_plane_payload(&dst_mb->b->vb2_buf, 0,
+ s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size,
+ dev));
+ vb2_buffer_done(&dst_mb->b->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ }
+ }
+
+ if (!IS_MFCV6_PLUS(dev)) {
+ ctx->state = MFCINST_RUNNING;
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ } else {
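+ /* On MFC v6+ the minimum encoder DPB count (and, where supported,
+ * the scratch buffer size) is only known once the headers
+ * have been produced. */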
+ enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
+ get_enc_dpb_count, dev);
+ if (ctx->pb_count < enc_pb_count)
+ ctx->pb_count = enc_pb_count;
+ if (FW_HAS_E_MIN_SCRATCH_BUF(dev)) {
+ ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
+ get_e_min_scratch_buf_size, dev);
+ ctx->bank1.size += ctx->scratch_buf_size;
+ }
+ ctx->state = MFCINST_HEAD_PRODUCED;
+ }
+
+ return 0;
+}
+
+static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ struct s5p_mfc_buf *src_mb;
+ unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned int dst_size;
+
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
+ s5p_mfc_hw_call(dev->mfc_ops, set_enc_frame_buffer, ctx,
+ src_y_addr, src_c_addr);
+
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
+ s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
+ dst_size);
+
+ return 0;
+}
+
+static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *mb_entry;
+ unsigned long enc_y_addr = 0, enc_c_addr = 0;
+ unsigned long mb_y_addr, mb_c_addr;
+ int slice_type;
+ unsigned int strm_size;
+
+ slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
+ strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
+ mfc_debug(2, "Encoded slice type: %d\n", slice_type);
+ mfc_debug(2, "Encoded stream size: %d\n", strm_size);
+ mfc_debug(2, "Display order: %d\n",
+ mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
+ if (slice_type >= 0) {
+ s5p_mfc_hw_call(dev->mfc_ops, get_enc_frame_buffer, ctx,
+ &enc_y_addr, &enc_c_addr);
+ list_for_each_entry(mb_entry, &ctx->src_queue, list) {
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 1);
+ if ((enc_y_addr == mb_y_addr) &&
+ (enc_c_addr == mb_c_addr)) {
+ list_del(&mb_entry->list);
+ ctx->src_queue_cnt--;
+ vb2_buffer_done(&mb_entry->b->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ break;
+ }
+ }
+ list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 1);
+ if ((enc_y_addr == mb_y_addr) &&
+ (enc_c_addr == mb_c_addr)) {
+ list_del(&mb_entry->list);
+ ctx->ref_queue_cnt--;
+ vb2_buffer_done(&mb_entry->b->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ break;
+ }
+ }
+ }
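+ /* A consumed source frame may still be needed as a reference, so
+ * park it on ref_queue; cleanup_ref_queue() moves such buffers
+ * back to the source queue. */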
+ if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
+ mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ if (mb_entry->flags & MFC_BUF_FLAG_USED) {
+ list_del(&mb_entry->list);
+ ctx->src_queue_cnt--;
+ list_add_tail(&mb_entry->list, &ctx->ref_queue);
+ ctx->ref_queue_cnt++;
+ }
+ }
+ mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+ ctx->src_queue_cnt, ctx->ref_queue_cnt);
+ if ((ctx->dst_queue_cnt > 0) && (strm_size > 0)) {
+ mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
+ list);
+ list_del(&mb_entry->list);
+ ctx->dst_queue_cnt--;
+ switch (slice_type) {
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
+ mb_entry->b->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
+ mb_entry->b->flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
+ mb_entry->b->flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ }
+ vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
+ vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
+ }
+ if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
+ clear_work_bit(ctx);
+
+ return 0;
+}
+
+static const struct s5p_mfc_codec_ops encoder_codec_ops = {
+ .pre_seq_start = enc_pre_seq_start,
+ .post_seq_start = enc_post_seq_start,
+ .pre_frame_start = enc_pre_frame_start,
+ .post_frame_start = enc_post_frame_start,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+
+ strlcpy(cap->driver, S5P_MFC_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, dev->vfd_enc->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&dev->plat_dev->dev));
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
+ bool out)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (out && formats[i].type != MFC_FMT_RAW)
+ continue;
+ else if (!out && formats[i].type != MFC_FMT_ENC)
+ continue;
+ else if ((dev->variant->version_bit & formats[i].versions) == 0)
+ continue;
+
+ if (j == f->index) {
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name,
+ sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+ return -EINVAL;
+}
+
+ static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, f, false);
+}
+
+ static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, f, true);
+}
+
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* This is the capture queue: the encoder destination (compressed stream) */
+ pix_fmt_mp->width = 0;
+ pix_fmt_mp->height = 0;
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->pixelformat = ctx->dst_fmt->fourcc;
+ pix_fmt_mp->num_planes = ctx->dst_fmt->num_planes;
+
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->enc_dst_buf_size;
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->enc_dst_buf_size;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* This is the output queue: the encoder source (raw frames) */
+ pix_fmt_mp->width = ctx->img_width;
+ pix_fmt_mp->height = ctx->img_height;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->pixelformat = ctx->src_fmt->fourcc;
+ pix_fmt_mp->num_planes = ctx->src_fmt->num_planes;
+
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt = find_format(f, MFC_FMT_ENC);
+ if (!fmt) {
+ mfc_err("failed to try output format\n");
+ return -EINVAL;
+ }
+ if ((dev->variant->version_bit & fmt->versions) == 0) {
+ mfc_err("Unsupported format by this MFC version.\n");
+ return -EINVAL;
+ }
+
+ pix_fmt_mp->plane_fmt[0].bytesperline =
+ pix_fmt_mp->plane_fmt[0].sizeimage;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt = find_format(f, MFC_FMT_RAW);
+ if (!fmt) {
+ mfc_err("failed to try output format\n");
+ return -EINVAL;
+ }
+ if ((dev->variant->version_bit & fmt->versions) == 0) {
+ mfc_err("Unsupported format by this MFC version.\n");
+ return -EINVAL;
+ }
+
+ v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+ &pix_fmt_mp->height, 4, 1080, 1, 0);
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ int ret = 0;
+
+ ret = vidioc_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+ if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+ v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_ENC);
+ ctx->state = MFCINST_INIT;
+ ctx->codec_mode = ctx->dst_fmt->codec_mode;
+ ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ ctx->dst_bufs_cnt = 0;
+ ctx->capture_state = QUEUE_FREE;
+ ret = s5p_mfc_open_mfc_inst(dev, ctx);
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_RAW);
+ ctx->img_width = pix_fmt_mp->width;
+ ctx->img_height = pix_fmt_mp->height;
+ mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
+ mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n",
+ pix_fmt_mp->width, pix_fmt_mp->height,
+ ctx->img_width, ctx->img_height);
+
+ s5p_mfc_hw_call(dev->mfc_ops, enc_calc_src_size, ctx);
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+
+ ctx->src_bufs_cnt = 0;
+ ctx->output_state = QUEUE_FREE;
+ } else {
+ mfc_err("invalid buf type\n");
+ ret = -EINVAL;
+ }
+out:
+ mfc_debug_leave();
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+
+ /* If memory is neither mmap nor userptr, return an error */
+ if ((reqbufs->memory != V4L2_MEMORY_MMAP) &&
+ (reqbufs->memory != V4L2_MEMORY_USERPTR))
+ return -EINVAL;
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers,
+ ctx);
+ ctx->capture_state = QUEUE_FREE;
+ return ret;
+ }
+ if (ctx->capture_state != QUEUE_FREE) {
+ mfc_err("invalid capture state: %d\n",
+ ctx->capture_state);
+ return -EINVAL;
+ }
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ if (ret != 0) {
+ mfc_err("error in vb2_reqbufs() for E(D)\n");
+ return ret;
+ }
+ ctx->capture_state = QUEUE_BUFS_REQUESTED;
+
+ ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
+ alloc_codec_buffers, ctx);
+ if (ret) {
+ mfc_err("Failed to allocate encoding buffers\n");
+ reqbufs->count = 0;
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ return -ENOMEM;
+ }
+ } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers,
+ ctx);
+ ctx->output_state = QUEUE_FREE;
+ return ret;
+ }
+ if (ctx->output_state != QUEUE_FREE) {
+ mfc_err("invalid output state: %d\n",
+ ctx->output_state);
+ return -EINVAL;
+ }
+
+ if (IS_MFCV6_PLUS(dev)) {
+ /* Check for min encoder buffers */
+ if (ctx->pb_count &&
+ (reqbufs->count < ctx->pb_count)) {
+ reqbufs->count = ctx->pb_count;
+ mfc_debug(2, "Minimum %d output buffers needed\n",
+ ctx->pb_count);
+ } else {
+ ctx->pb_count = reqbufs->count;
+ }
+ }
+
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ if (ret != 0) {
+ mfc_err("error in vb2_reqbufs() for E(S)\n");
+ return ret;
+ }
+ ctx->output_state = QUEUE_BUFS_REQUESTED;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+
+ /* If memory is neither mmap nor userptr, return an error */
+ if ((buf->memory != V4L2_MEMORY_MMAP) &&
+ (buf->memory != V4L2_MEMORY_USERPTR))
+ return -EINVAL;
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->state != MFCINST_GOT_INST) {
+ mfc_err("invalid context state: %d\n", ctx->state);
+ return -EINVAL;
+ }
+ ret = vb2_querybuf(&ctx->vq_dst, buf);
+ if (ret != 0) {
+ mfc_err("error in vb2_querybuf() for E(D)\n");
+ return ret;
+ }
+ buf->m.planes[0].m.mem_offset += DST_QUEUE_OFF_BASE;
+ } else if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_src, buf);
+ if (ret != 0) {
+ mfc_err("error in vb2_querybuf() for E(S)\n");
+ return ret;
+ }
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on QBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (ctx->state == MFCINST_FINISHING) {
+ mfc_err("Call on QBUF after EOS command\n");
+ return -EIO;
+ }
+ return vb2_qbuf(&ctx->vq_src, buf);
+ } else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ return vb2_qbuf(&ctx->vq_dst, buf);
+ }
+ return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ const struct v4l2_event ev = {
+ .type = V4L2_EVENT_EOS
+ };
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err_limited("Call on DQBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+ } else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
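+ /* Signal EOS to userspace once the last encoded buffer is dequeued. */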
+ if (ret == 0 && ctx->state == MFCINST_FINISHED
+ && list_empty(&ctx->vq_dst.done_list))
+ v4l2_event_queue_fh(&ctx->fh, &ev);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamon(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamon(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+/* Stream off, which equals to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamoff(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamoff(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+static inline int h264_level(enum v4l2_mpeg_video_h264_level lvl)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_H264_LEVEL_4_0 + 1] = {
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_0 */ 10,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1B */ 9,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_1 */ 11,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_2 */ 12,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_3 */ 13,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_0 */ 20,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_1 */ 21,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_2 */ 22,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_0 */ 30,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_1 */ 31,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_2 */ 32,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_4_0 */ 40,
+ };
+ return t[lvl];
+}
+
+static inline int mpeg4_level(enum v4l2_mpeg_video_mpeg4_level lvl)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 + 1] = {
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 */ 0,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B */ 9,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 */ 1,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 */ 2,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 */ 3,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B */ 7,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 */ 4,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 */ 5,
+ };
+ return t[lvl];
+}
+
+static inline int hevc_level(enum v4l2_mpeg_video_hevc_level lvl)
+{
+ static unsigned int t[] = {
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_1 */ 10,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_2 */ 20,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1 */ 21,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_3 */ 30,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1 */ 31,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_4 */ 40,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1 */ 41,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_5 */ 50,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 */ 51,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2 */ 52,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_6 */ 60,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 */ 61,
+ /* V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 */ 62,
+ };
+ return t[lvl];
+}
+
+static inline int vui_sar_idc(enum v4l2_mpeg_video_h264_vui_sar_idc sar)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED + 1] = {
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED */ 0,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 */ 1,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 */ 2,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 */ 3,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 */ 4,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 */ 5,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 */ 6,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 */ 7,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 */ 8,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 */ 9,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 */ 10,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 */ 11,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 */ 12,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 */ 13,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 */ 14,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 */ 15,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 */ 16,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED */ 255,
+ };
+ return t[sar];
+}
+
+/*
+ * Update range of all HEVC quantization parameter controls that depend on the
+ * V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP controls.
+ */
+static void __enc_update_hevc_qp_ctrls_range(struct s5p_mfc_ctx *ctx,
+ int min, int max)
+{
+ static const int __hevc_qp_ctrls[] = {
+ V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP,
+ };
+ struct v4l2_ctrl *ctrl = NULL;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(__hevc_qp_ctrls); i++) {
+ for (j = 0; j < ARRAY_SIZE(ctx->ctrls); j++) {
+ if (ctx->ctrls[j]->id == __hevc_qp_ctrls[i]) {
+ ctrl = ctx->ctrls[j];
+ break;
+ }
+ }
+ if (WARN_ON(!ctrl))
+ break;
+
+ __v4l2_ctrl_modify_range(ctrl, min, max, ctrl->step, min);
+ }
+}
+
+static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ p->gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ p->slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ p->slice_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ p->slice_bit = ctrl->val * 8;
+ break;
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+ p->intra_refresh_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_PADDING:
+ p->pad = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV:
+ p->pad_luma = (ctrl->val >> 16) & 0xff;
+ p->pad_cb = (ctrl->val >> 8) & 0xff;
+ p->pad_cr = (ctrl->val >> 0) & 0xff;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ p->rc_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ p->rc_bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF:
+ p->rc_reaction_coeff = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ ctx->force_frame_type = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ ctx->force_frame_type =
+ V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+ p->vbv_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+ p->mv_h_range = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+ p->mv_v_range = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ p->codec.h264.cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ p->seq_hdr_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+ p->frame_skip_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT:
+ p->fixed_target_bit = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ p->num_b_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_MAIN;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_HIGH;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_BASELINE;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ if (IS_MFCV6_PLUS(dev))
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_CONSTRAINED_BASELINE;
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ p->codec.h264.level_v4l2 = ctrl->val;
+ p->codec.h264.level = h264_level(ctrl->val);
+ if (p->codec.h264.level < 0) {
+ mfc_err("Level number is wrong\n");
+ ret = p->codec.h264.level;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ p->codec.mpeg4.level_v4l2 = ctrl->val;
+ p->codec.mpeg4.level = mpeg4_level(ctrl->val);
+ if (p->codec.mpeg4.level < 0) {
+ mfc_err("Level number is wrong\n");
+ ret = p->codec.mpeg4.level;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ p->codec.h264.loop_filter_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ p->codec.h264.loop_filter_alpha = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ p->codec.h264.loop_filter_beta = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ p->codec.h264.entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P:
+ p->codec.h264.num_ref_pic_4p = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ p->codec.h264._8x8_transform = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ p->rc_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ p->codec.h264.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ p->codec.h264.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ p->codec.h264.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ p->codec.h264.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ p->codec.h264.rc_b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:
+ p->codec.mpeg4.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:
+ p->codec.mpeg4.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:
+ p->codec.mpeg4.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:
+ p->codec.mpeg4.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:
+ p->codec.mpeg4.rc_b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK:
+ p->codec.h264.rc_mb_dark = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH:
+ p->codec.h264.rc_mb_smooth = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC:
+ p->codec.h264.rc_mb_static = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY:
+ p->codec.h264.rc_mb_activity = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ p->codec.h264.vui_sar = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ p->codec.h264.vui_sar_idc = vui_sar_idc(ctrl->val);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:
+ p->codec.h264.vui_ext_sar_width = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
+ p->codec.h264.vui_ext_sar_height = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ p->codec.h264.open_gop = !ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ p->codec.h264.open_gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+ p->codec.mpeg4.profile =
+ S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE;
+ break;
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+ p->codec.mpeg4.profile =
+ S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ p->codec.mpeg4.quarter_pixel = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ p->codec.vp8.num_partitions = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4:
+ p->codec.vp8.imd_4x4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ p->codec.vp8.num_ref = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL:
+ p->codec.vp8.filter_level = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS:
+ p->codec.vp8.filter_sharpness = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD:
+ p->codec.vp8.golden_frame_ref_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ p->codec.vp8.golden_frame_sel = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
+ p->codec.vp8.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
+ p->codec.vp8.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:
+ p->codec.vp8.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
+ p->codec.vp8.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ p->codec.vp8.profile = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+ p->codec.hevc.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+ p->codec.hevc.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
+ p->codec.hevc.rc_b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION:
+ p->codec.hevc.rc_framerate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
+ p->codec.hevc.rc_min_qp = ctrl->val;
+ __enc_update_hevc_qp_ctrls_range(ctx, ctrl->val,
+ p->codec.hevc.rc_max_qp);
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
+ p->codec.hevc.rc_max_qp = ctrl->val;
+ __enc_update_hevc_qp_ctrls_range(ctx, p->codec.hevc.rc_min_qp,
+ ctrl->val);
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ p->codec.hevc.level_v4l2 = ctrl->val;
+ p->codec.hevc.level = hevc_level(ctrl->val);
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ p->codec.hevc.profile =
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ p->codec.hevc.profile =
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ p->codec.hevc.tier = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH:
+ p->codec.hevc.max_partition_depth = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES:
+ p->codec.hevc.num_refs_for_p = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ p->codec.hevc.refreshtype = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED:
+ p->codec.hevc.const_intra_period_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU:
+ p->codec.hevc.lossless_cu_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT:
+ p->codec.hevc.wavefront_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ p->codec.hevc.loopfilter = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP:
+ p->codec.hevc.hier_qp_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
+ p->codec.hevc.hier_qp_type = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
+ p->codec.hevc.num_hier_layer = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP:
+ p->codec.hevc.hier_qp_layer[0] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP:
+ p->codec.hevc.hier_qp_layer[1] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP:
+ p->codec.hevc.hier_qp_layer[2] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP:
+ p->codec.hevc.hier_qp_layer[3] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP:
+ p->codec.hevc.hier_qp_layer[4] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP:
+ p->codec.hevc.hier_qp_layer[5] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP:
+ p->codec.hevc.hier_qp_layer[6] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
+ p->codec.hevc.hier_bit_layer[0] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
+ p->codec.hevc.hier_bit_layer[1] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
+ p->codec.hevc.hier_bit_layer[2] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
+ p->codec.hevc.hier_bit_layer[3] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
+ p->codec.hevc.hier_bit_layer[4] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
+ p->codec.hevc.hier_bit_layer[5] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR:
+ p->codec.hevc.hier_bit_layer[6] = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB:
+ p->codec.hevc.general_pb_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID:
+ p->codec.hevc.temporal_id_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING:
+ p->codec.hevc.strong_intra_smooth = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT:
+ p->codec.hevc.intra_pu_split_disable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION:
+ p->codec.hevc.tmv_prediction_disable = !ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1:
+ p->codec.hevc.max_num_merge_mv = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE:
+ p->codec.hevc.encoding_nostartcode_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD:
+ p->codec.hevc.refreshperiod = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2:
+ p->codec.hevc.lf_beta_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2:
+ p->codec.hevc.lf_tc_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
+ p->codec.hevc.size_of_length_field = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR:
+ p->codec.hevc.prepend_sps_pps_to_idr = ctrl->val;
+ break;
+ default:
+ v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
+ ctrl->id, ctrl->val);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int s5p_mfc_enc_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->pb_count;
+ break;
+ } else if (ctx->state != MFCINST_INIT) {
+ v4l2_err(&dev->v4l2_dev, "Encoding not initialised\n");
+ return -EINVAL;
+ }
+ /* Should wait for the header to be produced */
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->pb_count;
+ } else {
+ v4l2_err(&dev->v4l2_dev, "Encoding not initialised\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
+ .s_ctrl = s5p_mfc_enc_s_ctrl,
+ .g_volatile_ctrl = s5p_mfc_enc_g_v_ctrl,
+};
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ctx->enc_params.rc_framerate_num =
+ a->parm.output.timeperframe.denominator;
+ ctx->enc_params.rc_framerate_denom =
+ a->parm.output.timeperframe.numerator;
+ } else {
+ mfc_err("Setting FPS is only possible for the output queue\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ a->parm.output.timeperframe.denominator =
+ ctx->enc_params.rc_framerate_num;
+ a->parm.output.timeperframe.numerator =
+ ctx->enc_params.rc_framerate_denom;
+ } else {
+ mfc_err("Setting FPS is only possible for the output queue\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *cmd)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *buf;
+ unsigned long flags;
+
+ switch (cmd->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ if (cmd->flags != 0)
+ return -EINVAL;
+
+ if (!ctx->vq_src.streaming)
+ return -EINVAL;
+
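+ /*
+ * EOS handling: if no source buffers are pending, enter the
+ * finishing state right away; otherwise mark the last queued
+ * buffer so encoding stops after it has been processed.
+ */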
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (list_empty(&ctx->src_queue)) {
+ mfc_debug(2, "EOS: empty src queue, entering finishing state\n");
+ ctx->state = MFCINST_FINISHING;
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ } else {
+ mfc_debug(2, "EOS: marking last buffer of stream\n");
+ buf = list_entry(ctx->src_queue.prev,
+ struct s5p_mfc_buf, list);
+ if (buf->flags & MFC_BUF_FLAG_USED)
+ ctx->state = MFCINST_FINISHING;
+ else
+ buf->flags |= MFC_BUF_FLAG_EOS;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ }
+ break;
+ default:
+ return -EINVAL;
+
+ }
+ return 0;
+}
+
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_s_parm = vidioc_s_parm,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_encoder_cmd = vidioc_encoder_cmd,
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
+{
+ int i;
+
+ if (!fmt)
+ return -EINVAL;
+ if (fmt->num_planes != vb->num_planes) {
+ mfc_err("invalid plane number for the format\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < fmt->num_planes; i++) {
+ dma_addr_t dma = vb2_dma_contig_plane_dma_addr(vb, i);
+ if (!dma) {
+ mfc_err("failed to get plane cookie\n");
+ return -EINVAL;
+ }
+ mfc_debug(2, "index: %d, plane[%d] cookie: %pad\n",
+ vb->index, i, &dma);
+ }
+ return 0;
+}
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq,
+ unsigned int *buf_count, unsigned int *plane_count,
+ unsigned int psize[], struct device *alloc_devs[])
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->state != MFCINST_GOT_INST) {
+ mfc_err("invalid state: %d\n", ctx->state);
+ return -EINVAL;
+ }
+
+ if (ctx->dst_fmt)
+ *plane_count = ctx->dst_fmt->num_planes;
+ else
+ *plane_count = MFC_ENC_CAP_PLANE_COUNT;
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ psize[0] = ctx->enc_dst_buf_size;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (ctx->src_fmt)
+ *plane_count = ctx->src_fmt->num_planes;
+ else
+ *plane_count = MFC_ENC_OUT_PLANE_COUNT;
+
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+
+ psize[0] = ctx->luma_size;
+ psize[1] = ctx->chroma_size;
+
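+ /*
+ * MFC v6+ allocates the raw planes from BANK_L_CTX, while MFC v5
+ * expects them in BANK_R_CTX.
+ */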
+ if (IS_MFCV6_PLUS(dev)) {
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
+ alloc_devs[1] = ctx->dev->mem_dev[BANK_L_CTX];
+ } else {
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_R_CTX];
+ alloc_devs[1] = ctx->dev->mem_dev[BANK_R_CTX];
+ }
+ } else {
+ mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ unsigned int i;
+ int ret;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+ if (ret < 0)
+ return ret;
+ i = vb->index;
+ ctx->dst_bufs[i].b = vbuf;
+ ctx->dst_bufs[i].cookie.stream =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ ctx->dst_bufs_cnt++;
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = check_vb_with_fmt(ctx->src_fmt, vb);
+ if (ret < 0)
+ return ret;
+ i = vb->index;
+ ctx->src_bufs[i].b = vbuf;
+ ctx->src_bufs[i].cookie.raw.luma =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ ctx->src_bufs[i].cookie.raw.chroma =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ ctx->src_bufs_cnt++;
+ } else {
+ mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ int ret;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+ if (ret < 0)
+ return ret;
+ mfc_debug(2, "plane size: %ld, dst size: %zu\n",
+ vb2_plane_size(vb, 0), ctx->enc_dst_buf_size);
+ if (vb2_plane_size(vb, 0) < ctx->enc_dst_buf_size) {
+ mfc_err("plane size is too small for capture\n");
+ return -EINVAL;
+ }
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = check_vb_with_fmt(ctx->src_fmt, vb);
+ if (ret < 0)
+ return ret;
+ mfc_debug(2, "plane size: %ld, luma size: %d\n",
+ vb2_plane_size(vb, 0), ctx->luma_size);
+ mfc_debug(2, "plane size: %ld, chroma size: %d\n",
+ vb2_plane_size(vb, 1), ctx->chroma_size);
+ if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+ vb2_plane_size(vb, 1) < ctx->chroma_size) {
+ mfc_err("plane size is too small for output\n");
+ return -EINVAL;
+ }
+ } else {
+ mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if (IS_MFCV6_PLUS(dev) &&
+ (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
+
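+ /*
+ * If the sequence header is still being generated for this
+ * context, wait for it before checking the OUTPUT buffer count.
+ */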
+ if ((ctx->state == MFCINST_GOT_INST) &&
+ (dev->curr_ctx == ctx->num) && dev->hw_lock) {
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_SEQ_DONE_RET,
+ 0);
+ }
+
+ if (ctx->src_bufs_cnt < ctx->pb_count) {
+ mfc_err("Need minimum %d OUTPUT buffers\n",
+ ctx->pb_count);
+ return -ENOBUFS;
+ }
+ }
+
+ /* If the context is ready, schedule it to run */
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+
+ return 0;
+}
+
+static void s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+ unsigned long flags;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
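+ /*
+ * If the hardware is still processing this context, abort and wait
+ * for the current frame to complete before cleaning up the queues.
+ */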
+ if ((ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_RUNNING) &&
+ dev->curr_ctx == ctx->num && dev->hw_lock) {
+ ctx->state = MFCINST_ABORT;
+ s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_FRAME_DONE_RET,
+ 0);
+ }
+ ctx->state = MFCINST_FINISHED;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->dst_queue_cnt = 0;
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ cleanup_ref_queue(ctx);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ INIT_LIST_HEAD(&ctx->src_queue);
+ ctx->src_queue_cnt = 0;
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *mfc_buf;
+
+ if (ctx->state == MFCINST_ERROR) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ cleanup_ref_queue(ctx);
+ return;
+ }
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ mfc_buf = &ctx->dst_bufs[vb->index];
+ mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
+ /* Mark destination as available for use by MFC */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+ ctx->dst_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_buf = &ctx->src_bufs[vb->index];
+ mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ list_add_tail(&mfc_buf->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ mfc_err("unsupported buffer type (%d)\n", vq->type);
+ }
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+}
+
+static struct vb2_ops s5p_mfc_enc_qops = {
+ .queue_setup = s5p_mfc_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_init = s5p_mfc_buf_init,
+ .buf_prepare = s5p_mfc_buf_prepare,
+ .start_streaming = s5p_mfc_start_streaming,
+ .stop_streaming = s5p_mfc_stop_streaming,
+ .buf_queue = s5p_mfc_buf_queue,
+};
+
+const struct s5p_mfc_codec_ops *get_enc_codec_ops(void)
+{
+ return &encoder_codec_ops;
+}
+
+struct vb2_ops *get_enc_queue_ops(void)
+{
+ return &s5p_mfc_enc_qops;
+}
+
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void)
+{
+ return &s5p_mfc_enc_ioctl_ops;
+}
+
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2WHICH(x) == V4L2_CTRL_CLASS_MPEG) \
+ && V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_ctrl_config cfg;
+ int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+ if (ctx->ctrl_handler.error) {
+ mfc_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_handler.error;
+ }
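+ /*
+ * MFC 5.1 private controls are registered via v4l2_ctrl_new_custom();
+ * standard controls use the v4l2_ctrl_new_std*() helpers.
+ */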
+ for (i = 0; i < NUM_CTRLS; i++) {
+ if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
+ cfg.ops = &s5p_mfc_enc_ctrl_ops;
+ cfg.id = controls[i].id;
+ cfg.min = controls[i].minimum;
+ cfg.max = controls[i].maximum;
+ cfg.def = controls[i].default_value;
+ cfg.name = controls[i].name;
+ cfg.type = controls[i].type;
+ cfg.flags = 0;
+
+ if (cfg.type == V4L2_CTRL_TYPE_MENU) {
+ cfg.step = 0;
+ cfg.menu_skip_mask = controls[i].menu_skip_mask;
+ cfg.qmenu = mfc51_get_menu(cfg.id);
+ } else {
+ cfg.step = controls[i].step;
+ cfg.menu_skip_mask = 0;
+ }
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &cfg, NULL);
+ } else {
+ if ((controls[i].type == V4L2_CTRL_TYPE_MENU) ||
+ (controls[i].type ==
+ V4L2_CTRL_TYPE_INTEGER_MENU)) {
+ ctx->ctrls[i] = v4l2_ctrl_new_std_menu(
+ &ctx->ctrl_handler,
+ &s5p_mfc_enc_ctrl_ops, controls[i].id,
+ controls[i].maximum, 0,
+ controls[i].default_value);
+ } else {
+ ctx->ctrls[i] = v4l2_ctrl_new_std(
+ &ctx->ctrl_handler,
+ &s5p_mfc_enc_ctrl_ops, controls[i].id,
+ controls[i].minimum,
+ controls[i].maximum, controls[i].step,
+ controls[i].default_value);
+ }
+ }
+ if (ctx->ctrl_handler.error) {
+ mfc_err("Adding control (%d) failed\n", i);
+ return ctx->ctrl_handler.error;
+ }
+ if (controls[i].is_volatile && ctx->ctrls[i])
+ ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ return 0;
+}
+
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+ int i;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ for (i = 0; i < NUM_CTRLS; i++)
+ ctx->ctrls[i] = NULL;
+}
+
+void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_format f;
+
+ f.fmt.pix_mp.pixelformat = DEF_SRC_FMT_ENC;
+ ctx->src_fmt = find_format(&f, MFC_FMT_RAW);
+ f.fmt.pix_mp.pixelformat = DEF_DST_FMT_ENC;
+ ctx->dst_fmt = find_format(&f, MFC_FMT_ENC);
+}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
new file mode 100644
index 000000000..d0d42f818
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
@@ -0,0 +1,24 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_ENC_H_
+#define S5P_MFC_ENC_H_
+
+const struct s5p_mfc_codec_ops *get_enc_codec_ops(void);
+struct vb2_ops *get_enc_queue_ops(void);
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_enc_def_fmt(bool src);
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_ENC_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
new file mode 100644
index 000000000..5b8f0e085
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
@@ -0,0 +1,91 @@
+/*
+ * drivers/media/platform/samsung/mfc5/s5p_mfc_intr.c
+ *
+ * C file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains functions used to wait for command completion.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
+{
+ int ret;
+
+ ret = wait_event_interruptible_timeout(dev->queue,
+ (dev->int_cond && (dev->int_type == command
+ || dev->int_type == S5P_MFC_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ if (ret == 0) {
+ mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n",
+ dev->int_type, command);
+ return 1;
+ } else if (ret == -ERESTARTSYS) {
+ mfc_err("Interrupted by a signal\n");
+ return 1;
+ }
+ mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n",
+ dev->int_type, command);
+ if (dev->int_type == S5P_MFC_R2H_CMD_ERR_RET)
+ return 1;
+ return 0;
+}
+
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev)
+{
+ dev->int_cond = 0;
+ dev->int_type = 0;
+ dev->int_err = 0;
+}
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+ int command, int interrupt)
+{
+ int ret;
+
+ if (interrupt) {
+ ret = wait_event_interruptible_timeout(ctx->queue,
+ (ctx->int_cond && (ctx->int_type == command
+ || ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ } else {
+ ret = wait_event_timeout(ctx->queue,
+ (ctx->int_cond && (ctx->int_type == command
+ || ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ }
+ if (ret == 0) {
+ mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n",
+ ctx->int_type, command);
+ return 1;
+ } else if (ret == -ERESTARTSYS) {
+ mfc_err("Interrupted by a signal\n");
+ return 1;
+ }
+ mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n",
+ ctx->int_type, command);
+ if (ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)
+ return 1;
+ return 0;
+}
+
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx)
+{
+ ctx->int_cond = 0;
+ ctx->int_type = 0;
+ ctx->int_err = 0;
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h
new file mode 100644
index 000000000..18341a885
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h
@@ -0,0 +1,26 @@
+/*
+ * drivers/media/platform/samsung/mfc5/s5p_mfc_intr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * It contains waiting functions declarations.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_INTR_H_
+#define S5P_MFC_INTR_H_
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+ int command, int interrupt);
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command);
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev);
+
+#endif /* S5P_MFC_INTR_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
new file mode 100644
index 000000000..76667924e
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Samsung Electronics Co.Ltd
+ * Authors: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef S5P_MFC_IOMMU_H_
+#define S5P_MFC_IOMMU_H_
+
+#if defined(CONFIG_EXYNOS_IOMMU)
+
+static inline bool exynos_is_iommu_available(struct device *dev)
+{
+ return dev->archdata.iommu != NULL;
+}
+
+#else
+
+static inline bool exynos_is_iommu_available(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
+#endif /* S5P_MFC_IOMMU_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
new file mode 100644
index 000000000..7f33cf239
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -0,0 +1,127 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+ *
+ * Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains hw related functions.
+ *
+ * Kamil Debski, Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_opr_v5.h"
+#include "s5p_mfc_opr_v6.h"
+
+static struct s5p_mfc_hw_ops *s5p_mfc_ops;
+
+void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
+{
+ if (IS_MFCV6_PLUS(dev)) {
+ s5p_mfc_ops = s5p_mfc_init_hw_ops_v6();
+ dev->warn_start = S5P_FIMV_ERR_WARNINGS_START_V6;
+ } else {
+ s5p_mfc_ops = s5p_mfc_init_hw_ops_v5();
+ dev->warn_start = S5P_FIMV_ERR_WARNINGS_START;
+ }
+ dev->mfc_ops = s5p_mfc_ops;
+}
+
+void s5p_mfc_init_regs(struct s5p_mfc_dev *dev)
+{
+ if (IS_MFCV6_PLUS(dev))
+ dev->mfc_regs = s5p_mfc_init_regs_v6_plus(dev);
+}
+
+int s5p_mfc_alloc_priv_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b)
+{
+ unsigned int bits = dev->mem_size >> PAGE_SHIFT;
+ unsigned int count = b->size >> PAGE_SHIFT;
+ unsigned int align = (SZ_64K >> PAGE_SHIFT) - 1;
+ unsigned int start, offset;
+
+ mfc_debug(3, "Allocating priv: %zu\n", b->size);
+
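+ /*
+ * Carve the buffer out of the reserved memory region with the
+ * bitmap allocator when one is available; otherwise fall back to
+ * dma_alloc_coherent() on the bank's memory device.
+ */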
+ if (dev->mem_virt) {
+ start = bitmap_find_next_zero_area(dev->mem_bitmap, bits, 0, count, align);
+ if (start > bits)
+ goto no_mem;
+
+ bitmap_set(dev->mem_bitmap, start, count);
+ offset = start << PAGE_SHIFT;
+ b->virt = dev->mem_virt + offset;
+ b->dma = dev->mem_base + offset;
+ } else {
+ struct device *mem_dev = dev->mem_dev[mem_ctx];
+ dma_addr_t base = dev->dma_base[mem_ctx];
+
+ b->ctx = mem_ctx;
+ b->virt = dma_alloc_coherent(mem_dev, b->size, &b->dma, GFP_KERNEL);
+ if (!b->virt)
+ goto no_mem;
+ if (b->dma < base) {
+ mfc_err("Invalid memory configuration - buffer (%pad) is below base memory address(%pad)\n",
+ &b->dma, &base);
+ dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
+ return -ENOMEM;
+ }
+ }
+
+ mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
+ return 0;
+no_mem:
+ mfc_err("Allocating private buffer of size %zu failed\n", b->size);
+ return -ENOMEM;
+}
+
+int s5p_mfc_alloc_generic_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b)
+{
+ struct device *mem_dev = dev->mem_dev[mem_ctx];
+
+ mfc_debug(3, "Allocating generic buf: %zu\n", b->size);
+
+ b->ctx = mem_ctx;
+ b->virt = dma_alloc_coherent(mem_dev, b->size, &b->dma, GFP_KERNEL);
+ if (!b->virt)
+ goto no_mem;
+
+ mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
+ return 0;
+no_mem:
+ mfc_err("Allocating generic buffer of size %zu failed\n", b->size);
+ return -ENOMEM;
+}
+
+void s5p_mfc_release_priv_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b)
+{
+ if (dev->mem_virt) {
+ unsigned int start = (b->dma - dev->mem_base) >> PAGE_SHIFT;
+ unsigned int count = b->size >> PAGE_SHIFT;
+
+ bitmap_clear(dev->mem_bitmap, start, count);
+ } else {
+ struct device *mem_dev = dev->mem_dev[b->ctx];
+
+ dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
+ }
+ b->virt = NULL;
+ b->dma = 0;
+ b->size = 0;
+}
+
+void s5p_mfc_release_generic_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b)
+{
+ struct device *mem_dev = dev->mem_dev[b->ctx];
+ dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
+ b->virt = NULL;
+ b->dma = 0;
+ b->size = 0;
+}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
new file mode 100644
index 000000000..8c295f0f9
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -0,0 +1,342 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * Contains declarations of hw related functions.
+ *
+ * Kamil Debski, Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_OPR_H_
+#define S5P_MFC_OPR_H_
+
+#include "s5p_mfc_common.h"
+
+struct s5p_mfc_regs {
+
+ /* codec common registers */
+ void __iomem *risc_on;
+ void __iomem *risc2host_int;
+ void __iomem *host2risc_int;
+ void __iomem *risc_base_address;
+ void __iomem *mfc_reset;
+ void __iomem *host2risc_command;
+ void __iomem *risc2host_command;
+ void __iomem *mfc_bus_reset_ctrl;
+ void __iomem *firmware_version;
+ void __iomem *instance_id;
+ void __iomem *codec_type;
+ void __iomem *context_mem_addr;
+ void __iomem *context_mem_size;
+ void __iomem *pixel_format;
+ void __iomem *metadata_enable;
+ void __iomem *mfc_version;
+ void __iomem *dbg_info_enable;
+ void __iomem *dbg_buffer_addr;
+ void __iomem *dbg_buffer_size;
+ void __iomem *hed_control;
+ void __iomem *mfc_timeout_value;
+ void __iomem *hed_shared_mem_addr;
+ void __iomem *dis_shared_mem_addr;/* only v7 */
+ void __iomem *ret_instance_id;
+ void __iomem *error_code;
+ void __iomem *dbg_buffer_output_size;
+ void __iomem *metadata_status;
+ void __iomem *metadata_addr_mb_info;
+ void __iomem *metadata_size_mb_info;
+ void __iomem *dbg_info_stage_counter;
+
+ /* decoder registers */
+ void __iomem *d_crc_ctrl;
+ void __iomem *d_dec_options;
+ void __iomem *d_display_delay;
+ void __iomem *d_set_frame_width;
+ void __iomem *d_set_frame_height;
+ void __iomem *d_sei_enable;
+ void __iomem *d_min_num_dpb;
+ void __iomem *d_min_first_plane_dpb_size;
+ void __iomem *d_min_second_plane_dpb_size;
+ void __iomem *d_min_third_plane_dpb_size;/* only v8 */
+ void __iomem *d_min_num_mv;
+ void __iomem *d_mvc_num_views;
+ void __iomem *d_min_num_dis;/* only v7 */
+ void __iomem *d_min_first_dis_size;/* only v7 */
+ void __iomem *d_min_second_dis_size;/* only v7 */
+ void __iomem *d_min_third_dis_size;/* only v7 */
+ void __iomem *d_post_filter_luma_dpb0;/* v7 and v8 */
+ void __iomem *d_post_filter_luma_dpb1;/* v7 and v8 */
+ void __iomem *d_post_filter_luma_dpb2;/* only v7 */
+ void __iomem *d_post_filter_chroma_dpb0;/* v7 and v8 */
+ void __iomem *d_post_filter_chroma_dpb1;/* v7 and v8 */
+ void __iomem *d_post_filter_chroma_dpb2;/* only v7 */
+ void __iomem *d_num_dpb;
+ void __iomem *d_num_mv;
+ void __iomem *d_init_buffer_options;
+ void __iomem *d_first_plane_dpb_stride_size;/* only v8 */
+ void __iomem *d_second_plane_dpb_stride_size;/* only v8 */
+ void __iomem *d_third_plane_dpb_stride_size;/* only v8 */
+ void __iomem *d_first_plane_dpb_size;
+ void __iomem *d_second_plane_dpb_size;
+ void __iomem *d_third_plane_dpb_size;/* only v8 */
+ void __iomem *d_mv_buffer_size;
+ void __iomem *d_first_plane_dpb;
+ void __iomem *d_second_plane_dpb;
+ void __iomem *d_third_plane_dpb;
+ void __iomem *d_mv_buffer;
+ void __iomem *d_scratch_buffer_addr;
+ void __iomem *d_scratch_buffer_size;
+ void __iomem *d_metadata_buffer_addr;
+ void __iomem *d_metadata_buffer_size;
+ void __iomem *d_nal_start_options;/* v7 and v8 */
+ void __iomem *d_cpb_buffer_addr;
+ void __iomem *d_cpb_buffer_size;
+ void __iomem *d_available_dpb_flag_upper;
+ void __iomem *d_available_dpb_flag_lower;
+ void __iomem *d_cpb_buffer_offset;
+ void __iomem *d_slice_if_enable;
+ void __iomem *d_picture_tag;
+ void __iomem *d_stream_data_size;
+ void __iomem *d_dynamic_dpb_flag_upper;/* v7 and v8 */
+ void __iomem *d_dynamic_dpb_flag_lower;/* v7 and v8 */
+ void __iomem *d_display_frame_width;
+ void __iomem *d_display_frame_height;
+ void __iomem *d_display_status;
+ void __iomem *d_display_first_plane_addr;
+ void __iomem *d_display_second_plane_addr;
+ void __iomem *d_display_third_plane_addr;/* only v8 */
+ void __iomem *d_display_frame_type;
+ void __iomem *d_display_crop_info1;
+ void __iomem *d_display_crop_info2;
+ void __iomem *d_display_picture_profile;
+ void __iomem *d_display_luma_crc;/* v7 and v8 */
+ void __iomem *d_display_chroma0_crc;/* v7 and v8 */
+ void __iomem *d_display_chroma1_crc;/* only v8 */
+ void __iomem *d_display_luma_crc_top;/* only v6 */
+ void __iomem *d_display_chroma_crc_top;/* only v6 */
+ void __iomem *d_display_luma_crc_bot;/* only v6 */
+ void __iomem *d_display_chroma_crc_bot;/* only v6 */
+ void __iomem *d_display_aspect_ratio;
+ void __iomem *d_display_extended_ar;
+ void __iomem *d_decoded_frame_width;
+ void __iomem *d_decoded_frame_height;
+ void __iomem *d_decoded_status;
+ void __iomem *d_decoded_first_plane_addr;
+ void __iomem *d_decoded_second_plane_addr;
+ void __iomem *d_decoded_third_plane_addr;/* only v8 */
+ void __iomem *d_decoded_frame_type;
+ void __iomem *d_decoded_crop_info1;
+ void __iomem *d_decoded_crop_info2;
+ void __iomem *d_decoded_picture_profile;
+ void __iomem *d_decoded_nal_size;
+ void __iomem *d_decoded_luma_crc;
+ void __iomem *d_decoded_chroma0_crc;
+ void __iomem *d_decoded_chroma1_crc;/* only v8 */
+ void __iomem *d_ret_picture_tag_top;
+ void __iomem *d_ret_picture_tag_bot;
+ void __iomem *d_ret_picture_time_top;
+ void __iomem *d_ret_picture_time_bot;
+ void __iomem *d_chroma_format;
+ void __iomem *d_vc1_info;/* v7 and v8 */
+ void __iomem *d_mpeg4_info;
+ void __iomem *d_h264_info;
+ void __iomem *d_metadata_addr_concealed_mb;
+ void __iomem *d_metadata_size_concealed_mb;
+ void __iomem *d_metadata_addr_vc1_param;
+ void __iomem *d_metadata_size_vc1_param;
+ void __iomem *d_metadata_addr_sei_nal;
+ void __iomem *d_metadata_size_sei_nal;
+ void __iomem *d_metadata_addr_vui;
+ void __iomem *d_metadata_size_vui;
+ void __iomem *d_metadata_addr_mvcvui;/* v7 and v8 */
+ void __iomem *d_metadata_size_mvcvui;/* v7 and v8 */
+ void __iomem *d_mvc_view_id;
+ void __iomem *d_frame_pack_sei_avail;
+ void __iomem *d_frame_pack_arrgment_id;
+ void __iomem *d_frame_pack_sei_info;
+ void __iomem *d_frame_pack_grid_pos;
+ void __iomem *d_display_recovery_sei_info;/* v7 and v8 */
+ void __iomem *d_decoded_recovery_sei_info;/* v7 and v8 */
+ void __iomem *d_display_first_addr;/* only v7 */
+ void __iomem *d_display_second_addr;/* only v7 */
+ void __iomem *d_display_third_addr;/* only v7 */
+ void __iomem *d_decoded_first_addr;/* only v7 */
+ void __iomem *d_decoded_second_addr;/* only v7 */
+ void __iomem *d_decoded_third_addr;/* only v7 */
+ void __iomem *d_used_dpb_flag_upper;/* v7 and v8 */
+ void __iomem *d_used_dpb_flag_lower;/* v7 and v8 */
+ void __iomem *d_min_scratch_buffer_size; /* v10 */
+ void __iomem *d_static_buffer_addr; /* v10 */
+ void __iomem *d_static_buffer_size; /* v10 */
+
+ /* encoder registers */
+ void __iomem *e_frame_width;
+ void __iomem *e_frame_height;
+ void __iomem *e_cropped_frame_width;
+ void __iomem *e_cropped_frame_height;
+ void __iomem *e_frame_crop_offset;
+ void __iomem *e_enc_options;
+ void __iomem *e_picture_profile;
+ void __iomem *e_vbv_buffer_size;
+ void __iomem *e_vbv_init_delay;
+ void __iomem *e_fixed_picture_qp;
+ void __iomem *e_rc_config;
+ void __iomem *e_rc_qp_bound;
+ void __iomem *e_rc_qp_bound_pb;/* v7 and v8 */
+ void __iomem *e_rc_mode;
+ void __iomem *e_mb_rc_config;
+ void __iomem *e_padding_ctrl;
+ void __iomem *e_air_threshold;
+ void __iomem *e_mv_hor_range;
+ void __iomem *e_mv_ver_range;
+ void __iomem *e_num_dpb;
+ void __iomem *e_luma_dpb;
+ void __iomem *e_chroma_dpb;
+ void __iomem *e_me_buffer;
+ void __iomem *e_scratch_buffer_addr;
+ void __iomem *e_scratch_buffer_size;
+ void __iomem *e_tmv_buffer0;
+ void __iomem *e_tmv_buffer1;
+ void __iomem *e_ir_buffer_addr;/* v7 and v8 */
+ void __iomem *e_source_first_plane_addr;
+ void __iomem *e_source_second_plane_addr;
+ void __iomem *e_source_third_plane_addr;/* v7 and v8 */
+ void __iomem *e_source_first_plane_stride;/* v7 and v8 */
+ void __iomem *e_source_second_plane_stride;/* v7 and v8 */
+ void __iomem *e_source_third_plane_stride;/* v7 and v8 */
+ void __iomem *e_stream_buffer_addr;
+ void __iomem *e_stream_buffer_size;
+ void __iomem *e_roi_buffer_addr;
+ void __iomem *e_param_change;
+ void __iomem *e_ir_size;
+ void __iomem *e_gop_config;
+ void __iomem *e_mslice_mode;
+ void __iomem *e_mslice_size_mb;
+ void __iomem *e_mslice_size_bits;
+ void __iomem *e_frame_insertion;
+ void __iomem *e_rc_frame_rate;
+ void __iomem *e_rc_bit_rate;
+ void __iomem *e_rc_roi_ctrl;
+ void __iomem *e_picture_tag;
+ void __iomem *e_bit_count_enable;
+ void __iomem *e_max_bit_count;
+ void __iomem *e_min_bit_count;
+ void __iomem *e_metadata_buffer_addr;
+ void __iomem *e_metadata_buffer_size;
+ void __iomem *e_encoded_source_first_plane_addr;
+ void __iomem *e_encoded_source_second_plane_addr;
+ void __iomem *e_encoded_source_third_plane_addr;/* v7 and v8 */
+ void __iomem *e_stream_size;
+ void __iomem *e_slice_type;
+ void __iomem *e_picture_count;
+ void __iomem *e_ret_picture_tag;
+ void __iomem *e_stream_buffer_write_pointer; /* only v6 */
+ void __iomem *e_recon_luma_dpb_addr;
+ void __iomem *e_recon_chroma_dpb_addr;
+ void __iomem *e_metadata_addr_enc_slice;
+ void __iomem *e_metadata_size_enc_slice;
+ void __iomem *e_mpeg4_options;
+ void __iomem *e_mpeg4_hec_period;
+ void __iomem *e_aspect_ratio;
+ void __iomem *e_extended_sar;
+ void __iomem *e_h264_options;
+ void __iomem *e_h264_options_2;/* v7 and v8 */
+ void __iomem *e_h264_lf_alpha_offset;
+ void __iomem *e_h264_lf_beta_offset;
+ void __iomem *e_h264_i_period;
+ void __iomem *e_h264_fmo_slice_grp_map_type;
+ void __iomem *e_h264_fmo_num_slice_grp_minus1;
+ void __iomem *e_h264_fmo_slice_grp_change_dir;
+ void __iomem *e_h264_fmo_slice_grp_change_rate_minus1;
+ void __iomem *e_h264_fmo_run_length_minus1_0;
+ void __iomem *e_h264_aso_slice_order_0;
+ void __iomem *e_h264_chroma_qp_offset;
+ void __iomem *e_h264_num_t_layer;
+ void __iomem *e_h264_hierarchical_qp_layer0;
+ void __iomem *e_h264_frame_packing_sei_info;
+ void __iomem *e_h264_nal_control;/* v7 and v8 */
+ void __iomem *e_mvc_frame_qp_view1;
+ void __iomem *e_mvc_rc_bit_rate_view1;
+ void __iomem *e_mvc_rc_qbound_view1;
+ void __iomem *e_mvc_rc_mode_view1;
+ void __iomem *e_mvc_inter_view_prediction_on;
+ void __iomem *e_vp8_options;/* v7 and v8 */
+ void __iomem *e_vp8_filter_options;/* v7 and v8 */
+ void __iomem *e_vp8_golden_frame_option;/* v7 and v8 */
+ void __iomem *e_vp8_num_t_layer;/* v7 and v8 */
+ void __iomem *e_vp8_hierarchical_qp_layer0;/* v7 and v8 */
+ void __iomem *e_vp8_hierarchical_qp_layer1;/* v7 and v8 */
+ void __iomem *e_vp8_hierarchical_qp_layer2;/* v7 and v8 */
+ void __iomem *e_min_scratch_buffer_size; /* v10 */
+ void __iomem *e_num_t_layer; /* v10 */
+ void __iomem *e_hier_qp_layer0; /* v10 */
+ void __iomem *e_hier_bit_rate_layer0; /* v10 */
+ void __iomem *e_hevc_options; /* v10 */
+ void __iomem *e_hevc_refresh_period; /* v10 */
+ void __iomem *e_hevc_lf_beta_offset_div2; /* v10 */
+ void __iomem *e_hevc_lf_tc_offset_div2; /* v10 */
+ void __iomem *e_hevc_nal_control; /* v10 */
+};
+
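+/*
+ * Per-hardware-version operations.  The common driver code invokes these
+ * hooks indirectly (see s5p_mfc_hw_call()); s5p_mfc_init_hw_ops() below
+ * selects the implementation matching the detected MFC version.
+ */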
+struct s5p_mfc_hw_ops {
+ int (*alloc_dec_temp_buffers)(struct s5p_mfc_ctx *ctx);
+ void (*release_dec_desc_buffer)(struct s5p_mfc_ctx *ctx);
+ int (*alloc_codec_buffers)(struct s5p_mfc_ctx *ctx);
+ void (*release_codec_buffers)(struct s5p_mfc_ctx *ctx);
+ int (*alloc_instance_buffer)(struct s5p_mfc_ctx *ctx);
+ void (*release_instance_buffer)(struct s5p_mfc_ctx *ctx);
+ int (*alloc_dev_context_buffer)(struct s5p_mfc_dev *dev);
+ void (*release_dev_context_buffer)(struct s5p_mfc_dev *dev);
+ void (*dec_calc_dpb_size)(struct s5p_mfc_ctx *ctx);
+ void (*enc_calc_src_size)(struct s5p_mfc_ctx *ctx);
+ int (*set_enc_stream_buffer)(struct s5p_mfc_ctx *ctx,
+ unsigned long addr, unsigned int size);
+ void (*set_enc_frame_buffer)(struct s5p_mfc_ctx *ctx,
+ unsigned long y_addr, unsigned long c_addr);
+ void (*get_enc_frame_buffer)(struct s5p_mfc_ctx *ctx,
+ unsigned long *y_addr, unsigned long *c_addr);
+ void (*try_run)(struct s5p_mfc_dev *dev);
+ void (*clear_int_flags)(struct s5p_mfc_dev *dev);
+ int (*get_dspl_y_adr)(struct s5p_mfc_dev *dev);
+ int (*get_dec_y_adr)(struct s5p_mfc_dev *dev);
+ int (*get_dspl_status)(struct s5p_mfc_dev *dev);
+ int (*get_dec_status)(struct s5p_mfc_dev *dev);
+ int (*get_dec_frame_type)(struct s5p_mfc_dev *dev);
+ int (*get_disp_frame_type)(struct s5p_mfc_ctx *ctx);
+ int (*get_consumed_stream)(struct s5p_mfc_dev *dev);
+ int (*get_int_reason)(struct s5p_mfc_dev *dev);
+ int (*get_int_err)(struct s5p_mfc_dev *dev);
+ int (*err_dec)(unsigned int err);
+ int (*get_img_width)(struct s5p_mfc_dev *dev);
+ int (*get_img_height)(struct s5p_mfc_dev *dev);
+ int (*get_dpb_count)(struct s5p_mfc_dev *dev);
+ int (*get_mv_count)(struct s5p_mfc_dev *dev);
+ int (*get_inst_no)(struct s5p_mfc_dev *dev);
+ int (*get_enc_strm_size)(struct s5p_mfc_dev *dev);
+ int (*get_enc_slice_type)(struct s5p_mfc_dev *dev);
+ int (*get_enc_dpb_count)(struct s5p_mfc_dev *dev);
+ unsigned int (*get_pic_type_top)(struct s5p_mfc_ctx *ctx);
+ unsigned int (*get_pic_type_bot)(struct s5p_mfc_ctx *ctx);
+ unsigned int (*get_crop_info_h)(struct s5p_mfc_ctx *ctx);
+ unsigned int (*get_crop_info_v)(struct s5p_mfc_ctx *ctx);
+ int (*get_min_scratch_buf_size)(struct s5p_mfc_dev *dev);
+ int (*get_e_min_scratch_buf_size)(struct s5p_mfc_dev *dev);
+};
+
+void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev);
+void s5p_mfc_init_regs(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_priv_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b);
+void s5p_mfc_release_priv_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b);
+int s5p_mfc_alloc_generic_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b);
+void s5p_mfc_release_generic_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b);
+
+
+#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
new file mode 100644
index 000000000..091388121
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -0,0 +1,1640 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+ *
+ * Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains hw related functions.
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_opr_v5.h"
+#include <asm/cacheflush.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
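+/*
+ * Buffer addresses programmed into MFC v5 registers are not absolute bus
+ * addresses: OFFSETA()/OFFSETB() convert an address into an offset from the
+ * bank 1 (BANK_L_CTX) or bank 2 (BANK_R_CTX) DMA base, scaled down by
+ * MFC_OFFSET_SHIFT.
+ */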
+#define OFFSETA(x) (((x) - dev->dma_base[BANK_L_CTX]) >> MFC_OFFSET_SHIFT)
+#define OFFSETB(x) (((x) - dev->dma_base[BANK_R_CTX]) >> MFC_OFFSET_SHIFT)
+
+/* Allocate temporary buffers for decoding */
+static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ int ret;
+
+ ctx->dsc.size = buf_size->dsc;
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->dsc);
+ if (ret) {
+ mfc_err("Failed to allocate temporary buffer\n");
+ return ret;
+ }
+
+ BUG_ON(ctx->dsc.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ memset(ctx->dsc.virt, 0, ctx->dsc.size);
+ wmb();
+ return 0;
+}
+
+
+/* Release temporary buffers for decoding */
+static void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx)
+{
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->dsc);
+}
+
+/* Allocate codec buffers */
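+/*
+ * Bank 1 and bank 2 each hold a part of the codec's internal working data
+ * (neighbour info, motion vectors, bitplanes, encoder reference frames,
+ * ...); the per-codec sizes below are built from the S5P_FIMV_* constants
+ * for each codec.
+ */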
+static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int enc_ref_y_size = 0;
+ unsigned int enc_ref_c_size = 0;
+ unsigned int guard_width, guard_height;
+ int ret;
+
+ if (ctx->type == MFCINST_DECODER) {
+ mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
+ ctx->luma_size, ctx->chroma_size, ctx->mv_size);
+		mfc_debug(2, "Total bufs: %d\n", ctx->total_dpb_count);
+ } else if (ctx->type == MFCINST_ENCODER) {
+ enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) {
+ enc_ref_c_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height >> 1,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(enc_ref_c_size,
+ S5P_FIMV_NV12MT_SALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_NV12MT_SALIGN);
+ }
+ mfc_debug(2, "recon luma size: %d chroma size: %d\n",
+ enc_ref_y_size, enc_ref_c_size);
+ } else {
+ return -EINVAL;
+ }
+ /* Codecs have different memory requirements */
+ switch (ctx->codec_mode) {
+ case S5P_MFC_CODEC_H264_DEC:
+ ctx->bank1.size =
+ ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
+ S5P_FIMV_DEC_VERT_NB_MV_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2.size = ctx->total_dpb_count * ctx->mv_size;
+ break;
+ case S5P_MFC_CODEC_MPEG4_DEC:
+ ctx->bank1.size =
+ ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_STX_PARSER_SIZE +
+ S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_VC1RCV_DEC:
+ case S5P_MFC_CODEC_VC1_DEC:
+ ctx->bank1.size =
+ ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_NB_DCAC_SIZE +
+ 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_MPEG2_DEC:
+ ctx->bank1.size = 0;
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_H263_DEC:
+ ctx->bank1.size =
+ ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_NB_DCAC_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_H264_ENC:
+ ctx->bank1.size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_COLFLG_SIZE +
+ S5P_FIMV_ENC_INTRAMD_SIZE +
+ S5P_FIMV_ENC_NBORINFO_SIZE;
+ ctx->bank2.size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4) +
+ S5P_FIMV_ENC_INTRAPRED_SIZE;
+ break;
+ case S5P_MFC_CODEC_MPEG4_ENC:
+ ctx->bank1.size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_COLFLG_SIZE +
+ S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ ctx->bank2.size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4);
+ break;
+ case S5P_MFC_CODEC_H263_ENC:
+ ctx->bank1.size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ ctx->bank2.size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4);
+ break;
+ default:
+ break;
+ }
+ /* Allocate only if memory from bank 1 is necessary */
+ if (ctx->bank1.size > 0) {
+
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->bank1);
+ if (ret) {
+ mfc_err("Failed to allocate Bank1 temporary buffer\n");
+ return ret;
+ }
+ BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ }
+ /* Allocate only if memory from bank 2 is necessary */
+ if (ctx->bank2.size > 0) {
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_R_CTX, &ctx->bank2);
+ if (ret) {
+ mfc_err("Failed to allocate Bank2 temporary buffer\n");
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->bank1);
+ return ret;
+ }
+ BUG_ON(ctx->bank2.dma & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
+ }
+ return 0;
+}
+
+/* Release buffers allocated for codec */
+static void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
+{
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->bank1);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->bank2);
+}
+
+/* Allocate memory for instance data buffer */
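+/*
+ * Each context gets a firmware instance buffer (larger for H.264) and a
+ * shared memory buffer used to pass extra parameters; both come from
+ * bank 1 and are zeroed before use.
+ */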
+static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ int ret;
+
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
+ ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
+ ctx->ctx.size = buf_size->h264_ctx;
+ else
+ ctx->ctx.size = buf_size->non_h264_ctx;
+
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->ctx);
+ if (ret) {
+ mfc_err("Failed to allocate instance buffer\n");
+ return ret;
+ }
+ ctx->ctx.ofs = OFFSETA(ctx->ctx.dma);
+
+ /* Zero content of the allocated memory */
+ memset(ctx->ctx.virt, 0, ctx->ctx.size);
+ wmb();
+
+ /* Initialize shared memory */
+ ctx->shm.size = buf_size->shm;
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->shm);
+ if (ret) {
+ mfc_err("Failed to allocate shared memory buffer\n");
+ s5p_mfc_release_priv_buf(dev, &ctx->ctx);
+ return ret;
+ }
+
+	/* The shared memory offset is kept relative to the bank 1 (port A) base */
+ ctx->shm.ofs = ctx->shm.dma - dev->dma_base[BANK_L_CTX];
+ BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+
+ memset(ctx->shm.virt, 0, buf_size->shm);
+ wmb();
+ return 0;
+}
+
+/* Release instance buffer */
+static void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
+{
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->ctx);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->shm);
+}
+
+static int s5p_mfc_alloc_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
+{
+ /* NOP */
+
+ return 0;
+}
+
+static void s5p_mfc_release_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
+{
+ /* NOP */
+}
+
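+/*
+ * The shared memory buffer (ctx->shm) holds additional per-context
+ * parameters exchanged with the MFC firmware.  These helpers access 32-bit
+ * fields at fixed offsets inside it; the barriers keep the CPU accesses
+ * ordered with respect to the hardware.
+ */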
+static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data,
+ unsigned int ofs)
+{
+ *(u32 *)(ctx->shm.virt + ofs) = data;
+ wmb();
+}
+
+static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx,
+ unsigned long ofs)
+{
+ rmb();
+ return *(u32 *)(ctx->shm.virt + ofs);
+}
+
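+/*
+ * Work out the per-frame luma, chroma and (for H.264) motion vector buffer
+ * sizes from the parsed picture dimensions, using the NV12MT tile alignment
+ * constants required for MFC v5 DPBs.
+ */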
+static void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx)
+{
+ unsigned int guard_width, guard_height;
+
+ ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN);
+ ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ mfc_debug(2,
+ "SEQ Done: Movie dimensions %dx%d, buffer dimensions: %dx%d\n",
+ ctx->img_width, ctx->img_height, ctx->buf_width,
+ ctx->buf_height);
+
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
+ ctx->luma_size = ALIGN(ctx->buf_width * ctx->buf_height,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->chroma_size = ALIGN(ctx->buf_width *
+ ALIGN((ctx->img_height >> 1),
+ S5P_FIMV_NV12MT_VALIGN),
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->mv_size = ALIGN(ctx->buf_width *
+ ALIGN((ctx->buf_height >> 2),
+ S5P_FIMV_NV12MT_VALIGN),
+ S5P_FIMV_DEC_BUF_ALIGN);
+ } else {
+ guard_width =
+ ALIGN(ctx->img_width + 24, S5P_FIMV_NV12MT_HALIGN);
+ guard_height =
+ ALIGN(ctx->img_height + 16, S5P_FIMV_NV12MT_VALIGN);
+ ctx->luma_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_DEC_BUF_ALIGN);
+
+ guard_width =
+ ALIGN(ctx->img_width + 16, S5P_FIMV_NV12MT_HALIGN);
+ guard_height =
+ ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ ctx->chroma_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_DEC_BUF_ALIGN);
+
+ ctx->mv_size = 0;
+ }
+}
+
+static void s5p_mfc_enc_calc_src_size_v5(struct s5p_mfc_ctx *ctx)
+{
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
+ ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN);
+
+ ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12M_LVALIGN);
+ ctx->chroma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN)
+ * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12M_CVALIGN);
+
+ ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12M_SALIGN);
+ ctx->chroma_size =
+ ALIGN(ctx->chroma_size, S5P_FIMV_NV12M_SALIGN);
+ } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+ ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN);
+
+ ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ ctx->chroma_size =
+ ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
+
+ ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12MT_SALIGN);
+ ctx->chroma_size =
+ ALIGN(ctx->chroma_size, S5P_FIMV_NV12MT_SALIGN);
+ }
+}
+
+/* Set registers for decoding temporary buffers */
+static void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+
+ mfc_write(dev, OFFSETA(ctx->dsc.dma), S5P_FIMV_SI_CH0_DESC_ADR);
+ mfc_write(dev, buf_size->dsc, S5P_FIMV_SI_CH0_DESC_SIZE);
+}
+
+/* Set registers for shared buffer */
+static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ mfc_write(dev, ctx->shm.ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
+}
+
+/* Set registers for decoding stream buffer */
+static int s5p_mfc_set_dec_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
+ int buf_addr, unsigned int start_num_byte,
+ unsigned int buf_size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
+ mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
+ mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
+ s5p_mfc_write_info_v5(ctx, start_num_byte, START_BYTE_NUM);
+ return 0;
+}
+
+/* Set decoding frame buffer */
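+/*
+ * Luma DPB planes are programmed relative to bank 2 and chroma planes
+ * relative to bank 1; the codec-specific working buffers (and, for H.264,
+ * the per-DPB motion vector buffers) are carved out of the bank 1/bank 2
+ * allocations made in s5p_mfc_alloc_codec_buffers_v5().
+ */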
+static int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
+{
+ unsigned int frame_size_lu, i;
+ unsigned int frame_size_ch, frame_size_mv;
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dpb;
+ size_t buf_addr1, buf_addr2;
+ int buf_size1, buf_size2;
+
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+ buf_addr2 = ctx->bank2.dma;
+ buf_size2 = ctx->bank2.size;
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+ ~S5P_FIMV_DPB_COUNT_MASK;
+ mfc_write(dev, ctx->total_dpb_count | dpb,
+ S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+ s5p_mfc_set_shared_buffer(ctx);
+ switch (ctx->codec_mode) {
+ case S5P_MFC_CODEC_H264_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_VERT_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
+ break;
+ case S5P_MFC_CODEC_MPEG4_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
+ buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ break;
+ case S5P_MFC_CODEC_H263_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ break;
+ case S5P_MFC_CODEC_VC1_DEC:
+ case S5P_MFC_CODEC_VC1RCV_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ break;
+ case S5P_MFC_CODEC_MPEG2_DEC:
+ break;
+ default:
+ mfc_err("Unknown codec for decoding (%x)\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ frame_size_lu = ctx->luma_size;
+ frame_size_ch = ctx->chroma_size;
+ frame_size_mv = ctx->mv_size;
+ mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size_lu, frame_size_ch,
+ frame_size_mv);
+ for (i = 0; i < ctx->total_dpb_count; i++) {
+ /* Bank2 */
+ mfc_debug(2, "Luma %d: %zx\n", i,
+ ctx->dst_bufs[i].cookie.raw.luma);
+ mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
+ S5P_FIMV_DEC_LUMA_ADR + i * 4);
+ mfc_debug(2, "\tChroma %d: %zx\n", i,
+ ctx->dst_bufs[i].cookie.raw.chroma);
+ mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
+ S5P_FIMV_DEC_CHROMA_ADR + i * 4);
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
+ mfc_debug(2, "\tBuf2: %zx, size: %d\n",
+ buf_addr2, buf_size2);
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_H264_MV_ADR + i * 4);
+ buf_addr2 += frame_size_mv;
+ buf_size2 -= frame_size_mv;
+ }
+ }
+ mfc_debug(2, "Buf1: %zu, buf_size1: %d\n", buf_addr1, buf_size1);
+ mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
+ buf_size1, buf_size2, ctx->total_dpb_count);
+ if (buf_size1 < 0 || buf_size2 < 0) {
+ mfc_debug(2, "Not enough memory has been allocated\n");
+ return -ENOMEM;
+ }
+ s5p_mfc_write_info_v5(ctx, frame_size_lu, ALLOC_LUMA_DPB_SIZE);
+ s5p_mfc_write_info_v5(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC)
+ s5p_mfc_write_info_v5(ctx, frame_size_mv, ALLOC_MV_SIZE);
+ mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
+ << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+ S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+/* Set registers for encoding stream buffer */
+static int s5p_mfc_set_enc_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
+ unsigned long addr, unsigned int size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
+ mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
+ return 0;
+}
+
+static void s5p_mfc_set_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
+ unsigned long y_addr, unsigned long c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
+ mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
+}
+
+static void s5p_mfc_get_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
+ unsigned long *y_addr, unsigned long *c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ *y_addr = dev->dma_base[BANK_R_CTX] +
+ (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR) << MFC_OFFSET_SHIFT);
+ *c_addr = dev->dma_base[BANK_R_CTX] +
+ (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR) << MFC_OFFSET_SHIFT);
+}
+
+/* Set encoding ref & codec buffer */
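+/*
+ * Lay out the encoder reference and working buffers inside the bank 1 and
+ * bank 2 allocations: reference luma planes are split between the banks,
+ * reference chroma planes live in bank 2, and the remaining per-codec
+ * buffers (MV, flags, AC/DC coefficients, ...) sit mostly in bank 1.
+ */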
+static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ size_t buf_addr1, buf_addr2;
+ size_t buf_size1, buf_size2;
+ unsigned int enc_ref_y_size, enc_ref_c_size;
+ unsigned int guard_width, guard_height;
+ int i;
+
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+ buf_addr2 = ctx->bank2.dma;
+ buf_size2 = ctx->bank2.size;
+ enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) {
+ enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_NV12MT_SALIGN);
+ }
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n", buf_size1, buf_size2);
+ switch (ctx->codec_mode) {
+ case S5P_MFC_CODEC_H264_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_COZERO_FLAG_ADR);
+ buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_UP_INTRA_MD_ADR);
+ buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_H264_UP_INTRA_PRED_ADR);
+ buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
+ buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_NBOR_INFO_ADR);
+ buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
+ buf_size1, buf_size2);
+ break;
+ case S5P_MFC_CODEC_MPEG4_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
+ buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_MPEG4_ACDC_COEF_ADR);
+ buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
+ buf_size1, buf_size2);
+ break;
+ case S5P_MFC_CODEC_H263_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
+ buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
+ buf_size1, buf_size2);
+ break;
+ default:
+ mfc_err("Unknown codec set for encoding: %d\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
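+/*
+ * Common encoder setup shared by all codecs: frame size, GOP size,
+ * multi-slice mode, intra refresh, padding, frame-level rate control and
+ * the shared-memory options (sequence header mode, frame skip, fixed
+ * target bit).  The codec-specific helpers below call this first.
+ */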
+static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ unsigned int reg;
+ unsigned int shm;
+
+ /* width */
+ mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
+ /* height */
+ mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
+ /* pictype : enable, IDR period */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ reg |= (1 << 18);
+ reg &= ~(0xFFFF);
+ reg |= p->gop_size;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
+ /* multi-slice control */
+ /* multi-slice MB number or bit size */
+ mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
+ if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
+ } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
+ } else {
+ mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
+ mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
+ }
+ /* cyclic intra refresh */
+ mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
+ /* memory structure cur. frame */
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+ mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+ else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+ mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+ /* padding control & value */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
+ if (p->pad) {
+ /** enable */
+ reg |= (1 << 31);
+ /** cr value */
+ reg &= ~(0xFF << 16);
+ reg |= (p->pad_cr << 16);
+ /** cb value */
+ reg &= ~(0xFF << 8);
+ reg |= (p->pad_cb << 8);
+ /** y value */
+ reg &= ~(0xFF);
+ reg |= (p->pad_luma);
+ } else {
+ /** disable & all value clear */
+ reg = 0;
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /** frame-level rate control */
+ reg &= ~(0x1 << 9);
+ reg |= (p->rc_frame << 9);
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* bit rate */
+ if (p->rc_frame)
+ mfc_write(dev, p->rc_bitrate,
+ S5P_FIMV_ENC_RC_BIT_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
+ /* reaction coefficient */
+ if (p->rc_frame)
+ mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
+ shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
+ /* seq header ctrl */
+ shm &= ~(0x1 << 3);
+ shm |= (p->seq_hdr_mode << 3);
+ /* frame skip mode */
+ shm &= ~(0x3 << 1);
+ shm |= (p->frame_skip_mode << 1);
+ s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
+ /* fixed target bit */
+ s5p_mfc_write_info_v5(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
+ unsigned int reg;
+ unsigned int shm;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* pictype : number of B */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* num_b_frame - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= (p->num_b_frame << 16);
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* profile & level */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+ /* level */
+ reg &= ~(0xFF << 8);
+ reg |= (p_264->level << 8);
+ /* profile - 0 ~ 2 */
+ reg &= ~(0x3F);
+ reg |= p_264->profile;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+ /* interlace */
+ mfc_write(dev, p_264->interlace, S5P_FIMV_ENC_PIC_STRUCT);
+ /* height */
+ if (p_264->interlace)
+ mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
+ /* loopfilter ctrl */
+ mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
+ /* loopfilter alpha offset */
+ if (p_264->loop_filter_alpha < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_264->loop_filter_alpha) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_264->loop_filter_alpha & 0xF);
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
+ /* loopfilter beta offset */
+ if (p_264->loop_filter_beta < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_264->loop_filter_beta) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_264->loop_filter_beta & 0xF);
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
+ /* entropy coding mode */
+ if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+ mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+ /* number of ref. picture */
+ reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
+ /* num of ref. pictures of P */
+ reg &= ~(0x3 << 5);
+ reg |= (p_264->num_ref_pic_4p << 5);
+ /* max number of ref. pictures */
+ reg &= ~(0x1F);
+ reg |= p_264->max_ref_pic;
+ mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
+ /* 8x8 transform enable */
+ mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= (p->rc_mb << 8);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_264->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_denom)
+ mfc_write(dev, p->rc_framerate_num * 1000
+ / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_264->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_264->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* macroblock adaptive scaling features */
+ if (p->rc_mb) {
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
+ /* dark region */
+ reg &= ~(0x1 << 3);
+ reg |= (p_264->rc_mb_dark << 3);
+ /* smooth region */
+ reg &= ~(0x1 << 2);
+ reg |= (p_264->rc_mb_smooth << 2);
+ /* static region */
+ reg &= ~(0x1 << 1);
+ reg |= (p_264->rc_mb_static << 1);
+ /* high activity region */
+ reg &= ~(0x1);
+ reg |= p_264->rc_mb_activity;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
+ }
+ if (!p->rc_frame && !p->rc_mb) {
+ shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
+ shm |= (p_264->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
+ }
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
+ /* AR VUI control */
+ shm &= ~(0x1 << 15);
+ shm |= (p_264->vui_sar << 1);
+ s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
+ if (p_264->vui_sar) {
+		/* aspect ratio IDC */
+ shm = s5p_mfc_read_info_v5(ctx, SAMPLE_ASPECT_RATIO_IDC);
+ shm &= ~(0xFF);
+ shm |= p_264->vui_sar_idc;
+ s5p_mfc_write_info_v5(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
+ if (p_264->vui_sar_idc == 0xFF) {
+ /* sample AR info */
+ shm = s5p_mfc_read_info_v5(ctx, EXTENDED_SAR);
+ shm &= ~(0xFFFFFFFF);
+ shm |= p_264->vui_ext_sar_width << 16;
+ shm |= p_264->vui_ext_sar_height;
+ s5p_mfc_write_info_v5(ctx, shm, EXTENDED_SAR);
+ }
+ }
+ /* intra picture period for H.264 */
+ shm = s5p_mfc_read_info_v5(ctx, H264_I_PERIOD);
+ /* control */
+ shm &= ~(0x1 << 16);
+ shm |= (p_264->open_gop << 16);
+ /* value */
+ if (p_264->open_gop) {
+ shm &= ~(0xFFFF);
+ shm |= p_264->open_gop_size;
+ }
+ s5p_mfc_write_info_v5(ctx, shm, H264_I_PERIOD);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p_264->cpb_size << 16);
+ }
+ s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
+ unsigned int reg;
+ unsigned int shm;
+ unsigned int framerate;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* pictype : number of B */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* num_b_frame - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= (p->num_b_frame << 16);
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* profile & level */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+ /* level */
+ reg &= ~(0xFF << 8);
+ reg |= (p_mpeg4->level << 8);
+ /* profile - 0 ~ 2 */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->profile;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+ /* quarter_pixel */
+ mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
+ /* qp */
+ if (!p->rc_frame) {
+ shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
+ shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
+ }
+ /* frame rate */
+ if (p->rc_frame) {
+ if (p->rc_framerate_denom > 0) {
+ framerate = p->rc_framerate_num * 1000 /
+ p->rc_framerate_denom;
+ mfc_write(dev, framerate,
+ S5P_FIMV_ENC_RC_FRAME_RATE);
+ shm = s5p_mfc_read_info_v5(ctx, RC_VOP_TIMING);
+ shm &= ~(0xFFFFFFFF);
+ shm |= (1 << 31);
+ shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
+ shm |= (p->rc_framerate_denom & 0xFFFF);
+ s5p_mfc_write_info_v5(ctx, shm, RC_VOP_TIMING);
+ }
+ } else {
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ }
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_mpeg4->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p->vbv_size << 16);
+ }
+ s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
+ unsigned int reg;
+ unsigned int shm;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* qp */
+ if (!p->rc_frame) {
+ shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= (p_h263->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
+ }
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_denom)
+ mfc_write(dev, p->rc_framerate_num * 1000
+ / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_h263->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_h263->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_h263->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p->vbv_size << 16);
+ }
+ s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+/* Initialize decoding */
+static int s5p_mfc_init_decode_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ s5p_mfc_set_shared_buffer(ctx);
+	/* Set up the loop filter; for decoding this is only valid for MPEG4 */
+ if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC)
+ mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
+ mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
+ S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
+ S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
+ S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
+ S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+ mfc_write(dev,
+ ((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
+ | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dpb;
+
+ if (flush)
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
+ S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+ else
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+ ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+ mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+}
+
+/* Decode a single frame */
+static int s5p_mfc_decode_one_frame_v5(struct s5p_mfc_ctx *ctx,
+ enum s5p_mfc_decode_arg last_frame)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
+ s5p_mfc_set_shared_buffer(ctx);
+ s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
+	/* Issue different commands to the instance based on whether
+	 * this is the last frame or not. */
+ switch (last_frame) {
+ case MFC_DEC_FRAME:
+ mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
+ S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ case MFC_DEC_LAST_FRAME:
+ mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
+ S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ case MFC_DEC_RES_CHANGE:
+ mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
+ S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+ S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ }
+ mfc_debug(2, "Decoding a usual frame\n");
+ return 0;
+}
+
+static int s5p_mfc_init_encode_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
+ s5p_mfc_set_enc_params_h264(ctx);
+ else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC)
+ s5p_mfc_set_enc_params_mpeg4(ctx);
+ else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
+ s5p_mfc_set_enc_params_h263(ctx);
+ else {
+ mfc_err("Unknown codec for encoding (%x)\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ s5p_mfc_set_shared_buffer(ctx);
+ mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
+ (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+/* Encode a single frame */
+static int s5p_mfc_encode_one_frame_v5(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ int cmd;
+ /* memory structure cur. frame */
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+ mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+ else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+ mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+ s5p_mfc_set_shared_buffer(ctx);
+
+ if (ctx->state == MFCINST_FINISHING)
+ cmd = S5P_FIMV_CH_LAST_FRAME;
+ else
+ cmd = S5P_FIMV_CH_FRAME_START;
+ mfc_write(dev, ((cmd & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
+ | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+
+ return 0;
+}
+
+static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_decode_one_frame_v5(ctx, MFC_DEC_RES_CHANGE);
+}
+
+static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *temp_vb;
+
+ if (ctx->state == MFCINST_FINISHING) {
+ last_frame = MFC_DEC_LAST_FRAME;
+ s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_decode_one_frame_v5(ctx, last_frame);
+ return 0;
+ }
+
+ /* Frames are being decoded */
+ if (list_empty(&ctx->src_queue)) {
+ mfc_debug(2, "No src buffers\n");
+ return -EAGAIN;
+ }
+ /* Get the next source buffer */
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ temp_vb->flags |= MFC_BUF_FLAG_USED;
+ s5p_mfc_set_dec_stream_buffer_v5(ctx,
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+ ctx->consumed_stream, temp_vb->b->vb2_buf.planes[0].bytesused);
+ dev->curr_ctx = ctx->num;
+ if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
+ last_frame = MFC_DEC_LAST_FRAME;
+ mfc_debug(2, "Setting ctx->state to FINISHING\n");
+ ctx->state = MFCINST_FINISHING;
+ }
+ s5p_mfc_decode_one_frame_v5(ctx, last_frame);
+ return 0;
+}
+
+static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ struct s5p_mfc_buf *src_mb;
+ unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned int dst_size;
+
+ if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
+ mfc_debug(2, "no src buffers\n");
+ return -EAGAIN;
+ }
+ if (list_empty(&ctx->dst_queue)) {
+ mfc_debug(2, "no dst buffers\n");
+ return -EAGAIN;
+ }
+ if (list_empty(&ctx->src_queue)) {
+ /* send null frame */
+ s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->dma_base[BANK_R_CTX],
+ dev->dma_base[BANK_R_CTX]);
+ src_mb = NULL;
+ } else {
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ src_mb->flags |= MFC_BUF_FLAG_USED;
+ if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
+ /* send null frame */
+ s5p_mfc_set_enc_frame_buffer_v5(ctx,
+ dev->dma_base[BANK_R_CTX],
+ dev->dma_base[BANK_R_CTX]);
+ ctx->state = MFCINST_FINISHING;
+ } else {
+ src_y_addr = vb2_dma_contig_plane_dma_addr(
+ &src_mb->b->vb2_buf, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(
+ &src_mb->b->vb2_buf, 1);
+ s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
+ src_c_addr);
+ if (src_mb->flags & MFC_BUF_FLAG_EOS)
+ ctx->state = MFCINST_FINISHING;
+ }
+ }
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_mb->flags |= MFC_BUF_FLAG_USED;
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
+ s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
+ dev->curr_ctx = ctx->num;
+ mfc_debug(2, "encoding buffer with index=%d state=%d\n",
+ src_mb ? src_mb->b->vb2_buf.index : -1, ctx->state);
+ s5p_mfc_encode_one_frame_v5(ctx);
+ return 0;
+}
+
+static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *temp_vb;
+
+ /* Initializing decoding - parsing header */
+ mfc_debug(2, "Preparing to init decoding\n");
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ s5p_mfc_set_dec_desc_buffer(ctx);
+ mfc_debug(2, "Header size: %d\n",
+ temp_vb->b->vb2_buf.planes[0].bytesused);
+ s5p_mfc_set_dec_stream_buffer_v5(ctx,
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+ 0, temp_vb->b->vb2_buf.planes[0].bytesused);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_init_decode_v5(ctx);
+}
+
+static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long dst_addr;
+ unsigned int dst_size;
+
+ s5p_mfc_set_enc_ref_buffer_v5(ctx);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
+ s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_init_encode_v5(ctx);
+}
+
+static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *temp_vb;
+ int ret;
+
+	/*
+	 * The header has been parsed, so processing can start now.
+	 * First set up the output frame buffers.
+	 */
+ if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
+		mfc_err("It seems that not all destination buffers were mmapped\nMFC requires that all destination buffers are mmapped before starting processing\n");
+ return -EAGAIN;
+ }
+ if (list_empty(&ctx->src_queue)) {
+ mfc_err("Header has been deallocated in the middle of initialization\n");
+ return -EIO;
+ }
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ mfc_debug(2, "Header size: %d\n",
+ temp_vb->b->vb2_buf.planes[0].bytesused);
+ s5p_mfc_set_dec_stream_buffer_v5(ctx,
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+ 0, temp_vb->b->vb2_buf.planes[0].bytesused);
+ dev->curr_ctx = ctx->num;
+ ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
+ if (ret) {
+ mfc_err("Failed to alloc frame mem\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+/* Try running an operation on hardware */
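+/*
+ * Pick the next ready context and issue the hardware command that matches
+ * its state.  The hw_lock bit serialises hardware access; if nothing can be
+ * scheduled the lock is released again, and if issuing a command fails the
+ * clock reference is dropped too, since no completion interrupt will arrive.
+ */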
+static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_ctx *ctx;
+ int new_ctx;
+ unsigned int ret = 0;
+
+ if (test_bit(0, &dev->enter_suspend)) {
+ mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
+ return;
+ }
+ /* Check whether hardware is not running */
+ if (test_and_set_bit(0, &dev->hw_lock) != 0) {
+ /* This is perfectly ok, the scheduled ctx should wait */
+ mfc_debug(1, "Couldn't lock HW\n");
+ return;
+ }
+ /* Choose the context to run */
+ new_ctx = s5p_mfc_get_new_ctx(dev);
+ if (new_ctx < 0) {
+ /* No contexts to run */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
+ mfc_err("Failed to unlock hardware\n");
+ return;
+ }
+ mfc_debug(1, "No ctx is scheduled to be run\n");
+ return;
+ }
+ ctx = dev->ctx[new_ctx];
+ /* Got context to run in ctx */
+ /*
+ * Last frame has already been sent to MFC.
+ * Now obtaining frames from MFC buffer
+ */
+ s5p_mfc_clock_on();
+ s5p_mfc_clean_ctx_int_flags(ctx);
+
+ if (ctx->type == MFCINST_DECODER) {
+ s5p_mfc_set_dec_desc_buffer(ctx);
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
+ break;
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+ break;
+ case MFCINST_INIT:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ case MFCINST_HEAD_PARSED:
+ ret = s5p_mfc_run_init_dec_buffers(ctx);
+ mfc_debug(1, "head parsed\n");
+ break;
+ case MFCINST_RES_CHANGE_INIT:
+ s5p_mfc_run_res_change(ctx);
+ break;
+ case MFCINST_RES_CHANGE_FLUSH:
+ s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+ break;
+ case MFCINST_RES_CHANGE_END:
+ mfc_debug(2, "Finished remaining frames after resolution change\n");
+ ctx->capture_state = QUEUE_FREE;
+ mfc_debug(2, "Will re-init the codec\n");
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else if (ctx->type == MFCINST_ENCODER) {
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_enc_frame(ctx);
+ break;
+ case MFCINST_INIT:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_enc(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else {
+ mfc_err("Invalid context type: %d\n", ctx->type);
+ ret = -EAGAIN;
+ }
+
+ if (ret) {
+ /* Free hardware lock */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ mfc_err("Failed to unlock hardware\n");
+
+		/* This is indeed important: as no operation has been
+		 * scheduled, reduce the clock count here, because no
+		 * interrupt related to this try_run will ever come from
+		 * hardware to do it. */
+ s5p_mfc_clock_off();
+ }
+}
+
+static void s5p_mfc_clear_int_flags_v5(struct s5p_mfc_dev *dev)
+{
+ mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
+ mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+ mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
+}
+
+static int s5p_mfc_get_dspl_y_adr_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_DISPLAY_Y_ADR) << MFC_OFFSET_SHIFT;
+}
+
+static int s5p_mfc_get_dec_y_adr_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_DECODE_Y_ADR) << MFC_OFFSET_SHIFT;
+}
+
+static int s5p_mfc_get_dspl_status_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_DISPLAY_STATUS);
+}
+
+static int s5p_mfc_get_dec_status_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_DECODE_STATUS);
+}
+
+static int s5p_mfc_get_dec_frame_type_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_DECODE_FRAME_TYPE) &
+ S5P_FIMV_DECODE_FRAME_MASK;
+}
+
+static int s5p_mfc_get_disp_frame_type_v5(struct s5p_mfc_ctx *ctx)
+{
+ return (s5p_mfc_read_info_v5(ctx, DISP_PIC_FRAME_TYPE) >>
+ S5P_FIMV_SHARED_DISP_FRAME_TYPE_SHIFT) &
+ S5P_FIMV_DECODE_FRAME_MASK;
+}
+
+static int s5p_mfc_get_consumed_stream_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_CONSUMED_BYTES);
+}
+
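+/*
+ * Translate the firmware's RISC2HOST command code into the generic
+ * S5P_MFC_R2H_CMD_* interrupt reason used by the common driver code.
+ */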
+static int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev)
+{
+ int reason;
+ reason = mfc_read(dev, S5P_FIMV_RISC2HOST_CMD) &
+ S5P_FIMV_RISC2HOST_CMD_MASK;
+ switch (reason) {
+ case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
+ reason = S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
+ reason = S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
+ reason = S5P_MFC_R2H_CMD_SEQ_DONE_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
+ reason = S5P_MFC_R2H_CMD_FRAME_DONE_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
+ reason = S5P_MFC_R2H_CMD_SLICE_DONE_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
+ reason = S5P_MFC_R2H_CMD_SYS_INIT_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
+ reason = S5P_MFC_R2H_CMD_FW_STATUS_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_SLEEP_RET:
+ reason = S5P_MFC_R2H_CMD_SLEEP_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_WAKEUP_RET:
+ reason = S5P_MFC_R2H_CMD_WAKEUP_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
+ reason = S5P_MFC_R2H_CMD_INIT_BUFFERS_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET:
+ reason = S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET;
+ break;
+ case S5P_FIMV_R2H_CMD_ERR_RET:
+ reason = S5P_MFC_R2H_CMD_ERR_RET;
+ break;
+ default:
+ reason = S5P_MFC_R2H_CMD_EMPTY;
+ }
+ return reason;
+}
+
+static int s5p_mfc_get_int_err_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG2);
+}
+
+static int s5p_mfc_err_dec_v5(unsigned int err)
+{
+ return (err & S5P_FIMV_ERR_DEC_MASK) >> S5P_FIMV_ERR_DEC_SHIFT;
+}
+
+static int s5p_mfc_get_img_width_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_HRESOL);
+}
+
+static int s5p_mfc_get_img_height_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_VRESOL);
+}
+
+static int s5p_mfc_get_dpb_count_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_SI_BUF_NUMBER);
+}
+
+static int s5p_mfc_get_mv_count_v5(struct s5p_mfc_dev *dev)
+{
+ /* NOP */
+ return -1;
+}
+
+static int s5p_mfc_get_inst_no_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG1);
+}
+
+static int s5p_mfc_get_enc_strm_size_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_ENC_SI_STRM_SIZE);
+}
+
+static int s5p_mfc_get_enc_slice_type_v5(struct s5p_mfc_dev *dev)
+{
+ return mfc_read(dev, S5P_FIMV_ENC_SI_SLICE_TYPE);
+}
+
+static int s5p_mfc_get_enc_dpb_count_v5(struct s5p_mfc_dev *dev)
+{
+ return -1;
+}
+
+static unsigned int s5p_mfc_get_pic_type_top_v5(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v5(ctx, PIC_TIME_TOP);
+}
+
+static unsigned int s5p_mfc_get_pic_type_bot_v5(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v5(ctx, PIC_TIME_BOT);
+}
+
+static unsigned int s5p_mfc_get_crop_info_h_v5(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v5(ctx, CROP_INFO_H);
+}
+
+static unsigned int s5p_mfc_get_crop_info_v_v5(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v5(ctx, CROP_INFO_V);
+}
+
+/* Initialize opr function pointers for MFC v5 */
+static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = {
+ .alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v5,
+ .release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v5,
+ .alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v5,
+ .release_codec_buffers = s5p_mfc_release_codec_buffers_v5,
+ .alloc_instance_buffer = s5p_mfc_alloc_instance_buffer_v5,
+ .release_instance_buffer = s5p_mfc_release_instance_buffer_v5,
+ .alloc_dev_context_buffer = s5p_mfc_alloc_dev_context_buffer_v5,
+ .release_dev_context_buffer = s5p_mfc_release_dev_context_buffer_v5,
+ .dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v5,
+ .enc_calc_src_size = s5p_mfc_enc_calc_src_size_v5,
+ .set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v5,
+ .set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v5,
+ .get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v5,
+ .try_run = s5p_mfc_try_run_v5,
+ .clear_int_flags = s5p_mfc_clear_int_flags_v5,
+ .get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v5,
+ .get_dec_y_adr = s5p_mfc_get_dec_y_adr_v5,
+ .get_dspl_status = s5p_mfc_get_dspl_status_v5,
+ .get_dec_status = s5p_mfc_get_dec_status_v5,
+ .get_dec_frame_type = s5p_mfc_get_dec_frame_type_v5,
+ .get_disp_frame_type = s5p_mfc_get_disp_frame_type_v5,
+ .get_consumed_stream = s5p_mfc_get_consumed_stream_v5,
+ .get_int_reason = s5p_mfc_get_int_reason_v5,
+ .get_int_err = s5p_mfc_get_int_err_v5,
+ .err_dec = s5p_mfc_err_dec_v5,
+ .get_img_width = s5p_mfc_get_img_width_v5,
+ .get_img_height = s5p_mfc_get_img_height_v5,
+ .get_dpb_count = s5p_mfc_get_dpb_count_v5,
+ .get_mv_count = s5p_mfc_get_mv_count_v5,
+ .get_inst_no = s5p_mfc_get_inst_no_v5,
+ .get_enc_strm_size = s5p_mfc_get_enc_strm_size_v5,
+ .get_enc_slice_type = s5p_mfc_get_enc_slice_type_v5,
+ .get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v5,
+ .get_pic_type_top = s5p_mfc_get_pic_type_top_v5,
+ .get_pic_type_bot = s5p_mfc_get_pic_type_bot_v5,
+ .get_crop_info_h = s5p_mfc_get_crop_info_h_v5,
+ .get_crop_info_v = s5p_mfc_get_crop_info_v_v5,
+};
+
+struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void)
+{
+ return &s5p_mfc_ops_v5;
+}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h
new file mode 100644
index 000000000..ffee39a12
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h
@@ -0,0 +1,85 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * Contains declarations of hw related functions.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_OPR_V5_H_
+#define S5P_MFC_OPR_V5_H_
+
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_opr.h"
+
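+/* Offsets into the shared memory area used to exchange extra parameters
+ * with the firmware.  The letters in the comments appear to mark fields
+ * used by the decoder (D), the encoder (E) or common/control code (C). */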
+enum MFC_SHM_OFS {
+ EXTENEDED_DECODE_STATUS = 0x00, /* D */
+ SET_FRAME_TAG = 0x04, /* D */
+ GET_FRAME_TAG_TOP = 0x08, /* D */
+ GET_FRAME_TAG_BOT = 0x0C, /* D */
+ PIC_TIME_TOP = 0x10, /* D */
+ PIC_TIME_BOT = 0x14, /* D */
+ START_BYTE_NUM = 0x18, /* D */
+
+ CROP_INFO_H = 0x20, /* D */
+ CROP_INFO_V = 0x24, /* D */
+ EXT_ENC_CONTROL = 0x28, /* E */
+ ENC_PARAM_CHANGE = 0x2C, /* E */
+ RC_VOP_TIMING = 0x30, /* E, MPEG4 */
+ HEC_PERIOD = 0x34, /* E, MPEG4 */
+ METADATA_ENABLE = 0x38, /* C */
+ METADATA_STATUS = 0x3C, /* C */
+ METADATA_DISPLAY_INDEX = 0x40, /* C */
+ EXT_METADATA_START_ADDR = 0x44, /* C */
+ PUT_EXTRADATA = 0x48, /* C */
+ EXTRADATA_ADDR = 0x4C, /* C */
+
+ ALLOC_LUMA_DPB_SIZE = 0x64, /* D */
+ ALLOC_CHROMA_DPB_SIZE = 0x68, /* D */
+ ALLOC_MV_SIZE = 0x6C, /* D */
+ P_B_FRAME_QP = 0x70, /* E */
+	SAMPLE_ASPECT_RATIO_IDC	= 0x74, /* E, H.264, depends on
+				ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+	EXTENDED_SAR		= 0x78, /* E, H.264, depends on
+				ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+ DISP_PIC_PROFILE = 0x7C, /* D */
+ FLUSH_CMD_TYPE = 0x80, /* C */
+ FLUSH_CMD_INBUF1 = 0x84, /* C */
+ FLUSH_CMD_INBUF2 = 0x88, /* C */
+ FLUSH_CMD_OUTBUF = 0x8C, /* E */
+ NEW_RC_BIT_RATE = 0x90, /* E, format as RC_BIT_RATE(0xC5A8)
+ depend on RC_BIT_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_RC_FRAME_RATE = 0x94, /* E, format as RC_FRAME_RATE(0xD0D0)
+ depend on RC_FRAME_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_I_PERIOD = 0x98, /* E, format as I_FRM_CTRL(0xC504)
+ depend on I_PERIOD_CHANGE in ENC_PARAM_CHANGE */
+ H264_I_PERIOD = 0x9C, /* E, H.264, open GOP */
+ RC_CONTROL_CONFIG = 0xA0, /* E */
+ BATCH_INPUT_ADDR = 0xA4, /* E */
+ BATCH_OUTPUT_ADDR = 0xA8, /* E */
+ BATCH_OUTPUT_SIZE = 0xAC, /* E */
+ MIN_LUMA_DPB_SIZE = 0xB0, /* D */
+ DEVICE_FORMAT_ID = 0xB4, /* C */
+ H264_POC_TYPE = 0xB8, /* D */
+ MIN_CHROMA_DPB_SIZE = 0xBC, /* D */
+ DISP_PIC_FRAME_TYPE = 0xC0, /* D */
+ FREE_LUMA_DPB = 0xC4, /* D, VC1 MPEG4 */
+ ASPECT_RATIO_INFO = 0xC8, /* D, MPEG4 */
+ EXTENDED_PAR = 0xCC, /* D, MPEG4 */
+ DBG_HISTORY_INPUT0 = 0xD0, /* C */
+ DBG_HISTORY_INPUT1 = 0xD4, /* C */
+ DBG_HISTORY_OUTPUT = 0xD8, /* C */
+ HIERARCHICAL_P_QP = 0xE0, /* E, H.264 */
+ FRAME_PACK_SEI_ENABLE = 0x168, /* C */
+ FRAME_PACK_SEI_AVAIL = 0x16c, /* D */
+ FRAME_PACK_SEI_INFO = 0x17c, /* E */
+};
+
+struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void);
+#endif /* S5P_MFC_OPR_V5_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
new file mode 100644
index 000000000..7c629be43
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -0,0 +1,2537 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+ *
+ * Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains hw related functions.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef DEBUG
+
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/firmware.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/cacheflush.h>
+
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_opr_v6.h"
+
+/* #define S5P_MFC_DEBUG_REGWRITE */
+#ifdef S5P_MFC_DEBUG_REGWRITE
+#undef writel
+#define writel(v, r) \
+ do { \
+ pr_err("MFCWRITE(%p): %08x\n", r, (unsigned int)v); \
+ __raw_writel(v, r); \
+ } while (0)
+#endif /* S5P_MFC_DEBUG_REGWRITE */
+
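+/* MFC v6 hardware running firmware v2 is handled like v7 and later for
+ * some decode option registers (see s5p_mfc_init_decode_v6() below). */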
+#define IS_MFCV6_V2(dev) (!IS_MFCV7_PLUS(dev) && dev->fw_ver == MFC_FW_V2)
+
+/* Allocate temporary buffers for decoding */
+static int s5p_mfc_alloc_dec_temp_buffers_v6(struct s5p_mfc_ctx *ctx)
+{
+ /* NOP */
+
+ return 0;
+}
+
+/* Release temporary buffers for decoding */
+static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
+{
+ /* NOP */
+}
+
+/* Allocate codec buffers */
+static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int mb_width, mb_height;
+ unsigned int lcu_width = 0, lcu_height = 0;
+ int ret;
+
+ mb_width = MB_WIDTH(ctx->img_width);
+ mb_height = MB_HEIGHT(ctx->img_height);
+
+ if (ctx->type == MFCINST_DECODER) {
+ mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
+ ctx->luma_size, ctx->chroma_size, ctx->mv_size);
+		mfc_debug(2, "Total bufs: %d\n", ctx->total_dpb_count);
+ } else if (ctx->type == MFCINST_ENCODER) {
+ if (IS_MFCV10(dev)) {
+ ctx->tmv_buffer_size = 0;
+ } else if (IS_MFCV8_PLUS(dev))
+ ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 *
+ ALIGN(S5P_FIMV_TMV_BUFFER_SIZE_V8(mb_width, mb_height),
+ S5P_FIMV_TMV_BUFFER_ALIGN_V6);
+ else
+ ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 *
+ ALIGN(S5P_FIMV_TMV_BUFFER_SIZE_V6(mb_width, mb_height),
+ S5P_FIMV_TMV_BUFFER_ALIGN_V6);
+ if (IS_MFCV10(dev)) {
+ lcu_width = S5P_MFC_LCU_WIDTH(ctx->img_width);
+ lcu_height = S5P_MFC_LCU_HEIGHT(ctx->img_height);
+ if (ctx->codec_mode != S5P_FIMV_CODEC_HEVC_ENC) {
+ ctx->luma_dpb_size =
+ ALIGN((mb_width * 16), 64)
+ * ALIGN((mb_height * 16), 32)
+ + 64;
+ ctx->chroma_dpb_size =
+ ALIGN((mb_width * 16), 64)
+ * (mb_height * 8)
+ + 64;
+ } else {
+ ctx->luma_dpb_size =
+ ALIGN((lcu_width * 32), 64)
+ * ALIGN((lcu_height * 32), 32)
+ + 64;
+ ctx->chroma_dpb_size =
+ ALIGN((lcu_width * 32), 64)
+ * (lcu_height * 16)
+ + 64;
+ }
+ } else {
+ ctx->luma_dpb_size = ALIGN((mb_width * mb_height) *
+ S5P_FIMV_LUMA_MB_TO_PIXEL_V6,
+ S5P_FIMV_LUMA_DPB_BUFFER_ALIGN_V6);
+ ctx->chroma_dpb_size = ALIGN((mb_width * mb_height) *
+ S5P_FIMV_CHROMA_MB_TO_PIXEL_V6,
+ S5P_FIMV_CHROMA_DPB_BUFFER_ALIGN_V6);
+ }
+ if (IS_MFCV8_PLUS(dev))
+ ctx->me_buffer_size = ALIGN(S5P_FIMV_ME_BUFFER_SIZE_V8(
+ ctx->img_width, ctx->img_height,
+ mb_width, mb_height),
+ S5P_FIMV_ME_BUFFER_ALIGN_V6);
+ else
+ ctx->me_buffer_size = ALIGN(S5P_FIMV_ME_BUFFER_SIZE_V6(
+ ctx->img_width, ctx->img_height,
+ mb_width, mb_height),
+ S5P_FIMV_ME_BUFFER_ALIGN_V6);
+
+ mfc_debug(2, "recon luma size: %zu chroma size: %zu\n",
+ ctx->luma_dpb_size, ctx->chroma_dpb_size);
+ } else {
+ return -EINVAL;
+ }
+
+ /* Codecs have different memory requirements */
+ switch (ctx->codec_mode) {
+ case S5P_MFC_CODEC_H264_DEC:
+ case S5P_MFC_CODEC_H264_MVC_DEC:
+ if (IS_MFCV10(dev))
+ mfc_debug(2, "Use min scratch buffer size\n");
+ else if (IS_MFCV8_PLUS(dev))
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V8(
+ mb_width,
+ mb_height);
+ else
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V6(
+ mb_width,
+ mb_height);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size =
+ ctx->scratch_buf_size +
+ (ctx->mv_count * ctx->mv_size);
+ break;
+ case S5P_MFC_CODEC_MPEG4_DEC:
+ if (IS_MFCV10(dev))
+ mfc_debug(2, "Use min scratch buffer size\n");
+ else if (IS_MFCV7_PLUS(dev)) {
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V7(
+ mb_width,
+ mb_height);
+ } else {
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(
+ mb_width,
+ mb_height);
+ }
+
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size = ctx->scratch_buf_size;
+ break;
+ case S5P_MFC_CODEC_VC1RCV_DEC:
+ case S5P_MFC_CODEC_VC1_DEC:
+ if (IS_MFCV10(dev))
+ mfc_debug(2, "Use min scratch buffer size\n");
+ else
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(
+ mb_width,
+ mb_height);
+
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size = ctx->scratch_buf_size;
+ break;
+ case S5P_MFC_CODEC_MPEG2_DEC:
+ ctx->bank1.size = 0;
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_H263_DEC:
+ if (IS_MFCV10(dev))
+ mfc_debug(2, "Use min scratch buffer size\n");
+ else
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(
+ mb_width,
+ mb_height);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size = ctx->scratch_buf_size;
+ break;
+ case S5P_MFC_CODEC_VP8_DEC:
+ if (IS_MFCV10(dev))
+ mfc_debug(2, "Use min scratch buffer size\n");
+ else if (IS_MFCV8_PLUS(dev))
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V8(
+ mb_width,
+ mb_height);
+ else
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(
+ mb_width,
+ mb_height);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size = ctx->scratch_buf_size;
+ break;
+ case S5P_MFC_CODEC_HEVC_DEC:
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->bank1.size =
+ ctx->scratch_buf_size +
+ (ctx->mv_count * ctx->mv_size);
+ break;
+ case S5P_MFC_CODEC_VP9_DEC:
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->bank1.size =
+ ctx->scratch_buf_size +
+ DEC_VP9_STATIC_BUFFER_SIZE;
+ break;
+ case S5P_MFC_CODEC_H264_ENC:
+ if (IS_MFCV10(dev)) {
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->me_buffer_size =
+ ALIGN(ENC_V100_H264_ME_SIZE(mb_width, mb_height), 16);
+ } else if (IS_MFCV8_PLUS(dev))
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V8(
+ mb_width,
+ mb_height);
+ else
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(
+ mb_width,
+ mb_height);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size =
+ ctx->scratch_buf_size + ctx->tmv_buffer_size +
+ (ctx->pb_count * (ctx->luma_dpb_size +
+ ctx->chroma_dpb_size + ctx->me_buffer_size));
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_MPEG4_ENC:
+ case S5P_MFC_CODEC_H263_ENC:
+ if (IS_MFCV10(dev)) {
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->me_buffer_size =
+ ALIGN(ENC_V100_MPEG4_ME_SIZE(mb_width,
+ mb_height), 16);
+ } else
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_ENC_V6(
+ mb_width,
+ mb_height);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size =
+ ctx->scratch_buf_size + ctx->tmv_buffer_size +
+ (ctx->pb_count * (ctx->luma_dpb_size +
+ ctx->chroma_dpb_size + ctx->me_buffer_size));
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_VP8_ENC:
+ if (IS_MFCV10(dev)) {
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->me_buffer_size =
+ ALIGN(ENC_V100_VP8_ME_SIZE(mb_width, mb_height),
+ 16);
+ } else if (IS_MFCV8_PLUS(dev))
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V8(
+ mb_width,
+ mb_height);
+ else
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V7(
+ mb_width,
+ mb_height);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size =
+ ctx->scratch_buf_size + ctx->tmv_buffer_size +
+ (ctx->pb_count * (ctx->luma_dpb_size +
+ ctx->chroma_dpb_size + ctx->me_buffer_size));
+ ctx->bank2.size = 0;
+ break;
+ case S5P_MFC_CODEC_HEVC_ENC:
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->me_buffer_size =
+ ALIGN(ENC_V100_HEVC_ME_SIZE(lcu_width, lcu_height), 16);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, 256);
+ ctx->bank1.size =
+ ctx->scratch_buf_size + ctx->tmv_buffer_size +
+ (ctx->pb_count * (ctx->luma_dpb_size +
+ ctx->chroma_dpb_size + ctx->me_buffer_size));
+ ctx->bank2.size = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* Allocate only if memory from bank 1 is necessary */
+ if (ctx->bank1.size > 0) {
+ ret = s5p_mfc_alloc_generic_buf(dev, BANK_L_CTX, &ctx->bank1);
+ if (ret) {
+ mfc_err("Failed to allocate Bank1 memory\n");
+ return ret;
+ }
+ BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ }
+ return 0;
+}
+
+/* Release buffers allocated for codec */
+static void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
+{
+ s5p_mfc_release_generic_buf(ctx->dev, &ctx->bank1);
+}
+
+/* Allocate memory for instance data buffer */
+static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
+
+ mfc_debug_enter();
+
+ switch (ctx->codec_mode) {
+ case S5P_MFC_CODEC_H264_DEC:
+ case S5P_MFC_CODEC_H264_MVC_DEC:
+ case S5P_MFC_CODEC_HEVC_DEC:
+ ctx->ctx.size = buf_size->h264_dec_ctx;
+ break;
+ case S5P_MFC_CODEC_MPEG4_DEC:
+ case S5P_MFC_CODEC_H263_DEC:
+ case S5P_MFC_CODEC_VC1RCV_DEC:
+ case S5P_MFC_CODEC_VC1_DEC:
+ case S5P_MFC_CODEC_MPEG2_DEC:
+ case S5P_MFC_CODEC_VP8_DEC:
+ case S5P_MFC_CODEC_VP9_DEC:
+ ctx->ctx.size = buf_size->other_dec_ctx;
+ break;
+ case S5P_MFC_CODEC_H264_ENC:
+ ctx->ctx.size = buf_size->h264_enc_ctx;
+ break;
+ case S5P_MFC_CODEC_HEVC_ENC:
+ ctx->ctx.size = buf_size->hevc_enc_ctx;
+ break;
+ case S5P_MFC_CODEC_MPEG4_ENC:
+ case S5P_MFC_CODEC_H263_ENC:
+ case S5P_MFC_CODEC_VP8_ENC:
+ ctx->ctx.size = buf_size->other_enc_ctx;
+ break;
+ default:
+ ctx->ctx.size = 0;
+ mfc_err("Codec type(%d) should be checked!\n", ctx->codec_mode);
+ break;
+ }
+
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->ctx);
+ if (ret) {
+ mfc_err("Failed to allocate instance buffer\n");
+ return ret;
+ }
+
+ memset(ctx->ctx.virt, 0, ctx->ctx.size);
+ wmb();
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+/* Release instance buffer */
+static void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
+{
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->ctx);
+}
+
+/* Allocate context buffers for SYS_INIT */
+static int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
+
+ mfc_debug_enter();
+
+ dev->ctx_buf.size = buf_size->dev_ctx;
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &dev->ctx_buf);
+ if (ret) {
+ mfc_err("Failed to allocate device context buffer\n");
+ return ret;
+ }
+
+ memset(dev->ctx_buf.virt, 0, buf_size->dev_ctx);
+ wmb();
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+/* Release context buffers for SYS_INIT */
+static void s5p_mfc_release_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
+{
+ s5p_mfc_release_priv_buf(dev, &dev->ctx_buf);
+}
+
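+/* Size in bytes of one plane, rounded up to whole macroblocks; for frames
+ * below the maximum supported size the macroblock height is additionally
+ * rounded up to an even value. */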
+static int calc_plane(int width, int height)
+{
+ int mbX, mbY;
+
+ mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6);
+ mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6);
+
+ if (width * height < S5P_FIMV_MAX_FRAME_SIZE_V6)
+ mbY = (mbY + 1) / 2 * 2;
+
+ return (mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6) *
+ (mbY * S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6);
+}
+
+static void s5p_mfc_dec_calc_dpb_size_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6);
+ ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN_V6);
+ mfc_debug(2, "SEQ Done: Movie dimensions %dx%d,\n"
+ "buffer dimensions: %dx%d\n", ctx->img_width,
+ ctx->img_height, ctx->buf_width, ctx->buf_height);
+
+ ctx->luma_size = calc_plane(ctx->img_width, ctx->img_height);
+ ctx->chroma_size = calc_plane(ctx->img_width, (ctx->img_height >> 1));
+ if (IS_MFCV8_PLUS(ctx->dev)) {
+		/* MFCv8 needs an additional 64 bytes for the luma and chroma DPBs */
+ ctx->luma_size += S5P_FIMV_D_ALIGN_PLANE_SIZE_V8;
+ ctx->chroma_size += S5P_FIMV_D_ALIGN_PLANE_SIZE_V8;
+ }
+
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
+ ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) {
+ if (IS_MFCV10(dev)) {
+ ctx->mv_size = S5P_MFC_DEC_MV_SIZE_V10(ctx->img_width,
+ ctx->img_height);
+ } else {
+ ctx->mv_size = S5P_MFC_DEC_MV_SIZE_V6(ctx->img_width,
+ ctx->img_height);
+ }
+ } else if (ctx->codec_mode == S5P_MFC_CODEC_HEVC_DEC) {
+ ctx->mv_size = s5p_mfc_dec_hevc_mv_size(ctx->img_width,
+ ctx->img_height);
+ ctx->mv_size = ALIGN(ctx->mv_size, 32);
+ } else {
+ ctx->mv_size = 0;
+ }
+}
+
+static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
+{
+ unsigned int mb_width, mb_height;
+
+ mb_width = MB_WIDTH(ctx->img_width);
+ mb_height = MB_HEIGHT(ctx->img_height);
+
+ ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
+ ctx->luma_size = ALIGN((mb_width * mb_height) * 256, 256);
+ ctx->chroma_size = ALIGN((mb_width * mb_height) * 128, 256);
+
+ /* MFCv7 needs pad bytes for Luma and Chroma */
+ if (IS_MFCV7_PLUS(ctx->dev)) {
+ ctx->luma_size += MFC_LUMA_PAD_BYTES_V7;
+ ctx->chroma_size += MFC_CHROMA_PAD_BYTES_V7;
+ }
+}
+
+/* Set registers for decoding stream buffer */
+static int s5p_mfc_set_dec_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
+ int buf_addr, unsigned int start_num_byte,
+ unsigned int strm_size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
+
+ mfc_debug_enter();
+ mfc_debug(2, "inst_no: %d, buf_addr: 0x%08x,\n"
+ "buf_size: 0x%08x (%d)\n",
+ ctx->inst_no, buf_addr, strm_size, strm_size);
+ writel(strm_size, mfc_regs->d_stream_data_size);
+ writel(buf_addr, mfc_regs->d_cpb_buffer_addr);
+ writel(buf_size->cpb, mfc_regs->d_cpb_buffer_size);
+ writel(start_num_byte, mfc_regs->d_cpb_buffer_offset);
+
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Set decoding frame buffer */
+static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
+{
+ unsigned int frame_size, i;
+ unsigned int frame_size_ch, frame_size_mv;
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ size_t buf_addr1;
+ int buf_size1;
+ int align_gap;
+
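+	/* Bank1 is carved up sequentially: the scratch buffer comes first,
+	 * followed by per-frame motion vector buffers for H.264/HEVC and,
+	 * for VP9, the static buffer. */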
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+
+ mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
+ mfc_debug(2, "Total DPB COUNT: %d\n", ctx->total_dpb_count);
+ mfc_debug(2, "Setting display delay to %d\n", ctx->display_delay);
+
+ writel(ctx->total_dpb_count, mfc_regs->d_num_dpb);
+ writel(ctx->luma_size, mfc_regs->d_first_plane_dpb_size);
+ writel(ctx->chroma_size, mfc_regs->d_second_plane_dpb_size);
+
+ writel(buf_addr1, mfc_regs->d_scratch_buffer_addr);
+ writel(ctx->scratch_buf_size, mfc_regs->d_scratch_buffer_size);
+
+ if (IS_MFCV8_PLUS(dev)) {
+ writel(ctx->img_width,
+ mfc_regs->d_first_plane_dpb_stride_size);
+ writel(ctx->img_width,
+ mfc_regs->d_second_plane_dpb_stride_size);
+ }
+
+ buf_addr1 += ctx->scratch_buf_size;
+ buf_size1 -= ctx->scratch_buf_size;
+
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
+ ctx->codec_mode == S5P_FIMV_CODEC_H264_MVC_DEC ||
+ ctx->codec_mode == S5P_FIMV_CODEC_HEVC_DEC) {
+ writel(ctx->mv_size, mfc_regs->d_mv_buffer_size);
+ writel(ctx->mv_count, mfc_regs->d_num_mv);
+ }
+
+ frame_size = ctx->luma_size;
+ frame_size_ch = ctx->chroma_size;
+ frame_size_mv = ctx->mv_size;
+ mfc_debug(2, "Frame size: %d ch: %d mv: %d\n",
+ frame_size, frame_size_ch, frame_size_mv);
+
+ for (i = 0; i < ctx->total_dpb_count; i++) {
+ /* Bank2 */
+ mfc_debug(2, "Luma %d: %zx\n", i,
+ ctx->dst_bufs[i].cookie.raw.luma);
+ writel(ctx->dst_bufs[i].cookie.raw.luma,
+ mfc_regs->d_first_plane_dpb + i * 4);
+ mfc_debug(2, "\tChroma %d: %zx\n", i,
+ ctx->dst_bufs[i].cookie.raw.chroma);
+ writel(ctx->dst_bufs[i].cookie.raw.chroma,
+ mfc_regs->d_second_plane_dpb + i * 4);
+ }
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
+ ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC ||
+ ctx->codec_mode == S5P_MFC_CODEC_HEVC_DEC) {
+ for (i = 0; i < ctx->mv_count; i++) {
+ /* To test alignment */
+ align_gap = buf_addr1;
+ buf_addr1 = ALIGN(buf_addr1, 16);
+ align_gap = buf_addr1 - align_gap;
+ buf_size1 -= align_gap;
+
+ mfc_debug(2, "\tBuf1: %zx, size: %d\n",
+ buf_addr1, buf_size1);
+ writel(buf_addr1, mfc_regs->d_mv_buffer + i * 4);
+ buf_addr1 += frame_size_mv;
+ buf_size1 -= frame_size_mv;
+ }
+ }
+ if (ctx->codec_mode == S5P_FIMV_CODEC_VP9_DEC) {
+ writel(buf_addr1, mfc_regs->d_static_buffer_addr);
+ writel(DEC_VP9_STATIC_BUFFER_SIZE,
+ mfc_regs->d_static_buffer_size);
+ buf_addr1 += DEC_VP9_STATIC_BUFFER_SIZE;
+ buf_size1 -= DEC_VP9_STATIC_BUFFER_SIZE;
+ }
+
+ mfc_debug(2, "Buf1: %zx, buf_size1: %d (frames %d)\n",
+ buf_addr1, buf_size1, ctx->total_dpb_count);
+ if (buf_size1 < 0) {
+ mfc_debug(2, "Not enough memory has been allocated.\n");
+ return -ENOMEM;
+ }
+
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_CH_INIT_BUFS_V6, NULL);
+
+ mfc_debug(2, "After setting buffers.\n");
+ return 0;
+}
+
+/* Set registers for encoding stream buffer */
+static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
+ unsigned long addr, unsigned int size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+
+ writel(addr, mfc_regs->e_stream_buffer_addr); /* 16B align */
+ writel(size, mfc_regs->e_stream_buffer_size);
+
+ mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%x\n",
+ addr, size);
+
+ return 0;
+}
+
+static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
+ unsigned long y_addr, unsigned long c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+
+ writel(y_addr, mfc_regs->e_source_first_plane_addr);
+ writel(c_addr, mfc_regs->e_source_second_plane_addr);
+
+ mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
+ mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
+}
+
+static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
+ unsigned long *y_addr, unsigned long *c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ unsigned long enc_recon_y_addr, enc_recon_c_addr;
+
+ *y_addr = readl(mfc_regs->e_encoded_source_first_plane_addr);
+ *c_addr = readl(mfc_regs->e_encoded_source_second_plane_addr);
+
+ enc_recon_y_addr = readl(mfc_regs->e_recon_luma_dpb_addr);
+ enc_recon_c_addr = readl(mfc_regs->e_recon_chroma_dpb_addr);
+
+ mfc_debug(2, "recon y addr: 0x%08lx y_addr: 0x%08lx\n", enc_recon_y_addr, *y_addr);
+ mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
+}
+
+/* Set encoding ref & codec buffer */
+static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ size_t buf_addr1;
+ int i, buf_size1;
+
+ mfc_debug_enter();
+
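+	/* Bank1 holds the encoder reference data: luma/chroma DPBs and ME
+	 * buffers for each reference frame, then the scratch buffer and the
+	 * two halves of the TMV buffer. */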
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+
+ mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
+
+ if (IS_MFCV10(dev)) {
+		/* the start address of each buffer is aligned */
+ for (i = 0; i < ctx->pb_count; i++) {
+ writel(buf_addr1, mfc_regs->e_luma_dpb + (4 * i));
+ buf_addr1 += ctx->luma_dpb_size;
+ buf_size1 -= ctx->luma_dpb_size;
+ }
+ for (i = 0; i < ctx->pb_count; i++) {
+ writel(buf_addr1, mfc_regs->e_chroma_dpb + (4 * i));
+ buf_addr1 += ctx->chroma_dpb_size;
+ buf_size1 -= ctx->chroma_dpb_size;
+ }
+ for (i = 0; i < ctx->pb_count; i++) {
+ writel(buf_addr1, mfc_regs->e_me_buffer + (4 * i));
+ buf_addr1 += ctx->me_buffer_size;
+ buf_size1 -= ctx->me_buffer_size;
+ }
+ } else {
+ for (i = 0; i < ctx->pb_count; i++) {
+ writel(buf_addr1, mfc_regs->e_luma_dpb + (4 * i));
+ buf_addr1 += ctx->luma_dpb_size;
+ writel(buf_addr1, mfc_regs->e_chroma_dpb + (4 * i));
+ buf_addr1 += ctx->chroma_dpb_size;
+ writel(buf_addr1, mfc_regs->e_me_buffer + (4 * i));
+ buf_addr1 += ctx->me_buffer_size;
+ buf_size1 -= (ctx->luma_dpb_size + ctx->chroma_dpb_size
+ + ctx->me_buffer_size);
+ }
+ }
+
+ writel(buf_addr1, mfc_regs->e_scratch_buffer_addr);
+ writel(ctx->scratch_buf_size, mfc_regs->e_scratch_buffer_size);
+ buf_addr1 += ctx->scratch_buf_size;
+ buf_size1 -= ctx->scratch_buf_size;
+
+ writel(buf_addr1, mfc_regs->e_tmv_buffer0);
+ buf_addr1 += ctx->tmv_buffer_size >> 1;
+ writel(buf_addr1, mfc_regs->e_tmv_buffer1);
+ buf_addr1 += ctx->tmv_buffer_size >> 1;
+ buf_size1 -= ctx->tmv_buffer_size;
+
+ mfc_debug(2, "Buf1: %zu, buf_size1: %d (ref frames %d)\n",
+ buf_addr1, buf_size1, ctx->pb_count);
+ if (buf_size1 < 0) {
+ mfc_debug(2, "Not enough memory has been allocated.\n");
+ return -ENOMEM;
+ }
+
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_CH_INIT_BUFS_V6, NULL);
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+static int s5p_mfc_set_slice_mode(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+
+ /* multi-slice control */
+ /* multi-slice MB number or bit size */
+ writel(ctx->slice_mode, mfc_regs->e_mslice_mode);
+ if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ writel(ctx->slice_size.mb, mfc_regs->e_mslice_size_mb);
+ } else if (ctx->slice_mode ==
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ writel(ctx->slice_size.bits, mfc_regs->e_mslice_size_bits);
+ } else {
+ writel(0x0, mfc_regs->e_mslice_size_mb);
+ writel(0x0, mfc_regs->e_mslice_size_bits);
+ }
+
+ return 0;
+}
+
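+/* Program the encoding parameters shared by all codecs; the codec-specific
+ * setters below call this first and then program their own registers. */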
+static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ unsigned int reg = 0;
+
+ mfc_debug_enter();
+
+ /* width */
+ writel(ctx->img_width, mfc_regs->e_frame_width); /* 16 align */
+ /* height */
+ writel(ctx->img_height, mfc_regs->e_frame_height); /* 16 align */
+
+ /* cropped width */
+ writel(ctx->img_width, mfc_regs->e_cropped_frame_width);
+ /* cropped height */
+ writel(ctx->img_height, mfc_regs->e_cropped_frame_height);
+ /* cropped offset */
+ writel(0x0, mfc_regs->e_frame_crop_offset);
+
+ /* pictype : IDR period */
+ reg = 0;
+ reg |= p->gop_size & 0xFFFF;
+ writel(reg, mfc_regs->e_gop_config);
+
+ /* multi-slice control */
+ /* multi-slice MB number or bit size */
+ ctx->slice_mode = p->slice_mode;
+ reg = 0;
+ if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ reg |= (0x1 << 3);
+ writel(reg, mfc_regs->e_enc_options);
+ ctx->slice_size.mb = p->slice_mb;
+ } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ reg |= (0x1 << 3);
+ writel(reg, mfc_regs->e_enc_options);
+ ctx->slice_size.bits = p->slice_bit;
+ } else {
+ reg &= ~(0x1 << 3);
+ writel(reg, mfc_regs->e_enc_options);
+ }
+
+ s5p_mfc_set_slice_mode(ctx);
+
+ /* cyclic intra refresh */
+ writel(p->intra_refresh_mb, mfc_regs->e_ir_size);
+ reg = readl(mfc_regs->e_enc_options);
+ if (p->intra_refresh_mb == 0)
+ reg &= ~(0x1 << 4);
+ else
+ reg |= (0x1 << 4);
+ writel(reg, mfc_regs->e_enc_options);
+
+ /* 'NON_REFERENCE_STORE_ENABLE' for debugging */
+ reg = readl(mfc_regs->e_enc_options);
+ reg &= ~(0x1 << 9);
+ writel(reg, mfc_regs->e_enc_options);
+
+ /* memory structure cur. frame */
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
+ /* 0: Linear, 1: 2D tiled*/
+ reg = readl(mfc_regs->e_enc_options);
+ reg &= ~(0x1 << 7);
+ writel(reg, mfc_regs->e_enc_options);
+ /* 0: NV12(CbCr), 1: NV21(CrCb) */
+ writel(0x0, mfc_regs->pixel_format);
+ } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV21M) {
+ /* 0: Linear, 1: 2D tiled*/
+ reg = readl(mfc_regs->e_enc_options);
+ reg &= ~(0x1 << 7);
+ writel(reg, mfc_regs->e_enc_options);
+ /* 0: NV12(CbCr), 1: NV21(CrCb) */
+ writel(0x1, mfc_regs->pixel_format);
+ } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
+ /* 0: Linear, 1: 2D tiled*/
+ reg = readl(mfc_regs->e_enc_options);
+ reg |= (0x1 << 7);
+ writel(reg, mfc_regs->e_enc_options);
+ /* 0: NV12(CbCr), 1: NV21(CrCb) */
+ writel(0x0, mfc_regs->pixel_format);
+ }
+
+ /* memory structure recon. frame */
+ /* 0: Linear, 1: 2D tiled */
+ reg = readl(mfc_regs->e_enc_options);
+ reg |= (0x1 << 8);
+ writel(reg, mfc_regs->e_enc_options);
+
+ /* padding control & value */
+ writel(0x0, mfc_regs->e_padding_ctrl);
+ if (p->pad) {
+ reg = 0;
+ /** enable */
+ reg |= (1 << 31);
+ /** cr value */
+ reg |= ((p->pad_cr & 0xFF) << 16);
+ /** cb value */
+ reg |= ((p->pad_cb & 0xFF) << 8);
+ /** y value */
+ reg |= p->pad_luma & 0xFF;
+ writel(reg, mfc_regs->e_padding_ctrl);
+ }
+
+ /* rate control config. */
+ reg = 0;
+ /* frame-level rate control */
+ reg |= ((p->rc_frame & 0x1) << 9);
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* bit rate */
+ if (p->rc_frame)
+ writel(p->rc_bitrate,
+ mfc_regs->e_rc_bit_rate);
+ else
+ writel(1, mfc_regs->e_rc_bit_rate);
+
+ /* reaction coefficient */
+ if (p->rc_frame) {
+ if (p->rc_reaction_coeff < TIGHT_CBR_MAX) /* tight CBR */
+ writel(1, mfc_regs->e_rc_mode);
+ else /* loose CBR */
+ writel(2, mfc_regs->e_rc_mode);
+ }
+
+ /* seq header ctrl */
+ reg = readl(mfc_regs->e_enc_options);
+ reg &= ~(0x1 << 2);
+ reg |= ((p->seq_hdr_mode & 0x1) << 2);
+
+ /* frame skip mode */
+ reg &= ~(0x3);
+ reg |= (p->frame_skip_mode & 0x3);
+ writel(reg, mfc_regs->e_enc_options);
+
+ /* 'DROP_CONTROL_ENABLE', disable */
+ reg = readl(mfc_regs->e_rc_config);
+ reg &= ~(0x1 << 10);
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* setting for MV range [16, 256] */
+ reg = (p->mv_h_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
+ writel(reg, mfc_regs->e_mv_hor_range);
+
+ reg = (p->mv_v_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
+ writel(reg, mfc_regs->e_mv_ver_range);
+
+ writel(0x0, mfc_regs->e_frame_insertion);
+ writel(0x0, mfc_regs->e_roi_buffer_addr);
+ writel(0x0, mfc_regs->e_param_change);
+ writel(0x0, mfc_regs->e_rc_roi_ctrl);
+ writel(0x0, mfc_regs->e_picture_tag);
+
+ writel(0x0, mfc_regs->e_bit_count_enable);
+ writel(0x0, mfc_regs->e_max_bit_count);
+ writel(0x0, mfc_regs->e_min_bit_count);
+
+ writel(0x0, mfc_regs->e_metadata_buffer_addr);
+ writel(0x0, mfc_regs->e_metadata_buffer_size);
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
+ unsigned int reg = 0;
+ int i;
+
+ mfc_debug_enter();
+
+ s5p_mfc_set_enc_params(ctx);
+
+ /* pictype : number of B */
+ reg = readl(mfc_regs->e_gop_config);
+ reg &= ~(0x3 << 16);
+ reg |= ((p->num_b_frame & 0x3) << 16);
+ writel(reg, mfc_regs->e_gop_config);
+
+ /* profile & level */
+ reg = 0;
+ /** level */
+ reg |= ((p_h264->level & 0xFF) << 8);
+ /** profile - 0 ~ 3 */
+ reg |= p_h264->profile & 0x3F;
+ writel(reg, mfc_regs->e_picture_profile);
+
+ /* rate control config. */
+ reg = readl(mfc_regs->e_rc_config);
+ /** macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= ((p->rc_mb & 0x1) << 8);
+ writel(reg, mfc_regs->e_rc_config);
+
+ /** frame QP */
+ reg &= ~(0x3F);
+ reg |= p_h264->rc_frame_qp & 0x3F;
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* max & min value of QP */
+ reg = 0;
+ /** max QP */
+ reg |= ((p_h264->rc_max_qp & 0x3F) << 8);
+ /** min QP */
+ reg |= p_h264->rc_min_qp & 0x3F;
+ writel(reg, mfc_regs->e_rc_qp_bound);
+
+ /* other QPs */
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
+ if (!p->rc_frame && !p->rc_mb) {
+ reg = 0;
+ reg |= ((p_h264->rc_b_frame_qp & 0x3F) << 16);
+ reg |= ((p_h264->rc_p_frame_qp & 0x3F) << 8);
+ reg |= p_h264->rc_frame_qp & 0x3F;
+ writel(reg, mfc_regs->e_fixed_picture_qp);
+ }
+
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
+ reg = 0;
+ reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
+ reg |= p->rc_framerate_denom & 0xFFFF;
+ writel(reg, mfc_regs->e_rc_frame_rate);
+ }
+
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ writel(p_h264->cpb_size & 0xFFFF,
+ mfc_regs->e_vbv_buffer_size);
+
+ if (p->rc_frame)
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ }
+
+ /* interlace */
+ reg = 0;
+ reg |= ((p_h264->interlace & 0x1) << 3);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* height */
+ if (p_h264->interlace) {
+ writel(ctx->img_height >> 1,
+ mfc_regs->e_frame_height); /* 32 align */
+ /* cropped height */
+ writel(ctx->img_height >> 1,
+ mfc_regs->e_cropped_frame_height);
+ }
+
+ /* loop filter ctrl */
+ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x3 << 1);
+ reg |= ((p_h264->loop_filter_mode & 0x3) << 1);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* loopfilter alpha offset */
+ if (p_h264->loop_filter_alpha < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_h264->loop_filter_alpha) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_h264->loop_filter_alpha & 0xF);
+ }
+ writel(reg, mfc_regs->e_h264_lf_alpha_offset);
+
+ /* loopfilter beta offset */
+ if (p_h264->loop_filter_beta < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_h264->loop_filter_beta) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_h264->loop_filter_beta & 0xF);
+ }
+ writel(reg, mfc_regs->e_h264_lf_beta_offset);
+
+ /* entropy coding mode */
+ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1);
+ reg |= p_h264->entropy_mode & 0x1;
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* number of ref. picture */
+ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 7);
+ reg |= (((p_h264->num_ref_pic_4p - 1) & 0x1) << 7);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* 8x8 transform enable */
+ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x3 << 12);
+ reg |= ((p_h264->_8x8_transform & 0x3) << 12);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* macroblock adaptive scaling features */
+ writel(0x0, mfc_regs->e_mb_rc_config);
+ if (p->rc_mb) {
+ reg = 0;
+ /** dark region */
+ reg |= ((p_h264->rc_mb_dark & 0x1) << 3);
+ /** smooth region */
+ reg |= ((p_h264->rc_mb_smooth & 0x1) << 2);
+ /** static region */
+ reg |= ((p_h264->rc_mb_static & 0x1) << 1);
+ /** high activity region */
+ reg |= p_h264->rc_mb_activity & 0x1;
+ writel(reg, mfc_regs->e_mb_rc_config);
+ }
+
+ /* aspect ratio VUI */
+ readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 5);
+ reg |= ((p_h264->vui_sar & 0x1) << 5);
+ writel(reg, mfc_regs->e_h264_options);
+
+ writel(0x0, mfc_regs->e_aspect_ratio);
+ writel(0x0, mfc_regs->e_extended_sar);
+ if (p_h264->vui_sar) {
+		/* aspect ratio IDC */
+ reg = 0;
+ reg |= p_h264->vui_sar_idc & 0xFF;
+ writel(reg, mfc_regs->e_aspect_ratio);
+ if (p_h264->vui_sar_idc == 0xFF) {
+ /* extended SAR */
+ reg = 0;
+ reg |= (p_h264->vui_ext_sar_width & 0xFFFF) << 16;
+ reg |= p_h264->vui_ext_sar_height & 0xFFFF;
+ writel(reg, mfc_regs->e_extended_sar);
+ }
+ }
+
+ /* intra picture period for H.264 open GOP */
+ /* control */
+ readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 4);
+ reg |= ((p_h264->open_gop & 0x1) << 4);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* value */
+ writel(0x0, mfc_regs->e_h264_i_period);
+ if (p_h264->open_gop) {
+ reg = 0;
+ reg |= p_h264->open_gop_size & 0xFFFF;
+ writel(reg, mfc_regs->e_h264_i_period);
+ }
+
+	/* 'WEIGHTED_BI_PREDICTION' for B frames is disabled */
+ readl(mfc_regs->e_h264_options);
+ reg &= ~(0x3 << 9);
+ writel(reg, mfc_regs->e_h264_options);
+
+	/* 'CONSTRAINED_INTRA_PRED_ENABLE' is disabled */
+ readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 14);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* ASO */
+ readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 6);
+ reg |= ((p_h264->aso & 0x1) << 6);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* hier qp enable */
+ readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 8);
+ reg |= ((p_h264->open_gop & 0x1) << 8);
+ writel(reg, mfc_regs->e_h264_options);
+ reg = 0;
+ if (p_h264->hier_qp && p_h264->hier_qp_layer) {
+ reg |= (p_h264->hier_qp_type & 0x1) << 0x3;
+ reg |= p_h264->hier_qp_layer & 0x7;
+ writel(reg, mfc_regs->e_h264_num_t_layer);
+ /* QP value for each layer */
+ for (i = 0; i < p_h264->hier_qp_layer &&
+ i < ARRAY_SIZE(p_h264->hier_qp_layer_qp); i++) {
+ writel(p_h264->hier_qp_layer_qp[i],
+ mfc_regs->e_h264_hierarchical_qp_layer0
+ + i * 4);
+ }
+ }
+	/* the number of coding layers should be zero when hierarchical coding is disabled */
+ writel(reg, mfc_regs->e_h264_num_t_layer);
+
+ /* frame packing SEI generation */
+ readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 25);
+ reg |= ((p_h264->sei_frame_packing & 0x1) << 25);
+ writel(reg, mfc_regs->e_h264_options);
+ if (p_h264->sei_frame_packing) {
+ reg = 0;
+ /** current frame0 flag */
+ reg |= ((p_h264->sei_fp_curr_frame_0 & 0x1) << 2);
+ /** arrangement type */
+ reg |= p_h264->sei_fp_arrangement_type & 0x3;
+ writel(reg, mfc_regs->e_h264_frame_packing_sei_info);
+ }
+
+ if (p_h264->fmo) {
+ switch (p_h264->fmo_map_type) {
+ case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES:
+ if (p_h264->fmo_slice_grp > 4)
+ p_h264->fmo_slice_grp = 4;
+ for (i = 0; i < (p_h264->fmo_slice_grp & 0xF); i++)
+ writel(p_h264->fmo_run_len[i] - 1,
+ mfc_regs->e_h264_fmo_run_length_minus1_0
+ + i * 4);
+ break;
+ case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES:
+ if (p_h264->fmo_slice_grp > 4)
+ p_h264->fmo_slice_grp = 4;
+ break;
+ case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN:
+ case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN:
+ if (p_h264->fmo_slice_grp > 2)
+ p_h264->fmo_slice_grp = 2;
+ writel(p_h264->fmo_chg_dir & 0x1,
+ mfc_regs->e_h264_fmo_slice_grp_change_dir);
+			/* the valid range is 0 to (number of macroblocks - 1) */
+ writel(p_h264->fmo_chg_rate,
+ mfc_regs->e_h264_fmo_slice_grp_change_rate_minus1);
+ break;
+ default:
+ mfc_err("Unsupported map type for FMO: %d\n",
+ p_h264->fmo_map_type);
+ p_h264->fmo_map_type = 0;
+ p_h264->fmo_slice_grp = 1;
+ break;
+ }
+
+ writel(p_h264->fmo_map_type,
+ mfc_regs->e_h264_fmo_slice_grp_map_type);
+ writel(p_h264->fmo_slice_grp - 1,
+ mfc_regs->e_h264_fmo_num_slice_grp_minus1);
+ } else {
+ writel(0, mfc_regs->e_h264_fmo_num_slice_grp_minus1);
+ }
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
+ unsigned int reg = 0;
+
+ mfc_debug_enter();
+
+ s5p_mfc_set_enc_params(ctx);
+
+ /* pictype : number of B */
+ reg = readl(mfc_regs->e_gop_config);
+ reg &= ~(0x3 << 16);
+ reg |= ((p->num_b_frame & 0x3) << 16);
+ writel(reg, mfc_regs->e_gop_config);
+
+ /* profile & level */
+ reg = 0;
+ /** level */
+ reg |= ((p_mpeg4->level & 0xFF) << 8);
+ /** profile - 0 ~ 1 */
+ reg |= p_mpeg4->profile & 0x3F;
+ writel(reg, mfc_regs->e_picture_profile);
+
+ /* rate control config. */
+ reg = readl(mfc_regs->e_rc_config);
+ /** macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= ((p->rc_mb & 0x1) << 8);
+ writel(reg, mfc_regs->e_rc_config);
+
+ /** frame QP */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->rc_frame_qp & 0x3F;
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* max & min value of QP */
+ reg = 0;
+ /** max QP */
+ reg |= ((p_mpeg4->rc_max_qp & 0x3F) << 8);
+ /** min QP */
+ reg |= p_mpeg4->rc_min_qp & 0x3F;
+ writel(reg, mfc_regs->e_rc_qp_bound);
+
+ /* other QPs */
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
+ if (!p->rc_frame && !p->rc_mb) {
+ reg = 0;
+ reg |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 16);
+ reg |= ((p_mpeg4->rc_p_frame_qp & 0x3F) << 8);
+ reg |= p_mpeg4->rc_frame_qp & 0x3F;
+ writel(reg, mfc_regs->e_fixed_picture_qp);
+ }
+
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
+ reg = 0;
+ reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
+ reg |= p->rc_framerate_denom & 0xFFFF;
+ writel(reg, mfc_regs->e_rc_frame_rate);
+ }
+
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
+
+ if (p->rc_frame)
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ }
+
+ /* Disable HEC */
+ writel(0x0, mfc_regs->e_mpeg4_options);
+ writel(0x0, mfc_regs->e_mpeg4_hec_period);
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
+ unsigned int reg = 0;
+
+ mfc_debug_enter();
+
+ s5p_mfc_set_enc_params(ctx);
+
+ /* profile & level */
+ reg = 0;
+ /** profile */
+ reg |= (0x1 << 4);
+ writel(reg, mfc_regs->e_picture_profile);
+
+ /* rate control config. */
+ reg = readl(mfc_regs->e_rc_config);
+ /** macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= ((p->rc_mb & 0x1) << 8);
+ writel(reg, mfc_regs->e_rc_config);
+
+ /** frame QP */
+ reg &= ~(0x3F);
+ reg |= p_h263->rc_frame_qp & 0x3F;
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* max & min value of QP */
+ reg = 0;
+ /** max QP */
+ reg |= ((p_h263->rc_max_qp & 0x3F) << 8);
+ /** min QP */
+ reg |= p_h263->rc_min_qp & 0x3F;
+ writel(reg, mfc_regs->e_rc_qp_bound);
+
+ /* other QPs */
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
+ if (!p->rc_frame && !p->rc_mb) {
+ reg = 0;
+ reg |= ((p_h263->rc_b_frame_qp & 0x3F) << 16);
+ reg |= ((p_h263->rc_p_frame_qp & 0x3F) << 8);
+ reg |= p_h263->rc_frame_qp & 0x3F;
+ writel(reg, mfc_regs->e_fixed_picture_qp);
+ }
+
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
+ reg = 0;
+ reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
+ reg |= p->rc_framerate_denom & 0xFFFF;
+ writel(reg, mfc_regs->e_rc_frame_rate);
+ }
+
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
+
+ if (p->rc_frame)
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ }
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_vp8_enc_params *p_vp8 = &p->codec.vp8;
+ unsigned int reg = 0;
+ unsigned int val = 0;
+
+ mfc_debug_enter();
+
+ s5p_mfc_set_enc_params(ctx);
+
+ /* pictype : number of B */
+ reg = readl(mfc_regs->e_gop_config);
+ reg &= ~(0x3 << 16);
+ reg |= ((p->num_b_frame & 0x3) << 16);
+ writel(reg, mfc_regs->e_gop_config);
+
+ /* profile - 0 ~ 3 */
+ reg = p_vp8->profile & 0x3;
+ writel(reg, mfc_regs->e_picture_profile);
+
+ /* rate control config. */
+ reg = readl(mfc_regs->e_rc_config);
+ /** macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= ((p->rc_mb & 0x1) << 8);
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
+ reg = 0;
+ reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
+ reg |= p->rc_framerate_denom & 0xFFFF;
+ writel(reg, mfc_regs->e_rc_frame_rate);
+ }
+
+ /* frame QP */
+ reg &= ~(0x7F);
+ reg |= p_vp8->rc_frame_qp & 0x7F;
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* other QPs */
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
+ if (!p->rc_frame && !p->rc_mb) {
+ reg = 0;
+ reg |= ((p_vp8->rc_p_frame_qp & 0x7F) << 8);
+ reg |= p_vp8->rc_frame_qp & 0x7F;
+ writel(reg, mfc_regs->e_fixed_picture_qp);
+ }
+
+ /* max QP */
+ reg = ((p_vp8->rc_max_qp & 0x7F) << 8);
+ /* min QP */
+ reg |= p_vp8->rc_min_qp & 0x7F;
+ writel(reg, mfc_regs->e_rc_qp_bound);
+
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
+
+ if (p->rc_frame)
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ }
+
+ /* VP8 specific params */
+ reg = 0;
+ reg |= (p_vp8->imd_4x4 & 0x1) << 10;
+ switch (p_vp8->num_partitions) {
+ case V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION:
+ val = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS:
+ val = 2;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS:
+ val = 4;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS:
+ val = 8;
+ break;
+ }
+ reg |= (val & 0xF) << 3;
+ reg |= (p_vp8->num_ref & 0x2);
+ writel(reg, mfc_regs->e_vp8_options);
+
+ mfc_debug_leave();
+
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_hevc(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_hevc_enc_params *p_hevc = &p->codec.hevc;
+ unsigned int reg = 0;
+ int i;
+
+ mfc_debug_enter();
+
+ s5p_mfc_set_enc_params(ctx);
+
+ /* pictype : number of B */
+ reg = readl(mfc_regs->e_gop_config);
+ /* num_b_frame - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= (p->num_b_frame << 16);
+ writel(reg, mfc_regs->e_gop_config);
+
+ /* UHD encoding case */
+ if ((ctx->img_width == 3840) && (ctx->img_height == 2160)) {
+ p_hevc->level = 51;
+ p_hevc->tier = 0;
+ /* this tier can be changed */
+ }
+
+ /* tier & level */
+ reg = 0;
+ /* profile */
+ reg |= p_hevc->profile & 0x3;
+ /* level */
+ reg &= ~(0xFF << 8);
+ reg |= (p_hevc->level << 8);
+ /* tier - 0 ~ 1 */
+ reg |= (p_hevc->tier << 16);
+ writel(reg, mfc_regs->e_picture_profile);
+
+ switch (p_hevc->loopfilter) {
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED:
+ p_hevc->loopfilter_disable = 1;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED:
+ p_hevc->loopfilter_disable = 0;
+ p_hevc->loopfilter_across = 1;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
+ p_hevc->loopfilter_disable = 0;
+ p_hevc->loopfilter_across = 0;
+ break;
+ }
+
+ /* max partition depth */
+ reg = 0;
+ reg |= (p_hevc->max_partition_depth & 0x1);
+ reg |= (p_hevc->num_refs_for_p-1) << 2;
+ reg |= (p_hevc->refreshtype & 0x3) << 3;
+ reg |= (p_hevc->const_intra_period_enable & 0x1) << 5;
+ reg |= (p_hevc->lossless_cu_enable & 0x1) << 6;
+ reg |= (p_hevc->wavefront_enable & 0x1) << 7;
+ reg |= (p_hevc->loopfilter_disable & 0x1) << 8;
+ reg |= (p_hevc->loopfilter_across & 0x1) << 9;
+ reg |= (p_hevc->enable_ltr & 0x1) << 10;
+ reg |= (p_hevc->hier_qp_enable & 0x1) << 11;
+ reg |= (p_hevc->general_pb_enable & 0x1) << 13;
+ reg |= (p_hevc->temporal_id_enable & 0x1) << 14;
+ reg |= (p_hevc->strong_intra_smooth & 0x1) << 15;
+ reg |= (p_hevc->intra_pu_split_disable & 0x1) << 16;
+ reg |= (p_hevc->tmv_prediction_disable & 0x1) << 17;
+ reg |= (p_hevc->max_num_merge_mv & 0x7) << 18;
+ reg |= (p_hevc->encoding_nostartcode_enable & 0x1) << 23;
+ reg |= (p_hevc->prepend_sps_pps_to_idr << 26);
+
+ writel(reg, mfc_regs->e_hevc_options);
+ /* refresh period */
+ if (p_hevc->refreshtype) {
+ reg = 0;
+ reg |= (p_hevc->refreshperiod & 0xFFFF);
+ writel(reg, mfc_regs->e_hevc_refresh_period);
+ }
+ /* loop filter setting */
+ if (!(p_hevc->loopfilter_disable & 0x1)) {
+ reg = 0;
+ reg |= (p_hevc->lf_beta_offset_div2);
+ writel(reg, mfc_regs->e_hevc_lf_beta_offset_div2);
+ reg = 0;
+ reg |= (p_hevc->lf_tc_offset_div2);
+ writel(reg, mfc_regs->e_hevc_lf_tc_offset_div2);
+ }
+ /* hier qp enable */
+ if (p_hevc->num_hier_layer) {
+ reg = 0;
+ reg |= (p_hevc->hier_qp_type & 0x1) << 0x3;
+ reg |= p_hevc->num_hier_layer & 0x7;
+ writel(reg, mfc_regs->e_num_t_layer);
+ /* QP value for each layer */
+ if (p_hevc->hier_qp_enable) {
+ for (i = 0; i < 7; i++)
+ writel(p_hevc->hier_qp_layer[i],
+ mfc_regs->e_hier_qp_layer0 + i * 4);
+ }
+ if (p->rc_frame) {
+ for (i = 0; i < 7; i++)
+ writel(p_hevc->hier_bit_layer[i],
+ mfc_regs->e_hier_bit_rate_layer0
+ + i * 4);
+ }
+ }
+
+ /* rate control config. */
+ reg = readl(mfc_regs->e_rc_config);
+ /* macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= (p->rc_mb << 8);
+ writel(reg, mfc_regs->e_rc_config);
+ /* frame QP */
+ reg &= ~(0xFF);
+ reg |= p_hevc->rc_frame_qp;
+ writel(reg, mfc_regs->e_rc_config);
+
+ /* frame rate */
+ if (p->rc_frame) {
+ reg = 0;
+ reg &= ~(0xFFFF << 16);
+ reg |= ((p_hevc->rc_framerate) << 16);
+ reg &= ~(0xFFFF);
+ reg |= FRAME_DELTA_DEFAULT;
+ writel(reg, mfc_regs->e_rc_frame_rate);
+ }
+
+ /* max & min value of QP */
+ reg = 0;
+ /* max QP */
+ reg &= ~(0xFF << 8);
+ reg |= (p_hevc->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0xFF);
+ reg |= p_hevc->rc_min_qp;
+ writel(reg, mfc_regs->e_rc_qp_bound);
+
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
+ if (!p->rc_frame && !p->rc_mb) {
+ reg = 0;
+ reg &= ~(0xFF << 16);
+ reg |= (p_hevc->rc_b_frame_qp << 16);
+ reg &= ~(0xFF << 8);
+ reg |= (p_hevc->rc_p_frame_qp << 8);
+ reg &= ~(0xFF);
+ reg |= p_hevc->rc_frame_qp;
+ writel(reg, mfc_regs->e_fixed_picture_qp);
+ }
+ mfc_debug_leave();
+
+ return 0;
+}
+
+/* Initialize decoding */
+static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ unsigned int reg = 0;
+ int fmo_aso_ctrl = 0;
+
+ mfc_debug_enter();
+ mfc_debug(2, "InstNo: %d/%d\n", ctx->inst_no,
+ S5P_FIMV_CH_SEQ_HEADER_V6);
+ mfc_debug(2, "BUFs: %08x %08x %08x\n",
+ readl(mfc_regs->d_cpb_buffer_addr),
+ readl(mfc_regs->d_cpb_buffer_addr),
+ readl(mfc_regs->d_cpb_buffer_addr));
+
+ /* FMO_ASO_CTRL - 0: Enable, 1: Disable */
+ reg |= (fmo_aso_ctrl << S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6);
+
+ if (ctx->display_delay_enable) {
+ reg |= (0x1 << S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6);
+ writel(ctx->display_delay, mfc_regs->d_display_delay);
+ }
+
+ if (IS_MFCV7_PLUS(dev) || IS_MFCV6_V2(dev)) {
+ writel(reg, mfc_regs->d_dec_options);
+ reg = 0;
+ }
+
+	/* Set up the loop filter; for decoding this is only valid for MPEG4 */
+ if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC) {
+ mfc_debug(2, "Set loop filter to: %d\n",
+ ctx->loop_filter_mpeg4);
+ reg |= (ctx->loop_filter_mpeg4 <<
+ S5P_FIMV_D_OPT_LF_CTRL_SHIFT_V6);
+ }
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)
+ reg |= (0x1 << S5P_FIMV_D_OPT_TILE_MODE_SHIFT_V6);
+
+ if (IS_MFCV7_PLUS(dev) || IS_MFCV6_V2(dev))
+ writel(reg, mfc_regs->d_init_buffer_options);
+ else
+ writel(reg, mfc_regs->d_dec_options);
+
+ /* 0: NV12(CbCr), 1: NV21(CrCb) */
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M)
+ writel(0x1, mfc_regs->pixel_format);
+ else
+ writel(0x0, mfc_regs->pixel_format);
+
+
+ /* sei parse */
+ writel(ctx->sei_fp_parse & 0x1, mfc_regs->d_sei_enable);
+
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_CH_SEQ_HEADER_V6, NULL);
+
+ mfc_debug_leave();
+ return 0;
+}
+
+static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+
+ if (flush) {
+ dev->curr_ctx = ctx->num;
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_H2R_CMD_FLUSH_V6, NULL);
+ }
+}
+
+/* Decode a single frame */
+static int s5p_mfc_decode_one_frame_v6(struct s5p_mfc_ctx *ctx,
+ enum s5p_mfc_decode_arg last_frame)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+
+ writel(ctx->dec_dst_flag, mfc_regs->d_available_dpb_flag_lower);
+ writel(ctx->slice_interface & 0x1, mfc_regs->d_slice_if_enable);
+
+ writel(ctx->inst_no, mfc_regs->instance_id);
+	/* Issue different commands to the instance based on whether
+	 * this is the last frame or not. */
+ switch (last_frame) {
+ case 0:
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_CH_FRAME_START_V6, NULL);
+ break;
+ case 1:
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_CH_LAST_FRAME_V6, NULL);
+ break;
+ default:
+ mfc_err("Unsupported last frame arg.\n");
+ return -EINVAL;
+ }
+
+ mfc_debug(2, "Decoding a usual frame.\n");
+ return 0;
+}
+
+static int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
+ s5p_mfc_set_enc_params_h264(ctx);
+ else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC)
+ s5p_mfc_set_enc_params_mpeg4(ctx);
+ else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
+ s5p_mfc_set_enc_params_h263(ctx);
+ else if (ctx->codec_mode == S5P_MFC_CODEC_VP8_ENC)
+ s5p_mfc_set_enc_params_vp8(ctx);
+ else if (ctx->codec_mode == S5P_FIMV_CODEC_HEVC_ENC)
+ s5p_mfc_set_enc_params_hevc(ctx);
+ else {
+ mfc_err("Unknown codec for encoding (%x).\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+
+ /* Set stride lengths for v7 & above */
+ if (IS_MFCV7_PLUS(dev)) {
+ writel(ctx->img_width, mfc_regs->e_source_first_plane_stride);
+ writel(ctx->img_width, mfc_regs->e_source_second_plane_stride);
+ }
+
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_CH_SEQ_HEADER_V6, NULL);
+
+ return 0;
+}
+
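+/* When H.264 arbitrary slice order (ASO) is enabled, program the configured
+ * slice order map into the consecutive ASO order registers. */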
+static int s5p_mfc_h264_set_aso_slice_order_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
+ int i;
+
+ if (p_h264->aso) {
+ for (i = 0; i < ARRAY_SIZE(p_h264->aso_slice_order); i++) {
+ writel(p_h264->aso_slice_order[i],
+ mfc_regs->e_h264_aso_slice_order_0 + i * 4);
+ }
+ }
+ return 0;
+}
+
+/* Encode a single frame */
+static int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ int cmd;
+
+ mfc_debug(2, "++\n");
+
+ /* memory structure cur. frame */
+
+ if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
+ s5p_mfc_h264_set_aso_slice_order_v6(ctx);
+
+ s5p_mfc_set_slice_mode(ctx);
+
+ if (ctx->state != MFCINST_FINISHING)
+ cmd = S5P_FIMV_CH_FRAME_START_V6;
+ else
+ cmd = S5P_FIMV_CH_LAST_FRAME_V6;
+
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, cmd, NULL);
+
+ mfc_debug(2, "--\n");
+
+ return 0;
+}
+
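+/* Flush decoding: pass an empty stream buffer and issue the last-frame
+ * command so that the firmware returns the remaining decoded pictures. */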
+static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ s5p_mfc_set_dec_stream_buffer_v6(ctx, 0, 0, 0);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_decode_one_frame_v6(ctx, MFC_DEC_LAST_FRAME);
+}
+
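+/* Feed the next bitstream buffer from the source queue to the hardware and
+ * start decoding one frame; an empty buffer marks the end of the stream. */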
+static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *temp_vb;
+ int last_frame = 0;
+
+ if (ctx->state == MFCINST_FINISHING) {
+ last_frame = MFC_DEC_LAST_FRAME;
+ s5p_mfc_set_dec_stream_buffer_v6(ctx, 0, 0, 0);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_decode_one_frame_v6(ctx, last_frame);
+ return 0;
+ }
+
+ /* Frames are being decoded */
+ if (list_empty(&ctx->src_queue)) {
+ mfc_debug(2, "No src buffers.\n");
+ return -EAGAIN;
+ }
+ /* Get the next source buffer */
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ temp_vb->flags |= MFC_BUF_FLAG_USED;
+ s5p_mfc_set_dec_stream_buffer_v6(ctx,
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+ ctx->consumed_stream,
+ temp_vb->b->vb2_buf.planes[0].bytesused);
+
+ dev->curr_ctx = ctx->num;
+ if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
+ last_frame = 1;
+ mfc_debug(2, "Setting ctx->state to FINISHING\n");
+ ctx->state = MFCINST_FINISHING;
+ }
+ s5p_mfc_decode_one_frame_v6(ctx, last_frame);
+
+ return 0;
+}
+
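+/* Pick the next source and destination buffers and encode one frame. An
+ * empty or EOS-flagged source buffer switches the context to FINISHING. */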
+static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ struct s5p_mfc_buf *src_mb;
+ unsigned long src_y_addr, src_c_addr, dst_addr;
+ /*
+ unsigned int src_y_size, src_c_size;
+ */
+ unsigned int dst_size;
+
+ if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
+ mfc_debug(2, "no src buffers.\n");
+ return -EAGAIN;
+ }
+
+ if (list_empty(&ctx->dst_queue)) {
+ mfc_debug(2, "no dst buffers.\n");
+ return -EAGAIN;
+ }
+
+ if (list_empty(&ctx->src_queue)) {
+ /* send null frame */
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0);
+ src_mb = NULL;
+ } else {
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ src_mb->flags |= MFC_BUF_FLAG_USED;
+ if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0);
+ ctx->state = MFCINST_FINISHING;
+ } else {
+ src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
+
+ mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
+ mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
+
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
+ if (src_mb->flags & MFC_BUF_FLAG_EOS)
+ ctx->state = MFCINST_FINISHING;
+ }
+ }
+
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_mb->flags |= MFC_BUF_FLAG_USED;
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
+
+ s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
+
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_encode_one_frame_v6(ctx);
+
+ return 0;
+}
+
+static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *temp_vb;
+
+ /* Initializing decoding - parsing header */
+ mfc_debug(2, "Preparing to init decoding.\n");
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ mfc_debug(2, "Header size: %d\n", temp_vb->b->vb2_buf.planes[0].bytesused);
+ s5p_mfc_set_dec_stream_buffer_v6(ctx,
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0,
+ temp_vb->b->vb2_buf.planes[0].bytesused);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_init_decode_v6(ctx);
+}
+
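+/* Set up the destination stream buffer and start encoder header generation. */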
+static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long dst_addr;
+ unsigned int dst_size;
+
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
+ s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_init_encode_v6(ctx);
+}
+
+static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ int ret;
+	/* The header has been parsed, now start processing.
+	 * First set up the output frame buffers.
+	 * s5p_mfc_alloc_dec_buffers(ctx); */
+
+ if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
+		mfc_err("It seems that not all destination buffers were\n"
+			"mmapped. MFC requires that all destination buffers\n"
+			"are mmapped before starting processing.\n");
+ return -EAGAIN;
+ }
+
+ dev->curr_ctx = ctx->num;
+ ret = s5p_mfc_set_dec_frame_buffer_v6(ctx);
+ if (ret) {
+ mfc_err("Failed to alloc frame mem.\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ int ret;
+
+ dev->curr_ctx = ctx->num;
+ ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
+ if (ret) {
+ mfc_err("Failed to alloc frame mem.\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+/* Try running an operation on hardware */
+static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_ctx *ctx;
+ int new_ctx;
+ unsigned int ret = 0;
+
+ mfc_debug(1, "Try run dev: %p\n", dev);
+
+ /* Check whether hardware is not running */
+ if (test_and_set_bit(0, &dev->hw_lock) != 0) {
+ /* This is perfectly ok, the scheduled ctx should wait */
+ mfc_debug(1, "Couldn't lock HW.\n");
+ return;
+ }
+
+ /* Choose the context to run */
+ new_ctx = s5p_mfc_get_new_ctx(dev);
+ if (new_ctx < 0) {
+ /* No contexts to run */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
+ mfc_err("Failed to unlock hardware.\n");
+ return;
+ }
+
+ mfc_debug(1, "No ctx is scheduled to be run.\n");
+ return;
+ }
+
+ mfc_debug(1, "New context: %d\n", new_ctx);
+ ctx = dev->ctx[new_ctx];
+ mfc_debug(1, "Setting new context to %p\n", ctx);
+ /* Got context to run in ctx */
+ mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
+ ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
+ mfc_debug(1, "ctx->state=%d\n", ctx->state);
+	/* The last frame has already been sent to the MFC.
+	 * Now obtain frames from the MFC buffer. */
+
+ s5p_mfc_clock_on();
+ s5p_mfc_clean_ctx_int_flags(ctx);
+
+ if (ctx->type == MFCINST_DECODER) {
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ s5p_mfc_run_dec_last_frames(ctx);
+ break;
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_dec_frame(ctx);
+ break;
+ case MFCINST_INIT:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ case MFCINST_HEAD_PARSED:
+ ret = s5p_mfc_run_init_dec_buffers(ctx);
+ break;
+ case MFCINST_FLUSH:
+ s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
+ break;
+ case MFCINST_RES_CHANGE_INIT:
+ s5p_mfc_run_dec_last_frames(ctx);
+ break;
+ case MFCINST_RES_CHANGE_FLUSH:
+ s5p_mfc_run_dec_last_frames(ctx);
+ break;
+ case MFCINST_RES_CHANGE_END:
+ mfc_debug(2, "Finished remaining frames after resolution change.\n");
+ ctx->capture_state = QUEUE_FREE;
+			mfc_debug(2, "Will re-init the codec.\n");
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else if (ctx->type == MFCINST_ENCODER) {
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_enc_frame(ctx);
+ break;
+ case MFCINST_INIT:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
+ ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_enc(ctx);
+ break;
+ case MFCINST_HEAD_PRODUCED:
+ ret = s5p_mfc_run_init_enc_buffers(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else {
+ mfc_err("invalid context type: %d\n", ctx->type);
+ ret = -EAGAIN;
+ }
+
+ if (ret) {
+ /* Free hardware lock */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ mfc_err("Failed to unlock hardware.\n");
+
+		/* This is indeed important: since no operation has been
+		 * scheduled, reduce the clock count here, as nobody else
+		 * will ever do it; no interrupt related to this try_run
+		 * will ever come from the hardware. */
+ s5p_mfc_clock_off();
+ }
+}
+
+static void s5p_mfc_clear_int_flags_v6(struct s5p_mfc_dev *dev)
+{
+ const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ writel(0, mfc_regs->risc2host_command);
+ writel(0, mfc_regs->risc2host_int);
+}
+
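+/* Read an MFC register value with the MFC clock enabled. */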
+static unsigned int
+s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned long ofs)
+{
+ int ret;
+
+ s5p_mfc_clock_on();
+ ret = readl((void __iomem *)ofs);
+ s5p_mfc_clock_off();
+
+ return ret;
+}
+
+static int s5p_mfc_get_dspl_y_adr_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_display_first_plane_addr);
+}
+
+static int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_decoded_first_plane_addr);
+}
+
+static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_display_status);
+}
+
+static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_decoded_status);
+}
+
+static int s5p_mfc_get_dec_frame_type_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_decoded_frame_type) &
+ S5P_FIMV_DECODE_FRAME_MASK_V6;
+}
+
+static int s5p_mfc_get_disp_frame_type_v6(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ return readl(dev->mfc_regs->d_display_frame_type) &
+ S5P_FIMV_DECODE_FRAME_MASK_V6;
+}
+
+static int s5p_mfc_get_consumed_stream_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_decoded_nal_size);
+}
+
+static int s5p_mfc_get_int_reason_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->risc2host_command) &
+ S5P_FIMV_RISC2HOST_CMD_MASK;
+}
+
+static int s5p_mfc_get_int_err_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->error_code);
+}
+
+static int s5p_mfc_err_dec_v6(unsigned int err)
+{
+ return (err & S5P_FIMV_ERR_DEC_MASK_V6) >> S5P_FIMV_ERR_DEC_SHIFT_V6;
+}
+
+static int s5p_mfc_get_img_width_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_display_frame_width);
+}
+
+static int s5p_mfc_get_img_height_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_display_frame_height);
+}
+
+static int s5p_mfc_get_dpb_count_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_min_num_dpb);
+}
+
+static int s5p_mfc_get_mv_count_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_min_num_mv);
+}
+
+static int s5p_mfc_get_min_scratch_buf_size(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->d_min_scratch_buffer_size);
+}
+
+static int s5p_mfc_get_e_min_scratch_buf_size(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->e_min_scratch_buffer_size);
+}
+
+static int s5p_mfc_get_inst_no_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->ret_instance_id);
+}
+
+static int s5p_mfc_get_enc_dpb_count_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->e_num_dpb);
+}
+
+static int s5p_mfc_get_enc_strm_size_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->e_stream_size);
+}
+
+static int s5p_mfc_get_enc_slice_type_v6(struct s5p_mfc_dev *dev)
+{
+ return readl(dev->mfc_regs->e_slice_type);
+}
+
+static unsigned int s5p_mfc_get_pic_type_top_v6(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v6(ctx,
+ (__force unsigned long) ctx->dev->mfc_regs->d_ret_picture_tag_top);
+}
+
+static unsigned int s5p_mfc_get_pic_type_bot_v6(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v6(ctx,
+ (__force unsigned long) ctx->dev->mfc_regs->d_ret_picture_tag_bot);
+}
+
+static unsigned int s5p_mfc_get_crop_info_h_v6(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v6(ctx,
+ (__force unsigned long) ctx->dev->mfc_regs->d_display_crop_info1);
+}
+
+static unsigned int s5p_mfc_get_crop_info_v_v6(struct s5p_mfc_ctx *ctx)
+{
+ return s5p_mfc_read_info_v6(ctx,
+ (__force unsigned long) ctx->dev->mfc_regs->d_display_crop_info2);
+}
+
+static struct s5p_mfc_regs mfc_regs;
+
+/* Initialize registers for MFC v6 onwards */
+const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev)
+{
+ memset(&mfc_regs, 0, sizeof(mfc_regs));
+
+#define S5P_MFC_REG_ADDR(dev, reg) ((dev)->regs_base + (reg))
+#define R(m, r) mfc_regs.m = S5P_MFC_REG_ADDR(dev, r)
+ /* codec common registers */
+ R(risc_on, S5P_FIMV_RISC_ON_V6);
+ R(risc2host_int, S5P_FIMV_RISC2HOST_INT_V6);
+ R(host2risc_int, S5P_FIMV_HOST2RISC_INT_V6);
+ R(risc_base_address, S5P_FIMV_RISC_BASE_ADDRESS_V6);
+ R(mfc_reset, S5P_FIMV_MFC_RESET_V6);
+ R(host2risc_command, S5P_FIMV_HOST2RISC_CMD_V6);
+ R(risc2host_command, S5P_FIMV_RISC2HOST_CMD_V6);
+ R(firmware_version, S5P_FIMV_FW_VERSION_V6);
+ R(instance_id, S5P_FIMV_INSTANCE_ID_V6);
+ R(codec_type, S5P_FIMV_CODEC_TYPE_V6);
+ R(context_mem_addr, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
+ R(context_mem_size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
+ R(pixel_format, S5P_FIMV_PIXEL_FORMAT_V6);
+ R(ret_instance_id, S5P_FIMV_RET_INSTANCE_ID_V6);
+ R(error_code, S5P_FIMV_ERROR_CODE_V6);
+
+ /* decoder registers */
+ R(d_crc_ctrl, S5P_FIMV_D_CRC_CTRL_V6);
+ R(d_dec_options, S5P_FIMV_D_DEC_OPTIONS_V6);
+ R(d_display_delay, S5P_FIMV_D_DISPLAY_DELAY_V6);
+ R(d_sei_enable, S5P_FIMV_D_SEI_ENABLE_V6);
+ R(d_min_num_dpb, S5P_FIMV_D_MIN_NUM_DPB_V6);
+ R(d_min_num_mv, S5P_FIMV_D_MIN_NUM_MV_V6);
+ R(d_mvc_num_views, S5P_FIMV_D_MVC_NUM_VIEWS_V6);
+ R(d_num_dpb, S5P_FIMV_D_NUM_DPB_V6);
+ R(d_num_mv, S5P_FIMV_D_NUM_MV_V6);
+ R(d_init_buffer_options, S5P_FIMV_D_INIT_BUFFER_OPTIONS_V6);
+ R(d_first_plane_dpb_size, S5P_FIMV_D_LUMA_DPB_SIZE_V6);
+ R(d_second_plane_dpb_size, S5P_FIMV_D_CHROMA_DPB_SIZE_V6);
+ R(d_mv_buffer_size, S5P_FIMV_D_MV_BUFFER_SIZE_V6);
+ R(d_first_plane_dpb, S5P_FIMV_D_LUMA_DPB_V6);
+ R(d_second_plane_dpb, S5P_FIMV_D_CHROMA_DPB_V6);
+ R(d_mv_buffer, S5P_FIMV_D_MV_BUFFER_V6);
+ R(d_scratch_buffer_addr, S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V6);
+ R(d_scratch_buffer_size, S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V6);
+ R(d_cpb_buffer_addr, S5P_FIMV_D_CPB_BUFFER_ADDR_V6);
+ R(d_cpb_buffer_size, S5P_FIMV_D_CPB_BUFFER_SIZE_V6);
+ R(d_available_dpb_flag_lower, S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V6);
+ R(d_cpb_buffer_offset, S5P_FIMV_D_CPB_BUFFER_OFFSET_V6);
+ R(d_slice_if_enable, S5P_FIMV_D_SLICE_IF_ENABLE_V6);
+ R(d_stream_data_size, S5P_FIMV_D_STREAM_DATA_SIZE_V6);
+ R(d_display_frame_width, S5P_FIMV_D_DISPLAY_FRAME_WIDTH_V6);
+ R(d_display_frame_height, S5P_FIMV_D_DISPLAY_FRAME_HEIGHT_V6);
+ R(d_display_status, S5P_FIMV_D_DISPLAY_STATUS_V6);
+ R(d_display_first_plane_addr, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6);
+ R(d_display_second_plane_addr, S5P_FIMV_D_DISPLAY_CHROMA_ADDR_V6);
+ R(d_display_frame_type, S5P_FIMV_D_DISPLAY_FRAME_TYPE_V6);
+ R(d_display_crop_info1, S5P_FIMV_D_DISPLAY_CROP_INFO1_V6);
+ R(d_display_crop_info2, S5P_FIMV_D_DISPLAY_CROP_INFO2_V6);
+ R(d_display_aspect_ratio, S5P_FIMV_D_DISPLAY_ASPECT_RATIO_V6);
+ R(d_display_extended_ar, S5P_FIMV_D_DISPLAY_EXTENDED_AR_V6);
+ R(d_decoded_status, S5P_FIMV_D_DECODED_STATUS_V6);
+ R(d_decoded_first_plane_addr, S5P_FIMV_D_DECODED_LUMA_ADDR_V6);
+ R(d_decoded_second_plane_addr, S5P_FIMV_D_DECODED_CHROMA_ADDR_V6);
+ R(d_decoded_frame_type, S5P_FIMV_D_DECODED_FRAME_TYPE_V6);
+ R(d_decoded_nal_size, S5P_FIMV_D_DECODED_NAL_SIZE_V6);
+ R(d_ret_picture_tag_top, S5P_FIMV_D_RET_PICTURE_TAG_TOP_V6);
+ R(d_ret_picture_tag_bot, S5P_FIMV_D_RET_PICTURE_TAG_BOT_V6);
+ R(d_h264_info, S5P_FIMV_D_H264_INFO_V6);
+ R(d_mvc_view_id, S5P_FIMV_D_MVC_VIEW_ID_V6);
+ R(d_frame_pack_sei_avail, S5P_FIMV_D_FRAME_PACK_SEI_AVAIL_V6);
+
+ /* encoder registers */
+ R(e_frame_width, S5P_FIMV_E_FRAME_WIDTH_V6);
+ R(e_frame_height, S5P_FIMV_E_FRAME_HEIGHT_V6);
+ R(e_cropped_frame_width, S5P_FIMV_E_CROPPED_FRAME_WIDTH_V6);
+ R(e_cropped_frame_height, S5P_FIMV_E_CROPPED_FRAME_HEIGHT_V6);
+ R(e_frame_crop_offset, S5P_FIMV_E_FRAME_CROP_OFFSET_V6);
+ R(e_enc_options, S5P_FIMV_E_ENC_OPTIONS_V6);
+ R(e_picture_profile, S5P_FIMV_E_PICTURE_PROFILE_V6);
+ R(e_vbv_buffer_size, S5P_FIMV_E_VBV_BUFFER_SIZE_V6);
+ R(e_vbv_init_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6);
+ R(e_fixed_picture_qp, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
+ R(e_rc_config, S5P_FIMV_E_RC_CONFIG_V6);
+ R(e_rc_qp_bound, S5P_FIMV_E_RC_QP_BOUND_V6);
+ R(e_rc_mode, S5P_FIMV_E_RC_RPARAM_V6);
+ R(e_mb_rc_config, S5P_FIMV_E_MB_RC_CONFIG_V6);
+ R(e_padding_ctrl, S5P_FIMV_E_PADDING_CTRL_V6);
+ R(e_mv_hor_range, S5P_FIMV_E_MV_HOR_RANGE_V6);
+ R(e_mv_ver_range, S5P_FIMV_E_MV_VER_RANGE_V6);
+ R(e_num_dpb, S5P_FIMV_E_NUM_DPB_V6);
+ R(e_luma_dpb, S5P_FIMV_E_LUMA_DPB_V6);
+ R(e_chroma_dpb, S5P_FIMV_E_CHROMA_DPB_V6);
+ R(e_me_buffer, S5P_FIMV_E_ME_BUFFER_V6);
+ R(e_scratch_buffer_addr, S5P_FIMV_E_SCRATCH_BUFFER_ADDR_V6);
+ R(e_scratch_buffer_size, S5P_FIMV_E_SCRATCH_BUFFER_SIZE_V6);
+ R(e_tmv_buffer0, S5P_FIMV_E_TMV_BUFFER0_V6);
+ R(e_tmv_buffer1, S5P_FIMV_E_TMV_BUFFER1_V6);
+ R(e_source_first_plane_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6);
+ R(e_source_second_plane_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
+ R(e_stream_buffer_addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6);
+ R(e_stream_buffer_size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6);
+ R(e_roi_buffer_addr, S5P_FIMV_E_ROI_BUFFER_ADDR_V6);
+ R(e_param_change, S5P_FIMV_E_PARAM_CHANGE_V6);
+ R(e_ir_size, S5P_FIMV_E_IR_SIZE_V6);
+ R(e_gop_config, S5P_FIMV_E_GOP_CONFIG_V6);
+ R(e_mslice_mode, S5P_FIMV_E_MSLICE_MODE_V6);
+ R(e_mslice_size_mb, S5P_FIMV_E_MSLICE_SIZE_MB_V6);
+ R(e_mslice_size_bits, S5P_FIMV_E_MSLICE_SIZE_BITS_V6);
+ R(e_frame_insertion, S5P_FIMV_E_FRAME_INSERTION_V6);
+ R(e_rc_frame_rate, S5P_FIMV_E_RC_FRAME_RATE_V6);
+ R(e_rc_bit_rate, S5P_FIMV_E_RC_BIT_RATE_V6);
+ R(e_rc_roi_ctrl, S5P_FIMV_E_RC_ROI_CTRL_V6);
+ R(e_picture_tag, S5P_FIMV_E_PICTURE_TAG_V6);
+ R(e_bit_count_enable, S5P_FIMV_E_BIT_COUNT_ENABLE_V6);
+ R(e_max_bit_count, S5P_FIMV_E_MAX_BIT_COUNT_V6);
+ R(e_min_bit_count, S5P_FIMV_E_MIN_BIT_COUNT_V6);
+ R(e_metadata_buffer_addr, S5P_FIMV_E_METADATA_BUFFER_ADDR_V6);
+ R(e_metadata_buffer_size, S5P_FIMV_E_METADATA_BUFFER_SIZE_V6);
+ R(e_encoded_source_first_plane_addr,
+ S5P_FIMV_E_ENCODED_SOURCE_LUMA_ADDR_V6);
+ R(e_encoded_source_second_plane_addr,
+ S5P_FIMV_E_ENCODED_SOURCE_CHROMA_ADDR_V6);
+ R(e_stream_size, S5P_FIMV_E_STREAM_SIZE_V6);
+ R(e_slice_type, S5P_FIMV_E_SLICE_TYPE_V6);
+ R(e_picture_count, S5P_FIMV_E_PICTURE_COUNT_V6);
+ R(e_ret_picture_tag, S5P_FIMV_E_RET_PICTURE_TAG_V6);
+ R(e_recon_luma_dpb_addr, S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
+ R(e_recon_chroma_dpb_addr, S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);
+ R(e_mpeg4_options, S5P_FIMV_E_MPEG4_OPTIONS_V6);
+ R(e_mpeg4_hec_period, S5P_FIMV_E_MPEG4_HEC_PERIOD_V6);
+ R(e_aspect_ratio, S5P_FIMV_E_ASPECT_RATIO_V6);
+ R(e_extended_sar, S5P_FIMV_E_EXTENDED_SAR_V6);
+ R(e_h264_options, S5P_FIMV_E_H264_OPTIONS_V6);
+ R(e_h264_lf_alpha_offset, S5P_FIMV_E_H264_LF_ALPHA_OFFSET_V6);
+ R(e_h264_lf_beta_offset, S5P_FIMV_E_H264_LF_BETA_OFFSET_V6);
+ R(e_h264_i_period, S5P_FIMV_E_H264_I_PERIOD_V6);
+ R(e_h264_fmo_slice_grp_map_type,
+ S5P_FIMV_E_H264_FMO_SLICE_GRP_MAP_TYPE_V6);
+ R(e_h264_fmo_num_slice_grp_minus1,
+ S5P_FIMV_E_H264_FMO_NUM_SLICE_GRP_MINUS1_V6);
+ R(e_h264_fmo_slice_grp_change_dir,
+ S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_DIR_V6);
+ R(e_h264_fmo_slice_grp_change_rate_minus1,
+ S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_RATE_MINUS1_V6);
+ R(e_h264_fmo_run_length_minus1_0,
+ S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_0_V6);
+ R(e_h264_aso_slice_order_0, S5P_FIMV_E_H264_ASO_SLICE_ORDER_0_V6);
+ R(e_h264_num_t_layer, S5P_FIMV_E_H264_NUM_T_LAYER_V6);
+ R(e_h264_hierarchical_qp_layer0,
+ S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER0_V6);
+ R(e_h264_frame_packing_sei_info,
+ S5P_FIMV_E_H264_FRAME_PACKING_SEI_INFO_V6);
+
+ if (!IS_MFCV7_PLUS(dev))
+ goto done;
+
+ /* Initialize registers used in MFC v7+ */
+ R(e_source_first_plane_addr, S5P_FIMV_E_SOURCE_FIRST_ADDR_V7);
+ R(e_source_second_plane_addr, S5P_FIMV_E_SOURCE_SECOND_ADDR_V7);
+ R(e_source_third_plane_addr, S5P_FIMV_E_SOURCE_THIRD_ADDR_V7);
+ R(e_source_first_plane_stride, S5P_FIMV_E_SOURCE_FIRST_STRIDE_V7);
+ R(e_source_second_plane_stride, S5P_FIMV_E_SOURCE_SECOND_STRIDE_V7);
+ R(e_source_third_plane_stride, S5P_FIMV_E_SOURCE_THIRD_STRIDE_V7);
+ R(e_encoded_source_first_plane_addr,
+ S5P_FIMV_E_ENCODED_SOURCE_FIRST_ADDR_V7);
+ R(e_encoded_source_second_plane_addr,
+ S5P_FIMV_E_ENCODED_SOURCE_SECOND_ADDR_V7);
+ R(e_vp8_options, S5P_FIMV_E_VP8_OPTIONS_V7);
+
+ if (!IS_MFCV8_PLUS(dev))
+ goto done;
+
+ /* Initialize registers used in MFC v8 only.
+ * Also, over-write the registers which have
+ * a different offset for MFC v8. */
+ R(d_stream_data_size, S5P_FIMV_D_STREAM_DATA_SIZE_V8);
+ R(d_cpb_buffer_addr, S5P_FIMV_D_CPB_BUFFER_ADDR_V8);
+ R(d_cpb_buffer_size, S5P_FIMV_D_CPB_BUFFER_SIZE_V8);
+ R(d_cpb_buffer_offset, S5P_FIMV_D_CPB_BUFFER_OFFSET_V8);
+ R(d_first_plane_dpb_size, S5P_FIMV_D_FIRST_PLANE_DPB_SIZE_V8);
+ R(d_second_plane_dpb_size, S5P_FIMV_D_SECOND_PLANE_DPB_SIZE_V8);
+ R(d_scratch_buffer_addr, S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V8);
+ R(d_scratch_buffer_size, S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V8);
+ R(d_first_plane_dpb_stride_size,
+ S5P_FIMV_D_FIRST_PLANE_DPB_STRIDE_SIZE_V8);
+ R(d_second_plane_dpb_stride_size,
+ S5P_FIMV_D_SECOND_PLANE_DPB_STRIDE_SIZE_V8);
+ R(d_mv_buffer_size, S5P_FIMV_D_MV_BUFFER_SIZE_V8);
+ R(d_num_mv, S5P_FIMV_D_NUM_MV_V8);
+ R(d_first_plane_dpb, S5P_FIMV_D_FIRST_PLANE_DPB_V8);
+ R(d_second_plane_dpb, S5P_FIMV_D_SECOND_PLANE_DPB_V8);
+ R(d_mv_buffer, S5P_FIMV_D_MV_BUFFER_V8);
+ R(d_init_buffer_options, S5P_FIMV_D_INIT_BUFFER_OPTIONS_V8);
+ R(d_available_dpb_flag_lower, S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V8);
+ R(d_slice_if_enable, S5P_FIMV_D_SLICE_IF_ENABLE_V8);
+ R(d_display_first_plane_addr, S5P_FIMV_D_DISPLAY_FIRST_PLANE_ADDR_V8);
+ R(d_display_second_plane_addr, S5P_FIMV_D_DISPLAY_SECOND_PLANE_ADDR_V8);
+ R(d_decoded_first_plane_addr, S5P_FIMV_D_DECODED_FIRST_PLANE_ADDR_V8);
+ R(d_decoded_second_plane_addr, S5P_FIMV_D_DECODED_SECOND_PLANE_ADDR_V8);
+ R(d_display_status, S5P_FIMV_D_DISPLAY_STATUS_V8);
+ R(d_decoded_status, S5P_FIMV_D_DECODED_STATUS_V8);
+ R(d_decoded_frame_type, S5P_FIMV_D_DECODED_FRAME_TYPE_V8);
+ R(d_display_frame_type, S5P_FIMV_D_DISPLAY_FRAME_TYPE_V8);
+ R(d_decoded_nal_size, S5P_FIMV_D_DECODED_NAL_SIZE_V8);
+ R(d_display_frame_width, S5P_FIMV_D_DISPLAY_FRAME_WIDTH_V8);
+ R(d_display_frame_height, S5P_FIMV_D_DISPLAY_FRAME_HEIGHT_V8);
+ R(d_frame_pack_sei_avail, S5P_FIMV_D_FRAME_PACK_SEI_AVAIL_V8);
+ R(d_mvc_num_views, S5P_FIMV_D_MVC_NUM_VIEWS_V8);
+ R(d_mvc_view_id, S5P_FIMV_D_MVC_VIEW_ID_V8);
+ R(d_ret_picture_tag_top, S5P_FIMV_D_RET_PICTURE_TAG_TOP_V8);
+ R(d_ret_picture_tag_bot, S5P_FIMV_D_RET_PICTURE_TAG_BOT_V8);
+ R(d_display_crop_info1, S5P_FIMV_D_DISPLAY_CROP_INFO1_V8);
+ R(d_display_crop_info2, S5P_FIMV_D_DISPLAY_CROP_INFO2_V8);
+ R(d_min_scratch_buffer_size, S5P_FIMV_D_MIN_SCRATCH_BUFFER_SIZE_V8);
+
+ /* encoder registers */
+ R(e_padding_ctrl, S5P_FIMV_E_PADDING_CTRL_V8);
+ R(e_rc_config, S5P_FIMV_E_RC_CONFIG_V8);
+ R(e_rc_mode, S5P_FIMV_E_RC_RPARAM_V8);
+ R(e_mv_hor_range, S5P_FIMV_E_MV_HOR_RANGE_V8);
+ R(e_mv_ver_range, S5P_FIMV_E_MV_VER_RANGE_V8);
+ R(e_rc_qp_bound, S5P_FIMV_E_RC_QP_BOUND_V8);
+ R(e_fixed_picture_qp, S5P_FIMV_E_FIXED_PICTURE_QP_V8);
+ R(e_vbv_buffer_size, S5P_FIMV_E_VBV_BUFFER_SIZE_V8);
+ R(e_vbv_init_delay, S5P_FIMV_E_VBV_INIT_DELAY_V8);
+ R(e_mb_rc_config, S5P_FIMV_E_MB_RC_CONFIG_V8);
+ R(e_aspect_ratio, S5P_FIMV_E_ASPECT_RATIO_V8);
+ R(e_extended_sar, S5P_FIMV_E_EXTENDED_SAR_V8);
+ R(e_h264_options, S5P_FIMV_E_H264_OPTIONS_V8);
+ R(e_min_scratch_buffer_size, S5P_FIMV_E_MIN_SCRATCH_BUFFER_SIZE_V8);
+
+ if (!IS_MFCV10(dev))
+ goto done;
+
+ /* Initialize registers used in MFC v10 only.
+ * Also, over-write the registers which have
+ * a different offset for MFC v10.
+ */
+
+ /* decoder registers */
+ R(d_static_buffer_addr, S5P_FIMV_D_STATIC_BUFFER_ADDR_V10);
+ R(d_static_buffer_size, S5P_FIMV_D_STATIC_BUFFER_SIZE_V10);
+
+ /* encoder registers */
+ R(e_num_t_layer, S5P_FIMV_E_NUM_T_LAYER_V10);
+ R(e_hier_qp_layer0, S5P_FIMV_E_HIERARCHICAL_QP_LAYER0_V10);
+ R(e_hier_bit_rate_layer0, S5P_FIMV_E_HIERARCHICAL_BIT_RATE_LAYER0_V10);
+ R(e_hevc_options, S5P_FIMV_E_HEVC_OPTIONS_V10);
+ R(e_hevc_refresh_period, S5P_FIMV_E_HEVC_REFRESH_PERIOD_V10);
+ R(e_hevc_lf_beta_offset_div2, S5P_FIMV_E_HEVC_LF_BETA_OFFSET_DIV2_V10);
+ R(e_hevc_lf_tc_offset_div2, S5P_FIMV_E_HEVC_LF_TC_OFFSET_DIV2_V10);
+ R(e_hevc_nal_control, S5P_FIMV_E_HEVC_NAL_CONTROL_V10);
+
+done:
+ return &mfc_regs;
+#undef S5P_MFC_REG_ADDR
+#undef R
+}
+
+/* Initialize opr function pointers for MFC v6 */
+static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = {
+ .alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v6,
+ .release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v6,
+ .alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v6,
+ .release_codec_buffers = s5p_mfc_release_codec_buffers_v6,
+ .alloc_instance_buffer = s5p_mfc_alloc_instance_buffer_v6,
+ .release_instance_buffer = s5p_mfc_release_instance_buffer_v6,
+ .alloc_dev_context_buffer =
+ s5p_mfc_alloc_dev_context_buffer_v6,
+ .release_dev_context_buffer =
+ s5p_mfc_release_dev_context_buffer_v6,
+ .dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v6,
+ .enc_calc_src_size = s5p_mfc_enc_calc_src_size_v6,
+ .set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v6,
+ .set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v6,
+ .get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v6,
+ .try_run = s5p_mfc_try_run_v6,
+ .clear_int_flags = s5p_mfc_clear_int_flags_v6,
+ .get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v6,
+ .get_dec_y_adr = s5p_mfc_get_dec_y_adr_v6,
+ .get_dspl_status = s5p_mfc_get_dspl_status_v6,
+ .get_dec_status = s5p_mfc_get_dec_status_v6,
+ .get_dec_frame_type = s5p_mfc_get_dec_frame_type_v6,
+ .get_disp_frame_type = s5p_mfc_get_disp_frame_type_v6,
+ .get_consumed_stream = s5p_mfc_get_consumed_stream_v6,
+ .get_int_reason = s5p_mfc_get_int_reason_v6,
+ .get_int_err = s5p_mfc_get_int_err_v6,
+ .err_dec = s5p_mfc_err_dec_v6,
+ .get_img_width = s5p_mfc_get_img_width_v6,
+ .get_img_height = s5p_mfc_get_img_height_v6,
+ .get_dpb_count = s5p_mfc_get_dpb_count_v6,
+ .get_mv_count = s5p_mfc_get_mv_count_v6,
+ .get_inst_no = s5p_mfc_get_inst_no_v6,
+ .get_enc_strm_size = s5p_mfc_get_enc_strm_size_v6,
+ .get_enc_slice_type = s5p_mfc_get_enc_slice_type_v6,
+ .get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v6,
+ .get_pic_type_top = s5p_mfc_get_pic_type_top_v6,
+ .get_pic_type_bot = s5p_mfc_get_pic_type_bot_v6,
+ .get_crop_info_h = s5p_mfc_get_crop_info_h_v6,
+ .get_crop_info_v = s5p_mfc_get_crop_info_v_v6,
+ .get_min_scratch_buf_size = s5p_mfc_get_min_scratch_buf_size,
+ .get_e_min_scratch_buf_size = s5p_mfc_get_e_min_scratch_buf_size,
+};
+
+struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void)
+{
+ return &s5p_mfc_ops_v6;
+}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
new file mode 100644
index 000000000..f013b291a
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
@@ -0,0 +1,60 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * Contains declarations of hw related functions.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_OPR_V6_H_
+#define S5P_MFC_OPR_V6_H_
+
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_opr.h"
+
+#define MFC_CTRL_MODE_CUSTOM MFC_CTRL_MODE_SFR
+
+#define MB_WIDTH(x_size) DIV_ROUND_UP(x_size, 16)
+#define MB_HEIGHT(y_size) DIV_ROUND_UP(y_size, 16)
+#define S5P_MFC_DEC_MV_SIZE_V6(x, y) (MB_WIDTH(x) * \
+ (((MB_HEIGHT(y)+1)/2)*2) * 64 + 128)
+#define S5P_MFC_DEC_MV_SIZE_V10(x, y) (MB_WIDTH(x) * \
+ (((MB_HEIGHT(y)+1)/2)*2) * 64 + 512)
+#define S5P_MFC_LCU_WIDTH(x_size) DIV_ROUND_UP(x_size, 32)
+#define S5P_MFC_LCU_HEIGHT(y_size) DIV_ROUND_UP(y_size, 32)
+
+#define s5p_mfc_dec_hevc_mv_size(x, y) \
+ (DIV_ROUND_UP(x, 64) * DIV_ROUND_UP(y, 64) * 256 + 512)
+
+/* Definition */
+#define ENC_MULTI_SLICE_MB_MAX ((1 << 30) - 1)
+#define ENC_MULTI_SLICE_BIT_MIN 2800
+#define ENC_INTRA_REFRESH_MB_MAX ((1 << 18) - 1)
+#define ENC_VBV_BUF_SIZE_MAX ((1 << 30) - 1)
+#define ENC_H264_LOOP_FILTER_AB_MIN -12
+#define ENC_H264_LOOP_FILTER_AB_MAX 12
+#define ENC_H264_RC_FRAME_RATE_MAX ((1 << 16) - 1)
+#define ENC_H263_RC_FRAME_RATE_MAX ((1 << 16) - 1)
+#define ENC_H264_PROFILE_MAX 3
+#define ENC_H264_LEVEL_MAX 42
+#define ENC_MPEG4_VOP_TIME_RES_MAX ((1 << 16) - 1)
+#define FRAME_DELTA_H264_H263 1
+#define TIGHT_CBR_MAX 10
+#define ENC_HEVC_RC_FRAME_RATE_MAX ((1 << 16) - 1)
+#define ENC_HEVC_QP_INDEX_MIN -12
+#define ENC_HEVC_QP_INDEX_MAX 12
+#define ENC_HEVC_LOOP_FILTER_MIN -12
+#define ENC_HEVC_LOOP_FILTER_MAX 12
+#define ENC_HEVC_LEVEL_MAX 62
+
+#define FRAME_DELTA_DEFAULT 1
+
+struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void);
+const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev);
+#endif /* S5P_MFC_OPR_V6_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
new file mode 100644
index 000000000..95abf2bd7
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -0,0 +1,125 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_pm.h"
+
+static struct s5p_mfc_pm *pm;
+static struct s5p_mfc_dev *p_dev;
+static atomic_t clk_ref;
+
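+/* Acquire the clocks listed in the variant data and enable runtime PM.
+ * All clocks except the first one are optional. */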
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
+{
+ int i;
+
+ pm = &dev->pm;
+ p_dev = dev;
+
+ pm->num_clocks = dev->variant->num_clocks;
+ pm->clk_names = dev->variant->clk_names;
+ pm->device = &dev->plat_dev->dev;
+ pm->clock_gate = NULL;
+
+ /* clock control */
+ for (i = 0; i < pm->num_clocks; i++) {
+ pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
+ if (IS_ERR(pm->clocks[i])) {
+ /* additional clocks are optional */
+ if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) {
+ pm->clocks[i] = NULL;
+ continue;
+ }
+ mfc_err("Failed to get clock: %s\n",
+ pm->clk_names[i]);
+ return PTR_ERR(pm->clocks[i]);
+ }
+ }
+
+ if (dev->variant->use_clock_gating)
+ pm->clock_gate = pm->clocks[0];
+
+ pm_runtime_enable(pm->device);
+ atomic_set(&clk_ref, 0);
+ return 0;
+}
+
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
+{
+ pm_runtime_disable(pm->device);
+}
+
+int s5p_mfc_clock_on(void)
+{
+ atomic_inc(&clk_ref);
+ mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
+
+ return clk_enable(pm->clock_gate);
+}
+
+void s5p_mfc_clock_off(void)
+{
+ atomic_dec(&clk_ref);
+ mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
+
+ clk_disable(pm->clock_gate);
+}
+
+int s5p_mfc_power_on(void)
+{
+ int i, ret = 0;
+
+ ret = pm_runtime_get_sync(pm->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(pm->device);
+ return ret;
+ }
+
+ /* clock control */
+ for (i = 0; i < pm->num_clocks; i++) {
+ ret = clk_prepare_enable(pm->clocks[i]);
+ if (ret < 0) {
+ mfc_err("clock prepare failed for clock: %s\n",
+ pm->clk_names[i]);
+ i++;
+ goto err;
+ }
+ }
+
+ /* prepare for software clock gating */
+ clk_disable(pm->clock_gate);
+
+ return 0;
+err:
+ while (--i > 0)
+ clk_disable_unprepare(pm->clocks[i]);
+ pm_runtime_put(pm->device);
+ return ret;
+}
+
+int s5p_mfc_power_off(void)
+{
+ int i;
+
+ /* finish software clock gating */
+ clk_enable(pm->clock_gate);
+
+ for (i = 0; i < pm->num_clocks; i++)
+ clk_disable_unprepare(pm->clocks[i]);
+
+ return pm_runtime_put_sync(pm->device);
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
new file mode 100644
index 000000000..875c5346b
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
@@ -0,0 +1,24 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_PM_H_
+#define S5P_MFC_PM_H_
+
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev);
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_clock_on(void);
+void s5p_mfc_clock_off(void);
+int s5p_mfc_power_on(void);
+int s5p_mfc_power_off(void);
+
+#endif /* S5P_MFC_PM_H_ */
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
new file mode 100644
index 000000000..03ee9839a
--- /dev/null
+++ b/drivers/media/platform/sh_veu.c
@@ -0,0 +1,1208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sh-mobile VEU mem2mem driver
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ * Copyright (C) 2008 Magnus Damm
+ */
+
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define VEU_STR 0x00 /* start register */
+#define VEU_SWR 0x10 /* src: line length */
+#define VEU_SSR 0x14 /* src: image size */
+#define VEU_SAYR 0x18 /* src: y/rgb plane address */
+#define VEU_SACR 0x1c /* src: c plane address */
+#define VEU_BSSR 0x20 /* bundle mode register */
+#define VEU_EDWR 0x30 /* dst: line length */
+#define VEU_DAYR 0x34 /* dst: y/rgb plane address */
+#define VEU_DACR 0x38 /* dst: c plane address */
+#define VEU_TRCR 0x50 /* transform control */
+#define VEU_RFCR 0x54 /* resize scale */
+#define VEU_RFSR 0x58 /* resize clip */
+#define VEU_ENHR 0x5c /* enhance */
+#define VEU_FMCR 0x70 /* filter mode */
+#define VEU_VTCR 0x74 /* lowpass vertical */
+#define VEU_HTCR 0x78 /* lowpass horizontal */
+#define VEU_APCR 0x80 /* color match */
+#define VEU_ECCR 0x84 /* color replace */
+#define VEU_AFXR 0x90 /* fixed mode */
+#define VEU_SWPR 0x94 /* swap */
+#define VEU_EIER 0xa0 /* interrupt mask */
+#define VEU_EVTR 0xa4 /* interrupt event */
+#define VEU_STAR 0xb0 /* status */
+#define VEU_BSRR 0xb4 /* reset */
+
+#define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
+#define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
+#define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
+#define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
+#define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
+#define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
+#define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
+#define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
+#define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
+#define VEU_COFFR 0x224 /* color conversion offset */
+#define VEU_CBR 0x228 /* color conversion clip */
+
+/*
+ * 4092x4092 max size is the normal case. In some cases it can be reduced to
+ * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
+ */
+#define MAX_W 4092
+#define MAX_H 4092
+#define MIN_W 8
+#define MIN_H 8
+#define ALIGN_W 4
+
+/* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
+#define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)
+
+#define MEM2MEM_DEF_TRANSLEN 1
+
+struct sh_veu_dev;
+
+struct sh_veu_file {
+ struct sh_veu_dev *veu_dev;
+ bool cfg_needed;
+};
+
+struct sh_veu_format {
+ char *name;
+ u32 fourcc;
+ unsigned int depth;
+ unsigned int ydepth;
+};
+
+/* video data format */
+struct sh_veu_vfmt {
+ /* Replace with v4l2_rect */
+ struct v4l2_rect frame;
+ unsigned int bytesperline;
+ unsigned int offset_y;
+ unsigned int offset_c;
+ const struct sh_veu_format *fmt;
+};
+
+struct sh_veu_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct device *dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct sh_veu_vfmt vfmt_out;
+ struct sh_veu_vfmt vfmt_in;
+ /* Only single user per direction so far */
+ struct sh_veu_file *capture;
+ struct sh_veu_file *output;
+ struct mutex fop_lock;
+ void __iomem *base;
+ spinlock_t lock;
+ bool is_2h;
+ unsigned int xaction;
+ bool aborting;
+};
+
+enum sh_veu_fmt_idx {
+ SH_VEU_FMT_NV12,
+ SH_VEU_FMT_NV16,
+ SH_VEU_FMT_NV24,
+ SH_VEU_FMT_RGB332,
+ SH_VEU_FMT_RGB444,
+ SH_VEU_FMT_RGB565,
+ SH_VEU_FMT_RGB666,
+ SH_VEU_FMT_RGB24,
+};
+
+#define DEFAULT_IN_WIDTH VGA_WIDTH
+#define DEFAULT_IN_HEIGHT VGA_HEIGHT
+#define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
+#define DEFAULT_OUT_WIDTH VGA_WIDTH
+#define DEFAULT_OUT_HEIGHT VGA_HEIGHT
+#define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
+
+/*
+ * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
+ * aligned for NV24.
+ */
+static const struct sh_veu_format sh_veu_fmt[] = {
+ [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
+ [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
+ [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
+ [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
+ [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
+ [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
+ [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
+ [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
+};
+
+#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
+ .frame = { \
+ .width = VGA_WIDTH, \
+ .height = VGA_HEIGHT, \
+ }, \
+ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
+ .fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
+}
+
+#define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
+ .frame = { \
+ .width = VGA_WIDTH, \
+ .height = VGA_HEIGHT, \
+ }, \
+ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
+ .fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
+}
+
+/*
+ * TODO: add support for further output formats:
+ * SH_VEU_FMT_NV12,
+ * SH_VEU_FMT_NV16,
+ * SH_VEU_FMT_NV24,
+ * SH_VEU_FMT_RGB332,
+ * SH_VEU_FMT_RGB444,
+ * SH_VEU_FMT_RGB666,
+ * SH_VEU_FMT_RGB24,
+ */
+
+static const int sh_veu_fmt_out[] = {
+ SH_VEU_FMT_RGB565,
+};
+
+/*
+ * TODO: add support for further input formats:
+ * SH_VEU_FMT_NV16,
+ * SH_VEU_FMT_NV24,
+ * SH_VEU_FMT_RGB565,
+ * SH_VEU_FMT_RGB666,
+ * SH_VEU_FMT_RGB24,
+ */
+static const int sh_veu_fmt_in[] = {
+ SH_VEU_FMT_NV12,
+};
+
+static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
+{
+ switch (fourcc) {
+ default:
+ BUG();
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ return V4L2_COLORSPACE_SMPTE170M;
+ case V4L2_PIX_FMT_RGB332:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR666:
+ case V4L2_PIX_FMT_RGB24:
+ return V4L2_COLORSPACE_SRGB;
+ }
+}
+
+static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
+{
+ return ioread32(veu->base + reg);
+}
+
+static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
+ u32 value)
+{
+ iowrite32(value, veu->base + reg);
+}
+
+ /* ========== mem2mem callbacks ========== */
+
+static void sh_veu_job_abort(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ veu->aborting = true;
+}
+
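+/* Program the source and destination plane addresses, start the VEU and
+ * enable its completion interrupt. */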
+static void sh_veu_process(struct sh_veu_dev *veu,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
+{
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+
+ sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
+ sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
+ addr + veu->vfmt_out.offset_c : 0);
+ dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
+ (unsigned long)addr,
+ veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);
+
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
+ sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
+ addr + veu->vfmt_in.offset_c : 0);
+ dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
+ (unsigned long)addr,
+ veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);
+
+ sh_veu_reg_write(veu, VEU_STR, 1);
+
+ sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
+}
+
+/*
+ * sh_veu_device_run() - prepares and starts the device
+ *
+ * This will be called by the framework when it decides to schedule a particular
+ * instance.
+ */
+static void sh_veu_device_run(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
+
+ if (src_buf && dst_buf)
+ sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
+}
+
+ /* ========== video ioctls ========== */
+
+static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
+ enum v4l2_buf_type type)
+{
+ return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ veu_file == veu->capture) ||
+ (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ veu_file == veu->output);
+}
+
+static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+/*
+ * It is not unusual to have video nodes open()ed multiple times. While some
+ * V4L2 operations are non-intrusive, like querying formats and various
+ * parameters, others, like setting formats, starting and stopping streaming,
+ * queuing and dequeuing buffers, directly affect hardware configuration
+ * and/or execution. This function verifies the availability of the requested
+ * interface and, if available, reserves it for the requesting user.
+ */
+static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file **stream;
+
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ stream = &veu->capture;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ stream = &veu->output;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (*stream == veu_file)
+ return 0;
+
+ if (*stream)
+ return -EBUSY;
+
+ *stream = veu_file;
+
+ return 0;
+}
+
+static int sh_veu_context_init(struct sh_veu_dev *veu)
+{
+ if (veu->m2m_ctx)
+ return 0;
+
+ veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
+ sh_veu_queue_init);
+
+ return PTR_ERR_OR_ZERO(veu->m2m_ctx);
+}
+
+static int sh_veu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "sh-veu", sizeof(cap->driver));
+ strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
+{
+ if (f->index >= fmt_num)
+ return -EINVAL;
+
+ strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description));
+ f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
+ return 0;
+}
+
+static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
+}
+
+static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
+}
+
+static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &veu->vfmt_out;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &veu->vfmt_in;
+ default:
+ return NULL;
+ }
+}
+
+static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ struct sh_veu_vfmt *vfmt;
+
+ vfmt = sh_veu_get_vfmt(veu, f->type);
+
+ pix->width = vfmt->frame.width;
+ pix->height = vfmt->frame.height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = vfmt->fmt->fourcc;
+ pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
+ pix->bytesperline = vfmt->bytesperline;
+ pix->sizeimage = vfmt->bytesperline * pix->height *
+ vfmt->fmt->depth / vfmt->fmt->ydepth;
+ dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
+ f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);
+
+ return 0;
+}
+
+static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return sh_veu_g_fmt(priv, f);
+}
+
+static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return sh_veu_g_fmt(priv, f);
+}
+
+static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ unsigned int y_bytes_used;
+
+ /*
+	 * The V4L2 specification suggests that the driver should correct the
+	 * format struct if any of the dimensions is unsupported.
+ */
+ switch (pix->field) {
+ default:
+ case V4L2_FIELD_ANY:
+ pix->field = V4L2_FIELD_NONE;
+ /* fall through: continue handling V4L2_FIELD_NONE */
+ case V4L2_FIELD_NONE:
+ break;
+ }
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
+ &pix->height, MIN_H, MAX_H, 0, 0);
+
+ y_bytes_used = (pix->width * fmt->ydepth) >> 3;
+
+ if (pix->bytesperline < y_bytes_used)
+ pix->bytesperline = y_bytes_used;
+ pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
+
+ pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);
+
+ return 0;
+}
+
+static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
+{
+ const int *fmt;
+ int i, n, dflt;
+
+ pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ fmt = sh_veu_fmt_out;
+ n = ARRAY_SIZE(sh_veu_fmt_out);
+ dflt = DEFAULT_OUT_FMTIDX;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ default:
+ fmt = sh_veu_fmt_in;
+ n = ARRAY_SIZE(sh_veu_fmt_in);
+ dflt = DEFAULT_IN_FMTIDX;
+ break;
+ }
+
+ for (i = 0; i < n; i++)
+ if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
+ return &sh_veu_fmt[fmt[i]];
+
+ return &sh_veu_fmt[dflt];
+}
+
+static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct sh_veu_format *fmt;
+
+ fmt = sh_veu_find_fmt(f);
+ if (!fmt)
+ /* wrong buffer type */
+ return -EINVAL;
+
+ return sh_veu_try_fmt(f, fmt);
+}
+
+static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct sh_veu_format *fmt;
+
+ fmt = sh_veu_find_fmt(f);
+ if (!fmt)
+ /* wrong buffer type */
+ return -EINVAL;
+
+ return sh_veu_try_fmt(f, fmt);
+}
+
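+/* Compute the Y and C plane start offsets corresponding to the configured
+ * frame position (crop/compose rectangle). */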
+static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
+{
+ /* dst_left and dst_top validity will be verified in CROP / COMPOSE */
+ unsigned int left = vfmt->frame.left & ~0x03;
+ unsigned int top = vfmt->frame.top;
+ dma_addr_t offset = (dma_addr_t)top * veu->vfmt_out.bytesperline +
+ (((dma_addr_t)left * veu->vfmt_out.fmt->depth) >> 3);
+ unsigned int y_line;
+
+ vfmt->offset_y = offset;
+
+ switch (vfmt->fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ y_line = ALIGN(vfmt->frame.width, 16);
+ vfmt->offset_c = offset + y_line * vfmt->frame.height;
+ break;
+ case V4L2_PIX_FMT_RGB332:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR666:
+ case V4L2_PIX_FMT_RGB24:
+ vfmt->offset_c = 0;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ struct sh_veu_vfmt *vfmt;
+ struct vb2_queue *vq;
+ int ret = sh_veu_context_init(veu);
+ if (ret < 0)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ vfmt = sh_veu_get_vfmt(veu, f->type);
+ /* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */
+
+ vfmt->fmt = sh_veu_find_fmt(f);
+ /* vfmt->fmt != NULL following the same argument as above */
+ vfmt->frame.width = pix->width;
+ vfmt->frame.height = pix->height;
+ vfmt->bytesperline = pix->bytesperline;
+
+ sh_veu_colour_offset(veu, vfmt);
+
+ /*
+ * We could also verify and require configuration only if any parameters
+	 * actually have changed, but it is unlikely that the user requests the
+ * same configuration several times without closing the device.
+ */
+ veu_file->cfg_needed = true;
+
+ dev_dbg(veu->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %x\n",
+ f->type, pix->width, pix->height, vfmt->fmt->fourcc);
+
+ return 0;
+}
+
+static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return sh_veu_s_fmt(priv, f);
+}
+
+static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret = sh_veu_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return sh_veu_s_fmt(priv, f);
+}
+
+static int sh_veu_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct sh_veu_file *veu_file = priv;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ int ret = sh_veu_context_init(veu);
+ if (ret < 0)
+ return ret;
+
+ ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
+}
+
+static int sh_veu_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
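+/* Compute the resize factor as a 4.12 fixed-point mantissa/fraction pair and,
+ * for VEU2H upscaling, the pixel repeat factor. */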
+static void sh_veu_calc_scale(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out,
+ u32 *mant, u32 *frac, u32 *rep)
+{
+ u32 fixpoint;
+
+ /* calculate FRAC and MANT */
+ *rep = *mant = *frac = 0;
+
+ if (size_in == size_out) {
+ if (crop_out != size_out)
+ *mant = 1; /* needed for cropping */
+ return;
+ }
+
+ /* VEU2H special upscale */
+ if (veu->is_2h && size_out > size_in) {
+ u32 fixpoint = (4096 * size_in) / size_out;
+ *mant = fixpoint / 4096;
+ *frac = (fixpoint - (*mant * 4096)) & ~0x07;
+
+ switch (*frac) {
+ case 0x800:
+ *rep = 1;
+ break;
+ case 0x400:
+ *rep = 3;
+ break;
+ case 0x200:
+ *rep = 7;
+ break;
+ }
+ if (*rep)
+ return;
+ }
+
+ fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
+ *mant = fixpoint / 4096;
+ *frac = fixpoint - (*mant * 4096);
+
+ if (*frac & 0x07) {
+ /*
+ * FIXME: do we really have to round down twice in the
+ * up-scaling case?
+ */
+ *frac &= ~0x07;
+ if (size_out > size_in)
+ *frac -= 8; /* round down if scaling up */
+ else
+ *frac += 8; /* round up if scaling down */
+ }
+}
+
+static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out)
+{
+ u32 mant, frac, value, rep;
+
+ sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
+
+ /* set scale */
+ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
+ (((mant << 12) | frac) << 16);
+
+ sh_veu_reg_write(veu, VEU_RFCR, value);
+
+ /* set clip */
+ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
+ (((rep << 12) | crop_out) << 16);
+
+ sh_veu_reg_write(veu, VEU_RFSR, value);
+
+ return ALIGN((size_in * crop_out) / size_out, 4);
+}
+
+static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out)
+{
+ u32 mant, frac, value, rep;
+
+ sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
+
+ /* set scale */
+ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
+ (mant << 12) | frac;
+
+ sh_veu_reg_write(veu, VEU_RFCR, value);
+
+ /* set clip */
+ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
+ (rep << 12) | crop_out;
+
+ sh_veu_reg_write(veu, VEU_RFSR, value);
+
+ return ALIGN((size_in * crop_out) / size_out, 4);
+}
+
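+/* Program the VEU for the current formats: reset the unit, set up strides,
+ * sizes and scaling, and load the colour conversion matrix on VEU2H. */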
+static void sh_veu_configure(struct sh_veu_dev *veu)
+{
+ u32 src_width, src_stride, src_height;
+ u32 dst_width, dst_stride, dst_height;
+ u32 real_w, real_h;
+
+ /* reset VEU */
+ sh_veu_reg_write(veu, VEU_BSRR, 0x100);
+
+ src_width = veu->vfmt_in.frame.width;
+ src_height = veu->vfmt_in.frame.height;
+ src_stride = ALIGN(veu->vfmt_in.frame.width, 16);
+
+ dst_width = real_w = veu->vfmt_out.frame.width;
+ dst_height = real_h = veu->vfmt_out.frame.height;
+	/* The datasheet is unclear on whether it's always the number of bytes or not */
+ dst_stride = veu->vfmt_out.bytesperline;
+
+ /*
+ * So far real_w == dst_width && real_h == dst_height, but it wasn't
+ * necessarily the case in the original vidix driver, so, it may change
+	 * necessarily the case in the original vidix driver, so it may change
+ */
+ src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
+ src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);
+
+ sh_veu_reg_write(veu, VEU_SWR, src_stride);
+ sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
+ sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */
+
+ sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
+ sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */
+
+ sh_veu_reg_write(veu, VEU_SWPR, 0x67);
+ sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);
+
+ if (veu->is_2h) {
+ sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
+ sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR02, 0x0000);
+
+ sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
+ sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);
+
+ sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
+ sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR22, 0x1023);
+
+ sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
+ }
+}
+
+static int sh_veu_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
+ return -EBUSY;
+
+ if (veu_file->cfg_needed) {
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ veu_file->cfg_needed = false;
+ sh_veu_configure(veu_file->veu_dev);
+ veu->xaction = 0;
+ veu->aborting = false;
+ }
+
+ return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
+}
+
+static int sh_veu_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
+ return -EBUSY;
+
+ return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
+ .vidioc_querycap = sh_veu_querycap,
+
+ .vidioc_enum_fmt_vid_cap = sh_veu_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = sh_veu_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = sh_veu_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = sh_veu_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = sh_veu_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_veu_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_veu_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_veu_s_fmt_vid_out,
+
+ .vidioc_reqbufs = sh_veu_reqbufs,
+ .vidioc_querybuf = sh_veu_querybuf,
+
+ .vidioc_qbuf = sh_veu_qbuf,
+ .vidioc_dqbuf = sh_veu_dqbuf,
+
+ .vidioc_streamon = sh_veu_streamon,
+ .vidioc_streamoff = sh_veu_streamoff,
+};
+
+ /* ========== Queue operations ========== */
+
+static int sh_veu_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
+ struct sh_veu_vfmt *vfmt = sh_veu_get_vfmt(veu, vq->type);
+ unsigned int count = *nbuffers;
+ unsigned int size = vfmt->bytesperline * vfmt->frame.height *
+ vfmt->fmt->depth / vfmt->fmt->ydepth;
+
+ if (count < 2)
+ *nbuffers = count = 2;
+
+ if (size * count > VIDEO_MEM_LIMIT) {
+ count = VIDEO_MEM_LIMIT / size;
+ *nbuffers = count;
+ }
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
+
+static int sh_veu_buf_prepare(struct vb2_buffer *vb)
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
+ struct sh_veu_vfmt *vfmt;
+ unsigned int sizeimage;
+
+ vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
+ sizeimage = vfmt->bytesperline * vfmt->frame.height *
+ vfmt->fmt->depth / vfmt->fmt->ydepth;
+
+ if (vb2_plane_size(vb, 0) < sizeimage) {
+ dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, sizeimage);
+
+ return 0;
+}
+
+static void sh_veu_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
+ dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
+ v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
+}
+
+static const struct vb2_ops sh_veu_qops = {
+ .queue_setup = sh_veu_queue_setup,
+ .buf_prepare = sh_veu_buf_prepare,
+ .buf_queue = sh_veu_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct sh_veu_dev *veu = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = veu;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &sh_veu_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->lock = &veu->fop_lock;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = veu->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret < 0)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = veu;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &sh_veu_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->lock = &veu->fop_lock;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = veu->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+ /* ========== File operations ========== */
+
+static int sh_veu_open(struct file *file)
+{
+ struct sh_veu_dev *veu = video_drvdata(file);
+ struct sh_veu_file *veu_file;
+
+ veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
+ if (!veu_file)
+ return -ENOMEM;
+
+ veu_file->veu_dev = veu;
+ veu_file->cfg_needed = true;
+
+ file->private_data = veu_file;
+
+ pm_runtime_get_sync(veu->dev);
+
+ dev_dbg(veu->dev, "Created instance %p\n", veu_file);
+
+ return 0;
+}
+
+static int sh_veu_release(struct file *file)
+{
+ struct sh_veu_dev *veu = video_drvdata(file);
+ struct sh_veu_file *veu_file = file->private_data;
+
+ dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
+
+ if (veu_file == veu->capture) {
+ veu->capture = NULL;
+ vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
+ }
+
+ if (veu_file == veu->output) {
+ veu->output = NULL;
+ vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
+ }
+
+ if (!veu->output && !veu->capture && veu->m2m_ctx) {
+ v4l2_m2m_ctx_release(veu->m2m_ctx);
+ veu->m2m_ctx = NULL;
+ }
+
+ pm_runtime_put(veu->dev);
+
+ kfree(veu_file);
+
+ return 0;
+}
+
+static __poll_t sh_veu_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct sh_veu_file *veu_file = file->private_data;
+
+ return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
+}
+
+static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sh_veu_file *veu_file = file->private_data;
+
+ return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations sh_veu_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_veu_open,
+ .release = sh_veu_release,
+ .poll = sh_veu_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = sh_veu_mmap,
+};
+
+static const struct video_device sh_veu_videodev = {
+ .name = "sh-veu",
+ .fops = &sh_veu_fops,
+ .ioctl_ops = &sh_veu_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
+ .device_run = sh_veu_device_run,
+ .job_abort = sh_veu_job_abort,
+};
+
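+/*
+ * Interrupt handling is split in two: sh_veu_isr() runs in hard-IRQ context,
+ * gives back the completed source and destination buffers and returns
+ * IRQ_WAKE_THREAD; sh_veu_bh() then runs in thread context and either
+ * finishes the mem2mem job or kicks off the next transaction. Both handlers
+ * are registered together via devm_request_threaded_irq() in the probe
+ * routine below.
+ */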
+static irqreturn_t sh_veu_bh(int irq, void *dev_id)
+{
+ struct sh_veu_dev *veu = dev_id;
+
+ if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
+ v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
+ veu->xaction = 0;
+ } else {
+ sh_veu_device_run(veu);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sh_veu_isr(int irq, void *dev_id)
+{
+ struct sh_veu_dev *veu = dev_id;
+ struct vb2_v4l2_buffer *dst;
+ struct vb2_v4l2_buffer *src;
+ u32 status = sh_veu_reg_read(veu, VEU_EVTR);
+
+ /* bundle read mode not used */
+ if (!(status & 1))
+ return IRQ_NONE;
+
+ /* disable interrupt in VEU */
+ sh_veu_reg_write(veu, VEU_EIER, 0);
+ /* halt operation */
+ sh_veu_reg_write(veu, VEU_STR, 0);
+ /* ack int, write 0 to clear bits */
+ sh_veu_reg_write(veu, VEU_EVTR, status & ~1);
+
+ /* conversion completed */
+ dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
+ src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
+ if (!src || !dst)
+ return IRQ_NONE;
+
+ dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |=
+ src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->timecode = src->timecode;
+
+ spin_lock(&veu->lock);
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ spin_unlock(&veu->lock);
+
+ veu->xaction++;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int sh_veu_probe(struct platform_device *pdev)
+{
+ struct sh_veu_dev *veu;
+ struct resource *reg_res;
+ struct video_device *vdev;
+ int irq, ret;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!reg_res || irq <= 0) {
+ dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
+ return -ENODEV;
+ }
+
+ veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
+ if (!veu)
+ return -ENOMEM;
+
+ veu->is_2h = resource_size(reg_res) == 0x22c;
+
+ veu->base = devm_ioremap_resource(&pdev->dev, reg_res);
+ if (IS_ERR(veu->base))
+ return PTR_ERR(veu->base);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
+ 0, "veu", veu);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ return ret;
+ }
+
+ vdev = &veu->vdev;
+
+ *vdev = sh_veu_videodev;
+ vdev->v4l2_dev = &veu->v4l2_dev;
+ spin_lock_init(&veu->lock);
+ mutex_init(&veu->fop_lock);
+ vdev->lock = &veu->fop_lock;
+
+ video_set_drvdata(vdev, veu);
+
+ veu->dev = &pdev->dev;
+ veu->vfmt_out = DEFAULT_OUT_VFMT;
+ veu->vfmt_in = DEFAULT_IN_VFMT;
+
+ veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
+ if (IS_ERR(veu->m2m_dev)) {
+ ret = PTR_ERR(veu->m2m_dev);
+ v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
+ goto em2minit;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ pm_runtime_suspend(&pdev->dev);
+ if (ret < 0)
+ goto evidreg;
+
+ return ret;
+
+evidreg:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(veu->m2m_dev);
+em2minit:
+ v4l2_device_unregister(&veu->v4l2_dev);
+ return ret;
+}
+
+static int sh_veu_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_veu_dev *veu = container_of(v4l2_dev,
+ struct sh_veu_dev, v4l2_dev);
+
+ video_unregister_device(&veu->vdev);
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(veu->m2m_dev);
+ v4l2_device_unregister(&veu->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver __refdata sh_veu_pdrv = {
+ .remove = sh_veu_remove,
+ .driver = {
+ .name = "sh_veu",
+ },
+};
+
+module_platform_driver_probe(sh_veu_pdrv, sh_veu_probe);
+
+MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
+MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
new file mode 100644
index 000000000..6135e13e2
--- /dev/null
+++ b/drivers/media/platform/sh_vou.c
@@ -0,0 +1,1380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SuperH Video Output Unit (VOU) driver
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/module.h>
+
+#include <media/drv-intf/sh_vou.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+/* Mirror addresses are not available for all registers */
+#define VOUER 0
+#define VOUCR 4
+#define VOUSTR 8
+#define VOUVCR 0xc
+#define VOUISR 0x10
+#define VOUBCR 0x14
+#define VOUDPR 0x18
+#define VOUDSR 0x1c
+#define VOUVPR 0x20
+#define VOUIR 0x24
+#define VOUSRR 0x28
+#define VOUMSR 0x2c
+#define VOUHIR 0x30
+#define VOUDFR 0x34
+#define VOUAD1R 0x38
+#define VOUAD2R 0x3c
+#define VOUAIR 0x40
+#define VOUSWR 0x44
+#define VOURCR 0x48
+#define VOURPR 0x50
+
+enum sh_vou_status {
+ SH_VOU_IDLE,
+ SH_VOU_INITIALISING,
+ SH_VOU_RUNNING,
+};
+
+#define VOU_MIN_IMAGE_WIDTH 16
+#define VOU_MAX_IMAGE_WIDTH 720
+#define VOU_MIN_IMAGE_HEIGHT 16
+
+struct sh_vou_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+static inline struct sh_vou_buffer *
+to_sh_vou_buffer(struct vb2_v4l2_buffer *vb2)
+{
+ return container_of(vb2, struct sh_vou_buffer, vb);
+}
+
+struct sh_vou_device {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct sh_vou_pdata *pdata;
+ spinlock_t lock;
+ void __iomem *base;
+ /* State information */
+ struct v4l2_pix_format pix;
+ struct v4l2_rect rect;
+ struct list_head buf_list;
+ v4l2_std_id std;
+ int pix_idx;
+ struct vb2_queue queue;
+ struct sh_vou_buffer *active;
+ enum sh_vou_status status;
+ unsigned sequence;
+ struct mutex fop_lock;
+};
+
+/* Register access routines for sides A, B and mirror addresses */
+static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+ __raw_writel(value, vou_dev->base + reg + 0x1000);
+}
+
+static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg + 0x2000);
+}
+
+static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg)
+{
+ return __raw_readl(vou_dev->base + reg);
+}
+
+static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ u32 old = __raw_readl(vou_dev->base + reg);
+
+ value = (value & mask) | (old & ~mask);
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask);
+}
+
+static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg, value, mask);
+ sh_vou_reg_b_set(vou_dev, reg, value, mask);
+}
+
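+/*
+ * Output pixel-format description: bpp is bits per pixel (used to calculate
+ * sizeimage), bpl is bytes per pixel of the packed / Y plane (used for
+ * bytesperline), and rgb / yf / pkf are the raw values programmed into
+ * VOUDFR by sh_vou_configure_geometry().
+ */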
+struct sh_vou_fmt {
+ u32 pfmt;
+ char *desc;
+ unsigned char bpp;
+ unsigned char bpl;
+ unsigned char rgb;
+ unsigned char yf;
+ unsigned char pkf;
+};
+
+/* Further pixel formats can be added */
+static struct sh_vou_fmt vou_fmt[] = {
+ {
+ .pfmt = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ .bpl = 1,
+ .desc = "YVU420 planar",
+ .yf = 0,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ .bpl = 1,
+ .desc = "YVYU planar",
+ .yf = 1,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB24,
+ .bpp = 24,
+ .bpl = 3,
+ .desc = "RGB24",
+ .pkf = 2,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565,
+ .bpp = 16,
+ .bpl = 2,
+ .desc = "RGB565",
+ .pkf = 3,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565X,
+ .bpp = 16,
+ .bpl = 2,
+ .desc = "RGB565 byteswapped",
+ .pkf = 3,
+ .rgb = 1,
+ },
+};
+
+static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ dma_addr_t addr1, addr2;
+
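+ /*
+ * addr1 is the start of the single DMA plane; for the semi-planar
+ * NV12/NV16 formats the chroma (CbCr) data follows the full Y plane,
+ * so addr2 points width * height bytes further. The packed RGB
+ * formats have no second plane, hence addr2 = 0.
+ */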
+ addr1 = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ addr2 = addr1 + vou_dev->pix.width * vou_dev->pix.height;
+ break;
+ default:
+ addr2 = 0;
+ }
+
+ sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1);
+ sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
+}
+
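+/*
+ * row_coeff below is the per-pixel byte count of the single DMA plane: 1 for
+ * the semi-planar YCbCr formats (Y plane), 2 for RGB565(X) and 3 for RGB24.
+ * width * row_coeff is written to VOUAIR, presumably the per-line address
+ * increment, while dataswap (VOUSWR) selects byte/word swapping depending on
+ * host endianness and the RGB565 byte order.
+ */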
+static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
+{
+ unsigned int row_coeff;
+#ifdef __LITTLE_ENDIAN
+ u32 dataswap = 7;
+#else
+ u32 dataswap = 0;
+#endif
+
+ switch (vou_dev->pix.pixelformat) {
+ default:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ row_coeff = 1;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dataswap ^= 1;
+ /* fall through */
+ case V4L2_PIX_FMT_RGB565X:
+ row_coeff = 2;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ row_coeff = 3;
+ break;
+ }
+
+ sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
+ sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
+}
+
+/* Locking: caller holds fop_lock mutex */
+static int sh_vou_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ if (*nplanes)
+ return sizes[0] < pix->height * bytes_per_line ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = pix->height * bytes_per_line;
+ return 0;
+}
+
+static int sh_vou_buf_prepare(struct vb2_buffer *vb)
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ unsigned bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+ unsigned size = pix->height * bytes_per_line;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ if (vb2_plane_size(vb, 0) < size) {
+ /* User buffer too small */
+ dev_warn(vou_dev->v4l2_dev.dev, "buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ return 0;
+}
+
+/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
+static void sh_vou_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vou_dev->lock, flags);
+ list_add_tail(&shbuf->list, &vou_dev->buf_list);
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+}
+
+static int sh_vou_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct sh_vou_buffer *buf, *node;
+ int ret;
+
+ vou_dev->sequence = 0;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ list_del(&buf->list);
+ }
+ vou_dev->active = NULL;
+ return ret;
+ }
+
+ buf = list_entry(vou_dev->buf_list.next, struct sh_vou_buffer, list);
+
+ vou_dev->active = buf;
+
+ /* Start from side A: we use mirror addresses, so set B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
+ __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
+ sh_vou_schedule_next(vou_dev, &buf->vb);
+
+ buf = list_entry(buf->list.next, struct sh_vou_buffer, list);
+
+ /* Second buffer - initialise register side B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+ sh_vou_schedule_next(vou_dev, &buf->vb);
+
+ /* Register side switching with frame VSYNC */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+
+ sh_vou_stream_config(vou_dev);
+ /* Enable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+
+ /* Two buffers on the queue - activate the hardware */
+ vou_dev->status = SH_VOU_RUNNING;
+ sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+ return 0;
+}
+
+static void sh_vou_stop_streaming(struct vb2_queue *vq)
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct sh_vou_buffer *buf, *node;
+ unsigned long flags;
+
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 0);
+ /* disable output */
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ /* ...but the current frame will complete */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ msleep(50);
+ spin_lock_irqsave(&vou_dev->lock, flags);
+ list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ list_del(&buf->list);
+ }
+ vou_dev->active = NULL;
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+}
+
+static const struct vb2_ops sh_vou_qops = {
+ .queue_setup = sh_vou_queue_setup,
+ .buf_prepare = sh_vou_buf_prepare,
+ .buf_queue = sh_vou_buf_queue,
+ .start_streaming = sh_vou_start_streaming,
+ .stop_streaming = sh_vou_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/* Video IOCTLs */
+static int sh_vou_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
+ strlcpy(cap->driver, "sh-vou", sizeof(cap->driver));
+ strlcpy(cap->bus_info, "platform:sh-vou", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/* Enumerate the formats that the device can accept from the user */
+static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ if (fmt->index >= ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ strlcpy(fmt->description, vou_fmt[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = vou_fmt[fmt->index].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ fmt->fmt.pix = vou_dev->pix;
+
+ return 0;
+}
+
+static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4};
+static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1};
+static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3};
+static const unsigned char vou_scale_v_num[] = {1, 2, 4};
+static const unsigned char vou_scale_v_den[] = {1, 1, 1};
+static const unsigned char vou_scale_v_fld[] = {0, 1};
+
+static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
+ int pix_idx, int w_idx, int h_idx)
+{
+ struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
+ unsigned int black_left, black_top, width_max,
+ frame_in_height, frame_out_height, frame_out_top;
+ struct v4l2_rect *rect = &vou_dev->rect;
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ u32 vouvcr = 0, dsr_h, dsr_v;
+
+ if (vou_dev->std & V4L2_STD_525_60) {
+ width_max = 858;
+ /* height_max = 262; */
+ } else {
+ width_max = 864;
+ /* height_max = 312; */
+ }
+
+ frame_in_height = pix->height / 2;
+ frame_out_height = rect->height / 2;
+ frame_out_top = rect->top / 2;
+
+ /*
+ * Cropping scheme: max useful image is 720x480, and the total video
+ * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts
+ * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock,
+ * of which the first 33 / 25 clocks HSYNC must be held active. This
+ * has to be configured in CR[HW]. 1 pixel equals 2 clock periods.
+ * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives
+ * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area,
+ * beyond DSR, specified on the left and top by the VPR register "black
+ * pixels" and out-of-image area (DPR) "background pixels." We fix VPR
+ * at 138 / 144 : 20, because that's the HSYNC timing that our first
+ * client requires, and that's exactly what leaves us 720 pixels for the
+ * image; we leave VPR[VVP] at default 20 for now, because the client
+ * doesn't seem to have any special requirements for it. Otherwise we
+ * could also set it to max - 240 = 22 / 72. Thus VPR depends only on
+ * the selected standard, and DPR and DSR are selected according to
+ * cropping. Q: how does the client detect the first valid line? Does
+ * HSYNC stay inactive during invalid (black) lines?
+ */
+ black_left = width_max - VOU_MAX_IMAGE_WIDTH;
+ black_top = 20;
+
+ dsr_h = rect->width + rect->left;
+ dsr_v = frame_out_height + frame_out_top;
+
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n",
+ pix->width, frame_in_height, black_left, black_top,
+ rect->left, frame_out_top, dsr_h, dsr_v);
+
+ /* VOUISR height - half of a frame height in frame mode */
+ sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height);
+ sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v);
+
+ /*
+ * if necessary, we could set VOUHIR to
+ * max(black_left + dsr_h, width_max) here
+ */
+
+ if (w_idx)
+ vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4);
+ if (h_idx)
+ vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: scaling 0x%x\n", fmt->desc, vouvcr);
+
+ /* To produce a colour bar for testing set bit 23 of VOUVCR */
+ sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
+ sh_vou_reg_ab_write(vou_dev, VOUDFR,
+ fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16));
+}
+
+struct sh_vou_geometry {
+ struct v4l2_rect output;
+ unsigned int in_width;
+ unsigned int in_height;
+ int scale_idx_h;
+ int scale_idx_v;
+};
+
+/*
+ * Find the input geometry that we can use, with VOU scaling, to produce
+ * output closest to the requested rectangle
+ */
+static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ /* The compiler cannot know that best and idx will indeed be set */
+ unsigned int best_err = UINT_MAX, best = 0, img_height_max;
+ int i, idx = 0;
+
+ if (std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&geo->in_width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+ &geo->in_height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.width * vou_scale_h_den[i] /
+ vou_scale_h_num[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_width = best;
+ geo->scale_idx_h = idx;
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.height * vou_scale_v_den[i] /
+ vou_scale_v_num[i];
+
+ if (found > img_height_max)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_height = best;
+ geo->scale_idx_v = idx;
+}
+
+/*
+ * Find the output geometry that we can produce, using VOU scaling, closest
+ * to the requested rectangle
+ */
+static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ unsigned int best_err = UINT_MAX, best = geo->in_width,
+ width_max, height_max, img_height_max;
+ int i, idx_h = 0, idx_v = 0;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262 * 2;
+ img_height_max = 480;
+ } else {
+ width_max = 864;
+ height_max = 312 * 2;
+ img_height_max = 576;
+ }
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_width * vou_scale_h_num[i] /
+ vou_scale_h_den[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.width);
+ if (err < best_err) {
+ best_err = err;
+ idx_h = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.width = best;
+ geo->scale_idx_h = idx_h;
+ if (geo->output.left + best > width_max)
+ geo->output.left = width_max - best;
+
+ pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width,
+ vou_scale_h_num[idx_h], vou_scale_h_den[idx_h], best);
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_height * vou_scale_v_num[i] /
+ vou_scale_v_den[i];
+
+ if (found > img_height_max)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.height);
+ if (err < best_err) {
+ best_err = err;
+ idx_v = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.height = best;
+ geo->scale_idx_v = idx_v;
+ if (geo->output.top + best > height_max)
+ geo->output.top = height_max - best;
+
+ pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height,
+ vou_scale_v_num[idx_v], vou_scale_v_den[idx_v], best);
+}
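+
+/*
+ * Example of the scale selection above (illustrative numbers): for a
+ * 360-pixel wide input and a requested 720-pixel output the horizontal loop
+ * finds 360 * 2/1 = 720 at index 2 with zero error, so scale_idx_h = 2;
+ * for a 240-line input and a requested 480-line output the vertical loop
+ * picks 240 * 2/1 = 480 at index 1.
+ */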
+
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ unsigned int img_height_max;
+ int pix_idx;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ pix->ycbcr_enc = pix->quantization = 0;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
+
+ if (pix_idx == ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ if (vou_dev->std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ v4l_bound_align_image(&pix->width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+ &pix->height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+ pix->bytesperline = pix->width * vou_fmt[pix_idx].bpl;
+ pix->sizeimage = pix->height * ((pix->width * vou_fmt[pix_idx].bpp) >> 3);
+
+ return 0;
+}
+
+static int sh_vou_set_fmt_vid_out(struct sh_vou_device *vou_dev,
+ struct v4l2_pix_format *pix)
+{
+ unsigned int img_height_max;
+ struct sh_vou_geometry geo;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ /* Revisit: is this the correct code? */
+ .format.code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .format.field = V4L2_FIELD_INTERLACED,
+ .format.colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ struct v4l2_mbus_framefmt *mbfmt = &format.format;
+ int pix_idx;
+ int ret;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
+
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+ geo.output = vou_dev->rect;
+
+ vou_adjust_output(&geo, vou_dev->std);
+
+ mbfmt->width = geo.output.width;
+ mbfmt->height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+ set_fmt, NULL, &format);
+ /* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ geo.output.width, geo.output.height, mbfmt->width, mbfmt->height);
+
+ if (vou_dev->std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt->width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt->height > img_height_max ||
+ mbfmt->code != MEDIA_BUS_FMT_YUYV8_2X8)
+ return -EIO;
+
+ if (mbfmt->width != geo.output.width ||
+ mbfmt->height != geo.output.height) {
+ geo.output.width = mbfmt->width;
+ geo.output.height = mbfmt->height;
+
+ vou_adjust_input(&geo, vou_dev->std);
+ }
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__,
+ pix->width, pix->height);
+
+ vou_dev->pix_idx = pix_idx;
+
+ vou_dev->pix = *pix;
+
+ sh_vou_configure_geometry(vou_dev, pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ int ret = sh_vou_try_fmt_vid_out(file, priv, fmt);
+
+ if (ret)
+ return ret;
+ return sh_vou_set_fmt_vid_out(vou_dev, &fmt->fmt.pix);
+}
+
+static int sh_vou_enum_output(struct file *file, void *fh,
+ struct v4l2_output *a)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ if (a->index)
+ return -EINVAL;
+ strlcpy(a->name, "Video Out", sizeof(a->name));
+ a->type = V4L2_OUTPUT_TYPE_ANALOG;
+ a->std = vou_dev->vdev.tvnorms;
+ return 0;
+}
+
+static int sh_vou_g_output(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int sh_vou_s_output(struct file *file, void *fh, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
+static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
+{
+ switch (bus_fmt) {
+ default:
+ pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n",
+ __func__, bus_fmt);
+ /* fall through */
+ case SH_VOU_BUS_8BIT:
+ return 1;
+ case SH_VOU_BUS_16BIT:
+ return 0;
+ case SH_VOU_BUS_BT656:
+ return 3;
+ }
+}
+
+static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, std_id);
+
+ if (std_id == vou_dev->std)
+ return 0;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_std_output, std_id);
+ /* Shall we continue if the subdev doesn't support .s_std_output()? */
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ vou_dev->rect.top = vou_dev->rect.left = 0;
+ vou_dev->rect.width = VOU_MAX_IMAGE_WIDTH;
+ if (std_id & V4L2_STD_525_60) {
+ sh_vou_reg_ab_set(vou_dev, VOUCR,
+ sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
+ vou_dev->rect.height = 480;
+ } else {
+ sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+ vou_dev->rect.height = 576;
+ }
+
+ vou_dev->pix.width = vou_dev->rect.width;
+ vou_dev->pix.height = vou_dev->rect.height;
+ vou_dev->pix.bytesperline =
+ vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpl;
+ vou_dev->pix.sizeimage = vou_dev->pix.height *
+ ((vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpp) >> 3);
+ vou_dev->std = std_id;
+ sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
+
+ return 0;
+}
+
+static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ *std = vou_dev->std;
+
+ return 0;
+}
+
+static int sh_vou_log_status(struct file *file, void *priv)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ pr_info("VOUER: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUER));
+ pr_info("VOUCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUCR));
+ pr_info("VOUSTR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSTR));
+ pr_info("VOUVCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVCR));
+ pr_info("VOUISR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUISR));
+ pr_info("VOUBCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUBCR));
+ pr_info("VOUDPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDPR));
+ pr_info("VOUDSR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDSR));
+ pr_info("VOUVPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVPR));
+ pr_info("VOUIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUIR));
+ pr_info("VOUSRR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSRR));
+ pr_info("VOUMSR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUMSR));
+ pr_info("VOUHIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUHIR));
+ pr_info("VOUDFR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDFR));
+ pr_info("VOUAD1R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD1R));
+ pr_info("VOUAD2R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD2R));
+ pr_info("VOUAIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAIR));
+ pr_info("VOUSWR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSWR));
+ pr_info("VOURCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURCR));
+ pr_info("VOURPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURPR));
+ return 0;
+}
+
+static int sh_vou_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = vou_dev->rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = VOU_MAX_IMAGE_WIDTH;
+ if (vou_dev->std & V4L2_STD_525_60)
+ sel->r.height = 480;
+ else
+ sel->r.height = 576;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Assume a dull encoder, do all the work ourselves. */
+static int sh_vou_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct v4l2_rect *rect = &sel->r;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ struct v4l2_subdev_selection sd_sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_COMPOSE,
+ };
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ struct sh_vou_geometry geo;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ /* Revisit: is this the correct code? */
+ .format.code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .format.field = V4L2_FIELD_INTERLACED,
+ .format.colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ unsigned int img_height_max;
+ int ret;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
+ if (vou_dev->std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ v4l_bound_align_image(&rect->width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 1,
+ &rect->height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+
+ if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
+ rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
+
+ if (rect->height + rect->top > img_height_max)
+ rect->top = img_height_max - rect->height;
+
+ geo.output = *rect;
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+
+ /* Configure the encoder one-to-one, position at 0, ignore errors */
+ sd_sel.r.width = geo.output.width;
+ sd_sel.r.height = geo.output.height;
+ /*
+ * We first issue an S_SELECTION, so that the subsequent S_FMT delivers the
+ * final encoder configuration.
+ */
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+ set_selection, NULL, &sd_sel);
+ format.format.width = geo.output.width;
+ format.format.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+ set_fmt, NULL, &format);
+ /* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ /* Sanity checks */
+ if ((unsigned)format.format.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)format.format.height > img_height_max ||
+ format.format.code != MEDIA_BUS_FMT_YUYV8_2X8)
+ return -EIO;
+
+ geo.output.width = format.format.width;
+ geo.output.height = format.format.height;
+
+ /*
+ * No down-scaling. According to the API, current call has precedence:
+ * http://v4l2spec.bytesex.org/spec/x1904.htm#AEN1954 paragraph two.
+ */
+ vou_adjust_input(&geo, vou_dev->std);
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+static irqreturn_t sh_vou_isr(int irq, void *dev_id)
+{
+ struct sh_vou_device *vou_dev = dev_id;
+ static unsigned long j;
+ struct sh_vou_buffer *vb;
+ static int cnt;
+ u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
+ u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
+
+ if (!(irq_status & 0x300)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
+ irq_status);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&vou_dev->lock);
+ if (!vou_dev->active || list_empty(&vou_dev->buf_list)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev,
+ "IRQ without active buffer: %x!\n", irq_status);
+ /* Just ack: buf_release will disable further interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ masked = ~(0x300 & irq_status) & irq_status & 0x30304;
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
+ irq_status, masked, vou_status, cnt);
+
+ cnt++;
+ /* side = vou_status & 0x10000; */
+
+ /* Clear only set interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, masked);
+
+ vb = vou_dev->active;
+ if (list_is_singular(&vb->list)) {
+ /* Keep cycling while no next buffer is available */
+ sh_vou_schedule_next(vou_dev, &vb->vb);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ list_del(&vb->list);
+
+ vb->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb->vb.sequence = vou_dev->sequence++;
+ vb->vb.field = V4L2_FIELD_INTERLACED;
+ vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ vou_dev->active = list_entry(vou_dev->buf_list.next,
+ struct sh_vou_buffer, list);
+
+ if (list_is_singular(&vou_dev->buf_list)) {
+ /* Keep cycling while no next buffer is available */
+ sh_vou_schedule_next(vou_dev, &vou_dev->active->vb);
+ } else {
+ struct sh_vou_buffer *new = list_entry(vou_dev->active->list.next,
+ struct sh_vou_buffer, list);
+ sh_vou_schedule_next(vou_dev, &new->vb);
+ }
+
+ spin_unlock(&vou_dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
+{
+ struct sh_vou_pdata *pdata = vou_dev->pdata;
+ u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
+ int i = 100;
+
+ /* Disable all IRQs */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0);
+
+ /* Reset VOU interfaces - registers unaffected */
+ sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
+ while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
+ udelay(1);
+
+ if (!i)
+ return -ETIMEDOUT;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);
+
+ if (pdata->flags & SH_VOU_PCLK_FALLING)
+ voucr |= 1 << 28;
+ if (pdata->flags & SH_VOU_HSYNC_LOW)
+ voucr |= 1 << 27;
+ if (pdata->flags & SH_VOU_VSYNC_LOW)
+ voucr |= 1 << 26;
+ sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);
+
+ /* Manual register side switching at first */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 4);
+ /* Default - fixed HSYNC length, can be made configurable if required */
+ sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
+
+ sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
+
+ return 0;
+}
+
+/* File operations */
+static int sh_vou_open(struct file *file)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ int err;
+
+ if (mutex_lock_interruptible(&vou_dev->fop_lock))
+ return -ERESTARTSYS;
+
+ err = v4l2_fh_open(file);
+ if (err)
+ goto done_open;
+ if (v4l2_fh_is_singular_file(file) &&
+ vou_dev->status == SH_VOU_INITIALISING) {
+ /* First open */
+ pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
+ err = sh_vou_hw_init(vou_dev);
+ if (err < 0) {
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
+ v4l2_fh_release(file);
+ } else {
+ vou_dev->status = SH_VOU_IDLE;
+ }
+ }
+done_open:
+ mutex_unlock(&vou_dev->fop_lock);
+ return err;
+}
+
+static int sh_vou_release(struct file *file)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ bool is_last;
+
+ mutex_lock(&vou_dev->fop_lock);
+ is_last = v4l2_fh_is_singular_file(file);
+ _vb2_fop_release(file, NULL);
+ if (is_last) {
+ /* Last close */
+ vou_dev->status = SH_VOU_INITIALISING;
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
+ }
+ mutex_unlock(&vou_dev->fop_lock);
+ return 0;
+}
+
+/* sh_vou display ioctl operations */
+static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
+ .vidioc_querycap = sh_vou_querycap,
+ .vidioc_enum_fmt_vid_out = sh_vou_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_vou_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_vou_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_vou_try_fmt_vid_out,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_g_output = sh_vou_g_output,
+ .vidioc_s_output = sh_vou_s_output,
+ .vidioc_enum_output = sh_vou_enum_output,
+ .vidioc_s_std = sh_vou_s_std,
+ .vidioc_g_std = sh_vou_g_std,
+ .vidioc_g_selection = sh_vou_g_selection,
+ .vidioc_s_selection = sh_vou_s_selection,
+ .vidioc_log_status = sh_vou_log_status,
+};
+
+static const struct v4l2_file_operations sh_vou_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_vou_open,
+ .release = sh_vou_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+ .write = vb2_fop_write,
+};
+
+static const struct video_device sh_vou_video_template = {
+ .name = "sh_vou",
+ .fops = &sh_vou_fops,
+ .ioctl_ops = &sh_vou_ioctl_ops,
+ .tvnorms = V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
+ .vfl_dir = VFL_DIR_TX,
+};
+
+static int sh_vou_probe(struct platform_device *pdev)
+{
+ struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
+ struct v4l2_rect *rect;
+ struct v4l2_pix_format *pix;
+ struct i2c_adapter *i2c_adap;
+ struct video_device *vdev;
+ struct sh_vou_device *vou_dev;
+ struct resource *reg_res;
+ struct v4l2_subdev *subdev;
+ struct vb2_queue *q;
+ int irq, ret;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!vou_pdata || !reg_res || irq <= 0) {
+ dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
+ return -ENODEV;
+ }
+
+ vou_dev = devm_kzalloc(&pdev->dev, sizeof(*vou_dev), GFP_KERNEL);
+ if (!vou_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vou_dev->buf_list);
+ spin_lock_init(&vou_dev->lock);
+ mutex_init(&vou_dev->fop_lock);
+ vou_dev->pdata = vou_pdata;
+ vou_dev->status = SH_VOU_INITIALISING;
+ vou_dev->pix_idx = 1;
+
+ rect = &vou_dev->rect;
+ pix = &vou_dev->pix;
+
+ /* Fill in defaults */
+ vou_dev->std = V4L2_STD_NTSC_M;
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = VOU_MAX_IMAGE_WIDTH;
+ rect->height = 480;
+ pix->width = VOU_MAX_IMAGE_WIDTH;
+ pix->height = 480;
+ pix->pixelformat = V4L2_PIX_FMT_NV16;
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->bytesperline = VOU_MAX_IMAGE_WIDTH;
+ pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * 480;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ vou_dev->base = devm_ioremap_resource(&pdev->dev, reg_res);
+ if (IS_ERR(vou_dev->base))
+ return PTR_ERR(vou_dev->base);
+
+ ret = devm_request_irq(&pdev->dev, irq, sh_vou_isr, 0, "vou", vou_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ return ret;
+ }
+
+ vdev = &vou_dev->vdev;
+ *vdev = sh_vou_video_template;
+ if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
+ vdev->tvnorms |= V4L2_STD_PAL;
+ vdev->v4l2_dev = &vou_dev->v4l2_dev;
+ vdev->release = video_device_release_empty;
+ vdev->lock = &vou_dev->fop_lock;
+
+ video_set_drvdata(vdev, vou_dev);
+
+ /* Initialize the vb2 queue */
+ q = &vou_dev->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ q->drv_priv = vou_dev;
+ q->buf_struct_size = sizeof(struct sh_vou_buffer);
+ q->ops = &sh_vou_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &vou_dev->fop_lock;
+ q->dev = &pdev->dev;
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto ei2cgadap;
+
+ vdev->queue = q;
+ INIT_LIST_HEAD(&vou_dev->buf_list);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap);
+ if (!i2c_adap) {
+ ret = -ENODEV;
+ goto ei2cgadap;
+ }
+
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0)
+ goto ereset;
+
+ subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
+ vou_pdata->board_info, NULL);
+ if (!subdev) {
+ ret = -ENOMEM;
+ goto ei2cnd;
+ }
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ goto evregdev;
+
+ return 0;
+
+evregdev:
+ei2cnd:
+ereset:
+ i2c_put_adapter(i2c_adap);
+ei2cgadap:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ return ret;
+}
+
+static int sh_vou_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_vou_device *vou_dev = container_of(v4l2_dev,
+ struct sh_vou_device, v4l2_dev);
+ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(&vou_dev->vdev);
+ i2c_put_adapter(client->adapter);
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ return 0;
+}
+
+static struct platform_driver __refdata sh_vou = {
+ .remove = sh_vou_remove,
+ .driver = {
+ .name = "sh-vou",
+ },
+};
+
+module_platform_driver_probe(sh_vou, sh_vou_probe);
+
+MODULE_DESCRIPTION("SuperH VOU driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1.0");
+MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
new file mode 100644
index 000000000..669d116b8
--- /dev/null
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -0,0 +1,26 @@
+config SOC_CAMERA
+ tristate "SoC camera support"
+ depends on VIDEO_V4L2 && HAS_DMA && I2C
+ select VIDEOBUF2_CORE
+ help
+ SoC Camera is a common API for cameras that do not connect over a
+ bus like PCI or USB, for example an I2C camera connected directly
+ to the data bus of an SoC.
+
+config SOC_CAMERA_SCALE_CROP
+ tristate
+
+config SOC_CAMERA_PLATFORM
+ tristate "platform camera support"
+ depends on SOC_CAMERA
+ help
+ This is a generic SoC camera platform driver, useful for testing.
+
+config VIDEO_SH_MOBILE_CEU
+ tristate "SuperH Mobile CEU Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select SOC_CAMERA_SCALE_CROP
+ ---help---
+ This is a V4L2 driver for the SuperH Mobile CEU interface.
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
new file mode 100644
index 000000000..07a451e8b
--- /dev/null
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
+obj-$(CONFIG_SOC_CAMERA_SCALE_CROP) += soc_scale_crop.o
+
+# a platform subdevice driver stub that allows cameras to be supported by
+# adding a couple of callback functions to the board code
+obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
+
+# soc-camera host drivers have to be linked after camera drivers
+obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
new file mode 100644
index 000000000..0a2c0daaf
--- /dev/null
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -0,0 +1,1810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Driver for SuperH Mobile CEU interface
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
+ *
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/soc_camera.h>
+#include <media/drv-intf/sh_mobile_ceu.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-mediabus.h>
+#include <media/drv-intf/soc_mediabus.h>
+
+#include "soc_scale_crop.h"
+
+/* register offsets for sh7722 / sh7723 */
+
+#define CAPSR 0x00 /* Capture start register */
+#define CAPCR 0x04 /* Capture control register */
+#define CAMCR 0x08 /* Capture interface control register */
+#define CMCYR 0x0c /* Capture interface cycle register */
+#define CAMOR 0x10 /* Capture interface offset register */
+#define CAPWR 0x14 /* Capture interface width register */
+#define CAIFR 0x18 /* Capture interface input format register */
+#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */
+#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */
+#define CRCNTR 0x28 /* CEU register control register */
+#define CRCMPR 0x2c /* CEU register forcible control register */
+#define CFLCR 0x30 /* Capture filter control register */
+#define CFSZR 0x34 /* Capture filter size clip register */
+#define CDWDR 0x38 /* Capture destination width register */
+#define CDAYR 0x3c /* Capture data address Y register */
+#define CDACR 0x40 /* Capture data address C register */
+#define CDBYR 0x44 /* Capture data bottom-field address Y register */
+#define CDBCR 0x48 /* Capture data bottom-field address C register */
+#define CBDSR 0x4c /* Capture bundle destination size register */
+#define CFWCR 0x5c /* Firewall operation control register */
+#define CLFCR 0x60 /* Capture low-pass filter control register */
+#define CDOCR 0x64 /* Capture data output control register */
+#define CDDCR 0x68 /* Capture data complexity level register */
+#define CDDAR 0x6c /* Capture data complexity level address register */
+#define CEIER 0x70 /* Capture event interrupt enable register */
+#define CETCR 0x74 /* Capture event flag clear register */
+#define CSTSR 0x7c /* Capture status register */
+#define CSRTR 0x80 /* Capture software reset register */
+#define CDSSR 0x84 /* Capture data size register */
+#define CDAYR2 0x90 /* Capture data address Y register 2 */
+#define CDACR2 0x94 /* Capture data address C register 2 */
+#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
+#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
+
+#undef DEBUG_GEOMETRY
+#ifdef DEBUG_GEOMETRY
+#define dev_geo dev_info
+#else
+#define dev_geo dev_dbg
+#endif
+
+/* per video frame buffer */
+struct sh_mobile_ceu_buffer {
+ struct vb2_v4l2_buffer vb; /* v4l buffer must be first */
+ struct list_head queue;
+};
+
+struct sh_mobile_ceu_dev {
+ struct soc_camera_host ici;
+
+ unsigned int irq;
+ void __iomem *base;
+ size_t video_limit;
+ size_t buf_total;
+
+ spinlock_t lock; /* Protects video buffer lists */
+ struct list_head capture;
+ struct vb2_v4l2_buffer *active;
+
+ struct sh_mobile_ceu_info *pdata;
+ struct completion complete;
+
+ u32 cflcr;
+
+ /* static max sizes either from platform data or default */
+ int max_width;
+ int max_height;
+
+ enum v4l2_field field;
+ int sequence;
+ unsigned long flags;
+
+ unsigned int image_mode:1;
+ unsigned int is_16bit:1;
+ unsigned int frozen:1;
+};
+
+struct sh_mobile_ceu_cam {
+ /* CEU offsets within the camera output, before the CEU scaler */
+ unsigned int ceu_left;
+ unsigned int ceu_top;
+ /* Client output, as seen by the CEU */
+ unsigned int width;
+ unsigned int height;
+ /*
+ * User window from S_SELECTION / G_SELECTION, produced by client cropping and
+ * scaling, CEU scaling and CEU cropping, mapped back onto the client
+ * input window
+ */
+ struct v4l2_rect subrect;
+ /* Camera cropping rectangle */
+ struct v4l2_rect rect;
+ const struct soc_mbus_pixelfmt *extra_fmt;
+ u32 code;
+};
+
+static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct sh_mobile_ceu_buffer, vb);
+}
+
+static void ceu_write(struct sh_mobile_ceu_dev *priv,
+ unsigned long reg_offs, u32 data)
+{
+ iowrite32(data, priv->base + reg_offs);
+}
+
+static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
+{
+ return ioread32(priv->base + reg_offs);
+}
+
+static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
+{
+ int i, success = 0;
+
+ ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
+
+ /* wait CSTSR.CPTON bit */
+ for (i = 0; i < 1000; i++) {
+ if (!(ceu_read(pcdev, CSTSR) & 1)) {
+ success++;
+ break;
+ }
+ udelay(1);
+ }
+
+ /* wait CAPSR.CPKIL bit */
+ for (i = 0; i < 1000; i++) {
+ if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
+ success++;
+ break;
+ }
+ udelay(1);
+ }
+
+ if (2 != success) {
+ dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Videobuf operations
+ */
+
+/*
+ * .queue_setup() is called to check whether the driver can accept the
+ * requested number of buffers and to fill in plane sizes
+ * for the current frame format if required
+ */
+static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ if (!vq->num_buffers)
+ pcdev->sequence = 0;
+
+ if (!*count)
+ *count = 2;
+
+ /* Called from VIDIOC_REQBUFS or in compatibility mode */
+ if (!*num_planes)
+ sizes[0] = icd->sizeimage;
+ else if (sizes[0] < icd->sizeimage)
+ return -EINVAL;
+
+ /* If *num_planes != 0, we have already verified *count. */
+ if (pcdev->video_limit) {
+ size_t size = PAGE_ALIGN(sizes[0]) * *count;
+
+ if (size + pcdev->buf_total > pcdev->video_limit)
+ *count = (pcdev->video_limit - pcdev->buf_total) /
+ PAGE_ALIGN(sizes[0]);
+ }
+
+ *num_planes = 1;
+
+ dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
+
+ return 0;
+}
+
+#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
+#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
+#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
+#define CEU_CEIER_VBP (1 << 20) /* vbp error */
+#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
+#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)
+
+/*
+ * The return value doesn't reflect the success/failure to queue the new buffer,
+ * but rather the status of the previous buffer.
+ */
+static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
+{
+ struct soc_camera_device *icd = pcdev->ici.icd;
+ dma_addr_t phys_addr_top, phys_addr_bottom;
+ unsigned long top1, top2;
+ unsigned long bottom1, bottom2;
+ u32 status;
+ bool planar;
+ int ret = 0;
+
+ /*
+ * The hardware is _very_ picky about this sequence. Especially
+ * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
+ * several not-so-well documented interrupt sources in CETCR.
+ */
+ ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
+ status = ceu_read(pcdev, CETCR);
+ ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
+ if (!pcdev->frozen)
+ ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
+ ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
+ ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);
+
+ /*
+ * When a VBP interrupt occurs, a capture end interrupt does not occur
+ * and the image of that frame is not captured correctly. So, soft reset
+ * is needed here.
+ */
+ if (status & CEU_CEIER_VBP) {
+ sh_mobile_ceu_soft_reset(pcdev);
+ ret = -EIO;
+ }
+
+ if (pcdev->frozen) {
+ complete(&pcdev->complete);
+ return ret;
+ }
+
+ if (!pcdev->active)
+ return ret;
+
+ if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
+ top1 = CDBYR;
+ top2 = CDBCR;
+ bottom1 = CDAYR;
+ bottom2 = CDACR;
+ } else {
+ top1 = CDAYR;
+ top2 = CDACR;
+ bottom1 = CDBYR;
+ bottom2 = CDBCR;
+ }
+
+ phys_addr_top =
+ vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0);
+
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ planar = true;
+ break;
+ default:
+ planar = false;
+ }
+
+ ceu_write(pcdev, top1, phys_addr_top);
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
+ ceu_write(pcdev, bottom1, phys_addr_bottom);
+ }
+
+ if (planar) {
+ phys_addr_top += icd->bytesperline * icd->user_height;
+ ceu_write(pcdev, top2, phys_addr_top);
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
+ ceu_write(pcdev, bottom2, phys_addr_bottom);
+ }
+ }
+
+ ceu_write(pcdev, CAPSR, 0x1); /* start capture */
+
+ return ret;
+}
+
+static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
+
+	/* The list head is initialised in .buf_init(), so this check is safe */
+ WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
+
+ return 0;
+}
+
+static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
+ unsigned long size;
+
+ size = icd->sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
+ vb->index, vb2_plane_size(vb, 0), size);
+ goto error;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+#ifdef DEBUG
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
+ if (vb2_plane_vaddr(vb, 0))
+ memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
+#endif
+
+ spin_lock_irq(&pcdev->lock);
+ list_add_tail(&buf->queue, &pcdev->capture);
+
+ if (!pcdev->active) {
+ /*
+		 * Because there was no active buffer at this moment,
+ * we are not interested in the return value of
+ * sh_mobile_ceu_capture here.
+ */
+ pcdev->active = vbuf;
+ sh_mobile_ceu_capture(pcdev);
+ }
+ spin_unlock_irq(&pcdev->lock);
+
+ return;
+
+error:
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ spin_lock_irq(&pcdev->lock);
+
+ if (pcdev->active == vbuf) {
+ /* disable capture (release DMA buffer), reset */
+ ceu_write(pcdev, CAPSR, 1 << 16);
+ pcdev->active = NULL;
+ }
+
+ /*
+	 * It doesn't hurt if the list is empty, but it does hurt if queuing the
+	 * buffer failed and .buf_init() hasn't been called
+ */
+ if (buf->queue.next)
+ list_del_init(&buf->queue);
+
+ pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0));
+ dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+ pcdev->buf_total);
+
+ spin_unlock_irq(&pcdev->lock);
+}
+
+static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0));
+ dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+ pcdev->buf_total);
+
+ /* This is for locking debugging only */
+ INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue);
+ return 0;
+}
+
+static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(q);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct list_head *buf_head, *tmp;
+ struct vb2_v4l2_buffer *vbuf;
+
+ spin_lock_irq(&pcdev->lock);
+
+ pcdev->active = NULL;
+
+ list_for_each_safe(buf_head, tmp, &pcdev->capture) {
+ vbuf = &list_entry(buf_head, struct sh_mobile_ceu_buffer,
+ queue)->vb;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+ list_del_init(buf_head);
+ }
+
+ spin_unlock_irq(&pcdev->lock);
+
+ sh_mobile_ceu_soft_reset(pcdev);
+}
+
+static const struct vb2_ops sh_mobile_ceu_videobuf_ops = {
+ .queue_setup = sh_mobile_ceu_videobuf_setup,
+ .buf_prepare = sh_mobile_ceu_videobuf_prepare,
+ .buf_queue = sh_mobile_ceu_videobuf_queue,
+ .buf_cleanup = sh_mobile_ceu_videobuf_release,
+ .buf_init = sh_mobile_ceu_videobuf_init,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = sh_mobile_ceu_stop_streaming,
+};
+
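+/*
+ * The CEU raises one interrupt per captured frame: complete the finished
+ * buffer, promote the next queued buffer (if any) to the active one and
+ * kick off the next capture before handing the old buffer back to vb2.
+ */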
+static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
+{
+ struct sh_mobile_ceu_dev *pcdev = data;
+ struct vb2_v4l2_buffer *vbuf;
+ int ret;
+
+ spin_lock(&pcdev->lock);
+
+ vbuf = pcdev->active;
+ if (!vbuf)
+ /* Stale interrupt from a released buffer */
+ goto out;
+
+ list_del_init(&to_ceu_vb(vbuf)->queue);
+
+ if (!list_empty(&pcdev->capture))
+ pcdev->active = &list_entry(pcdev->capture.next,
+ struct sh_mobile_ceu_buffer, queue)->vb;
+ else
+ pcdev->active = NULL;
+
+ ret = sh_mobile_ceu_capture(pcdev);
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ if (!ret) {
+ vbuf->field = pcdev->field;
+ vbuf->sequence = pcdev->sequence++;
+ }
+ vb2_buffer_done(&vbuf->vb2_buf,
+ ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+out:
+ spin_unlock(&pcdev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
+{
+ dev_info(icd->parent,
+ "SuperH Mobile CEU driver attached to camera %d\n",
+ icd->devnum);
+
+ return 0;
+}
+
+static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
+{
+ dev_info(icd->parent,
+ "SuperH Mobile CEU driver detached from camera %d\n",
+ icd->devnum);
+}
+
+/* Called with .host_lock held */
+static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici)
+{
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ pm_runtime_get_sync(ici->v4l2_dev.dev);
+
+ pcdev->buf_total = 0;
+
+ sh_mobile_ceu_soft_reset(pcdev);
+
+ return 0;
+}
+
+/* Called with .host_lock held */
+static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici)
+{
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ /* disable capture, disable interrupts */
+ ceu_write(pcdev, CEIER, 0);
+ sh_mobile_ceu_soft_reset(pcdev);
+
+ /* make sure active buffer is canceled */
+ spin_lock_irq(&pcdev->lock);
+ if (pcdev->active) {
+ list_del_init(&to_ceu_vb(pcdev->active)->queue);
+ vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR);
+ pcdev->active = NULL;
+ }
+ spin_unlock_irq(&pcdev->lock);
+
+ pm_runtime_put(ici->v4l2_dev.dev);
+}
+
+/*
+ * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
+ * in SH7722 Hardware Manual
+ */
+static unsigned int size_dst(unsigned int src, unsigned int scale)
+{
+ unsigned int mant_pre = scale >> 12;
+ if (!src || !scale)
+ return src;
+ return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
+ mant_pre * 4096 / scale + 1;
+}
+
+static u16 calc_scale(unsigned int src, unsigned int *dst)
+{
+ u16 scale;
+
+ if (src == *dst)
+ return 0;
+
+ scale = (src * 4096 / *dst) & ~7;
+
+ while (scale > 4096 && size_dst(src, scale) < *dst)
+ scale -= 8;
+
+ *dst = size_dst(src, scale);
+
+ return scale;
+}
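+
+/*
+ * Example of the 12-bit fixed-point scale used above: scaling 640 pixels
+ * down to 320 gives scale = 640 * 4096 / 320 = 0x2000, and
+ * size_dst(640, 0x2000) evaluates back to exactly 320, so writing 0x2000
+ * into the corresponding CFLCR field performs a precise 2:1 downscale.
+ */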
+
+/* rect is guaranteed to not exceed the scaled camera rectangle */
+static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ unsigned int height, width, cdwdr_width, in_width, in_height;
+ unsigned int left_offset, top_offset;
+ u32 camor;
+
+ dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
+
+ left_offset = cam->ceu_left;
+ top_offset = cam->ceu_top;
+
+ WARN_ON(icd->user_width & 3 || icd->user_height & 3);
+
+ width = icd->user_width;
+
+ if (pcdev->image_mode) {
+ in_width = cam->width;
+ if (!pcdev->is_16bit) {
+ in_width *= 2;
+ left_offset *= 2;
+ }
+ } else {
+ unsigned int w_factor;
+
+ switch (icd->current_fmt->host_fmt->packing) {
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ w_factor = 2;
+ break;
+ default:
+ w_factor = 1;
+ }
+
+ in_width = cam->width * w_factor;
+ left_offset *= w_factor;
+ }
+
+ cdwdr_width = icd->bytesperline;
+
+ height = icd->user_height;
+ in_height = cam->height;
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ height = (height / 2) & ~3;
+ in_height /= 2;
+ top_offset /= 2;
+ cdwdr_width *= 2;
+ }
+
+ /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
+ camor = left_offset | (top_offset << 16);
+
+ dev_geo(icd->parent,
+ "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
+ (in_height << 16) | in_width, (height << 16) | width,
+ cdwdr_width);
+
+ ceu_write(pcdev, CAMOR, camor);
+ ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
+ /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
+ ceu_write(pcdev, CFSZR, (height << 16) | width);
+ ceu_write(pcdev, CDWDR, cdwdr_width);
+}
+
+static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
+{
+ u32 capsr = ceu_read(pcdev, CAPSR);
+ ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
+ return capsr;
+}
+
+static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
+{
+ unsigned long timeout = jiffies + 10 * HZ;
+
+ /*
+ * Wait until the end of the current frame. It can take a long time,
+	 * but if it has been aborted by a CAPSR reset, it should exit sooner.
+ */
+ while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(pcdev->ici.v4l2_dev.dev,
+ "Timeout waiting for frame end! Interface problem?\n");
+ return;
+ }
+
+ /* Wait until reset clears, this shall not hang... */
+ while (ceu_read(pcdev, CAPSR) & (1 << 16))
+ udelay(10);
+
+ /* Anything to restore? */
+ if (capsr & ~(1 << 16))
+ ceu_write(pcdev, CAPSR, capsr);
+}
+
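+/*
+ * Media-bus flags the CEU parallel interface can work with. They are
+ * intersected with the client's capabilities via soc_mbus_config_compatible()
+ * in the bus-parameter helpers below.
+ */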
+#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+/* Capture is not running, no interrupts, no locking needed */
+static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long value, common_flags = CEU_BUS_FLAGS;
+ u32 capsr = capture_save_reset(pcdev);
+ unsigned int yuv_lineskip;
+ int ret;
+
+ /*
+ * If the client doesn't implement g_mbus_config, we just use our
+ * platform data
+ */
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags);
+ if (!common_flags)
+ return -EINVAL;
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ }
+
+	/* Make choices based on platform preferences */
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (icd->current_fmt->host_fmt->bits_per_sample > 8)
+ pcdev->is_16bit = 1;
+ else
+ pcdev->is_16bit = 0;
+
+ ceu_write(pcdev, CRCNTR, 0);
+ ceu_write(pcdev, CRCMPR, 0);
+
+ value = 0x00000010; /* data fetch by default */
+ yuv_lineskip = 0x10;
+
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ /* convert 4:2:2 -> 4:2:0 */
+ yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
+ /* fall-through */
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ switch (cam->code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
+ icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
+ value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
+
+ value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
+ value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
+
+ if (pcdev->is_16bit)
+ value |= 1 << 12;
+ else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT)
+ value |= 2 << 12;
+
+ ceu_write(pcdev, CAMCR, value);
+
+ ceu_write(pcdev, CAPCR, 0x00300000);
+
+ switch (pcdev->field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ value = 0x101;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ value = 0x102;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ ceu_write(pcdev, CAIFR, value);
+
+ sh_mobile_ceu_set_rect(icd);
+ mdelay(1);
+
+ dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
+ ceu_write(pcdev, CFLCR, pcdev->cflcr);
+
+ /*
+ * A few words about byte order (observed in Big Endian mode)
+ *
+ * In data fetch mode bytes are received in chunks of 8 bytes.
+ * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
+ *
+ * The data is however by default written to memory in reverse order:
+ * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
+ *
+	 * The lowest three bits of CDOCR allow us to do swapping;
+	 * using 7 we swap the data bytes to match the incoming order:
+ * D0, D1, D2, D3, D4, D5, D6, D7
+ */
+ value = 0x00000007 | yuv_lineskip;
+
+ ceu_write(pcdev, CDOCR, value);
+ ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
+
+ capture_restore(pcdev, capsr);
+
+ /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
+ return 0;
+}
+
+static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ unsigned long common_flags = CEU_BUS_FLAGS;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret)
+ common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags);
+ else if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (!common_flags || buswidth > 16)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .name = "NV12",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .name = "NV21",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .name = "NV16",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .name = "NV61",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_1_5X8) ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct soc_camera_device,
+ ctrl_handler);
+}
+
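+/*
+ * V4L2_CID_SHARPNESS is mapped onto the CEU low-pass filter (CLFCR):
+ * requesting full sharpness clears CLFCR, which presumably disables the
+ * filter. The control only takes effect for the planar NV* formats; for
+ * all other formats -EINVAL is returned.
+ */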
+static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct soc_camera_device *icd = ctrl_to_icd(ctrl);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_SHARPNESS:
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ ceu_write(pcdev, CLFCR, !ctrl->val);
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = {
+ .s_ctrl = sh_mobile_ceu_s_ctrl,
+};
+
+static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ int ret, k, n;
+ int formats = 0;
+ struct sh_mobile_ceu_cam *cam;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .index = idx,
+ };
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code.code);
+ if (!fmt) {
+ dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code);
+ return 0;
+ }
+
+ ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0)
+ return 0;
+
+ if (!icd->host_priv) {
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &fmt.format;
+ struct v4l2_rect rect;
+ int shift = 0;
+
+ /* Add our control */
+ v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 1, 1, 1);
+ if (icd->ctrl_handler.error)
+ return icd->ctrl_handler.error;
+
+ /* FIXME: subwindow is lost between close / open */
+
+ /* Cache current client geometry */
+ ret = soc_camera_client_g_rect(sd, &rect);
+ if (ret < 0)
+ return ret;
+
+ /* First time */
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * All currently existing CEU implementations support 2560x1920
+		 * or larger frames. If the sensor is proposing too big a frame,
+		 * don't bother with larger sizes possibly supported by the CEU,
+		 * just try VGA multiples. If needed, this can be
+ * adjusted in the future.
+ */
+ while ((mf->width > pcdev->max_width ||
+ mf->height > pcdev->max_height) && shift < 4) {
+ /* Try 2560x1920, 1280x960, 640x480, 320x240 */
+ mf->width = 2560 >> shift;
+ mf->height = 1920 >> shift;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), pad,
+ set_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+ shift++;
+ }
+
+ if (shift == 4) {
+			dev_err(dev, "Failed to configure the client below %ux%u\n",
+ mf->width, mf->height);
+ return -EIO;
+ }
+
+ dev_geo(dev, "camera fmt %ux%u\n", mf->width, mf->height);
+
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam)
+ return -ENOMEM;
+
+ /* We are called with current camera crop, initialise subrect with it */
+ cam->rect = rect;
+ cam->subrect = rect;
+
+ cam->width = mf->width;
+ cam->height = mf->height;
+
+ icd->host_priv = cam;
+ } else {
+ cam = icd->host_priv;
+ }
+
+ /* Beginning of a pass */
+ if (!idx)
+ cam->extra_fmt = NULL;
+
+ switch (code.code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ if (cam->extra_fmt)
+ break;
+
+ /*
+ * Our case is simple so far: for any of the above four camera
+ * formats we add all our four synthesized NV* formats, so,
+ * just marking the device with a single flag suffices. If
+ * the format generation rules are more complex, you would have
+ * to actually hang your already added / counted formats onto
+ * the host_priv pointer and check whether the format you're
+ * going to add now is already there.
+ */
+ cam->extra_fmt = sh_mobile_ceu_formats;
+
+ n = ARRAY_SIZE(sh_mobile_ceu_formats);
+ formats += n;
+ for (k = 0; xlate && k < n; k++) {
+ xlate->host_fmt = &sh_mobile_ceu_formats[k];
+ xlate->code = code.code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ sh_mobile_ceu_formats[k].name, code.code);
+ }
+ break;
+ default:
+ if (!sh_mobile_ceu_packing_supported(fmt))
+ return 0;
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code.code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s in pass-through mode\n",
+ fmt->name);
+ }
+
+ return formats;
+}
+
+static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
+{
+ kfree(icd->host_priv);
+ icd->host_priv = NULL;
+}
+
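+/*
+ * Both wrappers use the same 12-bit fixed-point representation as CFLCR and
+ * calc_scale() above: a scale value of 4096 means 1:1.
+ */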
+#define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale)
+#define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out)
+
+/*
+ * CEU can scale and crop, but we don't want to waste bandwidth and kill the
+ * framerate by always requesting the maximum image from the client. See
+ * Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst for a description of
+ * scaling and cropping algorithms and for the meaning of referenced here steps.
+ */
+static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
+{
+ struct v4l2_rect *rect = &sel->r;
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_selection cam_sel;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_rect *cam_rect = &cam_sel.r;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &fmt.format;
+ unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
+ out_width, out_height;
+ int interm_width, interm_height;
+ u32 capsr, cflcr;
+ int ret;
+
+ dev_geo(dev, "S_SELECTION(%ux%u@%u:%u)\n", rect->width, rect->height,
+ rect->left, rect->top);
+
+ /* During camera cropping its output window can change too, stop CEU */
+ capsr = capture_save_reset(pcdev);
+ dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
+
+ /*
+ * 1. - 2. Apply iterative camera S_SELECTION for new input window, read back
+ * actual camera rectangle.
+ */
+ ret = soc_camera_client_s_selection(sd, sel, &cam_sel,
+ &cam->rect, &cam->subrect);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top);
+
+	/* On success cam_sel contains the current camera crop */
+
+ /* 3. Retrieve camera output window */
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+
+ if (mf->width > pcdev->max_width || mf->height > pcdev->max_height)
+ return -EINVAL;
+
+ /* 4. Calculate camera scales */
+ scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
+ scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);
+
+ /* Calculate intermediate window */
+ interm_width = scale_down(rect->width, scale_cam_h);
+ interm_height = scale_down(rect->height, scale_cam_v);
+
+ if (interm_width < icd->user_width) {
+ u32 new_scale_h;
+
+ new_scale_h = calc_generic_scale(rect->width, icd->user_width);
+
+ mf->width = scale_down(cam_rect->width, new_scale_h);
+ }
+
+ if (interm_height < icd->user_height) {
+ u32 new_scale_v;
+
+ new_scale_v = calc_generic_scale(rect->height, icd->user_height);
+
+ mf->height = scale_down(cam_rect->height, new_scale_v);
+ }
+
+ if (interm_width < icd->user_width || interm_height < icd->user_height) {
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), pad,
+ set_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "New camera output %ux%u\n", mf->width, mf->height);
+ scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
+ scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);
+ interm_width = scale_down(rect->width, scale_cam_h);
+ interm_height = scale_down(rect->height, scale_cam_v);
+ }
+
+ /* Cache camera output window */
+ cam->width = mf->width;
+ cam->height = mf->height;
+
+ if (pcdev->image_mode) {
+ out_width = min(interm_width, icd->user_width);
+ out_height = min(interm_height, icd->user_height);
+ } else {
+ out_width = interm_width;
+ out_height = interm_height;
+ }
+
+ /*
+	 * 5. Calculate CEU scales from the camera scales obtained in (4) and
+	 *    the user window
+ */
+ scale_ceu_h = calc_scale(interm_width, &out_width);
+ scale_ceu_v = calc_scale(interm_height, &out_height);
+
+ dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+
+ /* Apply CEU scales. */
+ cflcr = scale_ceu_h | (scale_ceu_v << 16);
+ if (cflcr != pcdev->cflcr) {
+ pcdev->cflcr = cflcr;
+ ceu_write(pcdev, CFLCR, cflcr);
+ }
+
+ icd->user_width = out_width & ~3;
+ icd->user_height = out_height & ~3;
+ /* Offsets are applied at the CEU scaling filter input */
+ cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
+ cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;
+
+ /* 6. Use CEU cropping to crop to the new window. */
+ sh_mobile_ceu_set_rect(icd);
+
+ cam->subrect = *rect;
+
+ dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height,
+ cam->ceu_left, cam->ceu_top);
+
+ /* Restore capture. The CE bit can be cleared by the hardware */
+ if (pcdev->active)
+ capsr |= 1;
+ capture_restore(pcdev, capsr);
+
+ /* Even if only camera cropping succeeded */
+ return ret;
+}
+
+static int sh_mobile_ceu_get_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+
+ sel->r = cam->subrect;
+
+ return 0;
+}
+
+/* Similar to set_crop multistage iterative algorithm */
+static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ __u32 pixfmt = pix->pixelformat;
+ const struct soc_camera_format_xlate *xlate;
+ unsigned int ceu_sub_width = pcdev->max_width,
+ ceu_sub_height = pcdev->max_height;
+ u16 scale_v, scale_h;
+ int ret;
+ bool image_mode;
+ enum v4l2_field field;
+
+ switch (pix->field) {
+ default:
+ pix->field = V4L2_FIELD_NONE;
+ /* fall-through */
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_NONE:
+ field = pix->field;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ }
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ dev_warn(dev, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /* 1.-4. Calculate desired client output geometry */
+ soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12);
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ image_mode = true;
+ break;
+ default:
+ image_mode = false;
+ }
+
+ dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
+ pix->width, pix->height);
+
+ dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
+
+ /* 5. - 9. */
+ ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
+ &mf, &ceu_sub_width, &ceu_sub_height,
+ image_mode && V4L2_FIELD_NONE == field, 12);
+
+ dev_geo(dev, "5-9: client scale return %d\n", ret);
+
+ /* Done with the camera. Now see if we can improve the result */
+
+ dev_geo(dev, "fmt %ux%u, requested %ux%u\n",
+ mf.width, mf.height, pix->width, pix->height);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ /* 9. Prepare CEU crop */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
+ /* 10. Use CEU scaling to scale to the requested user window. */
+
+ /* We cannot scale up */
+ if (pix->width > ceu_sub_width)
+ ceu_sub_width = pix->width;
+
+ if (pix->height > ceu_sub_height)
+ ceu_sub_height = pix->height;
+
+ pix->colorspace = mf.colorspace;
+
+ if (image_mode) {
+ /* Scale pix->{width x height} down to width x height */
+ scale_h = calc_scale(ceu_sub_width, &pix->width);
+ scale_v = calc_scale(ceu_sub_height, &pix->height);
+ } else {
+ pix->width = ceu_sub_width;
+ pix->height = ceu_sub_height;
+ scale_h = 0;
+ scale_v = 0;
+ }
+
+ pcdev->cflcr = scale_h | (scale_v << 16);
+
+ /*
+ * We have calculated CFLCR, the actual configuration will be performed
+ * in sh_mobile_ceu_set_bus_param()
+ */
+
+ dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
+ ceu_sub_width, scale_h, pix->width,
+ ceu_sub_height, scale_v, pix->height);
+
+ cam->code = xlate->code;
+ icd->current_fmt = xlate;
+
+ pcdev->field = field;
+ pcdev->image_mode = image_mode;
+
+ /* CFSZR requirement */
+ pix->width &= ~3;
+ pix->height &= ~3;
+
+ return 0;
+}
+
+#define CEU_CHDW_MAX 8188U /* Maximum line stride */
+
+static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_mbus_framefmt *mf = &format.format;
+ __u32 pixfmt = pix->pixelformat;
+ int width, height;
+ int ret;
+
+ dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
+ pixfmt, pix->width, pix->height);
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ xlate = icd->current_fmt;
+ dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
+ pixfmt, xlate->host_fmt->fourcc);
+ pixfmt = xlate->host_fmt->fourcc;
+ pix->pixelformat = pixfmt;
+ pix->colorspace = icd->colorspace;
+ }
+
+ /* FIXME: calculate using depth and bus width */
+
+ /* CFSZR requires height and width to be 4-pixel aligned */
+ v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2,
+ &pix->height, 4, pcdev->max_height, 2, 0);
+
+ width = pix->width;
+ height = pix->height;
+
+ /* limit to sensor capabilities */
+ mf->width = pix->width;
+ mf->height = pix->height;
+ mf->field = pix->field;
+ mf->code = xlate->code;
+ mf->colorspace = pix->colorspace;
+
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ pad, set_fmt, &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf->width;
+ pix->height = mf->height;
+ pix->field = mf->field;
+ pix->colorspace = mf->colorspace;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ /* FIXME: check against rect_max after converting soc-camera */
+ /* We can scale precisely, need a bigger image from camera */
+ if (pix->width < width || pix->height < height) {
+ /*
+				 * We presume the sensor behaves sanely, i.e., if we
+				 * requested a bigger rectangle, it will not return a
+ * smaller one.
+ */
+ mf->width = pcdev->max_width;
+ mf->height = pcdev->max_height;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), pad,
+ set_fmt, &pad_cfg, &format);
+ if (ret < 0) {
+ /* Shouldn't actually happen... */
+ dev_err(icd->parent,
+ "FIXME: client try_fmt() = %d\n", ret);
+ return ret;
+ }
+ }
+ /* We will scale exactly */
+ if (mf->width > width)
+ pix->width = width;
+ if (mf->height > height)
+ pix->height = height;
+
+ pix->bytesperline = max(pix->bytesperline, pix->width);
+ pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
+ pix->bytesperline &= ~3;
+ break;
+
+ default:
+ /* Configurable stride isn't supported in pass-through mode. */
+ pix->bytesperline = 0;
+ }
+
+ pix->width &= ~3;
+ pix->height &= ~3;
+ pix->sizeimage = 0;
+
+ dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
+ __func__, ret, pix->pixelformat, pix->width, pix->height);
+
+ return ret;
+}
+
+static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ u32 out_width = icd->user_width, out_height = icd->user_height;
+ int ret;
+
+ /* Freeze queue */
+ pcdev->frozen = 1;
+ /* Wait for frame */
+ ret = wait_for_completion_interruptible(&pcdev->complete);
+ /* Stop the client */
+ ret = v4l2_subdev_call(sd, video, s_stream, 0);
+ if (ret < 0)
+ dev_warn(icd->parent,
+ "Client failed to stop the stream: %d\n", ret);
+ else
+		/* Do the crop; if it fails, there's nothing more we can do */
+ sh_mobile_ceu_set_selection(icd, sel);
+
+ dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
+
+ if (icd->user_width != out_width || icd->user_height != out_height) {
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = out_width,
+ .height = out_height,
+ .pixelformat = icd->current_fmt->host_fmt->fourcc,
+ .field = pcdev->field,
+ .colorspace = icd->colorspace,
+ },
+ };
+ ret = sh_mobile_ceu_set_fmt(icd, &f);
+ if (!ret && (out_width != f.fmt.pix.width ||
+ out_height != f.fmt.pix.height))
+ ret = -EINVAL;
+ if (!ret) {
+ icd->user_width = out_width & ~3;
+ icd->user_height = out_height & ~3;
+ ret = sh_mobile_ceu_set_bus_param(icd);
+ }
+ }
+
+ /* Thaw the queue */
+ pcdev->frozen = 0;
+ spin_lock_irq(&pcdev->lock);
+ sh_mobile_ceu_capture(pcdev);
+ spin_unlock_irq(&pcdev->lock);
+ /* Start the client */
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ return ret;
+}
+
+static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
+ strlcpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver));
+ strlcpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
+ struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = icd;
+ q->ops = &sh_mobile_ceu_videobuf_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ici->host_lock;
+ q->dev = ici->v4l2_dev.dev;
+
+ return vb2_queue_init(q);
+}
+
+static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
+ .owner = THIS_MODULE,
+ .add = sh_mobile_ceu_add_device,
+ .remove = sh_mobile_ceu_remove_device,
+ .clock_start = sh_mobile_ceu_clock_start,
+ .clock_stop = sh_mobile_ceu_clock_stop,
+ .get_formats = sh_mobile_ceu_get_formats,
+ .put_formats = sh_mobile_ceu_put_formats,
+ .get_selection = sh_mobile_ceu_get_selection,
+ .set_selection = sh_mobile_ceu_set_selection,
+ .set_liveselection = sh_mobile_ceu_set_liveselection,
+ .set_fmt = sh_mobile_ceu_set_fmt,
+ .try_fmt = sh_mobile_ceu_try_fmt,
+ .poll = sh_mobile_ceu_poll,
+ .querycap = sh_mobile_ceu_querycap,
+ .set_bus_param = sh_mobile_ceu_set_bus_param,
+ .init_videobuf2 = sh_mobile_ceu_init_videobuf,
+};
+
+struct bus_wait {
+ struct notifier_block notifier;
+ struct completion completion;
+ struct device *dev;
+};
+
+static int bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct bus_wait *wait = container_of(nb, struct bus_wait, notifier);
+
+ if (wait->dev != dev)
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ /* Protect from module unloading */
+ wait_for_completion(&wait->completion);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static int sh_mobile_ceu_probe(struct platform_device *pdev)
+{
+ struct sh_mobile_ceu_dev *pcdev;
+ struct resource *res;
+ void __iomem *base;
+ unsigned int irq;
+ int err;
+ struct bus_wait wait = {
+ .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
+ .notifier.notifier_call = bus_notify,
+ };
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || (int)irq <= 0) {
+ dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
+ return -ENODEV;
+ }
+
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+ init_completion(&pcdev->complete);
+
+ pcdev->pdata = pdev->dev.platform_data;
+ if (!pcdev->pdata && !pdev->dev.of_node) {
+ dev_err(&pdev->dev, "CEU platform data not set.\n");
+ return -EINVAL;
+ }
+
+ /* TODO: implement per-device bus flags */
+ if (pcdev->pdata) {
+ pcdev->max_width = pcdev->pdata->max_width;
+ pcdev->max_height = pcdev->pdata->max_height;
+ pcdev->flags = pcdev->pdata->flags;
+ }
+ pcdev->field = V4L2_FIELD_NONE;
+
+ if (!pcdev->max_width) {
+ unsigned int v;
+ err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v);
+ if (!err)
+ pcdev->max_width = v;
+
+ if (!pcdev->max_width)
+ pcdev->max_width = 2560;
+ }
+ if (!pcdev->max_height) {
+ unsigned int v;
+ err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v);
+ if (!err)
+ pcdev->max_height = v;
+
+ if (!pcdev->max_height)
+ pcdev->max_height = 1920;
+ }
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pcdev->irq = irq;
+ pcdev->base = base;
+ pcdev->video_limit = 0; /* only enabled if second resource exists */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ err = dma_declare_coherent_memory(&pdev->dev, res->start,
+ res->start,
+ resource_size(res),
+ DMA_MEMORY_EXCLUSIVE);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
+ return err;
+ }
+
+ pcdev->video_limit = resource_size(res);
+ }
+
+ /* request irq */
+ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
+ 0, dev_name(&pdev->dev), pcdev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
+ goto exit_release_mem;
+ }
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ pcdev->ici.priv = pcdev;
+ pcdev->ici.v4l2_dev.dev = &pdev->dev;
+ pcdev->ici.nr = pdev->id;
+ pcdev->ici.drv_name = dev_name(&pdev->dev);
+ pcdev->ici.ops = &sh_mobile_ceu_host_ops;
+ pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
+
+ if (pcdev->pdata && pcdev->pdata->asd_sizes) {
+ pcdev->ici.asd = pcdev->pdata->asd;
+ pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes;
+ }
+
+ err = soc_camera_host_register(&pcdev->ici);
+ if (err)
+ goto exit_free_clk;
+
+ return 0;
+
+exit_free_clk:
+ pm_runtime_disable(&pdev->dev);
+exit_release_mem:
+ if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
+ dma_release_declared_memory(&pdev->dev);
+ return err;
+}
+
+static int sh_mobile_ceu_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+
+ soc_camera_host_unregister(soc_host);
+ pm_runtime_disable(&pdev->dev);
+ if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
+ dma_release_declared_memory(&pdev->dev);
+
+ return 0;
+}
+
+static int sh_mobile_ceu_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
+ .runtime_suspend = sh_mobile_ceu_runtime_nop,
+ .runtime_resume = sh_mobile_ceu_runtime_nop,
+};
+
+static const struct of_device_id sh_mobile_ceu_of_match[] = {
+ { .compatible = "renesas,sh-mobile-ceu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match);
+
+static struct platform_driver sh_mobile_ceu_driver = {
+ .driver = {
+ .name = "sh_mobile_ceu",
+ .pm = &sh_mobile_ceu_dev_pm_ops,
+ .of_match_table = sh_mobile_ceu_of_match,
+ },
+ .probe = sh_mobile_ceu_probe,
+ .remove = sh_mobile_ceu_remove,
+};
+
+module_platform_driver(sh_mobile_ceu_driver);
+
+MODULE_DESCRIPTION("SuperH Mobile CEU driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.0");
+MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
new file mode 100644
index 000000000..66d613629
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -0,0 +1,2154 @@
+/*
+ * camera image capture (abstract) bus driver
+ *
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This driver provides an interface between platform-specific camera
+ * busses and camera devices. It should be used if the camera is
+ * connected not over a "proper" bus like PCI or USB, but over a
+ * special bus, like, for example, the Quick Capture interface on PXA270
+ * SoCs. Later it should also be used for i.MX31 SoCs from Freescale.
+ * It can handle multiple cameras and / or multiple busses, which can
+ * be used, e.g., in stereo-vision applications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/soc_camera.h>
+#include <media/drv-intf/soc_mediabus.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-clk.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fwnode.h>
+#include <media/videobuf2-v4l2.h>
+
+/* Default to VGA resolution */
+#define DEFAULT_WIDTH 640
+#define DEFAULT_HEIGHT 480
+
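+/* Bitmap of device numbers in use; at most MAP_MAX_NUM devices can be bound */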
+#define MAP_MAX_NUM 32
+static DECLARE_BITMAP(device_map, MAP_MAX_NUM);
+static LIST_HEAD(hosts);
+static LIST_HEAD(devices);
+/*
+ * Protects lists and bitmaps of hosts and devices.
+ * Lock nesting: Ok to take ->host_lock under list_lock.
+ */
+static DEFINE_MUTEX(list_lock);
+
+struct soc_camera_async_client {
+ struct v4l2_async_subdev *sensor;
+ struct v4l2_async_notifier notifier;
+ struct platform_device *pdev;
+ struct list_head list; /* needed for clean up */
+};
+
+static int soc_camera_video_start(struct soc_camera_device *icd);
+static int video_dev_create(struct soc_camera_device *icd);
+
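+/*
+ * Exported helpers to power a camera client up / down. When the sub-device
+ * descriptor marks power handling as unbalanced, bit 0 of ssdd->clock_state
+ * ensures the master clock is only enabled on the first power-on and
+ * disabled on the matching power-off, while regulators and the platform
+ * .power() hook are toggled on every call.
+ */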
+int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
+ struct v4l2_clk *clk)
+{
+ int ret;
+ bool clock_toggle;
+
+ if (clk && (!ssdd->unbalanced_power ||
+ !test_and_set_bit(0, &ssdd->clock_state))) {
+ ret = v4l2_clk_enable(clk);
+ if (ret < 0) {
+ dev_err(dev, "Cannot enable clock: %d\n", ret);
+ return ret;
+ }
+ clock_toggle = true;
+ } else {
+ clock_toggle = false;
+ }
+
+ ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
+ if (ret < 0) {
+ dev_err(dev, "Cannot enable regulators\n");
+ goto eregenable;
+ }
+
+ if (ssdd->power) {
+ ret = ssdd->power(dev, 1);
+ if (ret < 0) {
+ dev_err(dev,
+ "Platform failed to power-on the camera.\n");
+ goto epwron;
+ }
+ }
+
+ return 0;
+
+epwron:
+ regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
+eregenable:
+ if (clock_toggle)
+ v4l2_clk_disable(clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_power_on);
+
+int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd,
+ struct v4l2_clk *clk)
+{
+ int ret = 0;
+ int err;
+
+ if (ssdd->power) {
+ err = ssdd->power(dev, 0);
+ if (err < 0) {
+ dev_err(dev,
+ "Platform failed to power-off the camera.\n");
+ ret = err;
+ }
+ }
+
+ err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
+ if (err < 0) {
+ dev_err(dev, "Cannot disable regulators\n");
+ ret = ret ? : err;
+ }
+
+ if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state)))
+ v4l2_clk_disable(clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_power_off);
+
+int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd)
+{
+ /* Should not have any effect in synchronous case */
+ return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
+}
+EXPORT_SYMBOL(soc_camera_power_init);
+
+static int __soc_camera_power_on(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+static int __soc_camera_power_off(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+static int soc_camera_clock_start(struct soc_camera_host *ici)
+{
+ int ret;
+
+ if (!ici->ops->clock_start)
+ return 0;
+
+ mutex_lock(&ici->clk_lock);
+ ret = ici->ops->clock_start(ici);
+ mutex_unlock(&ici->clk_lock);
+
+ return ret;
+}
+
+static void soc_camera_clock_stop(struct soc_camera_host *ici)
+{
+ if (!ici->ops->clock_stop)
+ return;
+
+ mutex_lock(&ici->clk_lock);
+ ici->ops->clock_stop(ici);
+ mutex_unlock(&ici->clk_lock);
+}
+
+const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
+ struct soc_camera_device *icd, unsigned int fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < icd->num_user_formats; i++)
+ if (icd->user_formats[i].host_fmt->fourcc == fourcc)
+ return icd->user_formats + i;
+ return NULL;
+}
+EXPORT_SYMBOL(soc_camera_xlate_by_fourcc);
+
+/**
+ * soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
+ * @ssdd: camera platform parameters
+ * @cfg: media bus configuration
+ * @return: resulting flags
+ */
+unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd,
+ const struct v4l2_mbus_config *cfg)
+{
+ unsigned long f, flags = cfg->flags;
+
+ /* If only one of the two polarities is supported, switch to the opposite */
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_HSYNC) {
+ f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_VSYNC) {
+ f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_PCLK) {
+ f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ }
+
+ return flags;
+}
+EXPORT_SYMBOL(soc_camera_apply_board_flags);
+
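+/* Expand a fourcc into its four characters for "%c%c%c%c" format strings */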
+#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
+ ((x) >> 24) & 0xff
+
+static int soc_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
+ pixfmtstr(pix->pixelformat), pix->width, pix->height);
+
+ if (pix->pixelformat != V4L2_PIX_FMT_JPEG &&
+ !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
+ pix->bytesperline = 0;
+ pix->sizeimage = 0;
+ }
+
+ ret = ici->ops->try_fmt(icd, f);
+ if (ret < 0)
+ return ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate)
+ return -EINVAL;
+
+ ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ pix->bytesperline = max_t(u32, pix->bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+ pix->height);
+ if (ret < 0)
+ return ret;
+
+ pix->sizeimage = max_t(u32, pix->sizeimage, ret);
+
+ return 0;
+}
+
+static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ WARN_ON(priv != file->private_data);
+
+ /* Only single-plane capture is supported so far */
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* limit format to hardware capabilities */
+ return soc_camera_try_fmt(icd, f);
+}
+
+static int soc_camera_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ if (inp->index != 0)
+ return -EINVAL;
+
+ /* default is camera */
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = icd->vdev->tvnorms;
+ strcpy(inp->name, "Camera");
+
+ return 0;
+}
+
+static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int soc_camera_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, video, s_std, a);
+}
+
+static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, video, g_std, a);
+}
+
+static int soc_camera_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ return ici->ops->enum_framesizes(icd, fsize);
+}
+
+static int soc_camera_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ int ret;
+ struct soc_camera_device *icd = file->private_data;
+
+ WARN_ON(priv != file->private_data);
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ ret = vb2_reqbufs(&icd->vb2_vidq, p);
+ if (!ret)
+ icd->streamer = p->count ? file : NULL;
+ return ret;
+}
+
+static int soc_camera_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ WARN_ON(priv != file->private_data);
+
+ return vb2_querybuf(&icd->vb2_vidq, p);
+}
+
+static int soc_camera_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ WARN_ON(priv != file->private_data);
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ return vb2_qbuf(&icd->vb2_vidq, p);
+}
+
+static int soc_camera_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ WARN_ON(priv != file->private_data);
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK);
+}
+
+static int soc_camera_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct soc_camera_device *icd = file->private_data;
+ int ret;
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ ret = vb2_create_bufs(&icd->vb2_vidq, create);
+ if (!ret)
+ icd->streamer = file;
+ return ret;
+}
+
+static int soc_camera_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_prepare_buf(&icd->vb2_vidq, b);
+}
+
+static int soc_camera_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+ return vb2_expbuf(&icd->vb2_vidq, p);
+}
+
+/* Always entered with .host_lock held */
+static int soc_camera_init_user_formats(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ unsigned int i, fmts = 0, raw_fmts = 0;
+ int ret;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ while (!v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code)) {
+ raw_fmts++;
+ code.index++;
+ }
+
+ if (!ici->ops->get_formats)
+ /*
+ * Fallback mode - the host will have to serve all
+ * sensor-provided formats one-to-one to the user
+ */
+ fmts = raw_fmts;
+ else
+ /*
+ * First pass - only count formats this host-sensor
+ * configuration can provide
+ */
+ for (i = 0; i < raw_fmts; i++) {
+ ret = ici->ops->get_formats(icd, i, NULL);
+ if (ret < 0)
+ return ret;
+ fmts += ret;
+ }
+
+ if (!fmts)
+ return -ENXIO;
+
+ icd->user_formats =
+ vmalloc(array_size(fmts,
+ sizeof(struct soc_camera_format_xlate)));
+ if (!icd->user_formats)
+ return -ENOMEM;
+
+ dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts);
+
+ /* Second pass - actually fill data formats */
+ fmts = 0;
+ for (i = 0; i < raw_fmts; i++)
+ if (!ici->ops->get_formats) {
+ code.index = i;
+ v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
+ icd->user_formats[fmts].host_fmt =
+ soc_mbus_get_fmtdesc(code.code);
+ if (icd->user_formats[fmts].host_fmt)
+ icd->user_formats[fmts++].code = code.code;
+ } else {
+ ret = ici->ops->get_formats(icd, i,
+ &icd->user_formats[fmts]);
+ if (ret < 0)
+ goto egfmt;
+ fmts += ret;
+ }
+
+ icd->num_user_formats = fmts;
+ icd->current_fmt = &icd->user_formats[0];
+
+ return 0;
+
+egfmt:
+ vfree(icd->user_formats);
+ return ret;
+}
+
+/* Always entered with .host_lock held */
+static void soc_camera_free_user_formats(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (ici->ops->put_formats)
+ ici->ops->put_formats(icd);
+ icd->current_fmt = NULL;
+ icd->num_user_formats = 0;
+ vfree(icd->user_formats);
+ icd->user_formats = NULL;
+}
+
+/* Called with .vb_lock held, or from the first open(2), see comment there */
+static int soc_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
+ pixfmtstr(pix->pixelformat), pix->width, pix->height);
+
+ /* We always call try_fmt() before set_fmt() or set_selection() */
+ ret = soc_camera_try_fmt(icd, f);
+ if (ret < 0)
+ return ret;
+
+ ret = ici->ops->set_fmt(icd, f);
+ if (ret < 0) {
+ return ret;
+ } else if (!icd->current_fmt ||
+ icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
+ dev_err(icd->pdev,
+ "Host driver hasn't set up current format correctly!\n");
+ return -EINVAL;
+ }
+
+ icd->user_width = pix->width;
+ icd->user_height = pix->height;
+ icd->bytesperline = pix->bytesperline;
+ icd->sizeimage = pix->sizeimage;
+ icd->colorspace = pix->colorspace;
+ icd->field = pix->field;
+
+ dev_dbg(icd->pdev, "set width: %d height: %d\n",
+ icd->user_width, icd->user_height);
+
+ /* set physical bus parameters */
+ return ici->ops->set_bus_param(icd);
+}
+
+static int soc_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int ret;
+
+ if (ici->icd)
+ return -EBUSY;
+
+ if (!icd->clk) {
+ ret = soc_camera_clock_start(ici);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ici->ops->add) {
+ ret = ici->ops->add(icd);
+ if (ret < 0)
+ goto eadd;
+ }
+
+ ici->icd = icd;
+
+ return 0;
+
+eadd:
+ if (!icd->clk)
+ soc_camera_clock_stop(ici);
+ return ret;
+}
+
+static void soc_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (WARN_ON(icd != ici->icd))
+ return;
+
+ if (ici->ops->remove)
+ ici->ops->remove(icd);
+ if (!icd->clk)
+ soc_camera_clock_stop(ici);
+ ici->icd = NULL;
+}
+
+static int soc_camera_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct soc_camera_device *icd;
+ struct soc_camera_host *ici;
+ int ret;
+
+ /*
+ * Don't mess with the host during probe: wait until the loop in
+ * scan_add_host() completes. Also protect against a race with
+ * soc_camera_host_unregister().
+ */
+ if (mutex_lock_interruptible(&list_lock))
+ return -ERESTARTSYS;
+
+ if (!vdev || !video_is_registered(vdev)) {
+ mutex_unlock(&list_lock);
+ return -ENODEV;
+ }
+
+ icd = video_get_drvdata(vdev);
+ ici = to_soc_camera_host(icd->parent);
+
+ ret = try_module_get(ici->ops->owner) ? 0 : -ENODEV;
+ mutex_unlock(&list_lock);
+
+ if (ret < 0) {
+ dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
+ return ret;
+ }
+
+ if (!to_soc_camera_control(icd)) {
+ /* No device driver attached */
+ ret = -ENODEV;
+ goto econtrol;
+ }
+
+ if (mutex_lock_interruptible(&ici->host_lock)) {
+ ret = -ERESTARTSYS;
+ goto elockhost;
+ }
+ icd->use_count++;
+
+ /* Now we really have to activate the camera */
+ if (icd->use_count == 1) {
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
+ /* Restore parameters before the last close() per V4L2 API */
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = icd->user_width,
+ .height = icd->user_height,
+ .field = icd->field,
+ .colorspace = icd->colorspace,
+ .pixelformat =
+ icd->current_fmt->host_fmt->fourcc,
+ },
+ };
+
+ /* The camera could already have been on, try to reset */
+ if (sdesc->subdev_desc.reset)
+ if (icd->control)
+ sdesc->subdev_desc.reset(icd->control);
+
+ ret = soc_camera_add_device(icd);
+ if (ret < 0) {
+ dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
+ goto eiciadd;
+ }
+
+ ret = __soc_camera_power_on(icd);
+ if (ret < 0)
+ goto epower;
+
+ pm_runtime_enable(&icd->vdev->dev);
+ ret = pm_runtime_resume(&icd->vdev->dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto eresume;
+
+ /*
+ * Try to configure with default parameters. Note: this is the
+ * very first open, so we cannot race against other calls,
+ * apart from someone else calling open() simultaneously, and
+ * .host_lock protects us against that.
+ */
+ ret = soc_camera_set_fmt(icd, &f);
+ if (ret < 0)
+ goto esfmt;
+
+ ret = ici->ops->init_videobuf2(&icd->vb2_vidq, icd);
+ if (ret < 0)
+ goto einitvb;
+ v4l2_ctrl_handler_setup(&icd->ctrl_handler);
+ }
+ mutex_unlock(&ici->host_lock);
+
+ file->private_data = icd;
+ dev_dbg(icd->pdev, "camera device open\n");
+
+ return 0;
+
+ /*
+ * All errors are entered with the .host_lock held, first four also
+ * with use_count == 1
+ */
+einitvb:
+esfmt:
+ pm_runtime_disable(&icd->vdev->dev);
+eresume:
+ __soc_camera_power_off(icd);
+epower:
+ soc_camera_remove_device(icd);
+eiciadd:
+ icd->use_count--;
+ mutex_unlock(&ici->host_lock);
+elockhost:
+econtrol:
+ module_put(ici->ops->owner);
+
+ return ret;
+}
+
+static int soc_camera_close(struct file *file)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ mutex_lock(&ici->host_lock);
+ if (icd->streamer == file) {
+ if (ici->ops->init_videobuf2)
+ vb2_queue_release(&icd->vb2_vidq);
+ icd->streamer = NULL;
+ }
+ icd->use_count--;
+ if (!icd->use_count) {
+ pm_runtime_suspend(&icd->vdev->dev);
+ pm_runtime_disable(&icd->vdev->dev);
+
+ __soc_camera_power_off(icd);
+
+ soc_camera_remove_device(icd);
+ }
+
+ mutex_unlock(&ici->host_lock);
+
+ module_put(ici->ops->owner);
+
+ dev_dbg(icd->pdev, "camera device close\n");
+
+ return 0;
+}
+
+static ssize_t soc_camera_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ dev_dbg(icd->pdev, "read called, buf %p\n", buf);
+
+ if (ici->ops->init_videobuf2 && icd->vb2_vidq.io_modes & VB2_READ)
+ return vb2_read(&icd->vb2_vidq, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+
+ dev_err(icd->pdev, "camera device read not implemented\n");
+
+ return -EINVAL;
+}
+
+static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int err;
+
+ dev_dbg(icd->pdev, "mmap called, vma=%p\n", vma);
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&ici->host_lock))
+ return -ERESTARTSYS;
+ err = vb2_mmap(&icd->vb2_vidq, vma);
+ mutex_unlock(&ici->host_lock);
+
+ dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ (unsigned long)vma->vm_start,
+ (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
+ err);
+
+ return err;
+}
+
+static __poll_t soc_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ __poll_t res = EPOLLERR;
+
+ if (icd->streamer != file)
+ return EPOLLERR;
+
+ mutex_lock(&ici->host_lock);
+ res = ici->ops->poll(file, pt);
+ mutex_unlock(&ici->host_lock);
+ return res;
+}
+
+static const struct v4l2_file_operations soc_camera_fops = {
+ .owner = THIS_MODULE,
+ .open = soc_camera_open,
+ .release = soc_camera_close,
+ .unlocked_ioctl = video_ioctl2,
+ .read = soc_camera_read,
+ .mmap = soc_camera_mmap,
+ .poll = soc_camera_poll,
+};
+
+static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+ int ret;
+
+ WARN_ON(priv != file->private_data);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type);
+ return -EINVAL;
+ }
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ if (vb2_is_streaming(&icd->vb2_vidq)) {
+ dev_err(icd->pdev, "S_FMT denied: queue initialised\n");
+ return -EBUSY;
+ }
+
+ ret = soc_camera_set_fmt(icd, f);
+
+ if (!ret && !icd->streamer)
+ icd->streamer = file;
+
+ return ret;
+}
+
+static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+ const struct soc_mbus_pixelfmt *format;
+
+ WARN_ON(priv != file->private_data);
+
+ if (f->index >= icd->num_user_formats)
+ return -EINVAL;
+
+ format = icd->user_formats[f->index].host_fmt;
+
+ if (format->name)
+ strlcpy(f->description, format->name, sizeof(f->description));
+ f->pixelformat = format->fourcc;
+ return 0;
+}
+
+static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ WARN_ON(priv != file->private_data);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ pix->width = icd->user_width;
+ pix->height = icd->user_height;
+ pix->bytesperline = icd->bytesperline;
+ pix->sizeimage = icd->sizeimage;
+ pix->field = icd->field;
+ pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
+ pix->colorspace = icd->colorspace;
+ dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n",
+ icd->current_fmt->host_fmt->fourcc);
+ return 0;
+}
+
+static int soc_camera_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ WARN_ON(priv != file->private_data);
+
+ strlcpy(cap->driver, ici->drv_name, sizeof(cap->driver));
+ return ici->ops->querycap(ici, cap);
+}
+
+static int soc_camera_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret;
+
+ WARN_ON(priv != file->private_data);
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ /* This calls buf_queue from host driver's videobuf2_queue_ops */
+ ret = vb2_streamon(&icd->vb2_vidq, i);
+ if (!ret)
+ v4l2_subdev_call(sd, video, s_stream, 1);
+
+ return ret;
+}
+
+static int soc_camera_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret;
+
+ WARN_ON(priv != file->private_data);
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ /*
+ * This calls buf_release from host driver's videobuf2_queue_ops for all
+ * remaining buffers. When the last buffer is freed, stop capture
+ */
+ ret = vb2_streamoff(&icd->vb2_vidq, i);
+
+ v4l2_subdev_call(sd, video, s_stream, 0);
+
+ return ret;
+}
+
+static int soc_camera_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ /* With a wrong type no need to try to fall back to cropping */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return ici->ops->get_selection(icd, s);
+}
+
+static int soc_camera_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int ret;
+
+ /* In all these cases cropping emulation will not help */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ (s->target != V4L2_SEL_TGT_COMPOSE &&
+ s->target != V4L2_SEL_TGT_CROP))
+ return -EINVAL;
+
+ if (s->target == V4L2_SEL_TGT_COMPOSE) {
+ /* No output size change during a running capture! */
+ if (vb2_is_streaming(&icd->vb2_vidq) &&
+ (icd->user_width != s->r.width ||
+ icd->user_height != s->r.height))
+ return -EBUSY;
+
+ /*
+ * Only one user is allowed to change the output format, touch
+ * buffers, start / stop streaming, poll for data
+ */
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+ }
+
+ if (s->target == V4L2_SEL_TGT_CROP &&
+ vb2_is_streaming(&icd->vb2_vidq) &&
+ ici->ops->set_liveselection)
+ ret = ici->ops->set_liveselection(icd, s);
+ else
+ ret = ici->ops->set_selection(icd, s);
+ if (!ret &&
+ s->target == V4L2_SEL_TGT_COMPOSE) {
+ icd->user_width = s->r.width;
+ icd->user_height = s->r.height;
+ if (!icd->streamer)
+ icd->streamer = file;
+ }
+
+ return ret;
+}
+
+static int soc_camera_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (ici->ops->get_parm)
+ return ici->ops->get_parm(icd, a);
+
+ return -ENOIOCTLCMD;
+}
+
+static int soc_camera_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (ici->ops->set_parm)
+ return ici->ops->set_parm(icd, a);
+
+ return -ENOIOCTLCMD;
+}
+
+static int soc_camera_probe(struct soc_camera_host *ici,
+ struct soc_camera_device *icd);
+
+/* So far this function cannot fail */
+static void scan_add_host(struct soc_camera_host *ici)
+{
+ struct soc_camera_device *icd;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(icd, &devices, list)
+ if (icd->iface == ici->nr) {
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
+ struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc;
+
+ /* The camera could already have been on, try to reset */
+ if (ssdd->reset)
+ if (icd->control)
+ ssdd->reset(icd->control);
+
+ icd->parent = ici->v4l2_dev.dev;
+
+ /* Ignore errors */
+ soc_camera_probe(ici, icd);
+ }
+
+ mutex_unlock(&list_lock);
+}
+
+/*
+ * After a successful probe it is invalid to call v4l2_clk_enable()
+ * asynchronously, outside of V4L2 operations, i.e. with .host_lock not held.
+ */
+static int soc_camera_clk_enable(struct v4l2_clk *clk)
+{
+ struct soc_camera_device *icd = clk->priv;
+ struct soc_camera_host *ici;
+
+ if (!icd || !icd->parent)
+ return -ENODEV;
+
+ ici = to_soc_camera_host(icd->parent);
+
+ if (!try_module_get(ici->ops->owner))
+ return -ENODEV;
+
+ /*
+ * If a different client is currently being probed, the host will
+ * refuse the request
+ */
+ return soc_camera_clock_start(ici);
+}
+
+static void soc_camera_clk_disable(struct v4l2_clk *clk)
+{
+ struct soc_camera_device *icd = clk->priv;
+ struct soc_camera_host *ici;
+
+ if (!icd || !icd->parent)
+ return;
+
+ ici = to_soc_camera_host(icd->parent);
+
+ soc_camera_clock_stop(ici);
+
+ module_put(ici->ops->owner);
+}
+
+/*
+ * Eventually, it would be more logical to make the respective host the clock
+ * owner, but then we would have to copy this struct for each ici. Besides, it
+ * would introduce a circular dependency problem, unless we port all client
+ * drivers to release the clock when not in use.
+ */
+static const struct v4l2_clk_ops soc_camera_clk_ops = {
+ .owner = THIS_MODULE,
+ .enable = soc_camera_clk_enable,
+ .disable = soc_camera_clk_disable,
+};
+
+static int soc_camera_dyn_pdev(struct soc_camera_desc *sdesc,
+ struct soc_camera_async_client *sasc)
+{
+ struct platform_device *pdev;
+ int ret, i;
+
+ mutex_lock(&list_lock);
+ i = find_first_zero_bit(device_map, MAP_MAX_NUM);
+ if (i < MAP_MAX_NUM)
+ set_bit(i, device_map);
+ mutex_unlock(&list_lock);
+ if (i >= MAP_MAX_NUM)
+ return -ENOMEM;
+
+ pdev = platform_device_alloc("soc-camera-pdrv", i);
+ if (!pdev)
+ return -ENOMEM;
+
+ ret = platform_device_add_data(pdev, sdesc, sizeof(*sdesc));
+ if (ret < 0) {
+ platform_device_put(pdev);
+ return ret;
+ }
+
+ sasc->pdev = pdev;
+
+ return 0;
+}
+
+static struct soc_camera_device *soc_camera_add_pdev(struct soc_camera_async_client *sasc)
+{
+ struct platform_device *pdev = sasc->pdev;
+ int ret;
+
+ ret = platform_device_add(pdev);
+ if (ret < 0 || !pdev->dev.driver)
+ return NULL;
+
+ return platform_get_drvdata(pdev);
+}
+
+/* Locking: called with .host_lock held */
+static int soc_camera_probe_finish(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &fmt.format;
+ int ret;
+
+ sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(sd, icd);
+
+ v4l2_subdev_call(sd, video, g_tvnorms, &icd->vdev->tvnorms);
+
+ ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = soc_camera_add_device(icd);
+ if (ret < 0) {
+ dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
+ return ret;
+ }
+
+ /* At this point client .probe() should have run already */
+ ret = soc_camera_init_user_formats(icd);
+ if (ret < 0)
+ goto eusrfmt;
+
+ icd->field = V4L2_FIELD_ANY;
+
+ ret = soc_camera_video_start(icd);
+ if (ret < 0)
+ goto evidstart;
+
+ /* Try to improve our guess of a reasonable window format */
+ if (!v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt)) {
+ icd->user_width = mf->width;
+ icd->user_height = mf->height;
+ icd->colorspace = mf->colorspace;
+ icd->field = mf->field;
+ }
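+ /* Probing done: deactivate the camera again until the first open() */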
+ soc_camera_remove_device(icd);
+
+ return 0;
+
+evidstart:
+ soc_camera_free_user_formats(icd);
+eusrfmt:
+ soc_camera_remove_device(icd);
+
+ return ret;
+}
+
+#ifdef CONFIG_I2C_BOARDINFO
+static int soc_camera_i2c_init(struct soc_camera_device *icd,
+ struct soc_camera_desc *sdesc)
+{
+ struct soc_camera_subdev_desc *ssdd;
+ struct i2c_client *client;
+ struct soc_camera_host *ici;
+ struct soc_camera_host_desc *shd = &sdesc->host_desc;
+ struct i2c_adapter *adap;
+ struct v4l2_subdev *subdev;
+ char clk_name[V4L2_CLK_NAME_SIZE];
+ int ret;
+
+ /* First find out how we link the main client */
+ if (icd->sasc) {
+ /* Async non-OF probing handled by the subdevice list */
+ return -EPROBE_DEFER;
+ }
+
+ ici = to_soc_camera_host(icd->parent);
+ adap = i2c_get_adapter(shd->i2c_adapter_id);
+ if (!adap) {
+ dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
+ shd->i2c_adapter_id);
+ return -ENODEV;
+ }
+
+ ssdd = kmemdup(&sdesc->subdev_desc, sizeof(*ssdd), GFP_KERNEL);
+ if (!ssdd) {
+ ret = -ENOMEM;
+ goto ealloc;
+ }
+ /*
+ * In the synchronous case we request regulators ourselves in
+ * soc_camera_pdrv_probe(); make sure the subdevice driver doesn't try
+ * to allocate them again.
+ */
+ ssdd->sd_pdata.num_regulators = 0;
+ ssdd->sd_pdata.regulators = NULL;
+ shd->board_info->platform_data = ssdd;
+
+ v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+ shd->i2c_adapter_id, shd->board_info->addr);
+
+ icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd);
+ if (IS_ERR(icd->clk)) {
+ ret = PTR_ERR(icd->clk);
+ goto eclkreg;
+ }
+
+ subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
+ shd->board_info, NULL);
+ if (!subdev) {
+ ret = -ENODEV;
+ goto ei2cnd;
+ }
+
+ client = v4l2_get_subdevdata(subdev);
+
+ /* Use to_i2c_client(dev) to recover the i2c client */
+ icd->control = &client->dev;
+
+ return 0;
+ei2cnd:
+ v4l2_clk_unregister(icd->clk);
+ icd->clk = NULL;
+eclkreg:
+ kfree(ssdd);
+ealloc:
+ i2c_put_adapter(adap);
+ return ret;
+}
+
+static void soc_camera_i2c_free(struct soc_camera_device *icd)
+{
+ struct i2c_client *client =
+ to_i2c_client(to_soc_camera_control(icd));
+ struct i2c_adapter *adap;
+ struct soc_camera_subdev_desc *ssdd;
+
+ icd->control = NULL;
+ if (icd->sasc)
+ return;
+
+ adap = client->adapter;
+ ssdd = client->dev.platform_data;
+ v4l2_device_unregister_subdev(i2c_get_clientdata(client));
+ i2c_unregister_device(client);
+ i2c_put_adapter(adap);
+ kfree(ssdd);
+ v4l2_clk_unregister(icd->clk);
+ icd->clk = NULL;
+}
+
+/*
+ * V4L2 asynchronous notifier callbacks. They are all called under a v4l2-async
+ * internal global mutex, therefore cannot race against other asynchronous
+ * events. Until notifier->complete() (soc_camera_async_complete()) is called,
+ * the video device node is not registered and no V4L fops can occur. Unloading
+ * of the host driver also calls a v4l2-async function, so also there we're
+ * protected.
+ */
+static int soc_camera_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct soc_camera_async_client *sasc = container_of(notifier,
+ struct soc_camera_async_client, notifier);
+ struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev);
+
+ if (asd == sasc->sensor && !WARN_ON(icd->control)) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /*
+ * Only now do we get subdevice-specific information like
+ * regulators, flags, callbacks, etc.
+ */
+ if (client) {
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
+ struct soc_camera_subdev_desc *ssdd =
+ soc_camera_i2c_to_desc(client);
+ if (ssdd) {
+ memcpy(&sdesc->subdev_desc, ssdd,
+ sizeof(sdesc->subdev_desc));
+ if (ssdd->reset)
+ ssdd->reset(&client->dev);
+ }
+
+ icd->control = &client->dev;
+ }
+ }
+
+ return 0;
+}
+
+static void soc_camera_async_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct soc_camera_async_client *sasc = container_of(notifier,
+ struct soc_camera_async_client, notifier);
+ struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev);
+
+ icd->control = NULL;
+
+ if (icd->clk) {
+ v4l2_clk_unregister(icd->clk);
+ icd->clk = NULL;
+ }
+}
+
+static int soc_camera_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct soc_camera_async_client *sasc = container_of(notifier,
+ struct soc_camera_async_client, notifier);
+ struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev);
+
+ if (to_soc_camera_control(icd)) {
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int ret;
+
+ mutex_lock(&list_lock);
+ ret = soc_camera_probe(ici, icd);
+ mutex_unlock(&list_lock);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations soc_camera_async_ops = {
+ .bound = soc_camera_async_bound,
+ .unbind = soc_camera_async_unbind,
+ .complete = soc_camera_async_complete,
+};
+
+static int scan_async_group(struct soc_camera_host *ici,
+ struct v4l2_async_subdev **asd, unsigned int size)
+{
+ struct soc_camera_async_subdev *sasd;
+ struct soc_camera_async_client *sasc;
+ struct soc_camera_device *icd;
+ struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,};
+ char clk_name[V4L2_CLK_NAME_SIZE];
+ unsigned int i;
+ int ret;
+
+ /* First look for a sensor */
+ for (i = 0; i < size; i++) {
+ sasd = container_of(asd[i], struct soc_camera_async_subdev, asd);
+ if (sasd->role == SOCAM_SUBDEV_DATA_SOURCE)
+ break;
+ }
+
+ if (i >= size || asd[i]->match_type != V4L2_ASYNC_MATCH_I2C) {
+ /* All useless */
+ dev_err(ici->v4l2_dev.dev, "No I2C data source found!\n");
+ return -ENODEV;
+ }
+
+ /* Or shall this be managed by the soc-camera device? */
+ sasc = devm_kzalloc(ici->v4l2_dev.dev, sizeof(*sasc), GFP_KERNEL);
+ if (!sasc)
+ return -ENOMEM;
+
+ /* HACK: just need a != NULL */
+ sdesc.host_desc.board_info = ERR_PTR(-ENODATA);
+
+ ret = soc_camera_dyn_pdev(&sdesc, sasc);
+ if (ret < 0)
+ goto eallocpdev;
+
+ sasc->sensor = &sasd->asd;
+
+ icd = soc_camera_add_pdev(sasc);
+ if (!icd) {
+ ret = -ENOMEM;
+ goto eaddpdev;
+ }
+
+ sasc->notifier.subdevs = asd;
+ sasc->notifier.num_subdevs = size;
+ sasc->notifier.ops = &soc_camera_async_ops;
+
+ icd->sasc = sasc;
+ icd->parent = ici->v4l2_dev.dev;
+
+ v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+ sasd->asd.match.i2c.adapter_id,
+ sasd->asd.match.i2c.address);
+
+ icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd);
+ if (IS_ERR(icd->clk)) {
+ ret = PTR_ERR(icd->clk);
+ goto eclkreg;
+ }
+
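+ /*
+ * Arm the notifier: .bound() and .complete() will be called once the
+ * sensor subdevice has been probed
+ */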
+ ret = v4l2_async_notifier_register(&ici->v4l2_dev, &sasc->notifier);
+ if (!ret)
+ return 0;
+
+ v4l2_clk_unregister(icd->clk);
+eclkreg:
+ icd->clk = NULL;
+ platform_device_del(sasc->pdev);
+eaddpdev:
+ platform_device_put(sasc->pdev);
+eallocpdev:
+ devm_kfree(ici->v4l2_dev.dev, sasc);
+ dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret);
+
+ return ret;
+}
+
+static void scan_async_host(struct soc_camera_host *ici)
+{
+ struct v4l2_async_subdev **asd;
+ int j;
+
+ for (j = 0, asd = ici->asd; ici->asd_sizes[j]; j++) {
+ scan_async_group(ici, asd, ici->asd_sizes[j]);
+ asd += ici->asd_sizes[j];
+ }
+}
+#else
+#define soc_camera_i2c_init(icd, sdesc) (-ENODEV)
+#define soc_camera_i2c_free(icd) do {} while (0)
+#define scan_async_host(ici) do {} while (0)
+#endif
+
+#ifdef CONFIG_OF
+
+struct soc_of_info {
+ struct soc_camera_async_subdev sasd;
+ struct soc_camera_async_client sasc;
+ struct v4l2_async_subdev *subdev;
+};
+
+static int soc_of_bind(struct soc_camera_host *ici,
+ struct device_node *ep,
+ struct device_node *remote)
+{
+ struct soc_camera_device *icd;
+ struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,};
+ struct soc_camera_async_client *sasc;
+ struct soc_of_info *info;
+ struct i2c_client *client;
+ char clk_name[V4L2_CLK_NAME_SIZE];
+ int ret;
+
+ /* allocate a new subdev and add match info to it */
+ info = devm_kzalloc(ici->v4l2_dev.dev, sizeof(struct soc_of_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->sasd.asd.match.fwnode = of_fwnode_handle(remote);
+ info->sasd.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ info->subdev = &info->sasd.asd;
+
+ /* Or shall this be managed by the soc-camera device? */
+ sasc = &info->sasc;
+
+ /* HACK: just need a != NULL */
+ sdesc.host_desc.board_info = ERR_PTR(-ENODATA);
+
+ ret = soc_camera_dyn_pdev(&sdesc, sasc);
+ if (ret < 0)
+ goto eallocpdev;
+
+ sasc->sensor = &info->sasd.asd;
+
+ icd = soc_camera_add_pdev(sasc);
+ if (!icd) {
+ ret = -ENOMEM;
+ goto eaddpdev;
+ }
+
+ sasc->notifier.subdevs = &info->subdev;
+ sasc->notifier.num_subdevs = 1;
+ sasc->notifier.ops = &soc_camera_async_ops;
+
+ icd->sasc = sasc;
+ icd->parent = ici->v4l2_dev.dev;
+
+ client = of_find_i2c_device_by_node(remote);
+
+ if (client)
+ v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+ client->adapter->nr, client->addr);
+ else
+ v4l2_clk_name_of(clk_name, sizeof(clk_name), remote);
+
+ icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd);
+ if (IS_ERR(icd->clk)) {
+ ret = PTR_ERR(icd->clk);
+ goto eclkreg;
+ }
+
+ ret = v4l2_async_notifier_register(&ici->v4l2_dev, &sasc->notifier);
+ if (!ret)
+ return 0;
+
+ v4l2_clk_unregister(icd->clk);
+eclkreg:
+ icd->clk = NULL;
+ platform_device_del(sasc->pdev);
+eaddpdev:
+ platform_device_put(sasc->pdev);
+eallocpdev:
+ devm_kfree(ici->v4l2_dev.dev, info);
+ dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret);
+
+ return ret;
+}
+
+static void scan_of_host(struct soc_camera_host *ici)
+{
+ struct device *dev = ici->v4l2_dev.dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *epn = NULL, *ren;
+ unsigned int i;
+
+ for (i = 0; ; i++) {
+ epn = of_graph_get_next_endpoint(np, epn);
+ if (!epn)
+ break;
+
+ ren = of_graph_get_remote_port(epn);
+ if (!ren) {
+ dev_notice(dev, "no remote for %pOF\n", epn);
+ continue;
+ }
+
+ /* so we now have a remote node to connect */
+ if (!i)
+ soc_of_bind(ici, epn, ren->parent);
+
+ of_node_put(ren);
+
+ if (i) {
+ dev_err(dev, "multiple subdevices aren't supported yet!\n");
+ break;
+ }
+ }
+
+ of_node_put(epn);
+}
+
+#else
+static inline void scan_of_host(struct soc_camera_host *ici) { }
+#endif
+
+/* Called during host-driver probe */
+static int soc_camera_probe(struct soc_camera_host *ici,
+ struct soc_camera_device *icd)
+{
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
+ struct soc_camera_host_desc *shd = &sdesc->host_desc;
+ struct device *control = NULL;
+ int ret;
+
+ dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev));
+
+ /*
+ * Currently the subdev with the largest number of controls (13) is
+ * ov6550. So let's pick 16 as a hint for the control handler. Note
+ * that this is a hint only: too large and you waste some memory, too
+ * small and there is a (very) small performance hit when looking up
+ * controls in the internal hash.
+ */
+ ret = v4l2_ctrl_handler_init(&icd->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ /* Must have icd->vdev before registering the device */
+ ret = video_dev_create(icd);
+ if (ret < 0)
+ goto evdc;
+
+ /*
+ * ..._video_start() will create a device node, video_register_device()
+ * itself is protected against concurrent open() calls, but we also have
+ * to protect our data during client probing.
+ */
+
+ /* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */
+ if (shd->board_info) {
+ ret = soc_camera_i2c_init(icd, sdesc);
+ if (ret < 0 && ret != -EPROBE_DEFER)
+ goto eadd;
+ } else if (!shd->add_device || !shd->del_device) {
+ ret = -EINVAL;
+ goto eadd;
+ } else {
+ ret = soc_camera_clock_start(ici);
+ if (ret < 0)
+ goto eadd;
+
+ if (shd->module_name)
+ ret = request_module(shd->module_name);
+
+ ret = shd->add_device(icd);
+ if (ret < 0)
+ goto eadddev;
+
+ /*
+ * FIXME: this is racy, we have to use driver-binding notification
+ * when it is available
+ */
+ control = to_soc_camera_control(icd);
+ if (!control || !control->driver || !dev_get_drvdata(control) ||
+ !try_module_get(control->driver->owner)) {
+ shd->del_device(icd);
+ ret = -ENODEV;
+ goto enodrv;
+ }
+ }
+
+ mutex_lock(&ici->host_lock);
+ ret = soc_camera_probe_finish(icd);
+ mutex_unlock(&ici->host_lock);
+ if (ret < 0)
+ goto efinish;
+
+ return 0;
+
+efinish:
+ if (shd->board_info) {
+ soc_camera_i2c_free(icd);
+ } else {
+ shd->del_device(icd);
+ module_put(control->driver->owner);
+enodrv:
+eadddev:
+ soc_camera_clock_stop(ici);
+ }
+eadd:
+ if (icd->vdev) {
+ video_device_release(icd->vdev);
+ icd->vdev = NULL;
+ }
+evdc:
+ v4l2_ctrl_handler_free(&icd->ctrl_handler);
+ return ret;
+}
+
+/*
+ * This is called on device_unregister, which only means we have to disconnect
+ * from the host, but not remove ourselves from the device list. With
+ * asynchronous client probing this can also be called without
+ * soc_camera_probe_finish() having run. Careful with clean up.
+ */
+static int soc_camera_remove(struct soc_camera_device *icd)
+{
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
+ struct video_device *vdev = icd->vdev;
+
+ v4l2_ctrl_handler_free(&icd->ctrl_handler);
+ if (vdev) {
+ video_unregister_device(vdev);
+ icd->vdev = NULL;
+ }
+
+ if (sdesc->host_desc.board_info) {
+ soc_camera_i2c_free(icd);
+ } else {
+ struct device *dev = to_soc_camera_control(icd);
+ struct device_driver *drv = dev ? dev->driver : NULL;
+ if (drv) {
+ sdesc->host_desc.del_device(icd);
+ module_put(drv->owner);
+ }
+ }
+
+ if (icd->num_user_formats)
+ soc_camera_free_user_formats(icd);
+
+ if (icd->clk) {
+ /* For the synchronous case */
+ v4l2_clk_unregister(icd->clk);
+ icd->clk = NULL;
+ }
+
+ if (icd->sasc)
+ platform_device_unregister(icd->sasc->pdev);
+
+ return 0;
+}
+
+static int default_g_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
+ if (ret)
+ return ret;
+ sel->r = sdsel.r;
+ return 0;
+}
+
+static int default_s_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ if (ret)
+ return ret;
+ sel->r = sdsel.r;
+ return 0;
+}
+
+static int default_g_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *a)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_g_parm_cap(icd->vdev, sd, a);
+}
+
+static int default_s_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *a)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_s_parm_cap(icd->vdev, sd, a);
+}
+
+static int default_enum_framesizes(struct soc_camera_device *icd,
+ struct v4l2_frmsizeenum *fsize)
+{
+ int ret;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ xlate = soc_camera_xlate_by_fourcc(icd, fsize->pixel_format);
+ if (!xlate)
+ return -EINVAL;
+ fse.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
+ if (ret < 0)
+ return ret;
+
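+ /* A single fixed size is reported as discrete, a range as continuous */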
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.min_width;
+ fsize->discrete.height = fse.min_height;
+ return 0;
+ }
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = fse.min_width;
+ fsize->stepwise.max_width = fse.max_width;
+ fsize->stepwise.min_height = fse.min_height;
+ fsize->stepwise.max_height = fse.max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+ return 0;
+}
+
+int soc_camera_host_register(struct soc_camera_host *ici)
+{
+ struct soc_camera_host *ix;
+ int ret;
+
+ if (!ici || !ici->ops ||
+ !ici->ops->try_fmt ||
+ !ici->ops->set_fmt ||
+ !ici->ops->set_bus_param ||
+ !ici->ops->querycap ||
+ !ici->ops->init_videobuf2 ||
+ !ici->ops->poll ||
+ !ici->v4l2_dev.dev)
+ return -EINVAL;
+
+ if (!ici->ops->set_selection)
+ ici->ops->set_selection = default_s_selection;
+ if (!ici->ops->get_selection)
+ ici->ops->get_selection = default_g_selection;
+ if (!ici->ops->set_parm)
+ ici->ops->set_parm = default_s_parm;
+ if (!ici->ops->get_parm)
+ ici->ops->get_parm = default_g_parm;
+ if (!ici->ops->enum_framesizes)
+ ici->ops->enum_framesizes = default_enum_framesizes;
+
+ mutex_lock(&list_lock);
+ list_for_each_entry(ix, &hosts, list) {
+ if (ix->nr == ici->nr) {
+ ret = -EBUSY;
+ goto edevreg;
+ }
+ }
+
+ ret = v4l2_device_register(ici->v4l2_dev.dev, &ici->v4l2_dev);
+ if (ret < 0)
+ goto edevreg;
+
+ list_add_tail(&ici->list, &hosts);
+ mutex_unlock(&list_lock);
+
+ mutex_init(&ici->host_lock);
+ mutex_init(&ici->clk_lock);
+
+ if (ici->v4l2_dev.dev->of_node)
+ scan_of_host(ici);
+ else if (ici->asd_sizes)
+ /*
+ * No OF, host with a list of subdevices. Don't try to mix
+ * modes by initialising some groups statically and some
+ * dynamically!
+ */
+ scan_async_host(ici);
+ else
+ /* Legacy: static platform devices from board data */
+ scan_add_host(ici);
+
+ return 0;
+
+edevreg:
+ mutex_unlock(&list_lock);
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_host_register);
+
+/* Unregister all clients! */
+void soc_camera_host_unregister(struct soc_camera_host *ici)
+{
+ struct soc_camera_device *icd, *tmp;
+ struct soc_camera_async_client *sasc;
+ LIST_HEAD(notifiers);
+
+ mutex_lock(&list_lock);
+ list_del(&ici->list);
+ list_for_each_entry(icd, &devices, list)
+ if (icd->iface == ici->nr && icd->sasc) {
+ /* as long as we hold the device, sasc won't be freed */
+ get_device(icd->pdev);
+ list_add(&icd->sasc->list, &notifiers);
+ }
+ mutex_unlock(&list_lock);
+
+ list_for_each_entry(sasc, &notifiers, list) {
+ /* Must call unlocked to avoid an AB-BA deadlock */
+ v4l2_async_notifier_unregister(&sasc->notifier);
+ put_device(&sasc->pdev->dev);
+ }
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry_safe(icd, tmp, &devices, list)
+ if (icd->iface == ici->nr)
+ soc_camera_remove(icd);
+
+ mutex_unlock(&list_lock);
+
+ v4l2_device_unregister(&ici->v4l2_dev);
+}
+EXPORT_SYMBOL(soc_camera_host_unregister);
+
+/* Image capture device */
+static int soc_camera_device_register(struct soc_camera_device *icd)
+{
+ struct soc_camera_device *ix;
+ int num = -1, i;
+
+ mutex_lock(&list_lock);
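+ /* Find the lowest device number still free on this host interface */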
+ for (i = 0; i < 256 && num < 0; i++) {
+ num = i;
+ /* Check if this index is available on this interface */
+ list_for_each_entry(ix, &devices, list) {
+ if (ix->iface == icd->iface && ix->devnum == i) {
+ num = -1;
+ break;
+ }
+ }
+ }
+
+ if (num < 0) {
+ /*
+ * ok, we have 256 cameras on this host...
+ * man, stay reasonable...
+ */
+ mutex_unlock(&list_lock);
+ return -ENOMEM;
+ }
+
+ icd->devnum = num;
+ icd->use_count = 0;
+ icd->host_priv = NULL;
+
+ /*
+ * Dynamically allocated devices set the bit earlier, but it doesn't
+ * hurt to set it again
+ */
+ i = to_platform_device(icd->pdev)->id;
+ if (i < 0)
+ /* One static (legacy) soc-camera platform device */
+ i = 0;
+ if (i >= MAP_MAX_NUM) {
+ mutex_unlock(&list_lock);
+ return -EBUSY;
+ }
+ set_bit(i, device_map);
+ list_add_tail(&icd->list, &devices);
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
+ .vidioc_querycap = soc_camera_querycap,
+ .vidioc_try_fmt_vid_cap = soc_camera_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = soc_camera_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = soc_camera_enum_fmt_vid_cap,
+ .vidioc_enum_input = soc_camera_enum_input,
+ .vidioc_g_input = soc_camera_g_input,
+ .vidioc_s_input = soc_camera_s_input,
+ .vidioc_s_std = soc_camera_s_std,
+ .vidioc_g_std = soc_camera_g_std,
+ .vidioc_enum_framesizes = soc_camera_enum_framesizes,
+ .vidioc_reqbufs = soc_camera_reqbufs,
+ .vidioc_querybuf = soc_camera_querybuf,
+ .vidioc_qbuf = soc_camera_qbuf,
+ .vidioc_dqbuf = soc_camera_dqbuf,
+ .vidioc_create_bufs = soc_camera_create_bufs,
+ .vidioc_prepare_buf = soc_camera_prepare_buf,
+ .vidioc_expbuf = soc_camera_expbuf,
+ .vidioc_streamon = soc_camera_streamon,
+ .vidioc_streamoff = soc_camera_streamoff,
+ .vidioc_g_selection = soc_camera_g_selection,
+ .vidioc_s_selection = soc_camera_s_selection,
+ .vidioc_g_parm = soc_camera_g_parm,
+ .vidioc_s_parm = soc_camera_s_parm,
+};
+
+static int video_dev_create(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct video_device *vdev = video_device_alloc();
+
+ if (!vdev)
+ return -ENOMEM;
+
+ strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
+
+ vdev->v4l2_dev = &ici->v4l2_dev;
+ vdev->fops = &soc_camera_fops;
+ vdev->ioctl_ops = &soc_camera_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->ctrl_handler = &icd->ctrl_handler;
+ vdev->lock = &ici->host_lock;
+
+ icd->vdev = vdev;
+
+ return 0;
+}
+
+/*
+ * Called from soc_camera_probe() above with .host_lock held
+ */
+static int soc_camera_video_start(struct soc_camera_device *icd)
+{
+ const struct device_type *type = icd->vdev->dev.type;
+ int ret;
+
+ if (!icd->parent)
+ return -ENODEV;
+
+ video_set_drvdata(icd->vdev, icd);
+ if (icd->vdev->tvnorms == 0) {
+ /* disable the STD API if there are no tvnorms defined */
+ v4l2_disable_ioctl(icd->vdev, VIDIOC_G_STD);
+ v4l2_disable_ioctl(icd->vdev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(icd->vdev, VIDIOC_ENUMSTD);
+ }
+ ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Restore device type, possibly set by the subdevice driver */
+ icd->vdev->dev.type = type;
+
+ return 0;
+}
+
+static int soc_camera_pdrv_probe(struct platform_device *pdev)
+{
+ struct soc_camera_desc *sdesc = pdev->dev.platform_data;
+ struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc;
+ struct soc_camera_device *icd;
+ int ret;
+
+ if (!sdesc)
+ return -EINVAL;
+
+ icd = devm_kzalloc(&pdev->dev, sizeof(*icd), GFP_KERNEL);
+ if (!icd)
+ return -ENOMEM;
+
+ /*
+ * In the asynchronous case ssdd->num_regulators is still 0, so the
+ * regulator allocation below is a dummy. The regulators are actually
+ * requested by the subdevice driver, using soc_camera_power_init().
+ * Also note that in that case regulators are attached to the I2C
+ * device and not to the camera platform device.
+ */
+ ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
+ if (ret < 0)
+ return ret;
+
+ icd->iface = sdesc->host_desc.bus_id;
+ icd->sdesc = sdesc;
+ icd->pdev = &pdev->dev;
+ platform_set_drvdata(pdev, icd);
+
+ icd->user_width = DEFAULT_WIDTH;
+ icd->user_height = DEFAULT_HEIGHT;
+
+ return soc_camera_device_register(icd);
+}
+
+/*
+ * Only called on rmmod for each platform device, since they are not
+ * hot-pluggable. Now we know that all our users - hosts and devices - have
+ * been unloaded already
+ */
+static int soc_camera_pdrv_remove(struct platform_device *pdev)
+{
+ struct soc_camera_device *icd = platform_get_drvdata(pdev);
+ int i;
+
+ if (!icd)
+ return -EINVAL;
+
+ i = pdev->id;
+ if (i < 0)
+ i = 0;
+
+ /*
+ * In synchronous mode with static platform devices this is called in a
+ * loop from drivers/base/dd.c::driver_detach(), with no parallel execution,
+ * so there is no need to lock. In the asynchronous case the caller -
+ * soc_camera_host_unregister() - already holds the lock
+ */
+ if (test_bit(i, device_map)) {
+ clear_bit(i, device_map);
+ list_del(&icd->list);
+ }
+
+ return 0;
+}
+
+static struct platform_driver __refdata soc_camera_pdrv = {
+ .probe = soc_camera_pdrv_probe,
+ .remove = soc_camera_pdrv_remove,
+ .driver = {
+ .name = "soc-camera-pdrv",
+ },
+};
+
+module_platform_driver(soc_camera_pdrv);
+
+MODULE_DESCRIPTION("Image capture bus driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:soc-camera-pdrv");
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
new file mode 100644
index 000000000..6745a6e3f
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Platform Camera Driver
+ *
+ * Copyright (C) 2008 Magnus Damm
+ * Based on mt9m001 driver,
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <media/soc_camera.h>
+#include <linux/platform_data/media/soc_camera_platform.h>
+
+struct soc_camera_platform_priv {
+ struct v4l2_subdev subdev;
+};
+
+static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+ return container_of(subdev, struct soc_camera_platform_priv, subdev);
+}
+
+static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+ return p->set_capture(p, enable);
+}
+
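+/* The format is fixed by platform data: return it for both get_fmt and set_fmt */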
+static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+
+ mf->width = p->format.width;
+ mf->height = p->format.height;
+ mf->code = p->format.code;
+ mf->colorspace = p->format.colorspace;
+ mf->field = p->format.field;
+
+ return 0;
+}
+
+static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, NULL, on);
+}
+
+static const struct v4l2_subdev_core_ops platform_subdev_core_ops = {
+ .s_power = soc_camera_platform_s_power,
+};
+
+static int soc_camera_platform_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ if (code->pad || code->index)
+ return -EINVAL;
+
+ code->code = p->format.code;
+ return 0;
+}
+
+static int soc_camera_platform_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = p->format.width;
+ sel->r.height = p->format.height;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ cfg->flags = p->mbus_param;
+ cfg->type = p->mbus_type;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops platform_subdev_video_ops = {
+ .s_stream = soc_camera_platform_s_stream,
+ .g_mbus_config = soc_camera_platform_g_mbus_config,
+};
+
+static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = {
+ .enum_mbus_code = soc_camera_platform_enum_mbus_code,
+ .get_selection = soc_camera_platform_get_selection,
+ .get_fmt = soc_camera_platform_fill_fmt,
+ .set_fmt = soc_camera_platform_fill_fmt,
+};
+
+static const struct v4l2_subdev_ops platform_subdev_ops = {
+ .core = &platform_subdev_core_ops,
+ .video = &platform_subdev_video_ops,
+ .pad = &platform_subdev_pad_ops,
+};
+
+static int soc_camera_platform_probe(struct platform_device *pdev)
+{
+ struct soc_camera_host *ici;
+ struct soc_camera_platform_priv *priv;
+ struct soc_camera_platform_info *p = pdev->dev.platform_data;
+ struct soc_camera_device *icd;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!p->icd) {
+ dev_err(&pdev->dev,
+ "Platform has not set soc_camera_device pointer!\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ icd = p->icd;
+
+ /* soc-camera convention: control's drvdata points to the subdev */
+ platform_set_drvdata(pdev, &priv->subdev);
+ /* Set the control device reference */
+ icd->control = &pdev->dev;
+
+ ici = to_soc_camera_host(icd->parent);
+
+ v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
+ v4l2_set_subdevdata(&priv->subdev, p);
+ strlcpy(priv->subdev.name, dev_name(&pdev->dev),
+ sizeof(priv->subdev.name));
+
+ return v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev);
+}
+
+static int soc_camera_platform_remove(struct platform_device *pdev)
+{
+ struct soc_camera_platform_priv *priv = get_priv(pdev);
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(&priv->subdev);
+
+ p->icd->control = NULL;
+ v4l2_device_unregister_subdev(&priv->subdev);
+ return 0;
+}
+
+static struct platform_driver soc_camera_platform_driver = {
+ .driver = {
+ .name = "soc_camera_platform",
+ },
+ .probe = soc_camera_platform_probe,
+ .remove = soc_camera_platform_remove,
+};
+
+module_platform_driver(soc_camera_platform_driver);
+
+MODULE_DESCRIPTION("SoC Camera Platform driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:soc_camera_platform");
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c
new file mode 100644
index 000000000..0ad4b2826
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_mediabus.c
@@ -0,0 +1,533 @@
+/*
+ * soc-camera media bus helper routines
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/drv-intf/soc_mediabus.h>
+
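+/* Lookup table translating media bus pixel codes into memory storage descriptions */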
+static const struct soc_mbus_lookup mbus_fmt[] = {
+{
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB666_1X18,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .name = "RGB666/32bpp",
+ .bits_per_sample = 18,
+ .packing = SOC_MBUS_PACKING_EXTEND32,
+ .order = SOC_MBUS_ORDER_LE,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .name = "RGB888/32bpp",
+ .bits_per_sample = 24,
+ .packing = SOC_MBUS_PACKING_EXTEND32,
+ .order = SOC_MBUS_ORDER_LE,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .name = "RGB888/32bpp",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND32,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .name = "RGB888/32bpp",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND32,
+ .order = SOC_MBUS_ORDER_LE,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .name = "Bayer 8 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .name = "Grey",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .name = "Grey 10bit",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADLO,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADLO,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .name = "JPEG",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_VARIABLE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .name = "RGB444",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .name = "YUYV 4:2:0",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .name = "YVYU 4:2:0",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .name = "Bayer 8 GRBG",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .name = "Bayer 10 BGGR DPCM 8",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .name = "Bayer 10 GBRG",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .name = "Bayer 10 GRBG",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .name = "Bayer 10 RGGB",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .name = "Bayer 12 BGGR",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .name = "Bayer 12 GBRG",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .name = "Bayer 12 GRBG",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .name = "Bayer 12 RGGB",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+},
+};
+
+int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
+ unsigned int *numerator, unsigned int *denominator)
+{
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_NONE:
+ case SOC_MBUS_PACKING_EXTEND16:
+ case SOC_MBUS_PACKING_EXTEND32:
+ *numerator = 1;
+ *denominator = 1;
+ return 0;
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ *numerator = 2;
+ *denominator = 1;
+ return 0;
+ case SOC_MBUS_PACKING_1_5X8:
+ *numerator = 3;
+ *denominator = 2;
+ return 0;
+ case SOC_MBUS_PACKING_VARIABLE:
+ *numerator = 0;
+ *denominator = 1;
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
+
+s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
+{
+ if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
+ return width * mf->bits_per_sample / 8;
+
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_NONE:
+ return width * mf->bits_per_sample / 8;
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ case SOC_MBUS_PACKING_EXTEND16:
+ return width * 2;
+ case SOC_MBUS_PACKING_1_5X8:
+ return width * 3 / 2;
+ case SOC_MBUS_PACKING_VARIABLE:
+ return 0;
+ case SOC_MBUS_PACKING_EXTEND32:
+ return width * 4;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(soc_mbus_bytes_per_line);
+
+s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
+ u32 bytes_per_line, u32 height)
+{
+ if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
+ return bytes_per_line * height;
+
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ return bytes_per_line * height * 2;
+ case SOC_MBUS_PACKING_1_5X8:
+ return bytes_per_line * height * 3 / 2;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(soc_mbus_image_size);
+
+const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
+ u32 code,
+ const struct soc_mbus_lookup *lookup,
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (lookup[i].code == code)
+ return &lookup[i].fmt;
+
+ return NULL;
+}
+EXPORT_SYMBOL(soc_mbus_find_fmtdesc);
+
+const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
+ u32 code)
+{
+ return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
+}
+EXPORT_SYMBOL(soc_mbus_get_fmtdesc);
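
For illustration only (not part of the patch): a minimal, hypothetical sketch of how a soc-camera host driver might combine the lookup and size helpers above to size a capture buffer. The function name example_sizeimage() and the 640x480 figure are invented for the example.

/*
 * Sketch only: size a capture buffer from a negotiated media bus code.
 * Assumes the code is one of the entries in mbus_fmt[] above.
 */
static int example_sizeimage(u32 code, u32 width, u32 height, u32 *sizeimage)
{
	const struct soc_mbus_pixelfmt *fmt = soc_mbus_get_fmtdesc(code);
	s32 bpl, size;

	if (!fmt)
		return -EINVAL;	/* bus code unknown to soc_mediabus */

	bpl = soc_mbus_bytes_per_line(width, fmt);
	if (bpl <= 0)
		return -EINVAL;	/* variable packing (e.g. JPEG) or error */

	size = soc_mbus_image_size(fmt, bpl, height);
	if (size <= 0)
		return -EINVAL;

	*sizeimage = size;	/* e.g. 640x480 YUYV8_1_5X8 -> 460800 bytes */
	return 0;
}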
+
+unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
+ unsigned int flags)
+{
+ unsigned long common_flags;
+ bool hsync = true, vsync = true, pclk, data, mode;
+ bool mipi_lanes, mipi_clock;
+
+ common_flags = cfg->flags & flags;
+
+ switch (cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ /* fall through */
+ case V4L2_MBUS_BT656:
+ pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
+ mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
+ return (!hsync || !vsync || !pclk || !data || !mode) ?
+ 0 : common_flags;
+ case V4L2_MBUS_CSI2:
+ mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
+ mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
+ return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(soc_mbus_config_compatible);
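
A hedged sketch of how a host driver might consume soc_mbus_config_compatible(); EXAMPLE_HOST_FLAGS and example_bus_ok() are made-up placeholders standing in for whatever signalling a real bridge supports.

/* Illustrative only: flags a hypothetical parallel-bus host accepts. */
#define EXAMPLE_HOST_FLAGS	(V4L2_MBUS_MASTER | \
				 V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
				 V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
				 V4L2_MBUS_PCLK_SAMPLE_RISING | \
				 V4L2_MBUS_PCLK_SAMPLE_FALLING | \
				 V4L2_MBUS_DATA_ACTIVE_HIGH)

static bool example_bus_ok(const struct v4l2_mbus_config *cfg)
{
	/*
	 * A non-zero result means every mandatory signal group (sync,
	 * pixel clock, data polarity, master/slave) shares at least one
	 * flag with what this host supports.
	 */
	return soc_mbus_config_compatible(cfg, EXAMPLE_HOST_FLAGS) != 0;
}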
+
+static int __init soc_mbus_init(void)
+{
+ return 0;
+}
+
+static void __exit soc_mbus_exit(void)
+{
+}
+
+module_init(soc_mbus_init);
+module_exit(soc_mbus_exit);
+
+MODULE_DESCRIPTION("soc-camera media bus interface");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
new file mode 100644
index 000000000..6164102e6
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -0,0 +1,426 @@
+/*
+ * soc-camera generic scaling-cropping manipulation functions
+ *
+ * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <media/soc_camera.h>
+#include <media/v4l2-common.h>
+
+#include "soc_scale_crop.h"
+
+#ifdef DEBUG_GEOMETRY
+#define dev_geo dev_info
+#else
+#define dev_geo dev_dbg
+#endif
+
+/* Check if any dimension of r1 is smaller than respective one of r2 */
+static bool is_smaller(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
+{
+ return r1->width < r2->width || r1->height < r2->height;
+}
+
+/* Check if r1 fails to cover r2 */
+static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
+{
+ return r1->left > r2->left || r1->top > r2->top ||
+ r1->left + r1->width < r2->left + r2->width ||
+ r1->top + r1->height < r2->top + r2->height;
+}
+
+/* Get and store current client crop */
+int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
+{
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
+ if (!ret) {
+ *rect = sdsel.r;
+ return ret;
+ }
+
+ sdsel.target = V4L2_SEL_TGT_CROP_DEFAULT;
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
+ if (!ret)
+ *rect = sdsel.r;
+
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_client_g_rect);
+
+/* Client crop has changed, update our sub-rectangle to remain within the area */
+static void move_and_crop_subrect(struct v4l2_rect *rect,
+ struct v4l2_rect *subrect)
+{
+ if (rect->width < subrect->width)
+ subrect->width = rect->width;
+
+ if (rect->height < subrect->height)
+ subrect->height = rect->height;
+
+ if (rect->left > subrect->left)
+ subrect->left = rect->left;
+ else if (rect->left + rect->width <
+ subrect->left + subrect->width)
+ subrect->left = rect->left + rect->width -
+ subrect->width;
+
+ if (rect->top > subrect->top)
+ subrect->top = rect->top;
+ else if (rect->top + rect->height <
+ subrect->top + subrect->height)
+ subrect->top = rect->top + rect->height -
+ subrect->height;
+}
+
+/*
+ * The iterative approach, common to both scaling and cropping, is:
+ * 1. try whether the client can produce exactly what the user requested
+ * 2. if (1) failed, keep doubling the client image until it is big enough
+ * 3. if (2) failed, request the maximum image
+ * (a driver-independent sketch of this doubling loop follows the function)
+ */
+int soc_camera_client_s_selection(struct v4l2_subdev *sd,
+ struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
+ struct v4l2_rect *target_rect, struct v4l2_rect *subrect)
+{
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+ struct v4l2_subdev_selection bounds = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r;
+ struct device *dev = sd->v4l2_dev->dev;
+ int ret;
+ unsigned int width, height;
+
+ v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ sel->r = sdsel.r;
+ ret = soc_camera_client_g_rect(sd, cam_rect);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Now cam_rect contains the current camera input rectangle, and it must
+ * be within the camera cropcap bounds
+ */
+ if (!memcmp(rect, cam_rect, sizeof(*rect))) {
+ /* Even if the camera S_SELECTION failed, the camera rectangle already matches the request */
+ dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n",
+ rect->width, rect->height, rect->left, rect->top);
+ *target_rect = *cam_rect;
+ return 0;
+ }
+
+ /* Try to fix the cropping that the camera hasn't managed to set */
+ dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n",
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top,
+ rect->width, rect->height, rect->left, rect->top);
+
+ /* We need sensor maximum rectangle */
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds);
+ if (ret < 0)
+ return ret;
+
+ /* Put user requested rectangle within sensor bounds */
+ soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2,
+ bounds.r.width);
+ soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4,
+ bounds.r.height);
+
+ /*
+ * Popular special case - some cameras can only handle fixed sizes like
+ * QVGA, VGA,... Take care to avoid infinite loop.
+ */
+ width = max_t(unsigned int, cam_rect->width, 2);
+ height = max_t(unsigned int, cam_rect->height, 2);
+
+ /*
+ * Loop as long as sensor is not covering the requested rectangle and
+ * is still within its bounds
+ */
+ while (!ret && (is_smaller(cam_rect, rect) ||
+ is_inside(cam_rect, rect)) &&
+ (bounds.r.width > width || bounds.r.height > height)) {
+
+ width *= 2;
+ height *= 2;
+
+ cam_rect->width = width;
+ cam_rect->height = height;
+
+ /*
+ * We do not know what capabilities the camera has to set up
+ * left and top borders. We could try to be smarter in iterating
+ * them, e.g., if camera current left is to the right of the
+ * target left, set it to the middle point between the current
+ * left and minimum left. But that would add too much
+ * complexity: we would have to iterate each border separately.
+ * Instead we just drop to the left and top bounds.
+ */
+ if (cam_rect->left > rect->left)
+ cam_rect->left = bounds.r.left;
+
+ if (cam_rect->left + cam_rect->width < rect->left + rect->width)
+ cam_rect->width = rect->left + rect->width -
+ cam_rect->left;
+
+ if (cam_rect->top > rect->top)
+ cam_rect->top = bounds.r.top;
+
+ if (cam_rect->top + cam_rect->height < rect->top + rect->height)
+ cam_rect->height = rect->top + rect->height -
+ cam_rect->top;
+
+ sdsel.r = *cam_rect;
+ v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ *cam_rect = sdsel.r;
+ ret = soc_camera_client_g_rect(sd, cam_rect);
+ dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret,
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top);
+ }
+
+ /* S_SELECTION must not modify the rectangle */
+ if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
+ /*
+ * The camera failed to configure a suitable cropping,
+ * we cannot use the current rectangle, set to max
+ */
+ sdsel.r = bounds.r;
+ v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ *cam_rect = sdsel.r;
+
+ ret = soc_camera_client_g_rect(sd, cam_rect);
+ dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n", ret,
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top);
+ }
+
+ if (!ret) {
+ *target_rect = *cam_rect;
+ move_and_crop_subrect(target_rect, subrect);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_client_s_selection);
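
The doubling loop referenced in the comment above, reduced to a standalone, driver-independent sketch. example_grow_until_covered() is a hypothetical helper; the real function additionally re-reads the rectangle the sensor actually accepted after every set_selection call.

static void example_grow_until_covered(struct v4l2_rect *cam,
				       const struct v4l2_rect *want,
				       const struct v4l2_rect *bounds)
{
	/*
	 * Double the candidate window until it covers the request or runs
	 * into the sensor bounds; powers of two keep the iteration count low.
	 */
	while ((cam->width < want->width || cam->height < want->height) &&
	       (cam->width < bounds->width || cam->height < bounds->height)) {
		cam->width = min(cam->width * 2, bounds->width);
		cam->height = min(cam->height * 2, bounds->height);
	}
}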
+
+/* Iterative set_fmt, also updates cached client crop on success */
+static int client_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_rect *rect, struct v4l2_rect *subrect,
+ unsigned int max_width, unsigned int max_height,
+ struct v4l2_subdev_format *format, bool host_can_scale)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ bool host_1to1;
+ int ret;
+
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), pad,
+ set_fmt, NULL, format);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
+
+ if (width == mf->width && height == mf->height) {
+ /* Perfect! The client has done it all. */
+ host_1to1 = true;
+ goto update_cache;
+ }
+
+ host_1to1 = false;
+ if (!host_can_scale)
+ goto update_cache;
+
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
+ if (ret < 0)
+ return ret;
+
+ if (max_width > sdsel.r.width)
+ max_width = sdsel.r.width;
+ if (max_height > sdsel.r.height)
+ max_height = sdsel.r.height;
+
+ /* Camera set a format, but geometry is not precise, try to improve */
+ tmp_w = mf->width;
+ tmp_h = mf->height;
+
+ /* width <= max_width && height <= max_height - guaranteed by try_fmt */
+ while ((width > tmp_w || height > tmp_h) &&
+ tmp_w < max_width && tmp_h < max_height) {
+ tmp_w = min(2 * tmp_w, max_width);
+ tmp_h = min(2 * tmp_h, max_height);
+ mf->width = tmp_w;
+ mf->height = tmp_h;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), pad,
+ set_fmt, NULL, format);
+ dev_geo(dev, "Camera scaled to %ux%u\n",
+ mf->width, mf->height);
+ if (ret < 0) {
+ /* This shouldn't happen */
+ dev_err(dev, "Client failed to set format: %d\n", ret);
+ return ret;
+ }
+ }
+
+update_cache:
+ /* Update cache */
+ ret = soc_camera_client_g_rect(sd, rect);
+ if (ret < 0)
+ return ret;
+
+ if (host_1to1)
+ *subrect = *rect;
+ else
+ move_and_crop_subrect(rect, subrect);
+
+ return 0;
+}
+
+/**
+ * soc_camera_client_scale
+ * @icd: soc-camera device
+ * @rect: camera cropping window
+ * @subrect: part of rect, sent to the user
+ * @mf: input / output camera output window
+ * @width: on input: max host input width;
+ * on output: user width, mapped back to input
+ * @height: on input: max host input height;
+ * on output: user height, mapped back to input
+ * @host_can_scale: host can scale this pixel format
+ * @shift: shift, used for scaling
+ */
+int soc_camera_client_scale(struct soc_camera_device *icd,
+ struct v4l2_rect *rect, struct v4l2_rect *subrect,
+ struct v4l2_mbus_framefmt *mf,
+ unsigned int *width, unsigned int *height,
+ bool host_can_scale, unsigned int shift)
+{
+ struct device *dev = icd->parent;
+ struct v4l2_subdev_format fmt_tmp = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = *mf,
+ };
+ struct v4l2_mbus_framefmt *mf_tmp = &fmt_tmp.format;
+ unsigned int scale_h, scale_v;
+ int ret;
+
+ /*
+ * 5. Apply iterative camera S_FMT for camera user window (also updates
+ * client crop cache and the imaginary sub-rectangle).
+ */
+ ret = client_set_fmt(icd, rect, subrect, *width, *height,
+ &fmt_tmp, host_can_scale);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "5: camera scaled to %ux%u\n",
+ mf_tmp->width, mf_tmp->height);
+
+ /* 6. Retrieve camera output window (g_fmt) */
+
+ /* unneeded - it is already in "mf_tmp" */
+
+ /* 7. Calculate new client scales. */
+ scale_h = soc_camera_calc_scale(rect->width, shift, mf_tmp->width);
+ scale_v = soc_camera_calc_scale(rect->height, shift, mf_tmp->height);
+
+ mf->width = mf_tmp->width;
+ mf->height = mf_tmp->height;
+ mf->colorspace = mf_tmp->colorspace;
+
+ /*
+ * 8. Calculate new host crop - apply camera scales to previously
+ * updated "effective" crop.
+ */
+ *width = soc_camera_shift_scale(subrect->width, shift, scale_h);
+ *height = soc_camera_shift_scale(subrect->height, shift, scale_v);
+
+ dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
+
+ return 0;
+}
+EXPORT_SYMBOL(soc_camera_client_scale);
+
+/*
+ * Calculate real client output window by applying new scales to the current
+ * client crop. New scales are calculated from the requested output format and
+ * host crop, mapped back onto the client input (subrect).
+ */
+void soc_camera_calc_client_output(struct soc_camera_device *icd,
+ struct v4l2_rect *rect, struct v4l2_rect *subrect,
+ const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf,
+ unsigned int shift)
+{
+ struct device *dev = icd->parent;
+ unsigned int scale_v, scale_h;
+
+ if (subrect->width == rect->width &&
+ subrect->height == rect->height) {
+ /* No sub-cropping */
+ mf->width = pix->width;
+ mf->height = pix->height;
+ return;
+ }
+
+ /* 1.-2. Current camera scales and subwin - cached. */
+
+ dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
+ subrect->width, subrect->height,
+ subrect->left, subrect->top);
+
+ /*
+ * 3. Calculate new combined scales from input sub-window to requested
+ * user window.
+ */
+
+ /*
+ * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF
+ * (128x96) or larger than VGA. This and similar limitations have to be
+ * taken into account here.
+ */
+ scale_h = soc_camera_calc_scale(subrect->width, shift, pix->width);
+ scale_v = soc_camera_calc_scale(subrect->height, shift, pix->height);
+
+ dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
+
+ /*
+ * 4. Calculate desired client output window by applying combined scales
+ * to client (real) input window.
+ */
+ mf->width = soc_camera_shift_scale(rect->width, shift, scale_h);
+ mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
+}
+EXPORT_SYMBOL(soc_camera_calc_client_output);
+
+MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.h b/drivers/media/platform/soc_camera/soc_scale_crop.h
new file mode 100644
index 000000000..9ca469312
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.h
@@ -0,0 +1,47 @@
+/*
+ * soc-camera generic scaling-cropping manipulation functions
+ *
+ * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef SOC_SCALE_CROP_H
+#define SOC_SCALE_CROP_H
+
+#include <linux/kernel.h>
+
+struct soc_camera_device;
+
+struct v4l2_selection;
+struct v4l2_mbus_framefmt;
+struct v4l2_pix_format;
+struct v4l2_rect;
+struct v4l2_subdev;
+
+static inline unsigned int soc_camera_shift_scale(unsigned int size,
+ unsigned int shift, unsigned int scale)
+{
+ return DIV_ROUND_CLOSEST(size << shift, scale);
+}
+
+#define soc_camera_calc_scale(in, shift, out) soc_camera_shift_scale(in, shift, out)
+
+int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
+int soc_camera_client_s_selection(struct v4l2_subdev *sd,
+ struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
+ struct v4l2_rect *target_rect, struct v4l2_rect *subrect);
+int soc_camera_client_scale(struct soc_camera_device *icd,
+ struct v4l2_rect *rect, struct v4l2_rect *subrect,
+ struct v4l2_mbus_framefmt *mf,
+ unsigned int *width, unsigned int *height,
+ bool host_can_scale, unsigned int shift);
+void soc_camera_calc_client_output(struct soc_camera_device *icd,
+ struct v4l2_rect *rect, struct v4l2_rect *subrect,
+ const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf,
+ unsigned int shift);
+
+#endif
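
The two helpers above are plain fixed-point arithmetic; a worked example, with purely illustrative numbers (shift = 12 is chosen only for the example):

/*
 * scale  = soc_camera_calc_scale(1280, 12, 640)
 *        = DIV_ROUND_CLOSEST(1280 << 12, 640)  = 8192   (i.e. 2.0)
 * output = soc_camera_shift_scale(1280, 12, 8192)
 *        = DIV_ROUND_CLOSEST(1280 << 12, 8192) = 640
 * so applying the computed scale back to the input reproduces the output.
 */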
diff --git a/drivers/media/platform/sti/bdisp/Makefile b/drivers/media/platform/sti/bdisp/Makefile
new file mode 100644
index 000000000..bc53496fa
--- /dev/null
+++ b/drivers/media/platform/sti/bdisp/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_STI_BDISP) := bdisp.o
+
+bdisp-objs := bdisp-v4l2.o bdisp-hw.o bdisp-debug.o
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
new file mode 100644
index 000000000..c6a4e2de5
--- /dev/null
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "bdisp.h"
+#include "bdisp-filter.h"
+#include "bdisp-reg.h"
+
+void bdisp_dbg_perf_begin(struct bdisp_dev *bdisp)
+{
+ bdisp->dbg.hw_start = ktime_get();
+}
+
+void bdisp_dbg_perf_end(struct bdisp_dev *bdisp)
+{
+ s64 time_us;
+
+ time_us = ktime_us_delta(ktime_get(), bdisp->dbg.hw_start);
+
+ if (!bdisp->dbg.min_duration)
+ bdisp->dbg.min_duration = time_us;
+ else
+ bdisp->dbg.min_duration = min(time_us, bdisp->dbg.min_duration);
+
+ bdisp->dbg.last_duration = time_us;
+ bdisp->dbg.max_duration = max(time_us, bdisp->dbg.max_duration);
+ bdisp->dbg.tot_duration += time_us;
+}
+
+static void bdisp_dbg_dump_ins(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "INS\t0x%08X\t", val);
+
+ switch (val & BLT_INS_S1_MASK) {
+ case BLT_INS_S1_OFF:
+ break;
+ case BLT_INS_S1_MEM:
+ seq_puts(s, "SRC1=mem - ");
+ break;
+ case BLT_INS_S1_CF:
+ seq_puts(s, "SRC1=ColorFill - ");
+ break;
+ case BLT_INS_S1_COPY:
+ seq_puts(s, "SRC1=copy - ");
+ break;
+ case BLT_INS_S1_FILL:
+ seq_puts(s, "SRC1=fil - ");
+ break;
+ default:
+ seq_puts(s, "SRC1=??? - ");
+ break;
+ }
+
+ switch (val & BLT_INS_S2_MASK) {
+ case BLT_INS_S2_OFF:
+ break;
+ case BLT_INS_S2_MEM:
+ seq_puts(s, "SRC2=mem - ");
+ break;
+ case BLT_INS_S2_CF:
+ seq_puts(s, "SRC2=ColorFill - ");
+ break;
+ default:
+ seq_puts(s, "SRC2=??? - ");
+ break;
+ }
+
+ if ((val & BLT_INS_S3_MASK) == BLT_INS_S3_MEM)
+ seq_puts(s, "SRC3=mem - ");
+
+ if (val & BLT_INS_IVMX)
+ seq_puts(s, "IVMX - ");
+ if (val & BLT_INS_CLUT)
+ seq_puts(s, "CLUT - ");
+ if (val & BLT_INS_SCALE)
+ seq_puts(s, "Scale - ");
+ if (val & BLT_INS_FLICK)
+ seq_puts(s, "Flicker - ");
+ if (val & BLT_INS_CLIP)
+ seq_puts(s, "Clip - ");
+ if (val & BLT_INS_CKEY)
+ seq_puts(s, "ColorKey - ");
+ if (val & BLT_INS_OVMX)
+ seq_puts(s, "OVMX - ");
+ if (val & BLT_INS_DEI)
+ seq_puts(s, "Deint - ");
+ if (val & BLT_INS_PMASK)
+ seq_puts(s, "PlaneMask - ");
+ if (val & BLT_INS_VC1R)
+ seq_puts(s, "VC1R - ");
+ if (val & BLT_INS_ROTATE)
+ seq_puts(s, "Rotate - ");
+ if (val & BLT_INS_GRAD)
+ seq_puts(s, "GradFill - ");
+ if (val & BLT_INS_AQLOCK)
+ seq_puts(s, "AQLock - ");
+ if (val & BLT_INS_PACE)
+ seq_puts(s, "Pace - ");
+ if (val & BLT_INS_IRQ)
+ seq_puts(s, "IRQ - ");
+
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "TTY\t0x%08X\t", val);
+ seq_printf(s, "Pitch=%d - ", val & 0xFFFF);
+
+ switch ((val & BLT_TTY_COL_MASK) >> BLT_TTY_COL_SHIFT) {
+ case BDISP_RGB565:
+ seq_puts(s, "RGB565 - ");
+ break;
+ case BDISP_RGB888:
+ seq_puts(s, "RGB888 - ");
+ break;
+ case BDISP_XRGB8888:
+ seq_puts(s, "xRGB888 - ");
+ break;
+ case BDISP_ARGB8888:
+ seq_puts(s, "ARGB8888 - ");
+ break;
+ case BDISP_NV12:
+ seq_puts(s, "NV12 - ");
+ break;
+ case BDISP_YUV_3B:
+ seq_puts(s, "YUV420P - ");
+ break;
+ default:
+ seq_puts(s, "ColorFormat ??? - ");
+ break;
+ }
+
+ if (val & BLT_TTY_ALPHA_R)
+ seq_puts(s, "AlphaRange - ");
+ if (val & BLT_TTY_CR_NOT_CB)
+ seq_puts(s, "CrNotCb - ");
+ if (val & BLT_TTY_MB)
+ seq_puts(s, "MB - ");
+ if (val & BLT_TTY_HSO)
+ seq_puts(s, "HSO inverse - ");
+ if (val & BLT_TTY_VSO)
+ seq_puts(s, "VSO inverse - ");
+ if (val & BLT_TTY_DITHER)
+ seq_puts(s, "Dither - ");
+ if (val & BLT_TTY_CHROMA)
+ seq_puts(s, "Write CHROMA - ");
+ if (val & BLT_TTY_BIG_END)
+ seq_puts(s, "BigEndian - ");
+
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name)
+{
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+ seq_printf(s, "(%d,%d)\n", val & 0xFFFF, (val >> 16));
+}
+
+static void bdisp_dbg_dump_sz(struct seq_file *s, u32 val, char *name)
+{
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+ seq_printf(s, "%dx%d\n", val & 0x1FFF, (val >> 16) & 0x1FFF);
+}
+
+static void bdisp_dbg_dump_sty(struct seq_file *s,
+ u32 val, u32 addr, char *name)
+{
+ bool s1, s2, s3;
+
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+
+ if (!addr || !name || (strlen(name) < 2))
+ goto done;
+
+ s1 = name[strlen(name) - 1] == '1';
+ s2 = name[strlen(name) - 1] == '2';
+ s3 = name[strlen(name) - 1] == '3';
+
+ seq_printf(s, "Pitch=%d - ", val & 0xFFFF);
+
+ switch ((val & BLT_TTY_COL_MASK) >> BLT_TTY_COL_SHIFT) {
+ case BDISP_RGB565:
+ seq_puts(s, "RGB565 - ");
+ break;
+ case BDISP_RGB888:
+ seq_puts(s, "RGB888 - ");
+ break;
+ case BDISP_XRGB8888:
+ seq_puts(s, "xRGB888 - ");
+ break;
+ case BDISP_ARGB8888:
+ seq_puts(s, "ARGB888 - ");
+ break;
+ case BDISP_NV12:
+ seq_puts(s, "NV12 - ");
+ break;
+ case BDISP_YUV_3B:
+ seq_puts(s, "YUV420P - ");
+ break;
+ default:
+ seq_puts(s, "ColorFormat ??? - ");
+ break;
+ }
+
+ if ((val & BLT_TTY_ALPHA_R) && !s3)
+ seq_puts(s, "AlphaRange - ");
+ if ((val & BLT_S1TY_A1_SUBSET) && !s3)
+ seq_puts(s, "A1SubSet - ");
+ if ((val & BLT_TTY_MB) && !s1)
+ seq_puts(s, "MB - ");
+ if (val & BLT_TTY_HSO)
+ seq_puts(s, "HSO inverse - ");
+ if (val & BLT_TTY_VSO)
+ seq_puts(s, "VSO inverse - ");
+ if ((val & BLT_S1TY_CHROMA_EXT) && (s1 || s2))
+ seq_puts(s, "ChromaExt - ");
+ if ((val & BLT_S3TY_BLANK_ACC) && s3)
+ seq_puts(s, "Blank Acc - ");
+ if ((val & BTL_S1TY_SUBBYTE) && !s3)
+ seq_puts(s, "SubByte - ");
+ if ((val & BLT_S1TY_RGB_EXP) && !s3)
+ seq_puts(s, "RGBExpand - ");
+ if ((val & BLT_TTY_BIG_END) && !s3)
+ seq_puts(s, "BigEndian - ");
+
+done:
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "FCTL\t0x%08X\t", val);
+
+ if ((val & BLT_FCTL_Y_HV_SCALE) == BLT_FCTL_Y_HV_SCALE)
+ seq_puts(s, "Resize Luma - ");
+ else if ((val & BLT_FCTL_Y_HV_SCALE) == BLT_FCTL_Y_HV_SAMPLE)
+ seq_puts(s, "Sample Luma - ");
+
+ if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SCALE)
+ seq_puts(s, "Resize Chroma");
+ else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE)
+ seq_puts(s, "Sample Chroma");
+
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name)
+{
+ u32 inc;
+
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+
+ if (!val)
+ goto done;
+
+ inc = val & 0xFFFF;
+ seq_printf(s, "H: %d(6.10) / scale~%dx0.1 - ", inc, 1024 * 10 / inc);
+
+ inc = val >> 16;
+ seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc);
+
+done:
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name)
+{
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+
+ if (!val)
+ goto done;
+
+ seq_printf(s, "H: init=%d repeat=%d - ", val & 0x3FF, (val >> 12) & 7);
+ val >>= 16;
+ seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7);
+
+done:
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_ivmx(struct seq_file *s,
+ u32 c0, u32 c1, u32 c2, u32 c3)
+{
+ seq_printf(s, "IVMX0\t0x%08X\n", c0);
+ seq_printf(s, "IVMX1\t0x%08X\n", c1);
+ seq_printf(s, "IVMX2\t0x%08X\n", c2);
+ seq_printf(s, "IVMX3\t0x%08X\t", c3);
+
+ if (!c0 && !c1 && !c2 && !c3) {
+ seq_putc(s, '\n');
+ return;
+ }
+
+ if ((c0 == bdisp_rgb_to_yuv[0]) &&
+ (c1 == bdisp_rgb_to_yuv[1]) &&
+ (c2 == bdisp_rgb_to_yuv[2]) &&
+ (c3 == bdisp_rgb_to_yuv[3])) {
+ seq_puts(s, "RGB to YUV\n");
+ return;
+ }
+
+ if ((c0 == bdisp_yuv_to_rgb[0]) &&
+ (c1 == bdisp_yuv_to_rgb[1]) &&
+ (c2 == bdisp_yuv_to_rgb[2]) &&
+ (c3 == bdisp_yuv_to_rgb[3])) {
+ seq_puts(s, "YUV to RGB\n");
+ return;
+ }
+ seq_puts(s, "Unknown conversion\n");
+}
+
+static int bdisp_dbg_last_nodes(struct seq_file *s, void *data)
+{
+ /* Not dumping all fields, focusing on significant ones */
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_node *node;
+ int i = 0;
+
+ if (!bdisp->dbg.copy_node[0]) {
+ seq_puts(s, "No node built yet\n");
+ return 0;
+ }
+
+ do {
+ node = bdisp->dbg.copy_node[i];
+ if (!node)
+ break;
+ seq_printf(s, "--------\nNode %d:\n", i);
+ seq_puts(s, "-- General --\n");
+ seq_printf(s, "NIP\t0x%08X\n", node->nip);
+ seq_printf(s, "CIC\t0x%08X\n", node->cic);
+ bdisp_dbg_dump_ins(s, node->ins);
+ seq_printf(s, "ACK\t0x%08X\n", node->ack);
+ seq_puts(s, "-- Target --\n");
+ seq_printf(s, "TBA\t0x%08X\n", node->tba);
+ bdisp_dbg_dump_tty(s, node->tty);
+ bdisp_dbg_dump_xy(s, node->txy, "TXY");
+ bdisp_dbg_dump_sz(s, node->tsz, "TSZ");
+ /* Color Fill not dumped */
+ seq_puts(s, "-- Source 1 --\n");
+ seq_printf(s, "S1BA\t0x%08X\n", node->s1ba);
+ bdisp_dbg_dump_sty(s, node->s1ty, node->s1ba, "S1TY");
+ bdisp_dbg_dump_xy(s, node->s1xy, "S1XY");
+ seq_puts(s, "-- Source 2 --\n");
+ seq_printf(s, "S2BA\t0x%08X\n", node->s2ba);
+ bdisp_dbg_dump_sty(s, node->s2ty, node->s2ba, "S2TY");
+ bdisp_dbg_dump_xy(s, node->s2xy, "S2XY");
+ bdisp_dbg_dump_sz(s, node->s2sz, "S2SZ");
+ seq_puts(s, "-- Source 3 --\n");
+ seq_printf(s, "S3BA\t0x%08X\n", node->s3ba);
+ bdisp_dbg_dump_sty(s, node->s3ty, node->s3ba, "S3TY");
+ bdisp_dbg_dump_xy(s, node->s3xy, "S3XY");
+ bdisp_dbg_dump_sz(s, node->s3sz, "S3SZ");
+ /* Clipping not dumped */
+ /* CLUT not dumped */
+ seq_puts(s, "-- Filter & Mask --\n");
+ bdisp_dbg_dump_fctl(s, node->fctl);
+ /* PMK not dumped */
+ seq_puts(s, "-- Chroma Filter --\n");
+ bdisp_dbg_dump_rsf(s, node->rsf, "RSF");
+ bdisp_dbg_dump_rzi(s, node->rzi, "RZI");
+ seq_printf(s, "HFP\t0x%08X\n", node->hfp);
+ seq_printf(s, "VFP\t0x%08X\n", node->vfp);
+ seq_puts(s, "-- Luma Filter --\n");
+ bdisp_dbg_dump_rsf(s, node->y_rsf, "Y_RSF");
+ bdisp_dbg_dump_rzi(s, node->y_rzi, "Y_RZI");
+ seq_printf(s, "Y_HFP\t0x%08X\n", node->y_hfp);
+ seq_printf(s, "Y_VFP\t0x%08X\n", node->y_vfp);
+ /* Flicker not dumped */
+ /* Color key not dumped */
+ /* Reserved not dumped */
+ /* Static Address & User not dumped */
+ seq_puts(s, "-- Input Versatile Matrix --\n");
+ bdisp_dbg_dump_ivmx(s, node->ivmx0, node->ivmx1,
+ node->ivmx2, node->ivmx3);
+ /* Output Versatile Matrix not dumped */
+ /* Pace not dumped */
+ /* VC1R & DEI not dumped */
+ /* Gradient Fill not dumped */
+ } while ((++i < MAX_NB_NODE) && node->nip);
+
+ return 0;
+}
+
+static int bdisp_dbg_last_nodes_raw(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_node *node;
+ u32 *val;
+ int j, i = 0;
+
+ if (!bdisp->dbg.copy_node[0]) {
+ seq_puts(s, "No node built yet\n");
+ return 0;
+ }
+
+ do {
+ node = bdisp->dbg.copy_node[i];
+ if (!node)
+ break;
+
+ seq_printf(s, "--------\nNode %d:\n", i);
+ val = (u32 *)node;
+ for (j = 0; j < sizeof(struct bdisp_node) / sizeof(u32); j++)
+ seq_printf(s, "0x%08X\n", *val++);
+ } while ((++i < MAX_NB_NODE) && node->nip);
+
+ return 0;
+}
+
+static const char *bdisp_fmt_to_str(struct bdisp_frame frame)
+{
+ switch (frame.fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ return "YUV420P";
+ case V4L2_PIX_FMT_NV12:
+ if (frame.field == V4L2_FIELD_INTERLACED)
+ return "NV12 interlaced";
+ else
+ return "NV12";
+ case V4L2_PIX_FMT_RGB565:
+ return "RGB16";
+ case V4L2_PIX_FMT_RGB24:
+ return "RGB24";
+ case V4L2_PIX_FMT_XBGR32:
+ return "XRGB";
+ case V4L2_PIX_FMT_ABGR32:
+ return "ARGB";
+ default:
+ return "????";
+ }
+}
+
+static int bdisp_dbg_last_request(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_request *request = &bdisp->dbg.copy_request;
+ struct bdisp_frame src, dst;
+
+ if (!request->nb_req) {
+ seq_puts(s, "No request\n");
+ return 0;
+ }
+
+ src = request->src;
+ dst = request->dst;
+
+ seq_printf(s, "\nRequest #%d\n", request->nb_req);
+
+ seq_printf(s, "Format: %s\t\t\t%s\n",
+ bdisp_fmt_to_str(src), bdisp_fmt_to_str(dst));
+ seq_printf(s, "Crop area: %dx%d @ %d,%d ==>\t%dx%d @ %d,%d\n",
+ src.crop.width, src.crop.height,
+ src.crop.left, src.crop.top,
+ dst.crop.width, dst.crop.height,
+ dst.crop.left, dst.crop.top);
+ seq_printf(s, "Buff size: %dx%d\t\t%dx%d\n\n",
+ src.width, src.height, dst.width, dst.height);
+
+ if (request->hflip)
+ seq_puts(s, "Horizontal flip\n\n");
+
+ if (request->vflip)
+ seq_puts(s, "Vertical flip\n\n");
+
+ return 0;
+}
+
+#define DUMP(reg) seq_printf(s, #reg " \t0x%08X\n", readl(bdisp->regs + reg))
+
+static int bdisp_dbg_regs(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ int ret;
+ unsigned int i;
+
+ ret = pm_runtime_get_sync(bdisp->dev);
+ if (ret < 0) {
+ seq_puts(s, "Cannot wake up IP\n");
+ return 0;
+ }
+
+ seq_printf(s, "Reg @ = 0x%p\n", bdisp->regs);
+
+ seq_puts(s, "\nStatic:\n");
+ DUMP(BLT_CTL);
+ DUMP(BLT_ITS);
+ DUMP(BLT_STA1);
+ DUMP(BLT_AQ1_CTL);
+ DUMP(BLT_AQ1_IP);
+ DUMP(BLT_AQ1_LNA);
+ DUMP(BLT_AQ1_STA);
+ DUMP(BLT_ITM0);
+
+ seq_puts(s, "\nPlugs:\n");
+ DUMP(BLT_PLUGS1_OP2);
+ DUMP(BLT_PLUGS1_CHZ);
+ DUMP(BLT_PLUGS1_MSZ);
+ DUMP(BLT_PLUGS1_PGZ);
+ DUMP(BLT_PLUGS2_OP2);
+ DUMP(BLT_PLUGS2_CHZ);
+ DUMP(BLT_PLUGS2_MSZ);
+ DUMP(BLT_PLUGS2_PGZ);
+ DUMP(BLT_PLUGS3_OP2);
+ DUMP(BLT_PLUGS3_CHZ);
+ DUMP(BLT_PLUGS3_MSZ);
+ DUMP(BLT_PLUGS3_PGZ);
+ DUMP(BLT_PLUGT_OP2);
+ DUMP(BLT_PLUGT_CHZ);
+ DUMP(BLT_PLUGT_MSZ);
+ DUMP(BLT_PLUGT_PGZ);
+
+ seq_puts(s, "\nNode:\n");
+ DUMP(BLT_NIP);
+ DUMP(BLT_CIC);
+ DUMP(BLT_INS);
+ DUMP(BLT_ACK);
+ DUMP(BLT_TBA);
+ DUMP(BLT_TTY);
+ DUMP(BLT_TXY);
+ DUMP(BLT_TSZ);
+ DUMP(BLT_S1BA);
+ DUMP(BLT_S1TY);
+ DUMP(BLT_S1XY);
+ DUMP(BLT_S2BA);
+ DUMP(BLT_S2TY);
+ DUMP(BLT_S2XY);
+ DUMP(BLT_S2SZ);
+ DUMP(BLT_S3BA);
+ DUMP(BLT_S3TY);
+ DUMP(BLT_S3XY);
+ DUMP(BLT_S3SZ);
+ DUMP(BLT_FCTL);
+ DUMP(BLT_RSF);
+ DUMP(BLT_RZI);
+ DUMP(BLT_HFP);
+ DUMP(BLT_VFP);
+ DUMP(BLT_Y_RSF);
+ DUMP(BLT_Y_RZI);
+ DUMP(BLT_Y_HFP);
+ DUMP(BLT_Y_VFP);
+ DUMP(BLT_IVMX0);
+ DUMP(BLT_IVMX1);
+ DUMP(BLT_IVMX2);
+ DUMP(BLT_IVMX3);
+ DUMP(BLT_OVMX0);
+ DUMP(BLT_OVMX1);
+ DUMP(BLT_OVMX2);
+ DUMP(BLT_OVMX3);
+ DUMP(BLT_DEI);
+
+ seq_puts(s, "\nFilter:\n");
+ for (i = 0; i < BLT_NB_H_COEF; i++) {
+ seq_printf(s, "BLT_HFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_HFC_N + i * 4));
+ }
+ for (i = 0; i < BLT_NB_V_COEF; i++) {
+ seq_printf(s, "BLT_VFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_VFC_N + i * 4));
+ }
+
+ seq_puts(s, "\nLuma filter:\n");
+ for (i = 0; i < BLT_NB_H_COEF; i++) {
+ seq_printf(s, "BLT_Y_HFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_Y_HFC_N + i * 4));
+ }
+ for (i = 0; i < BLT_NB_V_COEF; i++) {
+ seq_printf(s, "BLT_Y_VFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_Y_VFC_N + i * 4));
+ }
+
+ pm_runtime_put(bdisp->dev);
+
+ return 0;
+}
+
+#define SECOND 1000000
+
+static int bdisp_dbg_perf(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_request *request = &bdisp->dbg.copy_request;
+ s64 avg_time_us;
+ int avg_fps, min_fps, max_fps, last_fps;
+
+ if (!request->nb_req) {
+ seq_puts(s, "No request\n");
+ return 0;
+ }
+
+ avg_time_us = div64_s64(bdisp->dbg.tot_duration, request->nb_req);
+ if (avg_time_us > SECOND)
+ avg_fps = 0;
+ else
+ avg_fps = SECOND / (s32)avg_time_us;
+
+ if (bdisp->dbg.min_duration > SECOND)
+ min_fps = 0;
+ else
+ min_fps = SECOND / (s32)bdisp->dbg.min_duration;
+
+ if (bdisp->dbg.max_duration > SECOND)
+ max_fps = 0;
+ else
+ max_fps = SECOND / (s32)bdisp->dbg.max_duration;
+
+ if (bdisp->dbg.last_duration > SECOND)
+ last_fps = 0;
+ else
+ last_fps = SECOND / (s32)bdisp->dbg.last_duration;
+
+ seq_printf(s, "HW processing (%d requests):\n", request->nb_req);
+ seq_printf(s, " Average: %5lld us (%3d fps)\n",
+ avg_time_us, avg_fps);
+ seq_printf(s, " Min-Max: %5lld us (%3d fps) - %5lld us (%3d fps)\n",
+ bdisp->dbg.min_duration, min_fps,
+ bdisp->dbg.max_duration, max_fps);
+ seq_printf(s, " Last: %5lld us (%3d fps)\n",
+ bdisp->dbg.last_duration, last_fps);
+
+ return 0;
+}
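
A small hypothetical helper mirroring the duration-to-fps clamping used above; example_us_to_fps() is not part of the patch and the 12500 us figure is illustrative.

static int example_us_to_fps(s64 duration_us)
{
	/* Durations above one second (or invalid ones) read as 0 fps. */
	if (duration_us <= 0 || duration_us > SECOND)
		return 0;
	return SECOND / (s32)duration_us;	/* e.g. 12500 us -> 80 fps */
}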
+
+#define bdisp_dbg_declare(name) \
+ static int bdisp_dbg_##name##_open(struct inode *i, struct file *f) \
+ { \
+ return single_open(f, bdisp_dbg_##name, i->i_private); \
+ } \
+ static const struct file_operations bdisp_dbg_##name##_fops = { \
+ .open = bdisp_dbg_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define bdisp_dbg_create_entry(name) \
+ debugfs_create_file(#name, S_IRUGO, bdisp->dbg.debugfs_entry, bdisp, \
+ &bdisp_dbg_##name##_fops)
+
+bdisp_dbg_declare(regs);
+bdisp_dbg_declare(last_nodes);
+bdisp_dbg_declare(last_nodes_raw);
+bdisp_dbg_declare(last_request);
+bdisp_dbg_declare(perf);
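
For readers unfamiliar with the pattern, this is roughly what one bdisp_dbg_declare() invocation expands to, hand-expanded here for illustration only (the 'regs' entry is taken as the example):

static int bdisp_dbg_regs_open(struct inode *i, struct file *f)
{
	return single_open(f, bdisp_dbg_regs, i->i_private);
}
static const struct file_operations bdisp_dbg_regs_fops = {
	.open		= bdisp_dbg_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};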
+
+int bdisp_debugfs_create(struct bdisp_dev *bdisp)
+{
+ char dirname[16];
+
+ snprintf(dirname, sizeof(dirname), "%s%d", BDISP_NAME, bdisp->id);
+ bdisp->dbg.debugfs_entry = debugfs_create_dir(dirname, NULL);
+ if (!bdisp->dbg.debugfs_entry)
+ goto err;
+
+ if (!bdisp_dbg_create_entry(regs))
+ goto err;
+
+ if (!bdisp_dbg_create_entry(last_nodes))
+ goto err;
+
+ if (!bdisp_dbg_create_entry(last_nodes_raw))
+ goto err;
+
+ if (!bdisp_dbg_create_entry(last_request))
+ goto err;
+
+ if (!bdisp_dbg_create_entry(perf))
+ goto err;
+
+ return 0;
+
+err:
+ bdisp_debugfs_remove(bdisp);
+ return -ENOMEM;
+}
+
+void bdisp_debugfs_remove(struct bdisp_dev *bdisp)
+{
+ debugfs_remove_recursive(bdisp->dbg.debugfs_entry);
+ bdisp->dbg.debugfs_entry = NULL;
+}
diff --git a/drivers/media/platform/sti/bdisp/bdisp-filter.h b/drivers/media/platform/sti/bdisp/bdisp-filter.h
new file mode 100644
index 000000000..d25adb57e
--- /dev/null
+++ b/drivers/media/platform/sti/bdisp/bdisp-filter.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#define BDISP_HF_NB 64
+#define BDISP_VF_NB 40
+
+/**
+ * struct bdisp_filter_h_spec - Horizontal filter specification
+ *
+ * @min: min scale factor for this filter (6.10 fixed point)
+ * @max: max scale factor for this filter (6.10 fixed point)
+ * @coef: filter coefficients
+ */
+struct bdisp_filter_h_spec {
+ const u16 min;
+ const u16 max;
+ const u8 coef[BDISP_HF_NB];
+};
+/**
+ * struct bdisp_filter_v_spec - Vertical filter specification
+ *
+ * @min: min scale factor for this filter (6.10 fixed point)
+ * @max: max scale factor for this filter (6.10 fixed point)
+ * @coef: filter coefficients
+ */
+struct bdisp_filter_v_spec {
+ const u16 min;
+ const u16 max;
+ const u8 coef[BDISP_VF_NB];
+};
+
+/* RGB YUV 601 standard conversion */
+static const u32 bdisp_rgb_to_yuv[] = {
+ 0x0e1e8bee, 0x08420419, 0xfb5ed471, 0x08004080,
+};
+
+static const u32 bdisp_yuv_to_rgb[] = {
+ 0x3324a800, 0xe604ab9c, 0x0004a957, 0x32121eeb,
+};
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
new file mode 100644
index 000000000..d57f659d7
--- /dev/null
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -0,0 +1,1118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/delay.h>
+
+#include "bdisp.h"
+#include "bdisp-filter.h"
+#include "bdisp-reg.h"
+
+/* Max width of the source frame in a single node */
+#define MAX_SRC_WIDTH 2048
+
+/* Reset & boot poll config */
+#define POLL_RST_MAX 500
+#define POLL_RST_DELAY_MS 2
+
+enum bdisp_target_plan {
+ BDISP_RGB,
+ BDISP_Y,
+ BDISP_CBCR
+};
+
+struct bdisp_op_cfg {
+ bool cconv; /* RGB - YUV conversion */
+ bool hflip; /* Horizontal flip */
+ bool vflip; /* Vertical flip */
+ bool wide; /* Wide (>MAX_SRC_WIDTH) */
+ bool scale; /* Scale */
+ u16 h_inc; /* Horizontal increment in 6.10 format */
+ u16 v_inc; /* Vertical increment in 6.10 format */
+ bool src_interlaced; /* is the src an interlaced buffer */
+ u8 src_nbp; /* nb of planes of the src */
+ bool src_yuv; /* is the src a YUV color format */
+ bool src_420; /* is the src 4:2:0 chroma subsampled */
+ u8 dst_nbp; /* nb of planes of the dst */
+ bool dst_yuv; /* is the dst a YUV color format */
+ bool dst_420; /* is the dst 4:2:0 chroma subsampled */
+};
+
+struct bdisp_filter_addr {
+ u16 min; /* Filter min scale factor (6.10 fixed point) */
+ u16 max; /* Filter max scale factor (6.10 fixed point) */
+ void *virt; /* Virtual address for filter table */
+ dma_addr_t paddr; /* Physical address for filter table */
+};
+
+static const struct bdisp_filter_h_spec bdisp_h_spec[] = {
+ {
+ .min = 0,
+ .max = 921,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x07, 0x3d, 0xfc, 0x01, 0x00,
+ 0x00, 0x01, 0xfd, 0x11, 0x36, 0xf9, 0x02, 0x00,
+ 0x00, 0x01, 0xfb, 0x1b, 0x2e, 0xf9, 0x02, 0x00,
+ 0x00, 0x01, 0xf9, 0x26, 0x26, 0xf9, 0x01, 0x00,
+ 0x00, 0x02, 0xf9, 0x30, 0x19, 0xfb, 0x01, 0x00,
+ 0x00, 0x02, 0xf9, 0x39, 0x0e, 0xfd, 0x01, 0x00,
+ 0x00, 0x01, 0xfc, 0x3e, 0x06, 0xff, 0x00, 0x00
+ }
+ },
+ {
+ .min = 921,
+ .max = 1024,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1024,
+ .max = 1126,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1126,
+ .max = 1228,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1228,
+ .max = 1331,
+ .coef = {
+ 0xfd, 0x04, 0xfc, 0x05, 0x39, 0x05, 0xfc, 0x04,
+ 0xfc, 0x06, 0xf9, 0x0c, 0x39, 0xfe, 0x00, 0x02,
+ 0xfb, 0x08, 0xf6, 0x17, 0x35, 0xf9, 0x02, 0x00,
+ 0xfc, 0x08, 0xf4, 0x20, 0x30, 0xf4, 0x05, 0xff,
+ 0xfd, 0x07, 0xf4, 0x29, 0x28, 0xf3, 0x07, 0xfd,
+ 0xff, 0x05, 0xf5, 0x31, 0x1f, 0xf3, 0x08, 0xfc,
+ 0x00, 0x02, 0xf9, 0x38, 0x14, 0xf6, 0x08, 0xfb,
+ 0x02, 0x00, 0xff, 0x3a, 0x0b, 0xf8, 0x06, 0xfc
+ }
+ },
+ {
+ .min = 1331,
+ .max = 1433,
+ .coef = {
+ 0xfc, 0x06, 0xf9, 0x09, 0x34, 0x09, 0xf9, 0x06,
+ 0xfd, 0x07, 0xf7, 0x10, 0x32, 0x02, 0xfc, 0x05,
+ 0xfe, 0x07, 0xf6, 0x17, 0x2f, 0xfc, 0xff, 0x04,
+ 0xff, 0x06, 0xf5, 0x20, 0x2a, 0xf9, 0x01, 0x02,
+ 0x00, 0x04, 0xf6, 0x27, 0x25, 0xf6, 0x04, 0x00,
+ 0x02, 0x01, 0xf9, 0x2d, 0x1d, 0xf5, 0x06, 0xff,
+ 0x04, 0xff, 0xfd, 0x31, 0x15, 0xf5, 0x07, 0xfe,
+ 0x05, 0xfc, 0x02, 0x35, 0x0d, 0xf7, 0x07, 0xfd
+ }
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .coef = {
+ 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
+ 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
+ 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
+ 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
+ 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
+ 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
+ 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
+ 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
+ }
+ },
+ {
+ .min = 1536,
+ .max = 2048,
+ .coef = {
+ 0x05, 0xfd, 0xfb, 0x13, 0x25, 0x13, 0xfb, 0xfd,
+ 0x05, 0xfc, 0xfd, 0x17, 0x24, 0x0f, 0xf9, 0xff,
+ 0x04, 0xfa, 0xff, 0x1b, 0x24, 0x0b, 0xf9, 0x00,
+ 0x03, 0xf9, 0x01, 0x1f, 0x23, 0x08, 0xf8, 0x01,
+ 0x02, 0xf9, 0x04, 0x22, 0x20, 0x04, 0xf9, 0x02,
+ 0x01, 0xf8, 0x08, 0x25, 0x1d, 0x01, 0xf9, 0x03,
+ 0x00, 0xf9, 0x0c, 0x25, 0x1a, 0xfe, 0xfa, 0x04,
+ 0xff, 0xf9, 0x10, 0x26, 0x15, 0xfc, 0xfc, 0x05
+ }
+ },
+ {
+ .min = 2048,
+ .max = 3072,
+ .coef = {
+ 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
+ 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
+ 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
+ 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
+ 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
+ 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
+ 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
+ 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
+ }
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .coef = {
+ 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
+ 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
+ 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
+ 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
+ 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
+ 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
+ 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
+ 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
+ }
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .coef = {
+ 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
+ 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
+ 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
+ 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
+ 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
+ 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
+ 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
+ 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
+ }
+ },
+ {
+ .min = 5120,
+ .max = 65535,
+ .coef = {
+ 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
+ 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
+ 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
+ }
+ }
+};
+
+#define NB_H_FILTER ARRAY_SIZE(bdisp_h_spec)
+
+
+static const struct bdisp_filter_v_spec bdisp_v_spec[] = {
+ {
+ .min = 0,
+ .max = 1024,
+ .coef = {
+ 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0x00, 0x06, 0x3d, 0xfd, 0x00,
+ 0xfe, 0x0f, 0x38, 0xfb, 0x00,
+ 0xfd, 0x19, 0x2f, 0xfb, 0x00,
+ 0xfc, 0x24, 0x24, 0xfc, 0x00,
+ 0xfb, 0x2f, 0x19, 0xfd, 0x00,
+ 0xfb, 0x38, 0x0f, 0xfe, 0x00,
+ 0xfd, 0x3d, 0x06, 0x00, 0x00
+ }
+ },
+ {
+ .min = 1024,
+ .max = 1331,
+ .coef = {
+ 0xfc, 0x05, 0x3e, 0x05, 0xfc,
+ 0xf8, 0x0e, 0x3b, 0xff, 0x00,
+ 0xf5, 0x18, 0x38, 0xf9, 0x02,
+ 0xf4, 0x21, 0x31, 0xf5, 0x05,
+ 0xf4, 0x2a, 0x27, 0xf4, 0x07,
+ 0xf6, 0x30, 0x1e, 0xf4, 0x08,
+ 0xf9, 0x35, 0x15, 0xf6, 0x07,
+ 0xff, 0x37, 0x0b, 0xf9, 0x06
+ }
+ },
+ {
+ .min = 1331,
+ .max = 1433,
+ .coef = {
+ 0xf8, 0x0a, 0x3c, 0x0a, 0xf8,
+ 0xf6, 0x12, 0x3b, 0x02, 0xfb,
+ 0xf4, 0x1b, 0x35, 0xfd, 0xff,
+ 0xf4, 0x23, 0x30, 0xf8, 0x01,
+ 0xf6, 0x29, 0x27, 0xf6, 0x04,
+ 0xf9, 0x2e, 0x1e, 0xf5, 0x06,
+ 0xfd, 0x31, 0x16, 0xf6, 0x06,
+ 0x02, 0x32, 0x0d, 0xf8, 0x07
+ }
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .coef = {
+ 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
+ 0xf5, 0x15, 0x38, 0x06, 0xf8,
+ 0xf5, 0x1d, 0x33, 0x00, 0xfb,
+ 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
+ 0xf9, 0x28, 0x26, 0xf9, 0x00,
+ 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
+ 0x00, 0x2e, 0x18, 0xf6, 0x04,
+ 0x05, 0x2e, 0x11, 0xf7, 0x05
+ }
+ },
+ {
+ .min = 1536,
+ .max = 2048,
+ .coef = {
+ 0xfb, 0x13, 0x24, 0x13, 0xfb,
+ 0xfd, 0x17, 0x23, 0x0f, 0xfa,
+ 0xff, 0x1a, 0x23, 0x0b, 0xf9,
+ 0x01, 0x1d, 0x22, 0x07, 0xf9,
+ 0x04, 0x20, 0x1f, 0x04, 0xf9,
+ 0x07, 0x22, 0x1c, 0x01, 0xfa,
+ 0x0b, 0x24, 0x17, 0xff, 0xfb,
+ 0x0f, 0x24, 0x14, 0xfd, 0xfc
+ }
+ },
+ {
+ .min = 2048,
+ .max = 3072,
+ .coef = {
+ 0x05, 0x10, 0x16, 0x10, 0x05,
+ 0x06, 0x11, 0x16, 0x0f, 0x04,
+ 0x08, 0x13, 0x15, 0x0e, 0x02,
+ 0x09, 0x14, 0x16, 0x0c, 0x01,
+ 0x0b, 0x15, 0x15, 0x0b, 0x00,
+ 0x0d, 0x16, 0x13, 0x0a, 0x00,
+ 0x0f, 0x17, 0x13, 0x08, 0xff,
+ 0x11, 0x18, 0x12, 0x07, 0xfe
+ }
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .coef = {
+ 0x09, 0x0f, 0x10, 0x0f, 0x09,
+ 0x09, 0x0f, 0x12, 0x0e, 0x08,
+ 0x0a, 0x10, 0x11, 0x0e, 0x07,
+ 0x0b, 0x11, 0x11, 0x0d, 0x06,
+ 0x0c, 0x11, 0x12, 0x0c, 0x05,
+ 0x0d, 0x12, 0x11, 0x0c, 0x04,
+ 0x0e, 0x12, 0x11, 0x0b, 0x04,
+ 0x0f, 0x13, 0x11, 0x0a, 0x03
+ }
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .coef = {
+ 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
+ 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
+ 0x0b, 0x0f, 0x10, 0x0d, 0x09,
+ 0x0c, 0x0f, 0x10, 0x0d, 0x08,
+ 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
+ 0x0d, 0x10, 0x10, 0x0c, 0x07,
+ 0x0e, 0x10, 0x0f, 0x0c, 0x07,
+ 0x0f, 0x10, 0x10, 0x0b, 0x06
+ }
+ },
+ {
+ .min = 5120,
+ .max = 65535,
+ .coef = {
+ 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
+ 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
+ 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0f, 0x0c, 0x08
+ }
+ }
+};
+
+#define NB_V_FILTER ARRAY_SIZE(bdisp_v_spec)
+
+static struct bdisp_filter_addr bdisp_h_filter[NB_H_FILTER];
+static struct bdisp_filter_addr bdisp_v_filter[NB_V_FILTER];
+
+/**
+ * bdisp_hw_reset
+ * @bdisp: bdisp entity
+ *
+ * Resets HW
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+int bdisp_hw_reset(struct bdisp_dev *bdisp)
+{
+ unsigned int i;
+
+ dev_dbg(bdisp->dev, "%s\n", __func__);
+
+ /* Mask Interrupt */
+ writel(0, bdisp->regs + BLT_ITM0);
+
+ /* Reset */
+ writel(readl(bdisp->regs + BLT_CTL) | BLT_CTL_RESET,
+ bdisp->regs + BLT_CTL);
+ writel(0, bdisp->regs + BLT_CTL);
+
+ /* Wait for reset done */
+ for (i = 0; i < POLL_RST_MAX; i++) {
+ if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
+ break;
+ udelay(POLL_RST_DELAY_MS * 1000);
+ }
+ if (i == POLL_RST_MAX)
+ dev_err(bdisp->dev, "Reset timeout\n");
+
+ return (i == POLL_RST_MAX) ? -EAGAIN : 0;
+}
+
+/**
+ * bdisp_hw_get_and_clear_irq
+ * @bdisp: bdisp entity
+ *
+ * Read then reset interrupt status
+ *
+ * RETURNS:
+ * 0 if expected interrupt was raised.
+ */
+int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp)
+{
+ u32 its;
+
+ its = readl(bdisp->regs + BLT_ITS);
+
+ /* Check for the only expected IT: LastNode of AQ1 */
+ if (!(its & BLT_ITS_AQ1_LNA)) {
+ dev_dbg(bdisp->dev, "Unexpected IT status: 0x%08X\n", its);
+ writel(its, bdisp->regs + BLT_ITS);
+ return -1;
+ }
+
+ /* Clear and mask */
+ writel(its, bdisp->regs + BLT_ITS);
+ writel(0, bdisp->regs + BLT_ITM0);
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_free_nodes
+ * @ctx: bdisp context
+ *
+ * Free node memory
+ *
+ * RETURNS:
+ * None
+ */
+void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
+{
+ if (ctx && ctx->node[0])
+ dma_free_attrs(ctx->bdisp_dev->dev,
+ sizeof(struct bdisp_node) * MAX_NB_NODE,
+ ctx->node[0], ctx->node_paddr[0],
+ DMA_ATTR_WRITE_COMBINE);
+}
+
+/**
+ * bdisp_hw_alloc_nodes
+ * @ctx: bdisp context
+ *
+ * Allocate dma memory for nodes
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx)
+{
+ struct device *dev = ctx->bdisp_dev->dev;
+ unsigned int i, node_size = sizeof(struct bdisp_node);
+ void *base;
+ dma_addr_t paddr;
+
+ /* Allocate all the nodes within a single memory page */
+ base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
+ GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+ if (!base) {
+ dev_err(dev, "%s no mem\n", __func__);
+ return -ENOMEM;
+ }
+
+ memset(base, 0, node_size * MAX_NB_NODE);
+
+ for (i = 0; i < MAX_NB_NODE; i++) {
+ ctx->node[i] = base;
+ ctx->node_paddr[i] = paddr;
+ dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
+ &paddr);
+ base += node_size;
+ paddr += node_size;
+ }
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_free_filters
+ * @dev: device
+ *
+ * Free filters memory
+ *
+ * RETURNS:
+ * None
+ */
+void bdisp_hw_free_filters(struct device *dev)
+{
+ int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
+
+ if (bdisp_h_filter[0].virt)
+ dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
+ bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE);
+}
+
+/**
+ * bdisp_hw_alloc_filters
+ * @dev: device
+ *
+ * Allocate dma memory for filters
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int bdisp_hw_alloc_filters(struct device *dev)
+{
+ unsigned int i, size;
+ void *base;
+ dma_addr_t paddr;
+
+ /* Allocate all the filters within a single memory page */
+ size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+ DMA_ATTR_WRITE_COMBINE);
+ if (!base)
+ return -ENOMEM;
+
+ /* Setup filter addresses */
+ for (i = 0; i < NB_H_FILTER; i++) {
+ bdisp_h_filter[i].min = bdisp_h_spec[i].min;
+ bdisp_h_filter[i].max = bdisp_h_spec[i].max;
+ memcpy(base, bdisp_h_spec[i].coef, BDISP_HF_NB);
+ bdisp_h_filter[i].virt = base;
+ bdisp_h_filter[i].paddr = paddr;
+ base += BDISP_HF_NB;
+ paddr += BDISP_HF_NB;
+ }
+
+ for (i = 0; i < NB_V_FILTER; i++) {
+ bdisp_v_filter[i].min = bdisp_v_spec[i].min;
+ bdisp_v_filter[i].max = bdisp_v_spec[i].max;
+ memcpy(base, bdisp_v_spec[i].coef, BDISP_VF_NB);
+ bdisp_v_filter[i].virt = base;
+ bdisp_v_filter[i].paddr = paddr;
+ base += BDISP_VF_NB;
+ paddr += BDISP_VF_NB;
+ }
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_get_hf_addr
+ * @inc: resize increment
+ *
+ * Find the horizontal filter table that fits the resize increment
+ *
+ * RETURNS:
+ * table physical address
+ */
+static dma_addr_t bdisp_hw_get_hf_addr(u16 inc)
+{
+ unsigned int i;
+
+ for (i = NB_H_FILTER - 1; i > 0; i--)
+ if ((bdisp_h_filter[i].min < inc) &&
+ (inc <= bdisp_h_filter[i].max))
+ break;
+
+ return bdisp_h_filter[i].paddr;
+}
+
+/**
+ * bdisp_hw_get_vf_addr
+ * @inc: resize increment
+ *
+ * Find the vertical filter table that fits the resize increment
+ *
+ * RETURNS:
+ * table physical address
+ */
+static dma_addr_t bdisp_hw_get_vf_addr(u16 inc)
+{
+ unsigned int i;
+
+ for (i = NB_V_FILTER - 1; i > 0; i--)
+ if ((bdisp_v_filter[i].min < inc) &&
+ (inc <= bdisp_v_filter[i].max))
+ break;
+
+ return bdisp_v_filter[i].paddr;
+}
+
+/**
+ * bdisp_hw_get_inc
+ * @from: input size
+ * @to: output size
+ * @inc: resize increment in 6.10 format
+ *
+ * Computes the increment (inverse of scale) in 6.10 format
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_get_inc(u32 from, u32 to, u16 *inc)
+{
+ u32 tmp;
+
+ if (!to)
+ return -EINVAL;
+
+ if (to == from) {
+ *inc = 1 << 10;
+ return 0;
+ }
+
+ tmp = (from << 10) / to;
+ if ((tmp > 0xFFFF) || (!tmp))
+ /* overflow (downscale x 63) or too small (upscale x 1024) */
+ return -EINVAL;
+
+ *inc = (u16)tmp;
+
+ return 0;
+}
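
Worked 6.10 fixed-point examples for bdisp_hw_get_inc(), with illustrative sizes only:

/*
 * Downscale 1920 -> 1280: inc = (1920 << 10) / 1280 = 1536, i.e. 1.5
 * Upscale    640 -> 1280: inc = ( 640 << 10) / 1280 =  512, i.e. 0.5
 * inc == 1 << 10 (1024) therefore means "no scaling".
 */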
+
+/**
+ * bdisp_hw_get_hv_inc
+ * @ctx: device context
+ * @h_inc: horizontal increment
+ * @v_inc: vertical increment
+ *
+ * Computes the horizontal & vertical increments (inverse of scale)
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_get_hv_inc(struct bdisp_ctx *ctx, u16 *h_inc, u16 *v_inc)
+{
+ u32 src_w, src_h, dst_w, dst_h;
+
+ src_w = ctx->src.crop.width;
+ src_h = ctx->src.crop.height;
+ dst_w = ctx->dst.crop.width;
+ dst_h = ctx->dst.crop.height;
+
+ if (bdisp_hw_get_inc(src_w, dst_w, h_inc) ||
+ bdisp_hw_get_inc(src_h, dst_h, v_inc)) {
+ dev_err(ctx->bdisp_dev->dev,
+ "scale factors failed (%dx%d)->(%dx%d)\n",
+ src_w, src_h, dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_get_op_cfg
+ * @ctx: device context
+ * @c: operation configuration
+ *
+ * Check which blitter operations are expected and sets the scaling increments
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_get_op_cfg(struct bdisp_ctx *ctx, struct bdisp_op_cfg *c)
+{
+ struct device *dev = ctx->bdisp_dev->dev;
+ struct bdisp_frame *src = &ctx->src;
+ struct bdisp_frame *dst = &ctx->dst;
+
+ if (src->width > MAX_SRC_WIDTH * MAX_VERTICAL_STRIDES) {
+ dev_err(dev, "Image width out of HW caps\n");
+ return -EINVAL;
+ }
+
+ c->wide = src->width > MAX_SRC_WIDTH;
+
+ c->hflip = ctx->hflip;
+ c->vflip = ctx->vflip;
+
+ c->src_interlaced = (src->field == V4L2_FIELD_INTERLACED);
+
+ c->src_nbp = src->fmt->nb_planes;
+ c->src_yuv = (src->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
+ (src->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
+ c->src_420 = c->src_yuv;
+
+ c->dst_nbp = dst->fmt->nb_planes;
+ c->dst_yuv = (dst->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
+ (dst->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
+ c->dst_420 = c->dst_yuv;
+
+ c->cconv = (c->src_yuv != c->dst_yuv);
+
+ if (bdisp_hw_get_hv_inc(ctx, &c->h_inc, &c->v_inc)) {
+ dev_err(dev, "Scale factor out of HW caps\n");
+ return -EINVAL;
+ }
+
+ /* Deinterlacing adjustment: stretch a field to a frame */
+ if (c->src_interlaced)
+ c->v_inc /= 2;
+
+ if ((c->h_inc != (1 << 10)) || (c->v_inc != (1 << 10)))
+ c->scale = true;
+ else
+ c->scale = false;
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_color_format
+ * @pixelformat: v4l2 pixel format
+ *
+ * Convert a v4l2 pixel format to its bdisp equivalent
+ *
+ * RETURNS:
+ * bdisp pixel format
+ */
+static u32 bdisp_hw_color_format(u32 pixelformat)
+{
+ u32 ret;
+
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ ret = (BDISP_YUV_3B << BLT_TTY_COL_SHIFT);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ ret = (BDISP_NV12 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ ret = (BDISP_RGB565 << BLT_TTY_COL_SHIFT);
+ break;
+ case V4L2_PIX_FMT_XBGR32: /* This V4L format actually refers to xRGB */
+ ret = (BDISP_XRGB8888 << BLT_TTY_COL_SHIFT);
+ break;
+ case V4L2_PIX_FMT_RGB24: /* RGB888 format */
+ ret = (BDISP_RGB888 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
+ break;
+ case V4L2_PIX_FMT_ABGR32: /* This V4L format actually refers to ARGB */
+ /* fall through */
+ default:
+ ret = (BDISP_ARGB8888 << BLT_TTY_COL_SHIFT) | BLT_TTY_ALPHA_R;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * bdisp_hw_build_node
+ * @ctx: device context
+ * @cfg: operation configuration
+ * @node: node to be set
+ * @t_plan: whether the node refers to an RGB/Y or a CbCr plane
+ * @src_x_offset: x offset in the source image
+ *
+ * Build a node
+ *
+ * RETURNS:
+ * None
+ */
+static void bdisp_hw_build_node(struct bdisp_ctx *ctx,
+ struct bdisp_op_cfg *cfg,
+ struct bdisp_node *node,
+ enum bdisp_target_plan t_plan, int src_x_offset)
+{
+ struct bdisp_frame *src = &ctx->src;
+ struct bdisp_frame *dst = &ctx->dst;
+ u16 h_inc, v_inc, yh_inc, yv_inc;
+ struct v4l2_rect src_rect = src->crop;
+ struct v4l2_rect dst_rect = dst->crop;
+ int dst_x_offset;
+ s32 dst_width = dst->crop.width;
+ u32 src_fmt, dst_fmt;
+ const u32 *ivmx;
+
+ dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);
+
+ memset(node, 0, sizeof(*node));
+
+ /* Adjust src and dst areas wrt src_x_offset */
+ src_rect.left += src_x_offset;
+ src_rect.width -= src_x_offset;
+ src_rect.width = min_t(__s32, MAX_SRC_WIDTH, src_rect.width);
+
+ dst_x_offset = (src_x_offset * dst_width) / ctx->src.crop.width;
+ dst_rect.left += dst_x_offset;
+ dst_rect.width = (src_rect.width * dst_width) / ctx->src.crop.width;
+
+ /* General */
+ src_fmt = src->fmt->pixelformat;
+ dst_fmt = dst->fmt->pixelformat;
+
+ node->nip = 0;
+ node->cic = BLT_CIC_ALL_GRP;
+ node->ack = BLT_ACK_BYPASS_S2S3;
+
+ switch (cfg->src_nbp) {
+ case 1:
+ /* Src2 = RGB / Src1 = Src3 = off */
+ node->ins = BLT_INS_S1_OFF | BLT_INS_S2_MEM | BLT_INS_S3_OFF;
+ break;
+ case 2:
+ /* Src3 = Y
+ * Src2 = CbCr or ColorFill if writing the Y plane
+ * Src1 = off */
+ node->ins = BLT_INS_S1_OFF | BLT_INS_S3_MEM;
+ if (t_plan == BDISP_Y)
+ node->ins |= BLT_INS_S2_CF;
+ else
+ node->ins |= BLT_INS_S2_MEM;
+ break;
+ case 3:
+ default:
+ /* Src3 = Y
+ * Src2 = Cb or ColorFill if writing the Y plane
+ * Src1 = Cr or ColorFill if writing the Y plane */
+ node->ins = BLT_INS_S3_MEM;
+ if (t_plan == BDISP_Y)
+ node->ins |= BLT_INS_S2_CF | BLT_INS_S1_CF;
+ else
+ node->ins |= BLT_INS_S2_MEM | BLT_INS_S1_MEM;
+ break;
+ }
+
+ /* Color convert */
+ node->ins |= cfg->cconv ? BLT_INS_IVMX : 0;
+ /* Scale needed if scaling OR 4:2:0 up/downsampling */
+ node->ins |= (cfg->scale || cfg->src_420 || cfg->dst_420) ?
+ BLT_INS_SCALE : 0;
+
+ /* Target */
+ node->tba = (t_plan == BDISP_CBCR) ? dst->paddr[1] : dst->paddr[0];
+
+ node->tty = dst->bytesperline;
+ node->tty |= bdisp_hw_color_format(dst_fmt);
+ node->tty |= BLT_TTY_DITHER;
+ node->tty |= (t_plan == BDISP_CBCR) ? BLT_TTY_CHROMA : 0;
+ node->tty |= cfg->hflip ? BLT_TTY_HSO : 0;
+ node->tty |= cfg->vflip ? BLT_TTY_VSO : 0;
+
+ if (cfg->dst_420 && (t_plan == BDISP_CBCR)) {
+ /* 420 chroma downsampling */
+ dst_rect.height /= 2;
+ dst_rect.width /= 2;
+ dst_rect.left /= 2;
+ dst_rect.top /= 2;
+ dst_x_offset /= 2;
+ dst_width /= 2;
+ }
+
+ node->txy = cfg->vflip ? (dst_rect.height - 1) : dst_rect.top;
+ node->txy <<= 16;
+ node->txy |= cfg->hflip ? (dst_width - dst_x_offset - 1) :
+ dst_rect.left;
+
+ node->tsz = dst_rect.height << 16 | dst_rect.width;
+
+ if (cfg->src_interlaced) {
+ /* handle only the top field which is half height of a frame */
+ src_rect.top /= 2;
+ src_rect.height /= 2;
+ }
+
+ if (cfg->src_nbp == 1) {
+ /* Src 2 : RGB */
+ node->s2ba = src->paddr[0];
+
+ node->s2ty = src->bytesperline;
+ if (cfg->src_interlaced)
+ node->s2ty *= 2;
+
+ node->s2ty |= bdisp_hw_color_format(src_fmt);
+
+ node->s2xy = src_rect.top << 16 | src_rect.left;
+ node->s2sz = src_rect.height << 16 | src_rect.width;
+ } else {
+ /* Src 2 : Cb or CbCr */
+ if (cfg->src_420) {
+ /* 420 chroma upsampling */
+ src_rect.top /= 2;
+ src_rect.left /= 2;
+ src_rect.width /= 2;
+ src_rect.height /= 2;
+ }
+
+ node->s2ba = src->paddr[1];
+
+ node->s2ty = src->bytesperline;
+ if (cfg->src_nbp == 3)
+ node->s2ty /= 2;
+ if (cfg->src_interlaced)
+ node->s2ty *= 2;
+
+ node->s2ty |= bdisp_hw_color_format(src_fmt);
+
+ node->s2xy = src_rect.top << 16 | src_rect.left;
+ node->s2sz = src_rect.height << 16 | src_rect.width;
+
+ if (cfg->src_nbp == 3) {
+ /* Src 1 : Cr */
+ node->s1ba = src->paddr[2];
+
+ node->s1ty = node->s2ty;
+ node->s1xy = node->s2xy;
+ }
+
+ /* Src 3 : Y */
+ node->s3ba = src->paddr[0];
+
+ node->s3ty = src->bytesperline;
+ if (cfg->src_interlaced)
+ node->s3ty *= 2;
+ node->s3ty |= bdisp_hw_color_format(src_fmt);
+
+ if ((t_plan != BDISP_CBCR) && cfg->src_420) {
+ /* No chroma upsampling for output RGB / Y plane */
+ node->s3xy = node->s2xy * 2;
+ node->s3sz = node->s2sz * 2;
+ } else {
+ /* No need to read Y (Src3) when writing Chroma */
+ node->s3ty |= BLT_S3TY_BLANK_ACC;
+ node->s3xy = node->s2xy;
+ node->s3sz = node->s2sz;
+ }
+ }
+
+ /* Resize (scale OR 4:2:0 chroma up/downsampling) */
+ if (node->ins & BLT_INS_SCALE) {
+ /* no need to compute Y when writing CbCr from RGB input */
+ bool skip_y = (t_plan == BDISP_CBCR) && !cfg->src_yuv;
+
+ /* FCTL */
+ if (cfg->scale) {
+ node->fctl = BLT_FCTL_HV_SCALE;
+ if (!skip_y)
+ node->fctl |= BLT_FCTL_Y_HV_SCALE;
+ } else {
+ node->fctl = BLT_FCTL_HV_SAMPLE;
+ if (!skip_y)
+ node->fctl |= BLT_FCTL_Y_HV_SAMPLE;
+ }
+
+ /* RSF - Chroma may need to be up/downsampled */
+ h_inc = cfg->h_inc;
+ v_inc = cfg->v_inc;
+ if (!cfg->src_420 && cfg->dst_420 && (t_plan == BDISP_CBCR)) {
+ /* RGB to 4:2:0 for Chroma: downsample */
+ h_inc *= 2;
+ v_inc *= 2;
+ } else if (cfg->src_420 && !cfg->dst_420) {
+ /* 4:2:0 to RGB: upsample */
+ h_inc /= 2;
+ v_inc /= 2;
+ }
+ node->rsf = v_inc << 16 | h_inc;
+
+ /* RZI */
+ node->rzi = BLT_RZI_DEFAULT;
+
+ /* Filter table physical addr */
+ node->hfp = bdisp_hw_get_hf_addr(h_inc);
+ node->vfp = bdisp_hw_get_vf_addr(v_inc);
+
+ /* Y version */
+ if (!skip_y) {
+ yh_inc = cfg->h_inc;
+ yv_inc = cfg->v_inc;
+
+ node->y_rsf = yv_inc << 16 | yh_inc;
+ node->y_rzi = BLT_RZI_DEFAULT;
+ node->y_hfp = bdisp_hw_get_hf_addr(yh_inc);
+ node->y_vfp = bdisp_hw_get_vf_addr(yv_inc);
+ }
+ }
+
+ /* Versatile matrix for RGB / YUV conversion */
+ if (cfg->cconv) {
+ ivmx = cfg->src_yuv ? bdisp_yuv_to_rgb : bdisp_rgb_to_yuv;
+
+ node->ivmx0 = ivmx[0];
+ node->ivmx1 = ivmx[1];
+ node->ivmx2 = ivmx[2];
+ node->ivmx3 = ivmx[3];
+ }
+}
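+
+/*
+ * Editor's worked example (not part of the driver): when an RGB source is
+ * converted to a 4:2:0 destination, the CbCr node above downsamples twice as
+ * hard as the Y node, since the chroma plane is itself half-size. E.g.
+ * scaling a 1920-wide crop down to 960 with an NV12 destination:
+ *
+ *	luma   h_inc = (1920 << 10) / 960 = 2048	(2.0 in 6.10)
+ *	chroma h_inc = 2048 * 2           = 4096	(4.0 in 6.10)
+ */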
+
+/**
+ * bdisp_hw_build_all_nodes
+ * @ctx: device context
+ *
+ * Build all the nodes for the blitter operation
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_build_all_nodes(struct bdisp_ctx *ctx)
+{
+ struct bdisp_op_cfg cfg;
+ unsigned int i, nid = 0;
+ int src_x_offset = 0;
+
+ for (i = 0; i < MAX_NB_NODE; i++)
+ if (!ctx->node[i]) {
+ dev_err(ctx->bdisp_dev->dev, "node %d is null\n", i);
+ return -EINVAL;
+ }
+
+ /* Get configuration (scale, flip, ...) */
+ if (bdisp_hw_get_op_cfg(ctx, &cfg))
+ return -EINVAL;
+
+ /* Split source in vertical strides (HW constraint) */
+ for (i = 0; i < MAX_VERTICAL_STRIDES; i++) {
+ /* Build RGB/Y node and link it to the previous node */
+ bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
+ cfg.dst_nbp == 1 ? BDISP_RGB : BDISP_Y,
+ src_x_offset);
+ if (nid)
+ ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
+ nid++;
+
+ /* Build additional Cb(Cr) node, link it to the previous one */
+ if (cfg.dst_nbp > 1) {
+ bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
+ BDISP_CBCR, src_x_offset);
+ ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
+ nid++;
+ }
+
+ /* Next stride until full width covered */
+ src_x_offset += MAX_SRC_WIDTH;
+ if (src_x_offset >= ctx->src.crop.width)
+ break;
+ }
+
+ /* Mark last node as the last */
+ ctx->node[nid - 1]->nip = 0;
+
+ return 0;
+}
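+
+/*
+ * Editor's illustrative note (not part of the driver): with an NV12
+ * destination and a source wider than MAX_SRC_WIDTH, the loop above chains
+ * four nodes: Y(stride 0) -> CbCr(stride 0) -> Y(stride 1) -> CbCr(stride 1),
+ * which is why MAX_NB_NODE = MAX_OUTPUT_PLANES * MAX_VERTICAL_STRIDES = 4.
+ */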
+
+/**
+ * bdisp_hw_save_request
+ * @ctx: device context
+ *
+ * Save a copy of the request and of the built nodes
+ *
+ * RETURNS:
+ * None
+ */
+static void bdisp_hw_save_request(struct bdisp_ctx *ctx)
+{
+ struct bdisp_node **copy_node = ctx->bdisp_dev->dbg.copy_node;
+ struct bdisp_request *request = &ctx->bdisp_dev->dbg.copy_request;
+ struct bdisp_node **node = ctx->node;
+ int i;
+
+ /* Request copy */
+ request->src = ctx->src;
+ request->dst = ctx->dst;
+ request->hflip = ctx->hflip;
+ request->vflip = ctx->vflip;
+ request->nb_req++;
+
+ /* Nodes copy */
+ for (i = 0; i < MAX_NB_NODE; i++) {
+ /* Allocate memory if not done yet */
+ if (!copy_node[i]) {
+ copy_node[i] = devm_kzalloc(ctx->bdisp_dev->dev,
+ sizeof(*copy_node[i]),
+ GFP_ATOMIC);
+ if (!copy_node[i])
+ return;
+ }
+ *copy_node[i] = *node[i];
+ }
+}
+
+/**
+ * bdisp_hw_update
+ * @ctx: device context
+ *
+ * Send the request to the HW
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int bdisp_hw_update(struct bdisp_ctx *ctx)
+{
+ int ret;
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+ struct device *dev = bdisp->dev;
+ unsigned int node_id;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* build nodes */
+ ret = bdisp_hw_build_all_nodes(ctx);
+ if (ret) {
+ dev_err(dev, "cannot build nodes (%d)\n", ret);
+ return ret;
+ }
+
+ /* Save a copy of the request */
+ bdisp_hw_save_request(ctx);
+
+ /* Configure interrupt to 'Last Node Reached for AQ1' */
+ writel(BLT_AQ1_CTL_CFG, bdisp->regs + BLT_AQ1_CTL);
+ writel(BLT_ITS_AQ1_LNA, bdisp->regs + BLT_ITM0);
+
+ /* Write first node addr */
+ writel(ctx->node_paddr[0], bdisp->regs + BLT_AQ1_IP);
+
+ /* Find and write last node addr : this starts the HW processing */
+ for (node_id = 0; node_id < MAX_NB_NODE - 1; node_id++) {
+ if (!ctx->node[node_id]->nip)
+ break;
+ }
+ writel(ctx->node_paddr[node_id], bdisp->regs + BLT_AQ1_LNA);
+
+ return 0;
+}
diff --git a/drivers/media/platform/sti/bdisp/bdisp-reg.h b/drivers/media/platform/sti/bdisp/bdisp-reg.h
new file mode 100644
index 000000000..b07ecc903
--- /dev/null
+++ b/drivers/media/platform/sti/bdisp/bdisp-reg.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+struct bdisp_node {
+ /* 0 - General */
+ u32 nip;
+ u32 cic;
+ u32 ins;
+ u32 ack;
+ /* 1 - Target */
+ u32 tba;
+ u32 tty;
+ u32 txy;
+ u32 tsz;
+ /* 2 - Color Fill */
+ u32 s1cf;
+ u32 s2cf;
+ /* 3 - Source 1 */
+ u32 s1ba;
+ u32 s1ty;
+ u32 s1xy;
+ u32 s1sz_tsz;
+ /* 4 - Source 2 */
+ u32 s2ba;
+ u32 s2ty;
+ u32 s2xy;
+ u32 s2sz;
+ /* 5 - Source 3 */
+ u32 s3ba;
+ u32 s3ty;
+ u32 s3xy;
+ u32 s3sz;
+ /* 6 - Clipping */
+ u32 cwo;
+ u32 cws;
+ /* 7 - CLUT */
+ u32 cco;
+ u32 cml;
+ /* 8 - Filter & Mask */
+ u32 fctl;
+ u32 pmk;
+ /* 9 - Chroma Filter */
+ u32 rsf;
+ u32 rzi;
+ u32 hfp;
+ u32 vfp;
+ /* 10 - Luma Filter */
+ u32 y_rsf;
+ u32 y_rzi;
+ u32 y_hfp;
+ u32 y_vfp;
+ /* 11 - Flicker */
+ u32 ff0;
+ u32 ff1;
+ u32 ff2;
+ u32 ff3;
+ /* 12 - Color Key */
+ u32 key1;
+ u32 key2;
+ /* 14 - Static Address & User */
+ u32 sar;
+ u32 usr;
+ /* 15 - Input Versatile Matrix */
+ u32 ivmx0;
+ u32 ivmx1;
+ u32 ivmx2;
+ u32 ivmx3;
+ /* 16 - Output Versatile Matrix */
+ u32 ovmx0;
+ u32 ovmx1;
+ u32 ovmx2;
+ u32 ovmx3;
+ /* 17 - Pace */
+ u32 pace;
+ /* 18 - VC1R & DEI */
+ u32 vc1r;
+ u32 dei;
+ /* 19 - Gradient Fill */
+ u32 hgf;
+ u32 vgf;
+};
+
+/* HW registers : static */
+#define BLT_CTL 0x0A00
+#define BLT_ITS 0x0A04
+#define BLT_STA1 0x0A08
+#define BLT_AQ1_CTL 0x0A60
+#define BLT_AQ1_IP 0x0A64
+#define BLT_AQ1_LNA 0x0A68
+#define BLT_AQ1_STA 0x0A6C
+#define BLT_ITM0 0x0AD0
+/* HW registers : plugs */
+#define BLT_PLUGS1_OP2 0x0B04
+#define BLT_PLUGS1_CHZ 0x0B08
+#define BLT_PLUGS1_MSZ 0x0B0C
+#define BLT_PLUGS1_PGZ 0x0B10
+#define BLT_PLUGS2_OP2 0x0B24
+#define BLT_PLUGS2_CHZ 0x0B28
+#define BLT_PLUGS2_MSZ 0x0B2C
+#define BLT_PLUGS2_PGZ 0x0B30
+#define BLT_PLUGS3_OP2 0x0B44
+#define BLT_PLUGS3_CHZ 0x0B48
+#define BLT_PLUGS3_MSZ 0x0B4C
+#define BLT_PLUGS3_PGZ 0x0B50
+#define BLT_PLUGT_OP2 0x0B84
+#define BLT_PLUGT_CHZ 0x0B88
+#define BLT_PLUGT_MSZ 0x0B8C
+#define BLT_PLUGT_PGZ 0x0B90
+/* HW registers : node */
+#define BLT_NIP 0x0C00
+#define BLT_CIC 0x0C04
+#define BLT_INS 0x0C08
+#define BLT_ACK 0x0C0C
+#define BLT_TBA 0x0C10
+#define BLT_TTY 0x0C14
+#define BLT_TXY 0x0C18
+#define BLT_TSZ 0x0C1C
+#define BLT_S1BA 0x0C28
+#define BLT_S1TY 0x0C2C
+#define BLT_S1XY 0x0C30
+#define BLT_S2BA 0x0C38
+#define BLT_S2TY 0x0C3C
+#define BLT_S2XY 0x0C40
+#define BLT_S2SZ 0x0C44
+#define BLT_S3BA 0x0C48
+#define BLT_S3TY 0x0C4C
+#define BLT_S3XY 0x0C50
+#define BLT_S3SZ 0x0C54
+#define BLT_FCTL 0x0C68
+#define BLT_RSF 0x0C70
+#define BLT_RZI 0x0C74
+#define BLT_HFP 0x0C78
+#define BLT_VFP 0x0C7C
+#define BLT_Y_RSF 0x0C80
+#define BLT_Y_RZI 0x0C84
+#define BLT_Y_HFP 0x0C88
+#define BLT_Y_VFP 0x0C8C
+#define BLT_IVMX0 0x0CC0
+#define BLT_IVMX1 0x0CC4
+#define BLT_IVMX2 0x0CC8
+#define BLT_IVMX3 0x0CCC
+#define BLT_OVMX0 0x0CD0
+#define BLT_OVMX1 0x0CD4
+#define BLT_OVMX2 0x0CD8
+#define BLT_OVMX3 0x0CDC
+#define BLT_DEI 0x0CEC
+/* HW registers : filters */
+#define BLT_HFC_N 0x0D00
+#define BLT_VFC_N 0x0D90
+#define BLT_Y_HFC_N 0x0E00
+#define BLT_Y_VFC_N 0x0E90
+#define BLT_NB_H_COEF 16
+#define BLT_NB_V_COEF 10
+
+/* Registers values */
+#define BLT_CTL_RESET BIT(31) /* Global soft reset */
+
+#define BLT_ITS_AQ1_LNA BIT(12) /* AQ1 LNA reached */
+
+#define BLT_STA1_IDLE BIT(0) /* BDISP idle */
+
+#define BLT_AQ1_CTL_CFG 0x80400003 /* Enable, P3, LNA reached */
+
+#define BLT_INS_S1_MASK (BIT(0) | BIT(1) | BIT(2))
+#define BLT_INS_S1_OFF 0x00000000 /* src1 disabled */
+#define BLT_INS_S1_MEM 0x00000001 /* src1 fetched from memory */
+#define BLT_INS_S1_CF 0x00000003 /* src1 color fill */
+#define BLT_INS_S1_COPY 0x00000004 /* src1 direct copy */
+#define BLT_INS_S1_FILL 0x00000007 /* src1 direct fill */
+#define BLT_INS_S2_MASK (BIT(3) | BIT(4))
+#define BLT_INS_S2_OFF 0x00000000 /* src2 disabled */
+#define BLT_INS_S2_MEM 0x00000008 /* src2 fetched from memory */
+#define BLT_INS_S2_CF 0x00000018 /* src2 color fill */
+#define BLT_INS_S3_MASK BIT(5)
+#define BLT_INS_S3_OFF 0x00000000 /* src3 disabled */
+#define BLT_INS_S3_MEM 0x00000020 /* src3 fetched from memory */
+#define BLT_INS_IVMX BIT(6) /* Input versatile matrix */
+#define BLT_INS_CLUT BIT(7) /* Color Look Up Table */
+#define BLT_INS_SCALE BIT(8) /* Scaling */
+#define BLT_INS_FLICK BIT(9) /* Flicker filter */
+#define BLT_INS_CLIP BIT(10) /* Clipping */
+#define BLT_INS_CKEY BIT(11) /* Color key */
+#define BLT_INS_OVMX BIT(12) /* Output versatile matrix */
+#define BLT_INS_DEI BIT(13) /* Deinterlace */
+#define BLT_INS_PMASK BIT(14) /* Plane mask */
+#define BLT_INS_VC1R BIT(17) /* VC1 Range mapping */
+#define BLT_INS_ROTATE BIT(18) /* Rotation */
+#define BLT_INS_GRAD BIT(19) /* Gradient fill */
+#define BLT_INS_AQLOCK BIT(29) /* AQ lock */
+#define BLT_INS_PACE BIT(30) /* Pace down */
+#define BLT_INS_IRQ BIT(31) /* Raise IRQ when node done */
+#define BLT_CIC_ALL_GRP 0x000FDFFC /* all valid groups present */
+#define BLT_ACK_BYPASS_S2S3 0x00000007 /* Bypass src2 and src3 */
+
+#define BLT_TTY_COL_SHIFT 16 /* Color format */
+#define BLT_TTY_COL_MASK 0x001F0000 /* Color format mask */
+#define BLT_TTY_ALPHA_R BIT(21) /* Alpha range */
+#define BLT_TTY_CR_NOT_CB BIT(22) /* CR not Cb */
+#define BLT_TTY_MB BIT(23) /* MB frame / field */
+#define BLT_TTY_HSO BIT(24) /* H scan order */
+#define BLT_TTY_VSO BIT(25) /* V scan order */
+#define BLT_TTY_DITHER BIT(26) /* Dithering */
+#define BLT_TTY_CHROMA BIT(27) /* Write chroma / luma */
+#define BLT_TTY_BIG_END BIT(30) /* Big endianness */
+
+#define BLT_S1TY_A1_SUBSET BIT(22) /* A1 subset */
+#define BLT_S1TY_CHROMA_EXT BIT(26) /* Chroma Extended */
+#define BTL_S1TY_SUBBYTE BIT(28) /* Sub-byte fmt, pixel order */
+#define BLT_S1TY_RGB_EXP BIT(29) /* RGB expansion mode */
+
+#define BLT_S2TY_A1_SUBSET BIT(22) /* A1 subset */
+#define BLT_S2TY_CHROMA_EXT BIT(26) /* Chroma Extended */
+#define BTL_S2TY_SUBBYTE BIT(28) /* Sub-byte fmt, pixel order */
+#define BLT_S2TY_RGB_EXP BIT(29) /* RGB expansion mode */
+
+#define BLT_S3TY_BLANK_ACC BIT(26) /* Blank access */
+
+#define BLT_FCTL_HV_SCALE 0x00000055 /* H/V resize + color filter */
+#define BLT_FCTL_Y_HV_SCALE 0x33000000 /* Luma version */
+
+#define BLT_FCTL_HV_SAMPLE 0x00000044 /* H/V resize */
+#define BLT_FCTL_Y_HV_SAMPLE 0x22000000 /* Luma version */
+
+#define BLT_RZI_DEFAULT 0x20003000 /* H/VNB_repeat = 3/2 */
+
+/* Color format */
+#define BDISP_RGB565 0x00 /* RGB565 */
+#define BDISP_RGB888 0x01 /* RGB888 */
+#define BDISP_XRGB8888 0x02 /* RGB888_32 */
+#define BDISP_ARGB8888 0x05 /* ARGB888 */
+#define BDISP_NV12 0x16 /* YCbCr42x R2B */
+#define BDISP_YUV_3B 0x1E /* YUV (3 buffer) */
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
new file mode 100644
index 000000000..00f6e3f06
--- /dev/null
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -0,0 +1,1435 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "bdisp.h"
+
+#define BDISP_MAX_CTRL_NUM 10
+
+#define BDISP_WORK_TIMEOUT ((100 * HZ) / 1000)
+
+/* User configuration change */
+#define BDISP_PARAMS BIT(0) /* Config updated */
+#define BDISP_SRC_FMT BIT(1) /* Source set */
+#define BDISP_DST_FMT BIT(2) /* Destination set */
+#define BDISP_CTX_STOP_REQ BIT(3) /* Stop request */
+#define BDISP_CTX_ABORT BIT(4) /* Abort while device run */
+
+#define BDISP_MIN_W 1
+#define BDISP_MAX_W 8191
+#define BDISP_MIN_H 1
+#define BDISP_MAX_H 8191
+
+#define fh_to_ctx(__fh) container_of(__fh, struct bdisp_ctx, fh)
+
+enum bdisp_dev_flags {
+ ST_M2M_OPEN, /* Driver opened */
+ ST_M2M_RUNNING, /* HW device running */
+ ST_M2M_SUSPENDED, /* Driver suspended */
+ ST_M2M_SUSPENDING, /* Driver being suspended */
+};
+
+static const struct bdisp_fmt bdisp_formats[] = {
+ /* ARGB8888. [31:0] A:R:G:B 8:8:8:8 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_ABGR32, /* is actually ARGB */
+ .nb_planes = 1,
+ .bpp = 32,
+ .bpp_plane0 = 32,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* XRGB8888. [31:0] x:R:G:B 8:8:8:8 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_XBGR32, /* is actually xRGB */
+ .nb_planes = 1,
+ .bpp = 32,
+ .bpp_plane0 = 32,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* RGB565. [15:0] R:G:B 5:6:5 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .nb_planes = 1,
+ .bpp = 16,
+ .bpp_plane0 = 16,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* NV12. YUV420SP - 1 plane for Y + 1 plane for (CbCr) */
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .nb_planes = 2,
+ .bpp = 12,
+ .bpp_plane0 = 8,
+ .w_align = 2,
+ .h_align = 2
+ },
+ /* RGB888. [23:0] B:G:R 8:8:8 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .nb_planes = 1,
+ .bpp = 24,
+ .bpp_plane0 = 24,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* YU12. YUV420P - 1 plane for Y + 1 plane for Cb + 1 plane for Cr
+ * Must be kept as the LAST element of this table (not supported on capture)
+ */
+ {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .nb_planes = 3,
+ .bpp = 12,
+ .bpp_plane0 = 8,
+ .w_align = 2,
+ .h_align = 2
+ }
+};
+
+/* Default format: HD ARGB32 */
+#define BDISP_DEF_WIDTH 1920
+#define BDISP_DEF_HEIGHT 1080
+
+static const struct bdisp_frame bdisp_dflt_fmt = {
+ .width = BDISP_DEF_WIDTH,
+ .height = BDISP_DEF_HEIGHT,
+ .fmt = &bdisp_formats[0],
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = BDISP_DEF_WIDTH * 4,
+ .sizeimage = BDISP_DEF_WIDTH * BDISP_DEF_HEIGHT * 4,
+ .colorspace = V4L2_COLORSPACE_REC709,
+ .crop = {0, 0, BDISP_DEF_WIDTH, BDISP_DEF_HEIGHT},
+ .paddr = {0, 0, 0, 0}
+};
+
+static inline void bdisp_ctx_state_lock_set(u32 state, struct bdisp_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ctx->state |= state;
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+}
+
+static inline void bdisp_ctx_state_lock_clear(u32 state, struct bdisp_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ctx->state &= ~state;
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+}
+
+static inline bool bdisp_ctx_state_is_set(u32 mask, struct bdisp_ctx *ctx)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ret = (ctx->state & mask) == mask;
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct bdisp_fmt *bdisp_find_fmt(u32 pixelformat)
+{
+ const struct bdisp_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bdisp_formats); i++) {
+ fmt = &bdisp_formats[i];
+ if (fmt->pixelformat == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct bdisp_frame *ctx_get_frame(struct bdisp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->src;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->dst;
+ default:
+ dev_err(ctx->bdisp_dev->dev,
+ "Wrong buffer/video queue type (%d)\n", type);
+ break;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n"))
+ return;
+
+ dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |= src_vb->flags &
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+
+ v4l2_m2m_job_finish(ctx->bdisp_dev->m2m.m2m_dev,
+ ctx->fh.m2m_ctx);
+ }
+}
+
+static int bdisp_ctx_stop_req(struct bdisp_ctx *ctx)
+{
+ struct bdisp_ctx *curr_ctx;
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+ int ret;
+
+ dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);
+
+ cancel_delayed_work(&bdisp->timeout_work);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
+ if (!test_bit(ST_M2M_RUNNING, &bdisp->state) || (curr_ctx != ctx))
+ return 0;
+
+ bdisp_ctx_state_lock_set(BDISP_CTX_STOP_REQ, ctx);
+
+ ret = wait_event_timeout(bdisp->irq_queue,
+ !bdisp_ctx_state_is_set(BDISP_CTX_STOP_REQ, ctx),
+ BDISP_WORK_TIMEOUT);
+
+ if (!ret) {
+ dev_err(ctx->bdisp_dev->dev, "%s IRQ timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void __bdisp_job_abort(struct bdisp_ctx *ctx)
+{
+ int ret;
+
+ ret = bdisp_ctx_stop_req(ctx);
+ if ((ret == -ETIMEDOUT) || (ctx->state & BDISP_CTX_ABORT)) {
+ bdisp_ctx_state_lock_clear(BDISP_CTX_STOP_REQ | BDISP_CTX_ABORT,
+ ctx);
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void bdisp_job_abort(void *priv)
+{
+ __bdisp_job_abort((struct bdisp_ctx *)priv);
+}
+
+static int bdisp_get_addr(struct bdisp_ctx *ctx, struct vb2_buffer *vb,
+ struct bdisp_frame *frame, dma_addr_t *paddr)
+{
+ if (!vb || !frame)
+ return -EINVAL;
+
+ paddr[0] = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (frame->fmt->nb_planes > 1)
+ /* UV (NV12) or U (420P) */
+ paddr[1] = (dma_addr_t)(paddr[0] +
+ frame->bytesperline * frame->height);
+
+ if (frame->fmt->nb_planes > 2)
+ /* V (420P) */
+ paddr[2] = (dma_addr_t)(paddr[1] +
+ (frame->bytesperline * frame->height) / 4);
+
+ if (frame->fmt->nb_planes > 3)
+ dev_dbg(ctx->bdisp_dev->dev, "ignoring some planes\n");
+
+ dev_dbg(ctx->bdisp_dev->dev,
+ "%s plane[0]=%pad plane[1]=%pad plane[2]=%pad\n",
+ __func__, &paddr[0], &paddr[1], &paddr[2]);
+
+ return 0;
+}
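+
+/*
+ * Editor's illustrative note (not part of the driver): the planes are assumed
+ * to be packed one after the other in a single contiguous buffer. For a
+ * 1920x1080 NV12 frame (bytesperline = 1920):
+ *
+ *	paddr[0] = base				// Y plane, 1920 * 1080 bytes
+ *	paddr[1] = base + 1920 * 1080		// interleaved CbCr plane
+ *
+ * and for 3-plane YUV420 the Cr plane starts at paddr[1] + (1920 * 1080) / 4.
+ */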
+
+static int bdisp_get_bufs(struct bdisp_ctx *ctx)
+{
+ struct bdisp_frame *src, *dst;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ int ret;
+
+ src = &ctx->src;
+ dst = &ctx->dst;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ ret = bdisp_get_addr(ctx, &src_vb->vb2_buf, src, src->paddr);
+ if (ret)
+ return ret;
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ ret = bdisp_get_addr(ctx, &dst_vb->vb2_buf, dst, dst->paddr);
+ if (ret)
+ return ret;
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+
+ return 0;
+}
+
+static void bdisp_device_run(void *priv)
+{
+ struct bdisp_ctx *ctx = priv;
+ struct bdisp_dev *bdisp;
+ unsigned long flags;
+ int err = 0;
+
+ if (WARN(!ctx, "Null hardware context\n"))
+ return;
+
+ bdisp = ctx->bdisp_dev;
+ dev_dbg(bdisp->dev, "%s\n", __func__);
+ spin_lock_irqsave(&bdisp->slock, flags);
+
+ if (bdisp->m2m.ctx != ctx) {
+ dev_dbg(bdisp->dev, "ctx updated: %p -> %p\n",
+ bdisp->m2m.ctx, ctx);
+ ctx->state |= BDISP_PARAMS;
+ bdisp->m2m.ctx = ctx;
+ }
+
+ if (ctx->state & BDISP_CTX_STOP_REQ) {
+ ctx->state &= ~BDISP_CTX_STOP_REQ;
+ ctx->state |= BDISP_CTX_ABORT;
+ wake_up(&bdisp->irq_queue);
+ goto out;
+ }
+
+ err = bdisp_get_bufs(ctx);
+ if (err) {
+ dev_err(bdisp->dev, "cannot get address\n");
+ goto out;
+ }
+
+ bdisp_dbg_perf_begin(bdisp);
+
+ err = bdisp_hw_reset(bdisp);
+ if (err) {
+ dev_err(bdisp->dev, "could not get HW ready\n");
+ goto out;
+ }
+
+ err = bdisp_hw_update(ctx);
+ if (err) {
+ dev_err(bdisp->dev, "could not send HW request\n");
+ goto out;
+ }
+
+ queue_delayed_work(bdisp->work_queue, &bdisp->timeout_work,
+ BDISP_WORK_TIMEOUT);
+ set_bit(ST_M2M_RUNNING, &bdisp->state);
+out:
+ ctx->state &= ~BDISP_PARAMS;
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+ if (err)
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+}
+
+static const struct v4l2_m2m_ops bdisp_m2m_ops = {
+ .device_run = bdisp_device_run,
+ .job_abort = bdisp_job_abort,
+};
+
+static int __bdisp_s_ctrl(struct bdisp_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "unknown control %d\n", ctrl->id);
+ return -EINVAL;
+ }
+
+ ctx->state |= BDISP_PARAMS;
+
+ return 0;
+}
+
+static int bdisp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct bdisp_ctx *ctx = container_of(ctrl->handler, struct bdisp_ctx,
+ ctrl_handler);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ret = __bdisp_s_ctrl(ctx, ctrl);
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops bdisp_c_ops = {
+ .s_ctrl = bdisp_s_ctrl,
+};
+
+static int bdisp_ctrls_create(struct bdisp_ctx *ctx)
+{
+ if (ctx->ctrls_rdy)
+ return 0;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, BDISP_MAX_CTRL_NUM);
+
+ ctx->bdisp_ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &bdisp_c_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctx->bdisp_ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &bdisp_c_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ ctx->ctrls_rdy = true;
+
+ return 0;
+}
+
+static void bdisp_ctrls_delete(struct bdisp_ctx *ctx)
+{
+ if (ctx->ctrls_rdy) {
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ ctx->ctrls_rdy = false;
+ }
+}
+
+static int bdisp_queue_setup(struct vb2_queue *vq,
+ unsigned int *nb_buf, unsigned int *nb_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct bdisp_ctx *ctx = vb2_get_drv_priv(vq);
+ struct bdisp_frame *frame = ctx_get_frame(ctx, vq->type);
+
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ if (!frame->fmt) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid format\n");
+ return -EINVAL;
+ }
+
+ if (*nb_planes)
+ return sizes[0] < frame->sizeimage ? -EINVAL : 0;
+
+ *nb_planes = 1;
+ sizes[0] = frame->sizeimage;
+
+ return 0;
+}
+
+static int bdisp_buf_prepare(struct vb2_buffer *vb)
+{
+ struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct bdisp_frame *frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ vb2_set_plane_payload(vb, 0, frame->sizeimage);
+
+ return 0;
+}
+
+static void bdisp_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ /* return to V4L2 any 0-size buffer so it can be dequeued by user */
+ if (!vb2_get_plane_payload(vb, 0)) {
+ dev_dbg(ctx->bdisp_dev->dev, "0 data buffer, skip it\n");
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ return;
+ }
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct bdisp_ctx *ctx = q->drv_priv;
+ struct vb2_v4l2_buffer *buf;
+ int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
+
+ if (ret < 0) {
+ dev_err(ctx->bdisp_dev->dev, "failed to set runtime PM\n");
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bdisp_stop_streaming(struct vb2_queue *q)
+{
+ struct bdisp_ctx *ctx = q->drv_priv;
+
+ __bdisp_job_abort(ctx);
+
+ pm_runtime_put(ctx->bdisp_dev->dev);
+}
+
+static const struct vb2_ops bdisp_qops = {
+ .queue_setup = bdisp_queue_setup,
+ .buf_prepare = bdisp_buf_prepare,
+ .buf_queue = bdisp_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = bdisp_stop_streaming,
+ .start_streaming = bdisp_start_streaming,
+};
+
+static int queue_init(void *priv,
+ struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct bdisp_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &bdisp_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->bdisp_dev->lock;
+ src_vq->dev = ctx->bdisp_dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &bdisp_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->bdisp_dev->lock;
+ dst_vq->dev = ctx->bdisp_dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int bdisp_open(struct file *file)
+{
+ struct bdisp_dev *bdisp = video_drvdata(file);
+ struct bdisp_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&bdisp->lock))
+ return -ERESTARTSYS;
+
+ /* Allocate memory for both context and node */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ ctx->bdisp_dev = bdisp;
+
+ if (bdisp_hw_alloc_nodes(ctx)) {
+ dev_err(bdisp->dev, "no memory for nodes\n");
+ ret = -ENOMEM;
+ goto mem_ctx;
+ }
+
+ v4l2_fh_init(&ctx->fh, bdisp->m2m.vdev);
+
+ ret = bdisp_ctrls_create(ctx);
+ if (ret) {
+ dev_err(bdisp->dev, "Failed to create control\n");
+ goto error_fh;
+ }
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ /* Default format */
+ ctx->src = bdisp_dflt_fmt;
+ ctx->dst = bdisp_dflt_fmt;
+
+ /* Setup the device context for mem2mem mode. */
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(bdisp->m2m.m2m_dev, ctx,
+ queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ dev_err(bdisp->dev, "Failed to initialize m2m context\n");
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error_ctrls;
+ }
+
+ bdisp->m2m.refcnt++;
+ set_bit(ST_M2M_OPEN, &bdisp->state);
+
+ dev_dbg(bdisp->dev, "driver opened, ctx = 0x%p\n", ctx);
+
+ mutex_unlock(&bdisp->lock);
+
+ return 0;
+
+error_ctrls:
+ bdisp_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+error_fh:
+ v4l2_fh_exit(&ctx->fh);
+ bdisp_hw_free_nodes(ctx);
+mem_ctx:
+ kfree(ctx);
+unlock:
+ mutex_unlock(&bdisp->lock);
+
+ return ret;
+}
+
+static int bdisp_release(struct file *file)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+
+ dev_dbg(bdisp->dev, "%s\n", __func__);
+
+ mutex_lock(&bdisp->lock);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ bdisp_ctrls_delete(ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--bdisp->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_OPEN, &bdisp->state);
+
+ bdisp_hw_free_nodes(ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&bdisp->lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations bdisp_fops = {
+ .owner = THIS_MODULE,
+ .open = bdisp_open,
+ .release = bdisp_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int bdisp_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+
+ strlcpy(cap->driver, bdisp->pdev->name, sizeof(cap->driver));
+ strlcpy(cap->card, bdisp->pdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s%d",
+ BDISP_NAME, bdisp->id);
+
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ const struct bdisp_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(bdisp_formats))
+ return -EINVAL;
+
+ fmt = &bdisp_formats[f->index];
+
+ if ((fmt->pixelformat == V4L2_PIX_FMT_YUV420) &&
+ (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+ dev_dbg(ctx->bdisp_dev->dev, "No YU12 on capture\n");
+ return -EINVAL;
+ }
+ f->pixelformat = fmt->pixelformat;
+
+ return 0;
+}
+
+static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_pix_format *pix;
+ struct bdisp_frame *frame = ctx_get_frame(ctx, f->type);
+
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ pix = &f->fmt.pix;
+ pix->width = frame->width;
+ pix->height = frame->height;
+ pix->pixelformat = frame->fmt->pixelformat;
+ pix->field = frame->field;
+ pix->bytesperline = frame->bytesperline;
+ pix->sizeimage = frame->sizeimage;
+ pix->colorspace = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ frame->colorspace : bdisp_dflt_fmt.colorspace;
+
+ return 0;
+}
+
+static int bdisp_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ const struct bdisp_fmt *format;
+ u32 in_w, in_h;
+
+ format = bdisp_find_fmt(pix->pixelformat);
+ if (!format) {
+ dev_dbg(ctx->bdisp_dev->dev, "Unknown format 0x%x\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ /* YUV420P only supported for VIDEO_OUTPUT */
+ if ((format->pixelformat == V4L2_PIX_FMT_YUV420) &&
+ (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+ dev_dbg(ctx->bdisp_dev->dev, "No YU12 on capture\n");
+ return -EINVAL;
+ }
+
+ /* Field (interlaced only supported on OUTPUT) */
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
+ (pix->field != V4L2_FIELD_INTERLACED))
+ pix->field = V4L2_FIELD_NONE;
+
+ /* Adjust width & height */
+ in_w = pix->width;
+ in_h = pix->height;
+ v4l_bound_align_image(&pix->width,
+ BDISP_MIN_W, BDISP_MAX_W,
+ ffs(format->w_align) - 1,
+ &pix->height,
+ BDISP_MIN_H, BDISP_MAX_H,
+ ffs(format->h_align) - 1,
+ 0);
+ if ((pix->width != in_w) || (pix->height != in_h))
+ dev_dbg(ctx->bdisp_dev->dev,
+ "%s size updated: %dx%d -> %dx%d\n", __func__,
+ in_w, in_h, pix->width, pix->height);
+
+ pix->bytesperline = (pix->width * format->bpp_plane0) / 8;
+ pix->sizeimage = (pix->width * pix->height * format->bpp) / 8;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ pix->colorspace = bdisp_dflt_fmt.colorspace;
+
+ return 0;
+}
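+
+/*
+ * Editor's worked example (not part of the driver): for NV12 (bpp = 12,
+ * bpp_plane0 = 8) at 1920x1080, bdisp_try_fmt() computes
+ *	bytesperline = (1920 * 8) / 8         = 1920
+ *	sizeimage    = (1920 * 1080 * 12) / 8 = 3110400
+ * i.e. the Y plane plus the half-size interleaved CbCr plane.
+ */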
+
+static int bdisp_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *vq;
+ struct bdisp_frame *frame;
+ struct v4l2_pix_format *pix;
+ int ret;
+ u32 state;
+
+ ret = bdisp_try_fmt(file, fh, f);
+ if (ret) {
+ dev_err(ctx->bdisp_dev->dev, "Cannot set format\n");
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_err(ctx->bdisp_dev->dev, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+
+ frame = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ &ctx->src : &ctx->dst;
+ pix = &f->fmt.pix;
+ frame->fmt = bdisp_find_fmt(pix->pixelformat);
+ if (!frame->fmt) {
+ dev_err(ctx->bdisp_dev->dev, "Unknown format 0x%x\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ frame->width = pix->width;
+ frame->height = pix->height;
+ frame->bytesperline = pix->bytesperline;
+ frame->sizeimage = pix->sizeimage;
+ frame->field = pix->field;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ frame->colorspace = pix->colorspace;
+
+ frame->crop.width = frame->width;
+ frame->crop.height = frame->height;
+ frame->crop.left = 0;
+ frame->crop.top = 0;
+
+ state = BDISP_PARAMS;
+ state |= (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ BDISP_DST_FMT : BDISP_SRC_FMT;
+ bdisp_ctx_state_lock_set(state, ctx);
+
+ return 0;
+}
+
+static int bdisp_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct bdisp_frame *frame;
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+
+ frame = ctx_get_frame(ctx, s->type);
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ switch (s->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* cropped frame */
+ s->r = frame->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ /* complete frame */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+ return -EINVAL;
+ }
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /* composed (cropped) frame */
+ s->r = frame->crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* complete frame */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int is_rect_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ /* Return 1 if a is enclosed in b, or 0 otherwise. */
+
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int bdisp_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct bdisp_frame *frame;
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_rect *in, out;
+ bool valid = false;
+
+ if ((s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
+ (s->target == V4L2_SEL_TGT_CROP))
+ valid = true;
+
+ if ((s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->target == V4L2_SEL_TGT_COMPOSE))
+ valid = true;
+
+ if (!valid) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid type / target\n");
+ return -EINVAL;
+ }
+
+ frame = ctx_get_frame(ctx, s->type);
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ in = &s->r;
+ out = *in;
+
+ /* Align and check origin */
+ out.left = ALIGN(in->left, frame->fmt->w_align);
+ out.top = ALIGN(in->top, frame->fmt->h_align);
+
+ if ((out.left < 0) || (out.left >= frame->width) ||
+ (out.top < 0) || (out.top >= frame->height)) {
+ dev_err(ctx->bdisp_dev->dev,
+ "Invalid crop: %dx%d@(%d,%d) vs frame: %dx%d\n",
+ out.width, out.height, out.left, out.top,
+ frame->width, frame->height);
+ return -EINVAL;
+ }
+
+ /* Align and check size */
+ out.width = ALIGN(in->width, frame->fmt->w_align);
+ out.height = ALIGN(in->height, frame->fmt->h_align);
+
+ if (((out.left + out.width) > frame->width) ||
+ ((out.top + out.height) > frame->height)) {
+ dev_err(ctx->bdisp_dev->dev,
+ "Invalid crop: %dx%d@(%d,%d) vs frame: %dx%d\n",
+ out.width, out.height, out.left, out.top,
+ frame->width, frame->height);
+ return -EINVAL;
+ }
+
+ /* Check the adjust-constraint flags */
+ if (s->flags & V4L2_SEL_FLAG_LE && !is_rect_enclosed(&out, in))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE && !is_rect_enclosed(in, &out))
+ return -ERANGE;
+
+ if ((out.left != in->left) || (out.top != in->top) ||
+ (out.width != in->width) || (out.height != in->height)) {
+ dev_dbg(ctx->bdisp_dev->dev,
+ "%s crop updated: %dx%d@(%d,%d) -> %dx%d@(%d,%d)\n",
+ __func__, in->width, in->height, in->left, in->top,
+ out.width, out.height, out.left, out.top);
+ *in = out;
+ }
+
+ frame->crop = out;
+
+ bdisp_ctx_state_lock_set(BDISP_PARAMS, ctx);
+
+ return 0;
+}
+
+static int bdisp_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+
+ if ((type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
+ !bdisp_ctx_state_is_set(BDISP_SRC_FMT, ctx)) {
+ dev_err(ctx->bdisp_dev->dev, "src not defined\n");
+ return -EINVAL;
+ }
+
+ if ((type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ !bdisp_ctx_state_is_set(BDISP_DST_FMT, ctx)) {
+ dev_err(ctx->bdisp_dev->dev, "dst not defined\n");
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops bdisp_ioctl_ops = {
+ .vidioc_querycap = bdisp_querycap,
+ .vidioc_enum_fmt_vid_cap = bdisp_enum_fmt,
+ .vidioc_enum_fmt_vid_out = bdisp_enum_fmt,
+ .vidioc_g_fmt_vid_cap = bdisp_g_fmt,
+ .vidioc_g_fmt_vid_out = bdisp_g_fmt,
+ .vidioc_try_fmt_vid_cap = bdisp_try_fmt,
+ .vidioc_try_fmt_vid_out = bdisp_try_fmt,
+ .vidioc_s_fmt_vid_cap = bdisp_s_fmt,
+ .vidioc_s_fmt_vid_out = bdisp_s_fmt,
+ .vidioc_g_selection = bdisp_g_selection,
+ .vidioc_s_selection = bdisp_s_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = bdisp_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int bdisp_register_device(struct bdisp_dev *bdisp)
+{
+ int ret;
+
+ if (!bdisp)
+ return -ENODEV;
+
+ bdisp->vdev.fops = &bdisp_fops;
+ bdisp->vdev.ioctl_ops = &bdisp_ioctl_ops;
+ bdisp->vdev.release = video_device_release_empty;
+ bdisp->vdev.lock = &bdisp->lock;
+ bdisp->vdev.vfl_dir = VFL_DIR_M2M;
+ bdisp->vdev.v4l2_dev = &bdisp->v4l2_dev;
+ snprintf(bdisp->vdev.name, sizeof(bdisp->vdev.name), "%s.%d",
+ BDISP_NAME, bdisp->id);
+
+ video_set_drvdata(&bdisp->vdev, bdisp);
+
+ bdisp->m2m.vdev = &bdisp->vdev;
+ bdisp->m2m.m2m_dev = v4l2_m2m_init(&bdisp_m2m_ops);
+ if (IS_ERR(bdisp->m2m.m2m_dev)) {
+ dev_err(bdisp->dev, "failed to initialize v4l2-m2m device\n");
+ return PTR_ERR(bdisp->m2m.m2m_dev);
+ }
+
+ ret = video_register_device(&bdisp->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(bdisp->dev,
+ "%s(): failed to register video device\n", __func__);
+ v4l2_m2m_release(bdisp->m2m.m2m_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bdisp_unregister_device(struct bdisp_dev *bdisp)
+{
+ if (!bdisp)
+ return;
+
+ if (bdisp->m2m.m2m_dev)
+ v4l2_m2m_release(bdisp->m2m.m2m_dev);
+
+ video_unregister_device(bdisp->m2m.vdev);
+}
+
+static irqreturn_t bdisp_irq_thread(int irq, void *priv)
+{
+ struct bdisp_dev *bdisp = priv;
+ struct bdisp_ctx *ctx;
+
+ spin_lock(&bdisp->slock);
+
+ bdisp_dbg_perf_end(bdisp);
+
+ cancel_delayed_work(&bdisp->timeout_work);
+
+ if (!test_and_clear_bit(ST_M2M_RUNNING, &bdisp->state))
+ goto isr_unlock;
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDING, &bdisp->state)) {
+ set_bit(ST_M2M_SUSPENDED, &bdisp->state);
+ wake_up(&bdisp->irq_queue);
+ goto isr_unlock;
+ }
+
+ ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
+ if (!ctx || !ctx->fh.m2m_ctx)
+ goto isr_unlock;
+
+ spin_unlock(&bdisp->slock);
+
+ bdisp_job_finish(ctx, VB2_BUF_STATE_DONE);
+
+ if (bdisp_ctx_state_is_set(BDISP_CTX_STOP_REQ, ctx)) {
+ bdisp_ctx_state_lock_clear(BDISP_CTX_STOP_REQ, ctx);
+ wake_up(&bdisp->irq_queue);
+ }
+
+ return IRQ_HANDLED;
+
+isr_unlock:
+ spin_unlock(&bdisp->slock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bdisp_irq_handler(int irq, void *priv)
+{
+ if (bdisp_hw_get_and_clear_irq((struct bdisp_dev *)priv))
+ return IRQ_NONE;
+ else
+ return IRQ_WAKE_THREAD;
+}
+
+static void bdisp_irq_timeout(struct work_struct *ptr)
+{
+ struct delayed_work *twork = to_delayed_work(ptr);
+ struct bdisp_dev *bdisp = container_of(twork, struct bdisp_dev,
+ timeout_work);
+ struct bdisp_ctx *ctx;
+
+ ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
+
+ dev_err(ctx->bdisp_dev->dev, "Device work timeout\n");
+
+ spin_lock(&bdisp->slock);
+ clear_bit(ST_M2M_RUNNING, &bdisp->state);
+ spin_unlock(&bdisp->slock);
+
+ bdisp_hw_reset(bdisp);
+
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+}
+
+static int bdisp_m2m_suspend(struct bdisp_dev *bdisp)
+{
+ unsigned long flags;
+ int timeout;
+
+ spin_lock_irqsave(&bdisp->slock, flags);
+ if (!test_bit(ST_M2M_RUNNING, &bdisp->state)) {
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &bdisp->state);
+ set_bit(ST_M2M_SUSPENDING, &bdisp->state);
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+
+ timeout = wait_event_timeout(bdisp->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &bdisp->state),
+ BDISP_WORK_TIMEOUT);
+
+ clear_bit(ST_M2M_SUSPENDING, &bdisp->state);
+
+ if (!timeout) {
+ dev_err(bdisp->dev, "%s IRQ timeout\n", __func__);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int bdisp_m2m_resume(struct bdisp_dev *bdisp)
+{
+ struct bdisp_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdisp->slock, flags);
+ ctx = bdisp->m2m.ctx;
+ bdisp->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &bdisp->state))
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ return 0;
+}
+
+static int bdisp_runtime_resume(struct device *dev)
+{
+ struct bdisp_dev *bdisp = dev_get_drvdata(dev);
+ int ret = clk_enable(bdisp->clock);
+
+ if (ret)
+ return ret;
+
+ return bdisp_m2m_resume(bdisp);
+}
+
+static int bdisp_runtime_suspend(struct device *dev)
+{
+ struct bdisp_dev *bdisp = dev_get_drvdata(dev);
+ int ret = bdisp_m2m_suspend(bdisp);
+
+ if (!ret)
+ clk_disable(bdisp->clock);
+
+ return ret;
+}
+
+static int bdisp_resume(struct device *dev)
+{
+ struct bdisp_dev *bdisp = dev_get_drvdata(dev);
+ unsigned long flags;
+ int opened;
+
+ spin_lock_irqsave(&bdisp->slock, flags);
+ opened = test_bit(ST_M2M_OPEN, &bdisp->state);
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+
+ if (!opened)
+ return 0;
+
+ if (!pm_runtime_suspended(dev))
+ return bdisp_runtime_resume(dev);
+
+ return 0;
+}
+
+static int bdisp_suspend(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ return bdisp_runtime_suspend(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bdisp_pm_ops = {
+ .suspend = bdisp_suspend,
+ .resume = bdisp_resume,
+ .runtime_suspend = bdisp_runtime_suspend,
+ .runtime_resume = bdisp_runtime_resume,
+};
+
+static int bdisp_remove(struct platform_device *pdev)
+{
+ struct bdisp_dev *bdisp = platform_get_drvdata(pdev);
+
+ bdisp_unregister_device(bdisp);
+
+ bdisp_hw_free_filters(bdisp->dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ bdisp_debugfs_remove(bdisp);
+
+ v4l2_device_unregister(&bdisp->v4l2_dev);
+
+ if (!IS_ERR(bdisp->clock))
+ clk_unprepare(bdisp->clock);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+
+ return 0;
+}
+
+static int bdisp_probe(struct platform_device *pdev)
+{
+ struct bdisp_dev *bdisp;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ bdisp = devm_kzalloc(dev, sizeof(struct bdisp_dev), GFP_KERNEL);
+ if (!bdisp)
+ return -ENOMEM;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ bdisp->pdev = pdev;
+ bdisp->dev = dev;
+ platform_set_drvdata(pdev, bdisp);
+
+ if (dev->of_node)
+ bdisp->id = of_alias_get_id(pdev->dev.of_node, BDISP_NAME);
+ else
+ bdisp->id = pdev->id;
+
+ init_waitqueue_head(&bdisp->irq_queue);
+ INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
+ bdisp->work_queue = create_workqueue(BDISP_NAME);
+
+ spin_lock_init(&bdisp->slock);
+ mutex_init(&bdisp->lock);
+
+ /* get resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bdisp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(bdisp->regs)) {
+ dev_err(dev, "failed to get regs\n");
+ return PTR_ERR(bdisp->regs);
+ }
+
+ bdisp->clock = devm_clk_get(dev, BDISP_NAME);
+ if (IS_ERR(bdisp->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(bdisp->clock);
+ }
+
+ ret = clk_prepare(bdisp->clock);
+ if (ret < 0) {
+ dev_err(dev, "clock prepare failed\n");
+ bdisp->clock = ERR_PTR(-EINVAL);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to get IRQ resource\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
+ ret = devm_request_threaded_irq(dev, res->start, bdisp_irq_handler,
+ bdisp_irq_thread, IRQF_ONESHOT,
+ pdev->name, bdisp);
+ if (ret) {
+ dev_err(dev, "failed to install irq\n");
+ goto err_clk;
+ }
+
+ /* v4l2 register */
+ ret = v4l2_device_register(dev, &bdisp->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "failed to register\n");
+ goto err_clk;
+ }
+
+ /* Debug */
+ ret = bdisp_debugfs_create(bdisp);
+ if (ret) {
+ dev_err(dev, "failed to create debugfs\n");
+ goto err_v4l2;
+ }
+
+ /* Power management */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to set PM\n");
+ goto err_pm;
+ }
+
+ /* Filters */
+ if (bdisp_hw_alloc_filters(bdisp->dev)) {
+ dev_err(bdisp->dev, "no memory for filters\n");
+ ret = -ENOMEM;
+ goto err_pm;
+ }
+
+ /* Register */
+ ret = bdisp_register_device(bdisp);
+ if (ret) {
+ dev_err(dev, "failed to register\n");
+ goto err_filter;
+ }
+
+ dev_info(dev, "%s%d registered as /dev/video%d\n", BDISP_NAME,
+ bdisp->id, bdisp->vdev.num);
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_filter:
+ bdisp_hw_free_filters(bdisp->dev);
+err_pm:
+ pm_runtime_put(dev);
+ bdisp_debugfs_remove(bdisp);
+err_v4l2:
+ v4l2_device_unregister(&bdisp->v4l2_dev);
+err_clk:
+ if (!IS_ERR(bdisp->clock))
+ clk_unprepare(bdisp->clock);
+
+ return ret;
+}
+
+static const struct of_device_id bdisp_match_types[] = {
+ {
+ .compatible = "st,stih407-bdisp",
+ },
+ { /* end node */ }
+};
+
+MODULE_DEVICE_TABLE(of, bdisp_match_types);
+
+static struct platform_driver bdisp_driver = {
+ .probe = bdisp_probe,
+ .remove = bdisp_remove,
+ .driver = {
+ .name = BDISP_NAME,
+ .of_match_table = bdisp_match_types,
+ .pm = &bdisp_pm_ops,
+ },
+};
+
+module_platform_driver(bdisp_driver);
+
+MODULE_DESCRIPTION("2D blitter for STMicroelectronics SoC");
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sti/bdisp/bdisp.h b/drivers/media/platform/sti/bdisp/bdisp.h
new file mode 100644
index 000000000..e309cde37
--- /dev/null
+++ b/drivers/media/platform/sti/bdisp/bdisp.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/ktime.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#define BDISP_NAME "bdisp"
+
+/*
+ * Max nb of nodes in node-list:
+ * - 2 nodes to handle wide 4K pictures
+ * - 2 nodes to handle two planes (Y & CbCr)
+ */
+#define MAX_OUTPUT_PLANES 2
+#define MAX_VERTICAL_STRIDES 2
+#define MAX_NB_NODE (MAX_OUTPUT_PLANES * MAX_VERTICAL_STRIDES)
+
+/**
+ * struct bdisp_ctrls - bdisp control set
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ */
+struct bdisp_ctrls {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+};
+
+/**
+ * struct bdisp_fmt - driver's internal color format data
+ * @pixelformat: fourcc code for this format
+ * @nb_planes: number of planes (ex: [0]=RGB/Y - [1]=Cb/Cr, ...)
+ * @bpp: bits per pixel (general)
+ * @bpp_plane0: byte per pixel for the 1st plane
+ * @w_align: width alignment in pixel (multiple of)
+ * @h_align: height alignment in pixel (multiple of)
+ */
+struct bdisp_fmt {
+ u32 pixelformat;
+ u8 nb_planes;
+ u8 bpp;
+ u8 bpp_plane0;
+ u8 w_align;
+ u8 h_align;
+};
+
+/**
+ * struct bdisp_frame - frame properties
+ *
+ * @width: frame width (including padding)
+ * @height: frame height (including padding)
+ * @fmt: pointer to frame format descriptor
+ * @field: frame / field type
+ * @bytesperline: stride of the 1st plane
+ * @sizeimage: image size in bytes
+ * @colorspace: colorspace
+ * @crop: crop area
+ * @paddr: image physical addresses per plane ([0]=RGB/Y - [1]=Cb/Cr, ...)
+ */
+struct bdisp_frame {
+ u32 width;
+ u32 height;
+ const struct bdisp_fmt *fmt;
+ enum v4l2_field field;
+ u32 bytesperline;
+ u32 sizeimage;
+ enum v4l2_colorspace colorspace;
+ struct v4l2_rect crop;
+ dma_addr_t paddr[4];
+};
+
+/**
+ * struct bdisp_request - bdisp request
+ *
+ * @src: source frame properties
+ * @dst: destination frame properties
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @nb_req: number of run requests
+ */
+struct bdisp_request {
+ struct bdisp_frame src;
+ struct bdisp_frame dst;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ int nb_req;
+};
+
+/**
+ * struct bdisp_ctx - device context data
+ *
+ * @src: source frame properties
+ * @dst: destination frame properties
+ * @state: flags to keep track of user configuration
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @bdisp_dev: the device this context applies to
+ * @node: node array
+ * @node_paddr: node physical address array
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @bdisp_ctrls: bdisp control set
+ * @ctrls_rdy: true if the control handler is initialized
+ */
+struct bdisp_ctx {
+ struct bdisp_frame src;
+ struct bdisp_frame dst;
+ u32 state;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ struct bdisp_dev *bdisp_dev;
+ struct bdisp_node *node[MAX_NB_NODE];
+ dma_addr_t node_paddr[MAX_NB_NODE];
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct bdisp_ctrls bdisp_ctrls;
+ bool ctrls_rdy;
+};
+
+/**
+ * struct bdisp_m2m_device - v4l2 memory-to-memory device data
+ *
+ * @vdev: video device node for v4l2 m2m mode
+ * @m2m_dev: v4l2 m2m device data
+ * @ctx: hardware context data
+ * @refcnt: reference counter
+ */
+struct bdisp_m2m_device {
+ struct video_device *vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct bdisp_ctx *ctx;
+ int refcnt;
+};
+
+/**
+ * struct bdisp_dbg - debug info
+ *
+ * @debugfs_entry: debugfs directory entry
+ * @copy_node: array of last used nodes
+ * @copy_request: last bdisp request
+ * @hw_start: start time of last HW request
+ * @last_duration: last HW processing duration in microsecs
+ * @min_duration: min HW processing duration in microsecs
+ * @max_duration: max HW processing duration in microsecs
+ * @tot_duration: total HW processing duration in microsecs
+ */
+struct bdisp_dbg {
+ struct dentry *debugfs_entry;
+ struct bdisp_node *copy_node[MAX_NB_NODE];
+ struct bdisp_request copy_request;
+ ktime_t hw_start;
+ s64 last_duration;
+ s64 min_duration;
+ s64 max_duration;
+ s64 tot_duration;
+};
+
+/**
+ * struct bdisp_dev - abstraction for bdisp entity
+ *
+ * @v4l2_dev: v4l2 device
+ * @vdev: video device
+ * @pdev: platform device
+ * @dev: device
+ * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting this data structure
+ * @id: device index
+ * @m2m: memory-to-memory V4L2 device information
+ * @state: flags used to synchronize m2m and capture mode operation
+ * @clock: IP clock
+ * @regs: registers
+ * @irq_queue: interrupt handler waitqueue
+ * @work_queue: workqueue to handle timeouts
+ * @timeout_work: IRQ timeout structure
+ * @dbg: debug info
+ */
+struct bdisp_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ spinlock_t slock;
+ struct mutex lock;
+ u16 id;
+ struct bdisp_m2m_device m2m;
+ unsigned long state;
+ struct clk *clock;
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+ struct workqueue_struct *work_queue;
+ struct delayed_work timeout_work;
+ struct bdisp_dbg dbg;
+};
+
+void bdisp_hw_free_nodes(struct bdisp_ctx *ctx);
+int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx);
+void bdisp_hw_free_filters(struct device *dev);
+int bdisp_hw_alloc_filters(struct device *dev);
+int bdisp_hw_reset(struct bdisp_dev *bdisp);
+int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp);
+int bdisp_hw_update(struct bdisp_ctx *ctx);
+
+void bdisp_debugfs_remove(struct bdisp_dev *bdisp);
+int bdisp_debugfs_create(struct bdisp_dev *bdisp);
+void bdisp_dbg_perf_begin(struct bdisp_dev *bdisp);
+void bdisp_dbg_perf_end(struct bdisp_dev *bdisp);
diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/sti/c8sectpfe/Kconfig
new file mode 100644
index 000000000..7420a5057
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/Kconfig
@@ -0,0 +1,27 @@
+config DVB_C8SECTPFE
+ tristate "STMicroelectronics C8SECTPFE DVB support"
+ depends on PINCTRL && DVB_CORE && I2C
+ depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
+ select FW_LOADER
+ select DEBUG_FS
+ select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
+
+ ---help---
+	  This adds support for DVB front-end cards connected
+	  to the TS inputs of the STiH407/410 SoC.
+
+ The driver currently supports C8SECTPFE's TS input block,
+ memdma engine, and HW PID filtering.
+
+ Supported DVB front-end cards are:
+ - STMicroelectronics DVB-T B2100A (STV0367 + TDA18212)
+ - STMicroelectronics DVB-S/S2 STV0903 + STV6110 + LNBP24 board
+
+ To compile this driver as a module, choose M here: the
+ module will be called c8sectpfe.
diff --git a/drivers/media/platform/sti/c8sectpfe/Makefile b/drivers/media/platform/sti/c8sectpfe/Makefile
new file mode 100644
index 000000000..34d69472b
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o \
+ c8sectpfe-debugfs.o
+
+obj-$(CONFIG_DVB_C8SECTPFE) += c8sectpfe.o
+
+ccflags-y += -Idrivers/media/common
+ccflags-y += -Idrivers/media/dvb-frontends/
+ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c
new file mode 100644
index 000000000..5df67da25
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-common.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dvb/dmx.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+
+#include <media/dmxdev.h>
+#include <media/dvbdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_frontend.h>
+#include <media/dvb_net.h>
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+static int register_dvb(struct stdemux *demux, struct dvb_adapter *adap,
+ void *start_feed, void *stop_feed,
+ struct c8sectpfei *fei)
+{
+ int result;
+
+ demux->dvb_demux.dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+
+ demux->dvb_demux.priv = demux;
+ demux->dvb_demux.filternum = C8SECTPFE_MAXCHANNEL;
+ demux->dvb_demux.feednum = C8SECTPFE_MAXCHANNEL;
+
+ demux->dvb_demux.start_feed = start_feed;
+ demux->dvb_demux.stop_feed = stop_feed;
+ demux->dvb_demux.write_to_decoder = NULL;
+
+ result = dvb_dmx_init(&demux->dvb_demux);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_dmx_init failed (errno = %d)\n",
+ result);
+ goto err_dmx;
+ }
+
+ demux->dmxdev.filternum = demux->dvb_demux.filternum;
+ demux->dmxdev.demux = &demux->dvb_demux.dmx;
+ demux->dmxdev.capabilities = 0;
+
+ result = dvb_dmxdev_init(&demux->dmxdev, adap);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_dmxdev_init failed (errno = %d)\n",
+ result);
+
+ goto err_dmxdev;
+ }
+
+ demux->hw_frontend.source = DMX_FRONTEND_0 + demux->tsin_index;
+
+ result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "add_frontend failed (errno = %d)\n", result);
+ goto err_fe_hw;
+ }
+
+ demux->mem_frontend.source = DMX_MEMORY_FE;
+ result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "add_frontend failed (%d)\n", result);
+ goto err_fe_mem;
+ }
+
+ result = demux->dvb_demux.dmx.connect_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "connect_frontend (%d)\n", result);
+ goto err_fe_con;
+ }
+
+ return 0;
+
+err_fe_con:
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+err_fe_mem:
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+err_fe_hw:
+ dvb_dmxdev_release(&demux->dmxdev);
+err_dmxdev:
+ dvb_dmx_release(&demux->dvb_demux);
+err_dmx:
+ return result;
+
+}
+
+static void unregister_dvb(struct stdemux *demux)
+{
+
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+
+ dvb_dmxdev_release(&demux->dmxdev);
+
+ dvb_dmx_release(&demux->dvb_demux);
+}
+
+static struct c8sectpfe *c8sectpfe_create(struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed)
+{
+ struct c8sectpfe *c8sectpfe;
+ int result;
+ int i, j;
+
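+	/* -1 lets the DVB core pick the first free adapter number */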
+ short int ids[] = { -1 };
+
+ c8sectpfe = kzalloc(sizeof(struct c8sectpfe), GFP_KERNEL);
+ if (!c8sectpfe)
+ goto err1;
+
+ mutex_init(&c8sectpfe->lock);
+
+ c8sectpfe->device = fei->dev;
+
+ result = dvb_register_adapter(&c8sectpfe->adapter, "STi c8sectpfe",
+ THIS_MODULE, fei->dev, ids);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_register_adapter failed (errno = %d)\n",
+ result);
+ goto err2;
+ }
+
+ c8sectpfe->adapter.priv = fei;
+
+ for (i = 0; i < fei->tsin_count; i++) {
+
+ c8sectpfe->demux[i].tsin_index = i;
+ c8sectpfe->demux[i].c8sectpfei = fei;
+
+ result = register_dvb(&c8sectpfe->demux[i], &c8sectpfe->adapter,
+ start_feed, stop_feed, fei);
+ if (result < 0) {
+ dev_err(fei->dev,
+ "register_dvb feed=%d failed (errno = %d)\n",
+				i, result);
+
+			/* we take an all-or-nothing approach */
+ for (j = 0; j < i; j++)
+ unregister_dvb(&c8sectpfe->demux[j]);
+ goto err3;
+ }
+ }
+
+ c8sectpfe->num_feeds = fei->tsin_count;
+
+ return c8sectpfe;
+err3:
+ dvb_unregister_adapter(&c8sectpfe->adapter);
+err2:
+ kfree(c8sectpfe);
+err1:
+ return NULL;
+}
+
+static void c8sectpfe_delete(struct c8sectpfe *c8sectpfe)
+{
+ int i;
+
+ if (!c8sectpfe)
+ return;
+
+ for (i = 0; i < c8sectpfe->num_feeds; i++)
+ unregister_dvb(&c8sectpfe->demux[i]);
+
+ dvb_unregister_adapter(&c8sectpfe->adapter);
+
+ kfree(c8sectpfe);
+}
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+ struct c8sectpfei *fei)
+{
+ int n;
+ struct channel_info *tsin;
+
+ for (n = 0; n < fei->tsin_count; n++) {
+
+ tsin = fei->channel_data[n];
+
+ if (tsin) {
+ if (tsin->frontend) {
+ dvb_unregister_frontend(tsin->frontend);
+ dvb_frontend_detach(tsin->frontend);
+ }
+
+ i2c_put_adapter(tsin->i2c_adapter);
+
+ if (tsin->i2c_client) {
+ module_put(tsin->i2c_client->dev.driver->owner);
+ i2c_unregister_device(tsin->i2c_client);
+ }
+ }
+ }
+
+ c8sectpfe_delete(c8sectpfe);
+}
+
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+ struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed)
+{
+ struct channel_info *tsin;
+ struct dvb_frontend *frontend;
+ int n, res;
+
+ *c8sectpfe = c8sectpfe_create(fei, start_feed, stop_feed);
+ if (!*c8sectpfe)
+ return -ENOMEM;
+
+ for (n = 0; n < fei->tsin_count; n++) {
+ tsin = fei->channel_data[n];
+
+ res = c8sectpfe_frontend_attach(&frontend, *c8sectpfe, tsin, n);
+ if (res)
+ goto err;
+
+ res = dvb_register_frontend(&c8sectpfe[0]->adapter, frontend);
+ if (res < 0) {
+ dev_err(fei->dev, "dvb_register_frontend failed (%d)\n",
+ res);
+ goto err;
+ }
+
+ tsin->frontend = frontend;
+ }
+
+ return 0;
+
+err:
+ c8sectpfe_tuner_unregister_frontend(*c8sectpfe, fei);
+ return res;
+}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h
new file mode 100644
index 000000000..5ab7ca448
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-common.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#ifndef _C8SECTPFE_COMMON_H_
+#define _C8SECTPFE_COMMON_H_
+
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/gpio.h>
+#include <linux/version.h>
+
+#include <media/dmxdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_frontend.h>
+#include <media/dvb_net.h>
+
+/* Maximum number of channels */
+#define C8SECTPFE_MAXADAPTER (4)
+#define C8SECTPFE_MAXCHANNEL 64
+#define STPTI_MAXCHANNEL 64
+
+#define MAX_INPUTBLOCKS 7
+
+struct c8sectpfe;
+struct stdemux;
+
+struct stdemux {
+ struct dvb_demux dvb_demux;
+ struct dmxdev dmxdev;
+ struct dmx_frontend hw_frontend;
+ struct dmx_frontend mem_frontend;
+ int tsin_index;
+ int running_feed_count;
+ struct c8sectpfei *c8sectpfei;
+};
+
+struct c8sectpfe {
+ struct stdemux demux[MAX_INPUTBLOCKS];
+ struct mutex lock;
+ struct dvb_adapter adapter;
+ struct device *device;
+ int mapping;
+ int num_feeds;
+};
+
+/* Channel registration */
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+ struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed);
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+ struct c8sectpfei *fei);
+
+#endif
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
new file mode 100644
index 000000000..3c05b3dc4
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-core.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Bennett <peter.bennett@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-debugfs.h"
+#include <media/dmxdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_frontend.h>
+#include <media/dvb_net.h>
+
+#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
+MODULE_FIRMWARE(FIRMWARE_MEMDMA);
+
+#define PID_TABLE_SIZE 1024
+#define POLL_MSECS 50
+
+static int load_c8sectpfe_fw(struct c8sectpfei *fei);
+
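+/* a buffered packet is one 188-byte TS packet plus a 4-byte header */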
+#define TS_PKT_SIZE 188
+#define HEADER_SIZE (4)
+#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
+
+#define FEI_ALIGNMENT (32)
+/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
+#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
+
+#define FIFO_LEN 1024
+
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
+{
+ struct c8sectpfei *fei = from_timer(fei, t, timer);
+ struct channel_info *channel;
+ int chan_num;
+
+ /* iterate through input block channels */
+ for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
+ channel = fei->channel_data[chan_num];
+
+ /* is this descriptor initialised and TP enabled */
+ if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
+ tasklet_schedule(&channel->tsklet);
+ }
+
+ fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
+ add_timer(&fei->timer);
+}
+
+static void channel_swdemux_tsklet(unsigned long data)
+{
+ struct channel_info *channel = (struct channel_info *)data;
+ struct c8sectpfei *fei;
+ unsigned long wp, rp;
+ int pos, num_packets, n, size;
+ u8 *buf;
+
+ if (unlikely(!channel || !channel->irec))
+ return;
+
+ fei = channel->fei;
+
+ wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
+ rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
+
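+	/* byte offset of the read pointer within the back buffer */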
+ pos = rp - channel->back_buffer_busaddr;
+
+	/*
+	 * if the write pointer has wrapped, only consume up to the
+	 * end of the buffer on this pass
+	 */
+ if (wp < rp)
+ wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
+
+ size = wp - rp;
+ num_packets = size / PACKET_SIZE;
+
+ /* manage cache so data is visible to CPU */
+ dma_sync_single_for_cpu(fei->dev,
+ rp,
+ size,
+ DMA_FROM_DEVICE);
+
+ buf = (u8 *) channel->back_buffer_aligned;
+
+ dev_dbg(fei->dev,
+ "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
+ channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
+
+ for (n = 0; n < num_packets; n++) {
+ dvb_dmx_swfilter_packets(
+ &fei->c8sectpfe[0]->
+ demux[channel->demux_mapping].dvb_demux,
+ &buf[pos], 1);
+
+ pos += PACKET_SIZE;
+ }
+
+	/*
+	 * advance the read pointer, wrapping back to the buffer base
+	 * once the end of the buffer has been consumed
+	 */
+ if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSRP_TP(0));
+ else
+ writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
+}
+
+static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct c8sectpfei *fei = stdemux->c8sectpfei;
+ struct channel_info *channel;
+ u32 tmp;
+ unsigned long *bitmap;
+ int ret;
+
+ switch (dvbdmxfeed->type) {
+ case DMX_TYPE_TS:
+ break;
+ case DMX_TYPE_SEC:
+ break;
+ default:
+ dev_err(fei->dev, "%s:%d Error bailing\n"
+ , __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (dvbdmxfeed->type == DMX_TYPE_TS) {
+ switch (dvbdmxfeed->pes_type) {
+ case DMX_PES_VIDEO:
+ case DMX_PES_AUDIO:
+ case DMX_PES_TELETEXT:
+ case DMX_PES_PCR:
+ case DMX_PES_OTHER:
+ break;
+ default:
+ dev_err(fei->dev, "%s:%d Error bailing\n"
+ , __func__, __LINE__);
+ return -EINVAL;
+ }
+ }
+
+ if (!atomic_read(&fei->fw_loaded)) {
+ ret = load_c8sectpfe_fw(fei);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&fei->lock);
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
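+	/* the HW PID filter table is a bitmap with one bit per PID */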
+ bitmap = (unsigned long *) channel->pid_buffer_aligned;
+
+	/* PID 8192 means "whole transport stream": disable HW PID filtering */
+ if (dvbdmxfeed->pid == 8192) {
+ tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ tmp &= ~C8SECTPFE_PID_ENABLE;
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+
+ } else {
+ bitmap_set(bitmap, dvbdmxfeed->pid, 1);
+ }
+
+ /* manage cache so PID bitmap is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ channel->active = 1;
+
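+	/* first active feed in the system: start the demux polling timer */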
+ if (fei->global_feed_count == 0) {
+ fei->timer.expires = jiffies +
+			msecs_to_jiffies(POLL_MSECS);
+
+ add_timer(&fei->timer);
+ }
+
+ if (stdemux->running_feed_count == 0) {
+
+ dev_dbg(fei->dev, "Starting channel=%p\n", channel);
+
+ tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
+ (unsigned long) channel);
+
+ /* Reset the internal inputblock sram pointers */
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
+ writel(channel->fifo + FIFO_LEN - 1,
+ fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
+
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
+
+
+ /* reset read / write memdma ptrs for this channel */
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSWP_TP(0));
+
+ /* Issue a reset and enable InputBlock */
+ writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
+ , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+ /* and enable the tp */
+ writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
+
+ dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
+ , __func__, __LINE__, stdemux);
+ }
+
+ stdemux->running_feed_count++;
+ fei->global_feed_count++;
+
+ mutex_unlock(&fei->lock);
+
+ return 0;
+}
+
+static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct c8sectpfei *fei = stdemux->c8sectpfei;
+ struct channel_info *channel;
+ int idlereq;
+ u32 tmp;
+ int ret;
+ unsigned long *bitmap;
+
+ if (!atomic_read(&fei->fw_loaded)) {
+ ret = load_c8sectpfe_fw(fei);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&fei->lock);
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ bitmap = (unsigned long *) channel->pid_buffer_aligned;
+
+ if (dvbdmxfeed->pid == 8192) {
+ tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ tmp |= C8SECTPFE_PID_ENABLE;
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ } else {
+ bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
+ }
+
+ /* manage cache so data is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ if (--stdemux->running_feed_count == 0) {
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ /* TP re-configuration on page 168 of functional spec */
+
+ /* disable IB (prevents more TS data going to memdma) */
+ writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+ /* disable this channels descriptor */
+ writel(0, channel->irec + DMA_PRDS_TPENABLE);
+
+ tasklet_disable(&channel->tsklet);
+
+ /* now request memdma channel goes idle */
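+		/* the channel bit ORed with the IDLEREQ flag (bit 31) */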
+ idlereq = (1 << channel->tsin_id) | IDLEREQ;
+ writel(idlereq, fei->io + DMA_IDLE_REQ);
+
+ /* wait for idle irq handler to signal completion */
+ ret = wait_for_completion_timeout(&channel->idle_completion,
+ msecs_to_jiffies(100));
+
+ if (ret == 0)
+ dev_warn(fei->dev,
+ "Timeout waiting for idle irq on tsin%d\n",
+ channel->tsin_id);
+
+ reinit_completion(&channel->idle_completion);
+
+ /* reset read / write ptrs for this channel */
+
+ writel(channel->back_buffer_busaddr,
+ channel->irec + DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(channel->back_buffer_busaddr,
+ channel->irec + DMA_PRDS_BUSWP_TP(0));
+
+ dev_dbg(fei->dev,
+ "%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
+ __func__, __LINE__, stdemux, channel->tsin_id);
+
+ /* turn off all PIDS in the bitmap */
+ memset((void *)channel->pid_buffer_aligned
+ , 0x00, PID_TABLE_SIZE);
+
+ /* manage cache so data is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ channel->active = 0;
+ }
+
+ if (--fei->global_feed_count == 0) {
+ dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
+ , __func__, __LINE__, fei->global_feed_count);
+
+ del_timer(&fei->timer);
+ }
+
+ mutex_unlock(&fei->lock);
+
+ return 0;
+}
+
+static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
+{
+ int i;
+
+ for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
+ if (!fei->channel_data[i])
+ continue;
+
+ if (fei->channel_data[i]->tsin_id == tsin_num)
+ return fei->channel_data[i];
+ }
+
+ return NULL;
+}
+
+static void c8sectpfe_getconfig(struct c8sectpfei *fei)
+{
+ struct c8sectpfe_hw *hw = &fei->hw_stats;
+
+ hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
+ hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
+ hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
+ hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
+ hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
+ hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
+ hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
+
+ dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
+ dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
+ dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
+ dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
+ , hw->num_swts);
+ dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
+ dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
+ dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
+ dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
+ , hw->num_tp);
+}
+
+static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
+{
+ struct c8sectpfei *fei = priv;
+ struct channel_info *chan;
+ int bit;
+ unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
+
+	/*
+	 * page 168 of functional spec: Clear the idle request
+	 * by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register.
+	 */
+
+ /* signal idle completion */
+ for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
+
+ chan = find_channel(fei, bit);
+
+ if (chan)
+ complete(&chan->idle_completion);
+ }
+
+ writel(0, fei->io + DMA_IDLE_REQ);
+
+ return IRQ_HANDLED;
+}
+
+
+static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
+{
+ if (!fei || !tsin)
+ return;
+
+ if (tsin->back_buffer_busaddr)
+ if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
+ dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
+ FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
+
+ kfree(tsin->back_buffer_start);
+
+ if (tsin->pid_buffer_busaddr)
+ if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
+ dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
+ PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
+
+ kfree(tsin->pid_buffer_start);
+}
+
+#define MAX_NAME 20
+
+static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
+ struct channel_info *tsin)
+{
+ int ret;
+ u32 tmp;
+ char tsin_pin_name[MAX_NAME];
+
+ if (!fei || !tsin)
+ return -EINVAL;
+
+ dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
+ , __func__, __LINE__, tsin, tsin->tsin_id);
+
+ init_completion(&tsin->idle_completion);
+
+ tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
+ FEI_ALIGNMENT, GFP_KERNEL);
+
+ if (!tsin->back_buffer_start) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /* Ensure backbuffer is 32byte aligned */
+ tsin->back_buffer_aligned = tsin->back_buffer_start
+ + FEI_ALIGNMENT;
+
+ tsin->back_buffer_aligned = (void *)
+ (((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);
+
+ tsin->back_buffer_busaddr = dma_map_single(fei->dev,
+ (void *)tsin->back_buffer_aligned,
+ FEI_BUFFER_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
+ dev_err(fei->dev, "failed to map back_buffer\n");
+ ret = -EFAULT;
+ goto err_unmap;
+ }
+
+ /*
+ * The pid buffer can be configured (in hw) for byte or bit
+ * per pid. By powers of deduction we conclude stih407 family
+ * is configured (at SoC design stage) for bit per pid.
+ */
+ tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);
+
+ if (!tsin->pid_buffer_start) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /*
+ * PID buffer needs to be aligned to size of the pid table
+ * which at bit per pid is 1024 bytes (8192 pids / 8).
+ * PIDF_BASE register enforces this alignment when writing
+ * the register.
+ */
+
+ tsin->pid_buffer_aligned = tsin->pid_buffer_start +
+ PID_TABLE_SIZE;
+
+ tsin->pid_buffer_aligned = (void *)
+ (((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);
+
+ tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
+ tsin->pid_buffer_aligned,
+ PID_TABLE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
+ dev_err(fei->dev, "failed to map pid_bitmap\n");
+ ret = -EFAULT;
+ goto err_unmap;
+ }
+
+ /* manage cache so pid bitmap is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ tsin->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
+ (tsin->serial_not_parallel ? "serial" : "parallel"));
+
+ tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
+ if (IS_ERR(tsin->pstate)) {
+ dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
+ , __func__, tsin_pin_name);
+ ret = PTR_ERR(tsin->pstate);
+ goto err_unmap;
+ }
+
+ ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
+
+ if (ret) {
+ dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
+ , __func__);
+ goto err_unmap;
+ }
+
+ /* Enable this input block */
+ tmp = readl(fei->io + SYS_INPUT_CLKEN);
+ tmp |= BIT(tsin->tsin_id);
+ writel(tmp, fei->io + SYS_INPUT_CLKEN);
+
+ if (tsin->serial_not_parallel)
+ tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;
+
+ if (tsin->invert_ts_clk)
+ tmp |= C8SECTPFE_INVERT_TSCLK;
+
+ if (tsin->async_not_sync)
+ tmp |= C8SECTPFE_ASYNC_NOT_SYNC;
+
+ tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;
+
+ writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));
+
+ writel(C8SECTPFE_SYNC(0x9) |
+ C8SECTPFE_DROP(0x9) |
+ C8SECTPFE_TOKEN(0x47),
+ fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));
+
+ writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));
+
+	/* Place the FIFOs at the end of the irec descriptors */
+
+ tsin->fifo = (tsin->tsin_id * FIFO_LEN);
+
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
+ writel(tsin->fifo + FIFO_LEN - 1,
+ fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));
+
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));
+
+ writel(tsin->pid_buffer_busaddr,
+ fei->io + PIDF_BASE(tsin->tsin_id));
+
+ dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
+ tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
+ &tsin->pid_buffer_busaddr);
+
+ /* Configure and enable HW PID filtering */
+
+ /*
+ * The PID value is created by assembling the first 8 bytes of
+ * the TS packet into a 64-bit word in big-endian format. A
+ * slice of that 64-bit word is taken from
+ * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
+ */
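+	/* bits 52..40 of that word hold the 13-bit PID of the TS header */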
+ tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
+ | C8SECTPFE_PID_OFFSET(40));
+
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));
+
+ dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
+ tsin->tsin_id,
+ readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));
+
+	/* Get base address of pointer record block from DMEM */
+ tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
+ readl(fei->io + DMA_PTRREC_BASE);
+
+ /* fill out pointer record data structure */
+
+ /* advance pointer record block to our channel */
+ tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);
+
+ writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
+
+ writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);
+
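+	/* TS packet size rounded up to the next multiple of 8 bytes (192) */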
+	writel((188 + 7) & ~7, tsin->irec + DMA_PRDS_PKTSIZE);
+
+ writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);
+
+ /* read/write pointers with physical bus address */
+
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
+
+ /* initialize tasklet */
+ tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
+ (unsigned long) tsin);
+
+ return 0;
+
+err_unmap:
+ free_input_block(fei, tsin);
+ return ret;
+}
+
+static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
+{
+ struct c8sectpfei *fei = priv;
+
+ dev_err(fei->dev, "%s: error handling not yet implemented\n"
+ , __func__);
+
+ /*
+ * TODO FIXME we should detect some error conditions here
+	 * and ideally do something about them!
+ */
+
+ return IRQ_HANDLED;
+}
+
+static int c8sectpfe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node;
+ struct c8sectpfei *fei;
+ struct resource *res;
+ int ret, index = 0;
+ struct channel_info *tsin;
+
+ /* Allocate the c8sectpfei structure */
+ fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
+ if (!fei)
+ return -ENOMEM;
+
+ fei->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
+ fei->io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fei->io))
+ return PTR_ERR(fei->io);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "c8sectpfe-ram");
+ fei->sram = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fei->sram))
+ return PTR_ERR(fei->sram);
+
+ fei->sram_size = resource_size(res);
+
+ fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
+ if (fei->idle_irq < 0) {
+ dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
+ return fei->idle_irq;
+ }
+
+ fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
+ if (fei->error_irq < 0) {
+ dev_err(dev, "Can't get c8sectpfe-error-irq\n");
+ return fei->error_irq;
+ }
+
+ platform_set_drvdata(pdev, fei);
+
+ fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
+ if (IS_ERR(fei->c8sectpfeclk)) {
+ dev_err(dev, "c8sectpfe clk not found\n");
+ return PTR_ERR(fei->c8sectpfeclk);
+ }
+
+ ret = clk_prepare_enable(fei->c8sectpfeclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable c8sectpfe clock\n");
+ return ret;
+ }
+
+	/* to save power disable all IPs (on by default) */
+ writel(0, fei->io + SYS_INPUT_CLKEN);
+
+ /* Enable memdma clock */
+ writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
+
+ /* clear internal sram */
+ memset_io(fei->sram, 0x0, fei->sram_size);
+
+ c8sectpfe_getconfig(fei);
+
+ ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
+ 0, "c8sectpfe-idle-irq", fei);
+ if (ret) {
+ dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
+ goto err_clk_disable;
+ }
+
+ ret = devm_request_irq(dev, fei->error_irq,
+ c8sectpfe_error_irq_handler, 0,
+ "c8sectpfe-error-irq", fei);
+ if (ret) {
+ dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
+ goto err_clk_disable;
+ }
+
+ fei->tsin_count = of_get_child_count(np);
+
+ if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
+ fei->tsin_count > fei->hw_stats.num_ib) {
+
+ dev_err(dev, "More tsin declared than exist on SoC!\n");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ fei->pinctrl = devm_pinctrl_get(dev);
+
+ if (IS_ERR(fei->pinctrl)) {
+ dev_err(dev, "Error getting tsin pins\n");
+ ret = PTR_ERR(fei->pinctrl);
+ goto err_clk_disable;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct device_node *i2c_bus;
+
+ fei->channel_data[index] = devm_kzalloc(dev,
+ sizeof(struct channel_info),
+ GFP_KERNEL);
+
+ if (!fei->channel_data[index]) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ tsin = fei->channel_data[index];
+
+ tsin->fei = fei;
+
+ ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
+ if (ret) {
+ dev_err(&pdev->dev, "No tsin_num found\n");
+ goto err_clk_disable;
+ }
+
+ /* sanity check value */
+ if (tsin->tsin_id > fei->hw_stats.num_ib) {
+ dev_err(&pdev->dev,
+ "tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
+ tsin->tsin_id, fei->hw_stats.num_ib);
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ tsin->invert_ts_clk = of_property_read_bool(child,
+ "invert-ts-clk");
+
+ tsin->serial_not_parallel = of_property_read_bool(child,
+ "serial-not-parallel");
+
+ tsin->async_not_sync = of_property_read_bool(child,
+ "async-not-sync");
+
+ ret = of_property_read_u32(child, "dvb-card",
+ &tsin->dvb_card);
+ if (ret) {
+ dev_err(&pdev->dev, "No dvb-card found\n");
+ goto err_clk_disable;
+ }
+
+ i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
+ if (!i2c_bus) {
+ dev_err(&pdev->dev, "No i2c-bus found\n");
+ ret = -ENODEV;
+ goto err_clk_disable;
+ }
+ tsin->i2c_adapter =
+ of_find_i2c_adapter_by_node(i2c_bus);
+ if (!tsin->i2c_adapter) {
+ dev_err(&pdev->dev, "No i2c adapter found\n");
+ of_node_put(i2c_bus);
+ ret = -ENODEV;
+ goto err_clk_disable;
+ }
+ of_node_put(i2c_bus);
+
+ tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);
+
+		ret = gpio_is_valid(tsin->rst_gpio);
+		if (!ret) {
+			dev_err(dev,
+				"reset gpio for tsin%d not valid (gpio=%d)\n",
+				tsin->tsin_id, tsin->rst_gpio);
+			ret = -EINVAL;
+			goto err_clk_disable;
+		}
+
+ ret = devm_gpio_request_one(dev, tsin->rst_gpio,
+ GPIOF_OUT_INIT_LOW, "NIM reset");
+ if (ret && ret != -EBUSY) {
+ dev_err(dev, "Can't request tsin%d reset gpio\n"
+ , fei->channel_data[index]->tsin_id);
+ goto err_clk_disable;
+ }
+
+ if (!ret) {
+ /* toggle reset lines */
+ gpio_direction_output(tsin->rst_gpio, 0);
+ usleep_range(3500, 5000);
+ gpio_direction_output(tsin->rst_gpio, 1);
+ usleep_range(3000, 5000);
+ }
+
+ tsin->demux_mapping = index;
+
+ dev_dbg(fei->dev,
+			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d async-not-sync=%d dvb-card=%d\n",
+ fei->channel_data[index], index,
+ tsin->tsin_id, tsin->invert_ts_clk,
+ tsin->serial_not_parallel, tsin->async_not_sync,
+ tsin->dvb_card);
+
+ index++;
+ }
+
+	/* Set up the polling timer that schedules the demux tasklets */
+ timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
+
+ mutex_init(&fei->lock);
+
+ /* Get the configuration information about the tuners */
+ ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
+ (void *)fei,
+ c8sectpfe_start_feed,
+ c8sectpfe_stop_feed);
+ if (ret) {
+ dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
+ ret);
+ goto err_clk_disable;
+ }
+
+ c8sectpfe_debugfs_init(fei);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(fei->c8sectpfeclk);
+ return ret;
+}
+
+static int c8sectpfe_remove(struct platform_device *pdev)
+{
+ struct c8sectpfei *fei = platform_get_drvdata(pdev);
+ struct channel_info *channel;
+ int i;
+
+ wait_for_completion(&fei->fw_ack);
+
+ c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
+
+ /*
+ * Now loop through and un-configure each of the InputBlock resources
+ */
+ for (i = 0; i < fei->tsin_count; i++) {
+ channel = fei->channel_data[i];
+ free_input_block(fei, channel);
+ }
+
+ c8sectpfe_debugfs_exit(fei);
+
+ dev_info(fei->dev, "Stopping memdma SLIM core\n");
+ if (readl(fei->io + DMA_CPU_RUN))
+ writel(0x0, fei->io + DMA_CPU_RUN);
+
+	/* unclock all internal IPs */
+ if (readl(fei->io + SYS_INPUT_CLKEN))
+ writel(0, fei->io + SYS_INPUT_CLKEN);
+
+ if (readl(fei->io + SYS_OTHER_CLKEN))
+ writel(0, fei->io + SYS_OTHER_CLKEN);
+
+ if (fei->c8sectpfeclk)
+ clk_disable_unprepare(fei->c8sectpfeclk);
+
+ return 0;
+}
+
+
+static int configure_channels(struct c8sectpfei *fei)
+{
+ int index = 0, ret;
+ struct channel_info *tsin;
+ struct device_node *child, *np = fei->dev->of_node;
+
+ /* iterate round each tsin and configure memdma descriptor and IB hw */
+ for_each_child_of_node(np, child) {
+
+ tsin = fei->channel_data[index];
+
+ ret = configure_memdma_and_inputblock(fei,
+ fei->channel_data[index]);
+
+ if (ret) {
+ dev_err(fei->dev,
+ "configure_memdma_and_inputblock failed\n");
+ goto err_unmap;
+ }
+ index++;
+ }
+
+ return 0;
+
+err_unmap:
+ for (index = 0; index < fei->tsin_count; index++) {
+ tsin = fei->channel_data[index];
+ free_input_block(fei, tsin);
+ }
+ return ret;
+}
+
+static int
+c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr;
+ char class;
+
+ if (!fw) {
+ dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(fei->dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ /* We only support ELF32 at this point */
+ class = ehdr->e_ident[EI_CLASS];
+ if (class != ELFCLASS32) {
+ dev_err(fei->dev, "Unsupported class: %d\n", class);
+ return -EINVAL;
+ }
+
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
+ dev_err(fei->dev, "Unsupported firmware endianness\n");
+ return -EINVAL;
+ }
+
+ if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
+ dev_err(fei->dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(fei->dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ /* Check ELF magic */
+ ehdr = (Elf32_Ehdr *)fw->data;
+ if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
+ ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
+ ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
+ ehdr->e_ident[EI_MAG3] != ELFMAG3) {
+ dev_err(fei->dev, "Invalid ELF magic\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_type != ET_EXEC) {
+ dev_err(fei->dev, "Unsupported ELF header type\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phoff > fw->size) {
+ dev_err(fei->dev, "Firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+ const struct firmware *fw, u8 __iomem *dest,
+ int seg_num)
+{
+ const u8 *imem_src = fw->data + phdr->p_offset;
+ int i;
+
+ /*
+ * For IMEM segments, the segment contains 24-bit
+ * instructions which must be padded to 32-bit
+ * instructions before being written. The written
+ * segment is padded with NOP instructions.
+ */
+
+ dev_dbg(fei->dev,
+ "Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
+		seg_num,
+ phdr->p_paddr, phdr->p_filesz,
+ dest, phdr->p_memsz + phdr->p_memsz / 3);
+
+ for (i = 0; i < phdr->p_filesz; i++) {
+
+ writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);
+
+ /* Every 3 bytes, add an additional
+ * padding zero in destination */
+ if (i % 3 == 2) {
+ dest++;
+ writeb(0x00, (void __iomem *)dest);
+ }
+
+ dest++;
+ imem_src++;
+ }
+}
+
+static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+ const struct firmware *fw, u8 __iomem *dst, int seg_num)
+{
+ /*
+ * For DMEM segments copy the segment data from the ELF
+ * file and pad segment with zeroes
+ */
+
+ dev_dbg(fei->dev,
+ "Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ seg_num, phdr->p_paddr, phdr->p_filesz,
+ dst, phdr->p_memsz);
+
+ memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
+ phdr->p_filesz);
+
+ memset((void __force *)dst + phdr->p_filesz, 0,
+ phdr->p_memsz - phdr->p_filesz);
+}
+
+static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
+{
+ Elf32_Ehdr *ehdr;
+ Elf32_Phdr *phdr;
+ u8 __iomem *dst;
+ int err = 0, i;
+
+ if (!fw || !fei)
+ return -EINVAL;
+
+ ehdr = (Elf32_Ehdr *)fw->data;
+ phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+
+ /* Only consider LOAD segments */
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ /*
+ * Check segment is contained within the fw->data buffer
+ */
+ if (phdr->p_offset + phdr->p_filesz > fw->size) {
+ dev_err(fei->dev,
+ "Segment %d is outside of firmware file\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * MEMDMA IMEM has executable flag set, otherwise load
+ * this segment into DMEM.
+ *
+ */
+
+ if (phdr->p_flags & PF_X) {
+ dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
+ /*
+ * The Slim ELF file uses 32-bit word addressing for
+ * load offsets.
+ */
+ dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+ load_imem_segment(fei, phdr, fw, dst, i);
+ } else {
+ dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
+ /*
+ * The Slim ELF file uses 32-bit word addressing for
+ * load offsets.
+ */
+ dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+ load_dmem_segment(fei, phdr, fw, dst, i);
+ }
+ }
+
+ release_firmware(fw);
+ return err;
+}
+
+static int load_c8sectpfe_fw(struct c8sectpfei *fei)
+{
+ const struct firmware *fw;
+ int err;
+
+ dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
+
+ err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
+ if (err)
+ return err;
+
+ err = c8sectpfe_elf_sanity_check(fei, fw);
+ if (err) {
+ dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
+ , err);
+ release_firmware(fw);
+ return err;
+ }
+
+ err = load_slim_core_fw(fw, fei);
+ if (err) {
+ dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
+ return err;
+ }
+
+ /* now the firmware is loaded configure the input blocks */
+ err = configure_channels(fei);
+ if (err) {
+ dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
+ return err;
+ }
+
+ /*
+ * STBus target port can access IMEM and DMEM ports
+ * without waiting for CPU
+ */
+ writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
+
+ dev_info(fei->dev, "Boot the memdma SLIM core\n");
+ writel(0x1, fei->io + DMA_CPU_RUN);
+
+ atomic_set(&fei->fw_loaded, 1);
+
+ return 0;
+}
+
+static const struct of_device_id c8sectpfe_match[] = {
+ { .compatible = "st,stih407-c8sectpfe" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, c8sectpfe_match);
+
+static struct platform_driver c8sectpfe_driver = {
+ .driver = {
+ .name = "c8sectpfe",
+ .of_match_table = of_match_ptr(c8sectpfe_match),
+ },
+ .probe = c8sectpfe_probe,
+ .remove = c8sectpfe_remove,
+};
+
+module_platform_driver(c8sectpfe_driver);
+
+MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
new file mode 100644
index 000000000..3dbb3a287
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-core.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Bennett <peter.bennett@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#ifndef _C8SECTPFE_CORE_H_
+#define _C8SECTPFE_CORE_H_
+
+#define C8SECTPFEI_MAXCHANNEL 16
+#define C8SECTPFEI_MAXADAPTER 3
+
+#define C8SECTPFE_MAX_TSIN_CHAN 8
+
+struct channel_info {
+
+ int tsin_id;
+ bool invert_ts_clk;
+ bool serial_not_parallel;
+ bool async_not_sync;
+ int i2c;
+ int dvb_card;
+
+ int rst_gpio;
+
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_adapter *tuner_i2c;
+ struct i2c_adapter *lnb_i2c;
+ struct i2c_client *i2c_client;
+ struct dvb_frontend *frontend;
+
+ struct pinctrl_state *pstate;
+
+ int demux_mapping;
+ int active;
+
+ void *back_buffer_start;
+ void *back_buffer_aligned;
+ dma_addr_t back_buffer_busaddr;
+
+ void *pid_buffer_start;
+ void *pid_buffer_aligned;
+ dma_addr_t pid_buffer_busaddr;
+
+ unsigned long fifo;
+
+ struct completion idle_completion;
+ struct tasklet_struct tsklet;
+
+ struct c8sectpfei *fei;
+ void __iomem *irec;
+
+};
+
+struct c8sectpfe_hw {
+ int num_ib;
+ int num_mib;
+ int num_swts;
+ int num_tsout;
+ int num_ccsc;
+ int num_ram;
+ int num_tp;
+};
+
+struct c8sectpfei {
+
+ struct device *dev;
+ struct pinctrl *pinctrl;
+
+ struct dentry *root;
+ struct debugfs_regset32 *regset;
+ struct completion fw_ack;
+ atomic_t fw_loaded;
+
+ int tsin_count;
+
+ struct c8sectpfe_hw hw_stats;
+
+ struct c8sectpfe *c8sectpfe[C8SECTPFEI_MAXADAPTER];
+
+ int mapping[C8SECTPFEI_MAXCHANNEL];
+
+ struct mutex lock;
+
+ struct timer_list timer; /* timer interrupts for outputs */
+
+ void __iomem *io;
+ void __iomem *sram;
+
+ unsigned long sram_size;
+
+ struct channel_info *channel_data[C8SECTPFE_MAX_TSIN_CHAN];
+
+ struct clk *c8sectpfeclk;
+ int nima_rst_gpio;
+ int nimb_rst_gpio;
+
+ int idle_irq;
+ int error_irq;
+
+ int global_feed_count;
+};
+
+/* C8SECTPFE SYS Regs list */
+
+#define SYS_INPUT_ERR_STATUS 0x0
+#define SYS_OTHER_ERR_STATUS 0x8
+#define SYS_INPUT_ERR_MASK 0x10
+#define SYS_OTHER_ERR_MASK 0x18
+#define SYS_DMA_ROUTE 0x20
+#define SYS_INPUT_CLKEN 0x30
+#define IBENABLE_MASK 0x7F
+
+#define SYS_OTHER_CLKEN 0x38
+#define TSDMAENABLE BIT(1)
+#define MEMDMAENABLE BIT(0)
+
+#define SYS_CFG_NUM_IB 0x200
+#define SYS_CFG_NUM_MIB 0x204
+#define SYS_CFG_NUM_SWTS 0x208
+#define SYS_CFG_NUM_TSOUT 0x20C
+#define SYS_CFG_NUM_CCSC 0x210
+#define SYS_CFG_NUM_RAM 0x214
+#define SYS_CFG_NUM_TP 0x218
+
+/* Input Block Regs */
+
+#define C8SECTPFE_INPUTBLK_OFFSET 0x1000
+#define C8SECTPFE_CHANNEL_OFFSET(x) ((x*0x40) + C8SECTPFE_INPUTBLK_OFFSET)
+
+#define C8SECTPFE_IB_IP_FMT_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x00)
+#define C8SECTPFE_IGNORE_ERR_AT_SOP BIT(7)
+#define C8SECTPFE_IGNORE_ERR_IN_PKT BIT(6)
+#define C8SECTPFE_IGNORE_ERR_IN_BYTE BIT(5)
+#define C8SECTPFE_INVERT_TSCLK BIT(4)
+#define C8SECTPFE_ALIGN_BYTE_SOP BIT(3)
+#define C8SECTPFE_ASYNC_NOT_SYNC BIT(2)
+#define C8SECTPFE_BYTE_ENDIANNESS_MSB BIT(1)
+#define C8SECTPFE_SERIAL_NOT_PARALLEL BIT(0)
+
+#define C8SECTPFE_IB_SYNCLCKDRP_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x04)
+#define C8SECTPFE_SYNC(x) (x & 0xf)
+#define C8SECTPFE_DROP(x) ((x<<4) & 0xf)
+#define C8SECTPFE_TOKEN(x) ((x<<8) & 0xff00)
+#define C8SECTPFE_SLDENDIANNESS BIT(16)
+
+#define C8SECTPFE_IB_TAGBYTES_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x08)
+#define C8SECTPFE_TAG_HEADER(x) (x << 16)
+#define C8SECTPFE_TAG_COUNTER(x) ((x<<1) & 0x7fff)
+#define C8SECTPFE_TAG_ENABLE BIT(0)
+
+#define C8SECTPFE_IB_PID_SET(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x0C)
+#define C8SECTPFE_PID_OFFSET(x) (x & 0x3f)
+#define C8SECTPFE_PID_NUMBITS(x) ((x << 6) & 0xfff)
+#define C8SECTPFE_PID_ENABLE BIT(31)
+
+#define C8SECTPFE_IB_PKT_LEN(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x10)
+
+#define C8SECTPFE_IB_BUFF_STRT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x14)
+#define C8SECTPFE_IB_BUFF_END(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x18)
+#define C8SECTPFE_IB_READ_PNT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x1C)
+#define C8SECTPFE_IB_WRT_PNT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x20)
+
+#define C8SECTPFE_IB_PRI_THRLD(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x24)
+#define C8SECTPFE_PRI_VALUE(x) (x & 0x7fffff)
+#define C8SECTPFE_PRI_LOWPRI(x) ((x & 0xf) << 24)
+#define C8SECTPFE_PRI_HIGHPRI(x) ((x & 0xf) << 28)
+
+#define C8SECTPFE_IB_STAT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x28)
+#define C8SECTPFE_STAT_FIFO_OVERFLOW(x) (x & 0x1)
+#define C8SECTPFE_STAT_BUFFER_OVERFLOW(x) (x & 0x2)
+#define C8SECTPFE_STAT_OUTOFORDERRP(x) (x & 0x4)
+#define C8SECTPFE_STAT_PID_OVERFLOW(x) (x & 0x8)
+#define C8SECTPFE_STAT_PKT_OVERFLOW(x) (x & 0x10)
+#define C8SECTPFE_STAT_ERROR_PACKETS(x) ((x >> 8) & 0xf)
+#define C8SECTPFE_STAT_SHORT_PACKETS(x) ((x >> 12) & 0xf)
+
+#define C8SECTPFE_IB_MASK(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x2C)
+#define C8SECTPFE_MASK_FIFO_OVERFLOW BIT(0)
+#define C8SECTPFE_MASK_BUFFER_OVERFLOW BIT(1)
+#define C8SECTPFE_MASK_OUTOFORDERRP(x) BIT(2)
+#define C8SECTPFE_MASK_PID_OVERFLOW(x) BIT(3)
+#define C8SECTPFE_MASK_PKT_OVERFLOW(x) BIT(4)
+#define C8SECTPFE_MASK_ERROR_PACKETS(x) ((x & 0xf) << 8)
+#define C8SECTPFE_MASK_SHORT_PACKETS(x) ((x & 0xf) >> 12)
+
+#define C8SECTPFE_IB_SYS(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x30)
+#define C8SECTPFE_SYS_RESET BIT(1)
+#define C8SECTPFE_SYS_ENABLE BIT(0)
+
+/*
+ * Pointer record data structure required for each input block
+ * see Table 82 on page 167 of functional specification.
+ */
+
+#define DMA_PRDS_MEMBASE 0x0 /* Internal sram base address */
+#define DMA_PRDS_MEMTOP 0x4 /* Internal sram top address */
+
+/*
+ * TS packet size, including tag bytes added by input block,
+ * rounded up to the next multiple of 8 bytes. The packet size,
+ * including any tagging bytes and rounded up to the nearest
+ * multiple of 8 bytes must be less than 255 bytes.
+ */
+#define DMA_PRDS_PKTSIZE 0x8
+#define DMA_PRDS_TPENABLE 0xc
+
+#define TP0_OFFSET 0x10
+#define DMA_PRDS_BUSBASE_TP(x) ((0x10*x) + TP0_OFFSET)
+#define DMA_PRDS_BUSTOP_TP(x) ((0x10*x) + TP0_OFFSET + 0x4)
+#define DMA_PRDS_BUSWP_TP(x) ((0x10*x) + TP0_OFFSET + 0x8)
+#define DMA_PRDS_BUSRP_TP(x) ((0x10*x) + TP0_OFFSET + 0xc)
+
+#define DMA_PRDS_SIZE (0x20)
+
+#define DMA_MEMDMA_OFFSET 0x4000
+#define DMA_IMEM_OFFSET 0x0
+#define DMA_DMEM_OFFSET 0x4000
+#define DMA_CPU 0x8000
+#define DMA_PER_OFFSET 0xb000
+
+#define DMA_MEMDMA_DMEM (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET)
+#define DMA_MEMDMA_IMEM (DMA_MEMDMA_OFFSET + DMA_IMEM_OFFSET)
+
+/* XP70 Slim core regs */
+#define DMA_CPU_ID (DMA_MEMDMA_OFFSET + DMA_CPU + 0x0)
+#define DMA_CPU_VCR (DMA_MEMDMA_OFFSET + DMA_CPU + 0x4)
+#define DMA_CPU_RUN (DMA_MEMDMA_OFFSET + DMA_CPU + 0x8)
+#define DMA_CPU_CLOCKGATE (DMA_MEMDMA_OFFSET + DMA_CPU + 0xc)
+#define DMA_CPU_PC (DMA_MEMDMA_OFFSET + DMA_CPU + 0x20)
+
+/* Enable Interrupt for a IB */
+#define DMA_PER_TPn_DREQ_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd00)
+/* Ack interrupt by setting corresponding bit */
+#define DMA_PER_TPn_DACK_SET (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd80)
+#define DMA_PER_TPn_DREQ (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe00)
+#define DMA_PER_TPn_DACK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe80)
+#define DMA_PER_DREQ_MODE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf80)
+#define DMA_PER_STBUS_SYNC (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf88)
+#define DMA_PER_STBUS_ACCESS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf8c)
+#define DMA_PER_STBUS_ADDRESS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf90)
+#define DMA_PER_IDLE_INT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfa8)
+#define DMA_PER_PRIORITY (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfac)
+#define DMA_PER_MAX_OPCODE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb0)
+#define DMA_PER_MAX_CHUNK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb4)
+#define DMA_PER_PAGE_SIZE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfbc)
+#define DMA_PER_MBOX_STATUS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc0)
+#define DMA_PER_MBOX_SET (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc8)
+#define DMA_PER_MBOX_CLEAR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd0)
+#define DMA_PER_MBOX_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd8)
+#define DMA_PER_INJECT_PKT_SRC (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe0)
+#define DMA_PER_INJECT_PKT_DEST (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe4)
+#define DMA_PER_INJECT_PKT_ADDR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe8)
+#define DMA_PER_INJECT_PKT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfec)
+#define DMA_PER_PAT_PTR_INIT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff0)
+#define DMA_PER_PAT_PTR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff4)
+#define DMA_PER_SLEEP_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff8)
+#define DMA_PER_SLEEP_COUNTER (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xffc)
+/* #define DMA_RF_CPUREGn DMA_RFBASEADDR (n = 0 to 15) slim regs */
+
+/* The following are from DMA_DMEM_BaseAddress */
+#define DMA_FIRMWARE_VERSION (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x0)
+#define DMA_PTRREC_BASE (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x4)
+#define DMA_PTRREC_INPUT_OFFSET (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x8)
+#define DMA_ERRREC_BASE (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0xc)
+#define DMA_ERROR_RECORD(n) ((n*4) + DMA_ERRREC_BASE + 0x4)
+#define DMA_IDLE_REQ (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x10)
+#define IDLEREQ BIT(31)
+
+#define DMA_FIRMWARE_CONFIG (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x14)
+
+/* Regs for PID Filter */
+
+#define PIDF_OFFSET 0x2800
+#define PIDF_BASE(n) ((n*4) + PIDF_OFFSET)
+#define PIDF_LEAK_ENABLE (PIDF_OFFSET + 0x100)
+#define PIDF_LEAK_STATUS (PIDF_OFFSET + 0x108)
+#define PIDF_LEAK_COUNT_RESET (PIDF_OFFSET + 0x110)
+#define PIDF_LEAK_COUNTER (PIDF_OFFSET + 0x114)
+
+#endif /* _C8SECTPFE_CORE_H_ */
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
new file mode 100644
index 000000000..8f0ddcbee
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-debugfs.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "c8sectpfe-debugfs.h"
+
+#define dump_register(nm ...) \
+{ \
+ .name = #nm, \
+ .offset = nm, \
+}
+
+static const struct debugfs_reg32 fei_sys_regs[] = {
+ dump_register(SYS_INPUT_ERR_STATUS),
+ dump_register(SYS_OTHER_ERR_STATUS),
+ dump_register(SYS_INPUT_ERR_MASK),
+ dump_register(SYS_DMA_ROUTE),
+ dump_register(SYS_INPUT_CLKEN),
+ dump_register(IBENABLE_MASK),
+ dump_register(SYS_OTHER_CLKEN),
+ dump_register(SYS_CFG_NUM_IB),
+ dump_register(SYS_CFG_NUM_MIB),
+ dump_register(SYS_CFG_NUM_SWTS),
+ dump_register(SYS_CFG_NUM_TSOUT),
+ dump_register(SYS_CFG_NUM_CCSC),
+ dump_register(SYS_CFG_NUM_RAM),
+ dump_register(SYS_CFG_NUM_TP),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(0)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(0)),
+ dump_register(C8SECTPFE_IB_PID_SET(0)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(0)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(0)),
+ dump_register(C8SECTPFE_IB_BUFF_END(0)),
+ dump_register(C8SECTPFE_IB_READ_PNT(0)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(0)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(0)),
+ dump_register(C8SECTPFE_IB_STAT(0)),
+ dump_register(C8SECTPFE_IB_MASK(0)),
+ dump_register(C8SECTPFE_IB_SYS(0)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(1)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(1)),
+ dump_register(C8SECTPFE_IB_PID_SET(1)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(1)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(1)),
+ dump_register(C8SECTPFE_IB_BUFF_END(1)),
+ dump_register(C8SECTPFE_IB_READ_PNT(1)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(1)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(1)),
+ dump_register(C8SECTPFE_IB_STAT(1)),
+ dump_register(C8SECTPFE_IB_MASK(1)),
+ dump_register(C8SECTPFE_IB_SYS(1)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(2)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(2)),
+ dump_register(C8SECTPFE_IB_PID_SET(2)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(2)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(2)),
+ dump_register(C8SECTPFE_IB_BUFF_END(2)),
+ dump_register(C8SECTPFE_IB_READ_PNT(2)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(2)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(2)),
+ dump_register(C8SECTPFE_IB_STAT(2)),
+ dump_register(C8SECTPFE_IB_MASK(2)),
+ dump_register(C8SECTPFE_IB_SYS(2)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(3)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(3)),
+ dump_register(C8SECTPFE_IB_PID_SET(3)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(3)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(3)),
+ dump_register(C8SECTPFE_IB_BUFF_END(3)),
+ dump_register(C8SECTPFE_IB_READ_PNT(3)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(3)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(3)),
+ dump_register(C8SECTPFE_IB_STAT(3)),
+ dump_register(C8SECTPFE_IB_MASK(3)),
+ dump_register(C8SECTPFE_IB_SYS(3)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(4)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(4)),
+ dump_register(C8SECTPFE_IB_PID_SET(4)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(4)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(4)),
+ dump_register(C8SECTPFE_IB_BUFF_END(4)),
+ dump_register(C8SECTPFE_IB_READ_PNT(4)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(4)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(4)),
+ dump_register(C8SECTPFE_IB_STAT(4)),
+ dump_register(C8SECTPFE_IB_MASK(4)),
+ dump_register(C8SECTPFE_IB_SYS(4)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(5)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(5)),
+ dump_register(C8SECTPFE_IB_PID_SET(5)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(5)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(5)),
+ dump_register(C8SECTPFE_IB_BUFF_END(5)),
+ dump_register(C8SECTPFE_IB_READ_PNT(5)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(5)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(5)),
+ dump_register(C8SECTPFE_IB_STAT(5)),
+ dump_register(C8SECTPFE_IB_MASK(5)),
+ dump_register(C8SECTPFE_IB_SYS(5)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(6)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(6)),
+ dump_register(C8SECTPFE_IB_PID_SET(6)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(6)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(6)),
+ dump_register(C8SECTPFE_IB_BUFF_END(6)),
+ dump_register(C8SECTPFE_IB_READ_PNT(6)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(6)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(6)),
+ dump_register(C8SECTPFE_IB_STAT(6)),
+ dump_register(C8SECTPFE_IB_MASK(6)),
+ dump_register(C8SECTPFE_IB_SYS(6)),
+
+ dump_register(DMA_CPU_ID),
+ dump_register(DMA_CPU_VCR),
+ dump_register(DMA_CPU_RUN),
+ dump_register(DMA_CPU_PC),
+
+ dump_register(DMA_PER_TPn_DREQ_MASK),
+ dump_register(DMA_PER_TPn_DACK_SET),
+ dump_register(DMA_PER_TPn_DREQ),
+ dump_register(DMA_PER_TPn_DACK),
+ dump_register(DMA_PER_DREQ_MODE),
+ dump_register(DMA_PER_STBUS_SYNC),
+ dump_register(DMA_PER_STBUS_ACCESS),
+ dump_register(DMA_PER_STBUS_ADDRESS),
+ dump_register(DMA_PER_IDLE_INT),
+ dump_register(DMA_PER_PRIORITY),
+ dump_register(DMA_PER_MAX_OPCODE),
+ dump_register(DMA_PER_MAX_CHUNK),
+ dump_register(DMA_PER_PAGE_SIZE),
+ dump_register(DMA_PER_MBOX_STATUS),
+ dump_register(DMA_PER_MBOX_SET),
+ dump_register(DMA_PER_MBOX_CLEAR),
+ dump_register(DMA_PER_MBOX_MASK),
+ dump_register(DMA_PER_INJECT_PKT_SRC),
+ dump_register(DMA_PER_INJECT_PKT_DEST),
+ dump_register(DMA_PER_INJECT_PKT_ADDR),
+ dump_register(DMA_PER_INJECT_PKT),
+ dump_register(DMA_PER_PAT_PTR_INIT),
+ dump_register(DMA_PER_PAT_PTR),
+ dump_register(DMA_PER_SLEEP_MASK),
+ dump_register(DMA_PER_SLEEP_COUNTER),
+
+ dump_register(DMA_FIRMWARE_VERSION),
+ dump_register(DMA_PTRREC_BASE),
+ dump_register(DMA_PTRREC_INPUT_OFFSET),
+ dump_register(DMA_ERRREC_BASE),
+
+ dump_register(DMA_ERROR_RECORD(0)),
+ dump_register(DMA_ERROR_RECORD(1)),
+ dump_register(DMA_ERROR_RECORD(2)),
+ dump_register(DMA_ERROR_RECORD(3)),
+ dump_register(DMA_ERROR_RECORD(4)),
+ dump_register(DMA_ERROR_RECORD(5)),
+ dump_register(DMA_ERROR_RECORD(6)),
+ dump_register(DMA_ERROR_RECORD(7)),
+ dump_register(DMA_ERROR_RECORD(8)),
+ dump_register(DMA_ERROR_RECORD(9)),
+ dump_register(DMA_ERROR_RECORD(10)),
+ dump_register(DMA_ERROR_RECORD(11)),
+ dump_register(DMA_ERROR_RECORD(12)),
+ dump_register(DMA_ERROR_RECORD(13)),
+ dump_register(DMA_ERROR_RECORD(14)),
+ dump_register(DMA_ERROR_RECORD(15)),
+ dump_register(DMA_ERROR_RECORD(16)),
+ dump_register(DMA_ERROR_RECORD(17)),
+ dump_register(DMA_ERROR_RECORD(18)),
+ dump_register(DMA_ERROR_RECORD(19)),
+ dump_register(DMA_ERROR_RECORD(20)),
+ dump_register(DMA_ERROR_RECORD(21)),
+ dump_register(DMA_ERROR_RECORD(22)),
+
+ dump_register(DMA_IDLE_REQ),
+ dump_register(DMA_FIRMWARE_CONFIG),
+
+ dump_register(PIDF_BASE(0)),
+ dump_register(PIDF_BASE(1)),
+ dump_register(PIDF_BASE(2)),
+ dump_register(PIDF_BASE(3)),
+ dump_register(PIDF_BASE(4)),
+ dump_register(PIDF_BASE(5)),
+ dump_register(PIDF_BASE(6)),
+ dump_register(PIDF_BASE(7)),
+ dump_register(PIDF_BASE(8)),
+ dump_register(PIDF_BASE(9)),
+ dump_register(PIDF_BASE(10)),
+ dump_register(PIDF_BASE(11)),
+ dump_register(PIDF_BASE(12)),
+ dump_register(PIDF_BASE(13)),
+ dump_register(PIDF_BASE(14)),
+ dump_register(PIDF_BASE(15)),
+ dump_register(PIDF_BASE(16)),
+ dump_register(PIDF_BASE(17)),
+ dump_register(PIDF_BASE(18)),
+ dump_register(PIDF_BASE(19)),
+ dump_register(PIDF_BASE(20)),
+ dump_register(PIDF_BASE(21)),
+ dump_register(PIDF_BASE(22)),
+ dump_register(PIDF_LEAK_ENABLE),
+ dump_register(PIDF_LEAK_STATUS),
+ dump_register(PIDF_LEAK_COUNT_RESET),
+ dump_register(PIDF_LEAK_COUNTER),
+};
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *fei)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = debugfs_create_dir("c8sectpfe", NULL);
+ if (!root)
+ goto err;
+
+ fei->root = root;
+
+ fei->regset = devm_kzalloc(fei->dev, sizeof(*fei->regset), GFP_KERNEL);
+ if (!fei->regset)
+ goto err;
+
+ fei->regset->regs = fei_sys_regs;
+ fei->regset->nregs = ARRAY_SIZE(fei_sys_regs);
+ fei->regset->base = fei->io;
+
+ file = debugfs_create_regset32("registers", S_IRUGO, root,
+ fei->regset);
+ if (!file) {
+ dev_err(fei->dev,
+ "%s not able to create 'registers' debugfs\n"
+ , __func__);
+ goto err;
+ }
+
+ return;
+
+err:
+ debugfs_remove_recursive(root);
+}
+
+void c8sectpfe_debugfs_exit(struct c8sectpfei *fei)
+{
+ debugfs_remove_recursive(fei->root);
+ fei->root = NULL;
+}
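
The register table above leans on the generic debugfs "regset32" helper: each dump_register() entry expands to a struct debugfs_reg32 whose .offset is read from regset->base and printed next to its stringified name whenever the "registers" file is read. As a minimal illustrative sketch (not part of this patch; the foo_* names and offsets are hypothetical), the same pattern for any memory-mapped block looks like this:

/* Illustrative sketch only: foo_* names and register offsets are hypothetical. */
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/kernel.h>

#define FOO_CTRL	0x00	/* hypothetical register offsets */
#define FOO_STATUS	0x04

static const struct debugfs_reg32 foo_regs[] = {
	{ .name = "FOO_CTRL",   .offset = FOO_CTRL },
	{ .name = "FOO_STATUS", .offset = FOO_STATUS },
};

static struct debugfs_regset32 foo_regset;

static void foo_debugfs_init(struct dentry *root, void __iomem *base)
{
	foo_regset.regs = foo_regs;
	foo_regset.nregs = ARRAY_SIZE(foo_regs);
	foo_regset.base = base;

	/* reading "registers" dumps one "NAME = 0x........" line per entry */
	debugfs_create_regset32("registers", 0444, root, &foo_regset);
}

Reading the resulting debugfs file prints one line per table entry, which is exactly what the c8sectpfe table provides for the FEI system, input-block, DMA and PID-filter registers.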
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h
new file mode 100644
index 000000000..b8c30bcc8
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-debugfs.h - C8SECTPFE STi DVB driver debugfs header
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Authors: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+
+#ifndef __C8SECTPFE_DEBUG_H
+#define __C8SECTPFE_DEBUG_H
+
+#include "c8sectpfe-core.h"
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *fei);
+void c8sectpfe_debugfs_exit(struct c8sectpfei *fei);
+
+#endif /* __C8SECTPFE_DEBUG_H */
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
new file mode 100644
index 000000000..075d4695e
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-dvb.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+
+#include <dt-bindings/media/c8sectpfe.h>
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+#include "dvb-pll.h"
+#include "lnbh24.h"
+#include "stv0367.h"
+#include "stv0367_priv.h"
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "tda18212.h"
+
+static inline const char *dvb_card_str(unsigned int c)
+{
+ switch (c) {
+ case STV0367_TDA18212_NIMA_1: return "STV0367_TDA18212_NIMA_1";
+ case STV0367_TDA18212_NIMA_2: return "STV0367_TDA18212_NIMA_2";
+ case STV0367_TDA18212_NIMB_1: return "STV0367_TDA18212_NIMB_1";
+ case STV0367_TDA18212_NIMB_2: return "STV0367_TDA18212_NIMB_2";
+ case STV0903_6110_LNB24_NIMA: return "STV0903_6110_LNB24_NIMA";
+ case STV0903_6110_LNB24_NIMB: return "STV0903_6110_LNB24_NIMB";
+ default: return "unknown dvb frontend card";
+ }
+}
+
+static struct stv090x_config stv090x_config = {
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+ .clk_mode = STV090x_CLK_EXT,
+ .xtal = 16000000,
+ .address = 0x69,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
+ .ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
+
+ .repeater_level = STV090x_RPTLEVEL_64,
+
+ .tuner_init = NULL,
+ .tuner_set_mode = NULL,
+ .tuner_set_frequency = NULL,
+ .tuner_get_frequency = NULL,
+ .tuner_set_bandwidth = NULL,
+ .tuner_get_bandwidth = NULL,
+ .tuner_set_bbgain = NULL,
+ .tuner_get_bbgain = NULL,
+ .tuner_set_refclk = NULL,
+ .tuner_get_status = NULL,
+};
+
+static struct stv6110x_config stv6110x_config = {
+ .addr = 0x60,
+ .refclk = 16000000,
+};
+
+#define NIMA 0
+#define NIMB 1
+
+static struct stv0367_config stv0367_tda18212_config[] = {
+ {
+ .demod_address = 0x1c,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ }, {
+ .demod_address = 0x1d,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ }, {
+ .demod_address = 0x1e,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ },
+};
+
+static struct tda18212_config tda18212_conf = {
+ .if_dvbt_6 = 4150,
+ .if_dvbt_7 = 4150,
+ .if_dvbt_8 = 4500,
+ .if_dvbc = 5000,
+};
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+ struct c8sectpfe *c8sectpfe,
+ struct channel_info *tsin, int chan_num)
+{
+ struct tda18212_config *tda18212;
+ const struct stv6110x_devctl *fe2;
+ struct i2c_client *client;
+ struct i2c_board_info tda18212_info = {
+ .type = "tda18212",
+ .addr = 0x60,
+ };
+
+ if (!tsin)
+ return -EINVAL;
+
+ switch (tsin->dvb_card) {
+
+ case STV0367_TDA18212_NIMA_1:
+ case STV0367_TDA18212_NIMA_2:
+ case STV0367_TDA18212_NIMB_1:
+ case STV0367_TDA18212_NIMB_2:
+ if (tsin->dvb_card == STV0367_TDA18212_NIMA_1)
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[0],
+ tsin->i2c_adapter);
+ else if (tsin->dvb_card == STV0367_TDA18212_NIMB_1)
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[1],
+ tsin->i2c_adapter);
+ else
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[2],
+ tsin->i2c_adapter);
+
+ if (!*fe) {
+ dev_err(c8sectpfe->device,
+ "%s: stv0367ter_attach failed for NIM card %s\n"
+ , __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+		}
+
+ /*
+ * init the demod so that i2c gate_ctrl
+ * to the tuner works correctly
+ */
+ (*fe)->ops.init(*fe);
+
+ /* Allocate the tda18212 structure */
+ tda18212 = devm_kzalloc(c8sectpfe->device,
+ sizeof(struct tda18212_config),
+ GFP_KERNEL);
+ if (!tda18212) {
+ dev_err(c8sectpfe->device,
+ "%s: devm_kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(tda18212, &tda18212_conf,
+ sizeof(struct tda18212_config));
+
+ tda18212->fe = (*fe);
+
+ tda18212_info.platform_data = tda18212;
+
+ /* attach tuner */
+ request_module("tda18212");
+ client = i2c_new_device(tsin->i2c_adapter, &tda18212_info);
+ if (!client || !client->dev.driver) {
+ dvb_frontend_detach(*fe);
+ return -ENODEV;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(*fe);
+ return -ENODEV;
+ }
+
+ tsin->i2c_client = client;
+
+ break;
+
+ case STV0903_6110_LNB24_NIMA:
+ *fe = dvb_attach(stv090x_attach, &stv090x_config,
+ tsin->i2c_adapter, STV090x_DEMODULATOR_0);
+ if (!*fe) {
+ dev_err(c8sectpfe->device, "%s: stv090x_attach failed\n"
+ "\tfor NIM card %s\n",
+ __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ fe2 = dvb_attach(stv6110x_attach, *fe,
+ &stv6110x_config, tsin->i2c_adapter);
+ if (!fe2) {
+ dev_err(c8sectpfe->device,
+ "%s: stv6110x_attach failed for NIM card %s\n"
+ , __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+		}
+
+ stv090x_config.tuner_init = fe2->tuner_init;
+ stv090x_config.tuner_set_mode = fe2->tuner_set_mode;
+ stv090x_config.tuner_set_frequency = fe2->tuner_set_frequency;
+ stv090x_config.tuner_get_frequency = fe2->tuner_get_frequency;
+ stv090x_config.tuner_set_bandwidth = fe2->tuner_set_bandwidth;
+ stv090x_config.tuner_get_bandwidth = fe2->tuner_get_bandwidth;
+ stv090x_config.tuner_set_bbgain = fe2->tuner_set_bbgain;
+ stv090x_config.tuner_get_bbgain = fe2->tuner_get_bbgain;
+ stv090x_config.tuner_set_refclk = fe2->tuner_set_refclk;
+ stv090x_config.tuner_get_status = fe2->tuner_get_status;
+
+ dvb_attach(lnbh24_attach, *fe, tsin->i2c_adapter, 0, 0, 0x9);
+ break;
+
+ default:
+ dev_err(c8sectpfe->device,
+ "%s: DVB frontend card %s not yet supported\n",
+ __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ (*fe)->id = chan_num;
+
+ dev_info(c8sectpfe->device,
+		"DVB frontend card %s successfully attached\n",
+ dvb_card_str(tsin->dvb_card));
+ return 0;
+}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h
new file mode 100644
index 000000000..3d87a9ae8
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-dvb.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#ifndef _C8SECTPFE_DVB_H_
+#define _C8SECTPFE_DVB_H_
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+ struct c8sectpfe *c8sectpfe, struct channel_info *tsin,
+ int chan_num);
+
+#endif
diff --git a/drivers/media/platform/sti/cec/Makefile b/drivers/media/platform/sti/cec/Makefile
new file mode 100644
index 000000000..f07905e14
--- /dev/null
+++ b/drivers/media/platform/sti/cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += stih-cec.o
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
new file mode 100644
index 000000000..d34099f75
--- /dev/null
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STIH4xx CEC driver
+ * Copyright (C) STMicroelectronics SA 2016
+ *
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+#define CEC_NAME "stih-cec"
+
+/* CEC registers */
+#define CEC_CLK_DIV 0x0
+#define CEC_CTRL 0x4
+#define CEC_IRQ_CTRL 0x8
+#define CEC_STATUS 0xC
+#define CEC_EXT_STATUS 0x10
+#define CEC_TX_CTRL 0x14
+#define CEC_FREE_TIME_THRESH 0x18
+#define CEC_BIT_TOUT_THRESH 0x1C
+#define CEC_BIT_PULSE_THRESH 0x20
+#define CEC_DATA 0x24
+#define CEC_TX_ARRAY_CTRL 0x28
+#define CEC_CTRL2 0x2C
+#define CEC_TX_ERROR_STS 0x30
+#define CEC_ADDR_TABLE 0x34
+#define CEC_DATA_ARRAY_CTRL 0x38
+#define CEC_DATA_ARRAY_STATUS 0x3C
+#define CEC_TX_DATA_BASE 0x40
+#define CEC_TX_DATA_TOP 0x50
+#define CEC_TX_DATA_SIZE 0x1
+#define CEC_RX_DATA_BASE 0x54
+#define CEC_RX_DATA_TOP 0x64
+#define CEC_RX_DATA_SIZE 0x1
+
+/* CEC_CTRL2 */
+#define CEC_LINE_INACTIVE_EN BIT(0)
+#define CEC_AUTO_BUS_ERR_EN BIT(1)
+#define CEC_STOP_ON_ARB_ERR_EN BIT(2)
+#define CEC_TX_REQ_WAIT_EN BIT(3)
+
+/* CEC_DATA_ARRAY_CTRL */
+#define CEC_TX_ARRAY_EN BIT(0)
+#define CEC_RX_ARRAY_EN BIT(1)
+#define CEC_TX_ARRAY_RESET BIT(2)
+#define CEC_RX_ARRAY_RESET BIT(3)
+#define CEC_TX_N_OF_BYTES_IRQ_EN BIT(4)
+#define CEC_TX_STOP_ON_NACK BIT(7)
+
+/* CEC_TX_ARRAY_CTRL */
+#define CEC_TX_N_OF_BYTES 0x1F
+#define CEC_TX_START BIT(5)
+#define CEC_TX_AUTO_SOM_EN BIT(6)
+#define CEC_TX_AUTO_EOM_EN BIT(7)
+
+/* CEC_IRQ_CTRL */
+#define CEC_TX_DONE_IRQ_EN BIT(0)
+#define CEC_ERROR_IRQ_EN BIT(2)
+#define CEC_RX_DONE_IRQ_EN BIT(3)
+#define CEC_RX_SOM_IRQ_EN BIT(4)
+#define CEC_RX_EOM_IRQ_EN BIT(5)
+#define CEC_FREE_TIME_IRQ_EN BIT(6)
+#define CEC_PIN_STS_IRQ_EN BIT(7)
+
+/* CEC_CTRL */
+#define CEC_IN_FILTER_EN BIT(0)
+#define CEC_PWR_SAVE_EN BIT(1)
+#define CEC_EN BIT(4)
+#define CEC_ACK_CTRL BIT(5)
+#define CEC_RX_RESET_EN BIT(6)
+#define CEC_IGNORE_RX_ERROR BIT(7)
+
+/* CEC_STATUS */
+#define CEC_TX_DONE_STS BIT(0)
+#define CEC_TX_ACK_GET_STS BIT(1)
+#define CEC_ERROR_STS BIT(2)
+#define CEC_RX_DONE_STS BIT(3)
+#define CEC_RX_SOM_STS BIT(4)
+#define CEC_RX_EOM_STS BIT(5)
+#define CEC_FREE_TIME_IRQ_STS BIT(6)
+#define CEC_PIN_STS BIT(7)
+#define CEC_SBIT_TOUT_STS BIT(8)
+#define CEC_DBIT_TOUT_STS BIT(9)
+#define CEC_LPULSE_ERROR_STS BIT(10)
+#define CEC_HPULSE_ERROR_STS BIT(11)
+#define CEC_TX_ERROR BIT(12)
+#define CEC_TX_ARB_ERROR BIT(13)
+#define CEC_RX_ERROR_MIN BIT(14)
+#define CEC_RX_ERROR_MAX BIT(15)
+
+/* Signal free time in bit periods (2.4ms) */
+#define CEC_PRESENT_INIT_SFT 7
+#define CEC_NEW_INIT_SFT 5
+#define CEC_RETRANSMIT_SFT 3
+
+/* Constants for CEC_BIT_TOUT_THRESH register */
+#define CEC_SBIT_TOUT_47MS BIT(1)
+#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
+#define CEC_SBIT_TOUT_50MS BIT(2)
+#define CEC_DBIT_TOUT_27MS BIT(0)
+#define CEC_DBIT_TOUT_28MS BIT(1)
+#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
+
+/* Constants for CEC_BIT_PULSE_THRESH register */
+#define CEC_BIT_LPULSE_03MS BIT(1)
+#define CEC_BIT_HPULSE_03MS BIT(3)
+
+/* Constants for CEC_DATA_ARRAY_STATUS register */
+#define CEC_RX_N_OF_BYTES 0x1F
+#define CEC_TX_N_OF_BYTES_SENT BIT(5)
+#define CEC_RX_OVERRUN BIT(6)
+
+struct stih_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *regs;
+ int irq;
+ u32 irq_status;
+ struct cec_notifier *notifier;
+};
+
+static int stih_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct stih_cec *cec = cec_get_drvdata(adap);
+
+ if (enable) {
+ /* The doc says (input TCLK_PERIOD * CEC_CLK_DIV) = 0.1ms */
+ unsigned long clk_freq = clk_get_rate(cec->clk);
+ u32 cec_clk_div = clk_freq / 10000;
+
+ writel(cec_clk_div, cec->regs + CEC_CLK_DIV);
+
+ /* Configuration of the durations activating a timeout */
+ writel(CEC_SBIT_TOUT_47MS | (CEC_DBIT_TOUT_28MS << 4),
+ cec->regs + CEC_BIT_TOUT_THRESH);
+
+ /* Configuration of the smallest allowed duration for pulses */
+ writel(CEC_BIT_LPULSE_03MS | CEC_BIT_HPULSE_03MS,
+ cec->regs + CEC_BIT_PULSE_THRESH);
+
+ /* Minimum received bit period threshold */
+ writel(BIT(5) | BIT(7), cec->regs + CEC_TX_CTRL);
+
+ /* Configuration of transceiver data arrays */
+ writel(CEC_TX_ARRAY_EN | CEC_RX_ARRAY_EN | CEC_TX_STOP_ON_NACK,
+ cec->regs + CEC_DATA_ARRAY_CTRL);
+
+ /* Configuration of the control bits for CEC Transceiver */
+ writel(CEC_IN_FILTER_EN | CEC_EN | CEC_RX_RESET_EN,
+ cec->regs + CEC_CTRL);
+
+ /* Clear logical addresses */
+ writel(0, cec->regs + CEC_ADDR_TABLE);
+
+ /* Clear the status register */
+ writel(0x0, cec->regs + CEC_STATUS);
+
+ /* Enable the interrupts */
+ writel(CEC_TX_DONE_IRQ_EN | CEC_RX_DONE_IRQ_EN |
+ CEC_RX_SOM_IRQ_EN | CEC_RX_EOM_IRQ_EN |
+ CEC_ERROR_IRQ_EN,
+ cec->regs + CEC_IRQ_CTRL);
+
+ } else {
+ /* Clear logical addresses */
+ writel(0, cec->regs + CEC_ADDR_TABLE);
+
+ /* Clear the status register */
+ writel(0x0, cec->regs + CEC_STATUS);
+
+ /* Disable the interrupts */
+ writel(0, cec->regs + CEC_IRQ_CTRL);
+ }
+
+ return 0;
+}
+
+static int stih_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct stih_cec *cec = cec_get_drvdata(adap);
+ u32 reg = readl(cec->regs + CEC_ADDR_TABLE);
+
+ reg |= 1 << logical_addr;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ reg = 0;
+
+ writel(reg, cec->regs + CEC_ADDR_TABLE);
+
+ return 0;
+}
+
+static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct stih_cec *cec = cec_get_drvdata(adap);
+ int i;
+
+ /* Copy message into registers */
+ for (i = 0; i < msg->len; i++)
+ writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i);
+
+ /*
+ * Start transmission, configure hardware to add start and stop bits
+ * Signal free time is handled by the hardware
+ */
+ writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START |
+ msg->len, cec->regs + CEC_TX_ARRAY_CTRL);
+
+ return 0;
+}
+
+static void stih_tx_done(struct stih_cec *cec, u32 status)
+{
+ if (status & CEC_TX_ERROR) {
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ERROR);
+ return;
+ }
+
+ if (status & CEC_TX_ARB_ERROR) {
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ARB_LOST);
+ return;
+ }
+
+ if (!(status & CEC_TX_ACK_GET_STS)) {
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_NACK);
+ return;
+ }
+
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_OK);
+}
+
+static void stih_rx_done(struct stih_cec *cec, u32 status)
+{
+ struct cec_msg msg = {};
+ u8 i;
+
+ if (status & CEC_RX_ERROR_MIN)
+ return;
+
+ if (status & CEC_RX_ERROR_MAX)
+ return;
+
+ msg.len = readl(cec->regs + CEC_DATA_ARRAY_STATUS) & 0x1f;
+
+ if (!msg.len)
+ return;
+
+ if (msg.len > 16)
+ msg.len = 16;
+
+ for (i = 0; i < msg.len; i++)
+ msg.msg[i] = readl(cec->regs + CEC_RX_DATA_BASE + i);
+
+ cec_received_msg(cec->adap, &msg);
+}
+
+static irqreturn_t stih_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct stih_cec *cec = priv;
+
+ if (cec->irq_status & CEC_TX_DONE_STS)
+ stih_tx_done(cec, cec->irq_status);
+
+ if (cec->irq_status & CEC_RX_DONE_STS)
+ stih_rx_done(cec, cec->irq_status);
+
+ cec->irq_status = 0;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stih_cec_irq_handler(int irq, void *priv)
+{
+ struct stih_cec *cec = priv;
+
+ cec->irq_status = readl(cec->regs + CEC_STATUS);
+ writel(cec->irq_status, cec->regs + CEC_STATUS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static const struct cec_adap_ops sti_cec_adap_ops = {
+ .adap_enable = stih_cec_adap_enable,
+ .adap_log_addr = stih_cec_adap_log_addr,
+ .adap_transmit = stih_cec_adap_transmit,
+};
+
+static int stih_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct stih_cec *cec;
+ struct device_node *np;
+ struct platform_device *hdmi_dev;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
+
+ hdmi_dev = of_find_device_by_node(np);
+ if (!hdmi_dev)
+ return -EPROBE_DEFER;
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (!cec->notifier)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->regs))
+ return PTR_ERR(cec->regs);
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(dev, cec->irq, stih_cec_irq_handler,
+ stih_cec_irq_handler_thread, 0,
+ pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk = devm_clk_get(dev, "cec-clk");
+ if (IS_ERR(cec->clk)) {
+ dev_err(dev, "Cannot get cec clock\n");
+ return PTR_ERR(cec->clk);
+ }
+
+ cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
+ CEC_NAME, CEC_CAP_DEFAULTS, CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ platform_set_drvdata(pdev, cec);
+ return 0;
+}
+
+static int stih_cec_remove(struct platform_device *pdev)
+{
+ struct stih_cec *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+
+ return 0;
+}
+
+static const struct of_device_id stih_cec_match[] = {
+ {
+ .compatible = "st,stih-cec",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stih_cec_match);
+
+static struct platform_driver stih_cec_pdrv = {
+ .probe = stih_cec_probe,
+ .remove = stih_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = stih_cec_match,
+ },
+};
+
+module_platform_driver(stih_cec_pdrv);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@linaro.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("STIH4xx CEC driver");
diff --git a/drivers/media/platform/sti/delta/Makefile b/drivers/media/platform/sti/delta/Makefile
new file mode 100644
index 000000000..8d032508a
--- /dev/null
+++ b/drivers/media/platform/sti/delta/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) := st-delta.o
+st-delta-y := delta-v4l2.o delta-mem.o delta-ipc.o delta-debug.o
+
+# MJPEG support
+st-delta-$(CONFIG_VIDEO_STI_DELTA_MJPEG) += delta-mjpeg-hdr.o
+st-delta-$(CONFIG_VIDEO_STI_DELTA_MJPEG) += delta-mjpeg-dec.o
diff --git a/drivers/media/platform/sti/delta/delta-cfg.h b/drivers/media/platform/sti/delta/delta-cfg.h
new file mode 100644
index 000000000..f47c6e6ff
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-cfg.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_CFG_H
+#define DELTA_CFG_H
+
+#define DELTA_FW_VERSION "21.1-3"
+
+#define DELTA_MIN_WIDTH 32
+#define DELTA_MAX_WIDTH 4096
+#define DELTA_MIN_HEIGHT 32
+#define DELTA_MAX_HEIGHT 2400
+
+/* DELTA requires a 32x32 pixels alignment for frames */
+#define DELTA_WIDTH_ALIGNMENT 32
+#define DELTA_HEIGHT_ALIGNMENT 32
+
+#define DELTA_DEFAULT_WIDTH DELTA_MIN_WIDTH
+#define DELTA_DEFAULT_HEIGHT DELTA_MIN_HEIGHT
+#define DELTA_DEFAULT_FRAMEFORMAT V4L2_PIX_FMT_NV12
+#define DELTA_DEFAULT_STREAMFORMAT V4L2_PIX_FMT_MJPEG
+
+#define DELTA_MAX_RESO (DELTA_MAX_WIDTH * DELTA_MAX_HEIGHT)
+
+/* guard value for number of access units */
+#define DELTA_MAX_AUS 10
+
+/* IP perf dependent, can be tuned */
+#define DELTA_PEAK_FRAME_SMOOTHING 2
+
+/*
+ * guard output frame count:
+ * - at least 1 frame needed for display
+ * - at worst 21
+ * ( max h264 dpb (16) +
+ * decoding peak smoothing (2) +
+ * user display pipeline (3) )
+ */
+#define DELTA_MIN_FRAME_USER 1
+#define DELTA_MAX_DPB 16
+#define DELTA_MAX_FRAME_USER 3 /* platform/use-case dependent */
+#define DELTA_MAX_FRAMES (DELTA_MAX_DPB + DELTA_PEAK_FRAME_SMOOTHING +\
+ DELTA_MAX_FRAME_USER)
+
+#if DELTA_MAX_FRAMES > VIDEO_MAX_FRAME
+#undef DELTA_MAX_FRAMES
+#define DELTA_MAX_FRAMES (VIDEO_MAX_FRAME)
+#endif
+
+/* extra space to be allocated to store codec specific data per frame */
+#define DELTA_MAX_FRAME_PRIV_SIZE 100
+
+/* PM runtime auto power-off after 5ms of inactivity */
+#define DELTA_HW_AUTOSUSPEND_DELAY_MS 5
+
+#define DELTA_MAX_DECODERS 10
+#ifdef CONFIG_VIDEO_STI_DELTA_MJPEG
+extern const struct delta_dec mjpegdec;
+#endif
+
+#endif /* DELTA_CFG_H */
diff --git a/drivers/media/platform/sti/delta/delta-debug.c b/drivers/media/platform/sti/delta/delta-debug.c
new file mode 100644
index 000000000..4b2eb6b63
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-debug.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Fabrice Lecoultre <fabrice.lecoultre@st.com>
+ * for STMicroelectronics.
+ */
+
+#include "delta.h"
+#include "delta-debug.h"
+
+char *delta_streaminfo_str(struct delta_streaminfo *s, char *str,
+ unsigned int len)
+{
+ if (!s)
+ return NULL;
+
+ snprintf(str, len,
+ "%4.4s %dx%d %s %s dpb=%d %s %s %s%dx%d@(%d,%d) %s%d/%d",
+ (char *)&s->streamformat, s->width, s->height,
+ s->profile, s->level, s->dpb,
+ (s->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
+ s->other,
+ s->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
+ s->crop.width, s->crop.height,
+ s->crop.left, s->crop.top,
+ s->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
+ s->pixelaspect.numerator,
+ s->pixelaspect.denominator);
+
+ return str;
+}
+
+char *delta_frameinfo_str(struct delta_frameinfo *f, char *str,
+ unsigned int len)
+{
+ if (!f)
+ return NULL;
+
+ snprintf(str, len,
+ "%4.4s %dx%d aligned %dx%d %s %s%dx%d@(%d,%d) %s%d/%d",
+ (char *)&f->pixelformat, f->width, f->height,
+ f->aligned_width, f->aligned_height,
+ (f->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
+ f->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
+ f->crop.width, f->crop.height,
+ f->crop.left, f->crop.top,
+ f->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
+ f->pixelaspect.numerator,
+ f->pixelaspect.denominator);
+
+ return str;
+}
+
+void delta_trace_summary(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct delta_streaminfo *s = &ctx->streaminfo;
+	char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_STREAMINFO))
+ return;
+
+	dev_dbg(delta->dev, "%s %s, %d frames decoded, %d frames output, %d frames dropped, %d stream errors, %d decode errors\n",
+ ctx->name,
+ delta_streaminfo_str(s, str, sizeof(str)),
+ ctx->decoded_frames,
+ ctx->output_frames,
+ ctx->dropped_frames,
+ ctx->stream_errors,
+ ctx->decode_errors);
+}
diff --git a/drivers/media/platform/sti/delta/delta-debug.h b/drivers/media/platform/sti/delta/delta-debug.h
new file mode 100644
index 000000000..fa9025262
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-debug.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Fabrice Lecoultre <fabrice.lecoultre@st.com>
+ * for STMicroelectronics.
+ */
+
+#ifndef DELTA_DEBUG_H
+#define DELTA_DEBUG_H
+
+char *delta_streaminfo_str(struct delta_streaminfo *s, char *str,
+ unsigned int len);
+char *delta_frameinfo_str(struct delta_frameinfo *f, char *str,
+ unsigned int len);
+void delta_trace_summary(struct delta_ctx *ctx);
+
+#endif /* DELTA_DEBUG_H */
diff --git a/drivers/media/platform/sti/delta/delta-ipc.c b/drivers/media/platform/sti/delta/delta-ipc.c
new file mode 100644
index 000000000..a4603d573
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-ipc.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include <linux/rpmsg.h>
+
+#include "delta.h"
+#include "delta-ipc.h"
+#include "delta-mem.h"
+
+#define IPC_TIMEOUT 100
+#define IPC_SANITY_TAG 0xDEADBEEF
+
+enum delta_ipc_fw_command {
+ DELTA_IPC_OPEN,
+ DELTA_IPC_SET_STREAM,
+ DELTA_IPC_DECODE,
+ DELTA_IPC_CLOSE
+};
+
+#define to_rpmsg_driver(__drv) container_of(__drv, struct rpmsg_driver, drv)
+#define to_delta(__d) container_of(__d, struct delta_dev, rpmsg_driver)
+
+#define to_ctx(hdl) ((struct delta_ipc_ctx *)hdl)
+#define to_pctx(ctx) container_of(ctx, struct delta_ctx, ipc_ctx)
+
+struct delta_ipc_header_msg {
+ u32 tag;
+ void *host_hdl;
+ u32 copro_hdl;
+ u32 command;
+};
+
+#define to_host_hdl(ctx) ((void *)ctx)
+
+#define msg_to_ctx(msg) ((struct delta_ipc_ctx *)(msg)->header.host_hdl)
+#define msg_to_copro_hdl(msg) ((msg)->header.copro_hdl)
+
+static inline dma_addr_t to_paddr(struct delta_ipc_ctx *ctx, void *vaddr)
+{
+ return (ctx->ipc_buf->paddr + (vaddr - ctx->ipc_buf->vaddr));
+}
+
+static inline bool is_valid_data(struct delta_ipc_ctx *ctx,
+ void *data, u32 size)
+{
+ return ((data >= ctx->ipc_buf->vaddr) &&
+ ((data + size) <= (ctx->ipc_buf->vaddr + ctx->ipc_buf->size)));
+}
+
+/*
+ * IPC shared memory (@ipc_buf_size, @ipc_buf_paddr) is sent to copro
+ * at each instance opening. This memory is allocated by IPC client
+ * and given through delta_ipc_open(). All messages parameters
+ * (open, set_stream, decode) will have their phy address within
+ * this IPC shared memory, avoiding de-facto recopies inside delta-ipc.
+ * All the below messages structures are used on both host and firmware
+ * side and are packed (use only of 32 bits size fields in messages
+ * structures to ensure packing):
+ * - struct delta_ipc_open_msg
+ * - struct delta_ipc_set_stream_msg
+ * - struct delta_ipc_decode_msg
+ * - struct delta_ipc_close_msg
+ * - struct delta_ipc_cb_msg
+ */
+struct delta_ipc_open_msg {
+ struct delta_ipc_header_msg header;
+ u32 ipc_buf_size;
+ dma_addr_t ipc_buf_paddr;
+ char name[32];
+ u32 param_size;
+ dma_addr_t param_paddr;
+};
+
+struct delta_ipc_set_stream_msg {
+ struct delta_ipc_header_msg header;
+ u32 param_size;
+ dma_addr_t param_paddr;
+};
+
+struct delta_ipc_decode_msg {
+ struct delta_ipc_header_msg header;
+ u32 param_size;
+ dma_addr_t param_paddr;
+ u32 status_size;
+ dma_addr_t status_paddr;
+};
+
+struct delta_ipc_close_msg {
+ struct delta_ipc_header_msg header;
+};
+
+struct delta_ipc_cb_msg {
+ struct delta_ipc_header_msg header;
+ int err;
+};
+
+static void build_msg_header(struct delta_ipc_ctx *ctx,
+ enum delta_ipc_fw_command command,
+ struct delta_ipc_header_msg *header)
+{
+ header->tag = IPC_SANITY_TAG;
+ header->host_hdl = to_host_hdl(ctx);
+ header->copro_hdl = ctx->copro_hdl;
+ header->command = command;
+}
+
+int delta_ipc_open(struct delta_ctx *pctx, const char *name,
+ struct delta_ipc_param *param, u32 ipc_buf_size,
+ struct delta_buf **ipc_buf, void **hdl)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_ctx *ctx = &pctx->ipc_ctx;
+ struct delta_ipc_open_msg msg;
+ struct delta_buf *buf = &ctx->ipc_buf_struct;
+ int ret;
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, rpmsg is not initialized\n",
+ pctx->name);
+ pctx->sys_errors++;
+ return -EINVAL;
+ }
+
+ if (!name) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, no name given\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!ipc_buf_size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, no size given for ipc buffer\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size > ipc_buf_size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, too large ipc parameter (%d bytes while max %d expected)\n",
+ pctx->name,
+			param->size, ipc_buf_size);
+ return -EINVAL;
+ }
+
+ /* init */
+ init_completion(&ctx->done);
+
+ /*
+ * allocation of contiguous buffer for
+ * data of commands exchanged between
+ * host and firmware coprocessor
+ */
+ ret = hw_alloc(pctx, ipc_buf_size,
+ "ipc data buffer", buf);
+ if (ret)
+ return ret;
+ ctx->ipc_buf = buf;
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_OPEN, &msg.header);
+
+ msg.ipc_buf_size = ipc_buf_size;
+ msg.ipc_buf_paddr = ctx->ipc_buf->paddr;
+
+	memset(msg.name, 0, sizeof(msg.name));
+	strscpy(msg.name, name, sizeof(msg.name));
+
+ msg.param_size = param->size;
+ memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size);
+ msg.param_paddr = ctx->ipc_buf->paddr;
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, rpmsg_send failed (%d) for DELTA_IPC_OPEN (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ ret, name, param->size, param->data);
+ goto err;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, timeout waiting for DELTA_IPC_OPEN callback (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ name, param->size, param->data);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* command completed, check error */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, DELTA_IPC_OPEN completed but with error (%d) (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, name, param->size, param->data);
+ ret = -EIO;
+ goto err;
+ }
+
+ *ipc_buf = ctx->ipc_buf;
+ *hdl = (void *)ctx;
+
+ return 0;
+
+err:
+ pctx->sys_errors++;
+ if (ctx->ipc_buf) {
+ hw_free(pctx, ctx->ipc_buf);
+ ctx->ipc_buf = NULL;
+ }
+
+ return ret;
+}
+
+int delta_ipc_set_stream(void *hdl, struct delta_ipc_param *param)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_set_stream_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, invalid ipc handle\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, rpmsg is not initialized\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size > ctx->ipc_buf->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, too large ipc parameter(%d bytes while max %d expected)\n",
+ pctx->name,
+ param->size, ctx->ipc_buf->size);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, param->data, param->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, parameter is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ param->size,
+ param->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_SET_STREAM, &msg.header);
+
+ msg.param_size = param->size;
+ msg.param_paddr = to_paddr(ctx, param->data);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, rpmsg_send failed (%d) for DELTA_IPC_SET_STREAM (size=%d, data=%p)\n",
+ pctx->name,
+ ret, param->size, param->data);
+ pctx->sys_errors++;
+ return ret;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, timeout waiting for DELTA_IPC_SET_STREAM callback (size=%d, data=%p)\n",
+ pctx->name,
+ param->size, param->data);
+ pctx->sys_errors++;
+ return -ETIMEDOUT;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, DELTA_IPC_SET_STREAM completed but with error (%d) (size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, param->size, param->data);
+ pctx->sys_errors++;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int delta_ipc_decode(void *hdl, struct delta_ipc_param *param,
+ struct delta_ipc_param *status)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_decode_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, invalid ipc handle\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, rpmsg is not initialized\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!status || !status->data || !status->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, empty status\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size + status->size > ctx->ipc_buf->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, too large ipc parameter (%d bytes (param) + %d bytes (status) while max %d expected)\n",
+ pctx->name,
+ param->size,
+ status->size,
+ ctx->ipc_buf->size);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, param->data, param->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, parameter is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ param->size,
+ param->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, status->data, status->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, status is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ status->size,
+ status->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_DECODE, &msg.header);
+
+ msg.param_size = param->size;
+ msg.param_paddr = to_paddr(ctx, param->data);
+
+ msg.status_size = status->size;
+ msg.status_paddr = to_paddr(ctx, status->data);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, rpmsg_send failed (%d) for DELTA_IPC_DECODE (size=%d, data=%p)\n",
+ pctx->name,
+ ret, param->size, param->data);
+ pctx->sys_errors++;
+ return ret;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, timeout waiting for DELTA_IPC_DECODE callback (size=%d, data=%p)\n",
+ pctx->name,
+ param->size, param->data);
+ pctx->sys_errors++;
+ return -ETIMEDOUT;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, DELTA_IPC_DECODE completed but with error (%d) (size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, param->size, param->data);
+ pctx->sys_errors++;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void delta_ipc_close(void *hdl)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_close_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, invalid ipc handle\n",
+ pctx->name);
+ return;
+ }
+
+ if (ctx->ipc_buf) {
+ hw_free(pctx, ctx->ipc_buf);
+ ctx->ipc_buf = NULL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, rpmsg is not initialized\n",
+ pctx->name);
+ return;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_CLOSE, &msg.header);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, rpmsg_send failed (%d) for DELTA_IPC_CLOSE\n",
+ pctx->name, ret);
+ pctx->sys_errors++;
+ return;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, timeout waiting for DELTA_IPC_CLOSE callback\n",
+ pctx->name);
+ pctx->sys_errors++;
+ return;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, DELTA_IPC_CLOSE completed but with error (%d)\n",
+ pctx->name, ctx->cb_err);
+ pctx->sys_errors++;
+ }
+}
+
+static int delta_ipc_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct delta_ipc_ctx *ctx;
+ struct delta_ipc_cb_msg *msg;
+
+ /* sanity check */
+ if (!rpdev) {
+ dev_err(NULL, "rpdev is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!data || !len) {
+ dev_err(&rpdev->dev,
+ "unexpected empty message received from src=%d\n", src);
+ return -EINVAL;
+ }
+
+ if (len != sizeof(*msg)) {
+ dev_err(&rpdev->dev,
+ "unexpected message length received from src=%d (received %d bytes while %zu bytes expected)\n",
+			src, len, sizeof(*msg));
+ return -EINVAL;
+ }
+
+ msg = (struct delta_ipc_cb_msg *)data;
+ if (msg->header.tag != IPC_SANITY_TAG) {
+ dev_err(&rpdev->dev,
+ "unexpected message tag received from src=%d (received %x tag while %x expected)\n",
+ src, msg->header.tag, IPC_SANITY_TAG);
+ return -EINVAL;
+ }
+
+ ctx = msg_to_ctx(msg);
+ if (!ctx) {
+ dev_err(&rpdev->dev,
+ "unexpected message with NULL host_hdl received from src=%d\n",
+ src);
+ return -EINVAL;
+ }
+
+ /*
+ * if not already known, save copro instance context
+ * to ensure re-entrance on copro side
+ */
+ if (!ctx->copro_hdl)
+ ctx->copro_hdl = msg_to_copro_hdl(msg);
+
+ /*
+ * all is fine,
+ * update status & complete command
+ */
+ ctx->cb_err = msg->err;
+ complete(&ctx->done);
+
+ return 0;
+}
+
+static int delta_ipc_probe(struct rpmsg_device *rpmsg_device)
+{
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpmsg_device->dev.driver);
+ struct delta_dev *delta = to_delta(rpdrv);
+
+ delta->rpmsg_device = rpmsg_device;
+
+ return 0;
+}
+
+static void delta_ipc_remove(struct rpmsg_device *rpmsg_device)
+{
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpmsg_device->dev.driver);
+ struct delta_dev *delta = to_delta(rpdrv);
+
+ delta->rpmsg_device = NULL;
+}
+
+static struct rpmsg_device_id delta_ipc_device_id_table[] = {
+ {.name = "rpmsg-delta"},
+ {},
+};
+
+static struct rpmsg_driver delta_rpmsg_driver = {
+ .drv = {.name = KBUILD_MODNAME},
+ .id_table = delta_ipc_device_id_table,
+ .probe = delta_ipc_probe,
+ .callback = delta_ipc_cb,
+ .remove = delta_ipc_remove,
+};
+
+int delta_ipc_init(struct delta_dev *delta)
+{
+ delta->rpmsg_driver = delta_rpmsg_driver;
+
+ return register_rpmsg_driver(&delta->rpmsg_driver);
+}
+
+void delta_ipc_exit(struct delta_dev *delta)
+{
+ unregister_rpmsg_driver(&delta->rpmsg_driver);
+}
diff --git a/drivers/media/platform/sti/delta/delta-ipc.h b/drivers/media/platform/sti/delta/delta-ipc.h
new file mode 100644
index 000000000..9fba6b5d1
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-ipc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_IPC_H
+#define DELTA_IPC_H
+
+int delta_ipc_init(struct delta_dev *delta);
+void delta_ipc_exit(struct delta_dev *delta);
+
+/*
+ * delta_ipc_open - open a decoding instance on firmware side
+ * @ctx: (in) delta context
+ * @name: (in) name of decoder to be used
+ * @param: (in) open command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter
+ * @ipc_buf_size: (in) size of IPC shared buffer between host
+ * and copro used to share command data.
+ *			The client must set this to the size of the biggest
+ *			command parameters (+ status if any).
+ *			Allocation is done in this function, which gives
+ *			back to the client in @ipc_buf the virtual &
+ *			physical addresses & size of the shared IPC buffer.
+ *			All further command data (parameters + status)
+ *			must be written in this shared IPC buffer
+ *			virtual memory, to avoid unnecessary copies of
+ *			command data.
+ * @ipc_buf: (out) allocated IPC shared buffer
+ * @ipc_buf.size: (out) allocated size
+ * @ipc_buf.vaddr: (out) virtual address where to copy
+ * further command data
+ * @hdl: (out) handle of decoding instance.
+ */
+
+int delta_ipc_open(struct delta_ctx *ctx, const char *name,
+ struct delta_ipc_param *param, u32 ipc_buf_size,
+ struct delta_buf **ipc_buf, void **hdl);
+
+/*
+ * delta_ipc_set_stream - set information about stream to decoder
+ * @hdl: (in) handle of decoding instance.
+ * @param: (in) set stream command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter. Must be
+ * within IPC shared buffer range
+ */
+int delta_ipc_set_stream(void *hdl, struct delta_ipc_param *param);
+
+/*
+ * delta_ipc_decode - frame decoding synchronous request, returns only
+ * after decoding completion on firmware side.
+ * @hdl: (in) handle of decoding instance.
+ * @param: (in) decode command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter. Must be
+ * within IPC shared buffer range
+ * @status: (in/out) decode command status specific to decoder
+ * @status.size: (in) size of status
+ * @status.data: (in/out) virtual address of status. Must be
+ * within IPC shared buffer range.
+ * Status is filled by decoding instance
+ * after decoding completion.
+ */
+int delta_ipc_decode(void *hdl, struct delta_ipc_param *param,
+ struct delta_ipc_param *status);
+
+/*
+ * delta_ipc_close - close decoding instance
+ * @hdl: (in) handle of decoding instance to close.
+ */
+void delta_ipc_close(void *hdl);
+
+#endif /* DELTA_IPC_H */
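
Taken together, the kernel-doc above describes a strict open/set_stream/decode/close sequence in which only the open parameters may live outside the shared IPC buffer; set_stream and decode parameters must point into it. A hedged sketch of a caller (the "mydec" structs, the "MYDEC_HW0" name and the parameter layouts are hypothetical; real users also manage frames and stream state) could look like:

/* Illustrative sketch only: "mydec" structs and "MYDEC_HW0" are hypothetical. */
struct mydec_params { u32 width; u32 height; };
struct mydec_status { u32 error; };

static int mydec_decode_once(struct delta_ctx *pctx)
{
	struct mydec_params open_p = { .width = 1920, .height = 1080 };
	struct delta_ipc_param open_param, param, status;
	struct delta_buf *ipc_buf;
	struct mydec_params *p;
	struct mydec_status *s;
	void *hdl;
	int ret;

	/* open: parameters may live anywhere, the shared buffer is allocated here */
	open_param.size = sizeof(open_p);
	open_param.data = &open_p;
	ret = delta_ipc_open(pctx, "MYDEC_HW0", &open_param,
			     sizeof(*p) + sizeof(*s), &ipc_buf, &hdl);
	if (ret)
		return ret;

	/* decode: parameters and status must live inside the shared IPC buffer */
	p = ipc_buf->vaddr;
	s = ipc_buf->vaddr + sizeof(*p);
	p->width = open_p.width;
	p->height = open_p.height;

	param.size = sizeof(*p);
	param.data = p;
	status.size = sizeof(*s);
	status.data = s;
	ret = delta_ipc_decode(hdl, &param, &status);

	/* close releases the shared buffer and the firmware-side instance */
	delta_ipc_close(hdl);
	return ret;
}

delta_ipc_set_stream() follows the same rule as decode: its parameter data must point inside ipc_buf->vaddr, otherwise the is_valid_data() check in delta-ipc.c rejects it.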
diff --git a/drivers/media/platform/sti/delta/delta-mem.c b/drivers/media/platform/sti/delta/delta-mem.c
new file mode 100644
index 000000000..aeccd5058
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mem.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include "delta.h"
+#include "delta-mem.h"
+
+int hw_alloc(struct delta_ctx *ctx, u32 size, const char *name,
+ struct delta_buf *buf)
+{
+ struct delta_dev *delta = ctx->dev;
+ dma_addr_t dma_addr;
+ void *addr;
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ addr = dma_alloc_attrs(delta->dev, size, &dma_addr,
+ GFP_KERNEL | __GFP_NOWARN, attrs);
+ if (!addr) {
+ dev_err(delta->dev,
+			"%s hw_alloc: dma_alloc_attrs failed for %s (size=%d)\n",
+ ctx->name, name, size);
+ ctx->sys_errors++;
+ return -ENOMEM;
+ }
+
+ buf->size = size;
+ buf->paddr = dma_addr;
+ buf->vaddr = addr;
+ buf->name = name;
+ buf->attrs = attrs;
+
+ dev_dbg(delta->dev,
+ "%s allocate %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
+ ctx->name, size, buf->vaddr, &buf->paddr, buf->name);
+
+ return 0;
+}
+
+void hw_free(struct delta_ctx *ctx, struct delta_buf *buf)
+{
+ struct delta_dev *delta = ctx->dev;
+
+ dev_dbg(delta->dev,
+ "%s free %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
+ ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
+
+ dma_free_attrs(delta->dev, buf->size,
+ buf->vaddr, buf->paddr, buf->attrs);
+}
diff --git a/drivers/media/platform/sti/delta/delta-mem.h b/drivers/media/platform/sti/delta/delta-mem.h
new file mode 100644
index 000000000..ff7d02f00
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mem.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_MEM_H
+#define DELTA_MEM_H
+
+int hw_alloc(struct delta_ctx *ctx, u32 size, const char *name,
+ struct delta_buf *buf);
+void hw_free(struct delta_ctx *ctx, struct delta_buf *buf);
+
+#endif /* DELTA_MEM_H */
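
hw_alloc()/hw_free() above are thin wrappers around dma_alloc_attrs()/dma_free_attrs() with a write-combined mapping, so the caller gets both a CPU pointer (vaddr) and a device address (paddr) in one struct delta_buf. A minimal sketch of the allocate/use/free pairing (the size, buffer name and function name below are made up for illustration):

/* Illustrative sketch only: size and "mydec work buffer" name are arbitrary. */
static int mydec_setup_workbuf(struct delta_ctx *ctx)
{
	struct delta_buf buf;
	int ret;

	/* contiguous, write-combined memory shared with the coprocessor */
	ret = hw_alloc(ctx, 64 * 1024, "mydec work buffer", &buf);
	if (ret)
		return ret;	/* hw_alloc already logged and counted the error */

	memset(buf.vaddr, 0, buf.size);	/* CPU view */
	/* ... hand buf.paddr to the firmware, use it, then release ... */

	hw_free(ctx, &buf);
	return 0;
}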
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
new file mode 100644
index 000000000..0533d4a08
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include <linux/slab.h>
+
+#include "delta.h"
+#include "delta-ipc.h"
+#include "delta-mjpeg.h"
+#include "delta-mjpeg-fw.h"
+
+#define DELTA_MJPEG_MAX_RESO DELTA_MAX_RESO
+
+struct delta_mjpeg_ctx {
+ /* jpeg header */
+ struct mjpeg_header header_struct;
+ struct mjpeg_header *header;
+
+ /* ipc */
+ void *ipc_hdl;
+ struct delta_buf *ipc_buf;
+
+ /* decoded output frame */
+ struct delta_frame *out_frame;
+
+	char str[3000];
+};
+
+#define to_ctx(ctx) ((struct delta_mjpeg_ctx *)(ctx)->priv)
+
+static char *ipc_open_param_str(struct jpeg_video_decode_init_params_t *p,
+ char *str, unsigned int len)
+{
+ char *b = str;
+
+ if (!p)
+ return "";
+
+ b += snprintf(b, len,
+ "jpeg_video_decode_init_params_t\n"
+ "circular_buffer_begin_addr_p 0x%x\n"
+ "circular_buffer_end_addr_p 0x%x\n",
+ p->circular_buffer_begin_addr_p,
+ p->circular_buffer_end_addr_p);
+
+ return str;
+}
+
+static char *ipc_decode_param_str(struct jpeg_decode_params_t *p,
+ char *str, unsigned int len)
+{
+ char *b = str;
+
+ if (!p)
+ return "";
+
+ b += snprintf(b, len,
+ "jpeg_decode_params_t\n"
+ "picture_start_addr_p 0x%x\n"
+ "picture_end_addr_p 0x%x\n"
+ "decoding_mode %d\n"
+ "display_buffer_addr.display_decimated_luma_p 0x%x\n"
+ "display_buffer_addr.display_decimated_chroma_p 0x%x\n"
+ "main_aux_enable %d\n"
+ "additional_flags 0x%x\n"
+ "field_flag %x\n"
+ "is_jpeg_image %x\n",
+ p->picture_start_addr_p,
+ p->picture_end_addr_p,
+ p->decoding_mode,
+ p->display_buffer_addr.display_decimated_luma_p,
+ p->display_buffer_addr.display_decimated_chroma_p,
+ p->main_aux_enable, p->additional_flags,
+ p->field_flag,
+ p->is_jpeg_image);
+
+ return str;
+}
+
+static inline bool is_stream_error(enum jpeg_decoding_error_t err)
+{
+ switch (err) {
+ case JPEG_DECODER_UNDEFINED_HUFF_TABLE:
+ case JPEG_DECODER_BAD_RESTART_MARKER:
+ case JPEG_DECODER_BAD_SOS_SPECTRAL:
+ case JPEG_DECODER_BAD_SOS_SUCCESSIVE:
+ case JPEG_DECODER_BAD_HEADER_LENGTH:
+ case JPEG_DECODER_BAD_COUNT_VALUE:
+ case JPEG_DECODER_BAD_DHT_MARKER:
+ case JPEG_DECODER_BAD_INDEX_VALUE:
+ case JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES:
+ case JPEG_DECODER_BAD_QUANT_TABLE_LENGTH:
+ case JPEG_DECODER_BAD_NUMBER_QUANT_TABLES:
+ case JPEG_DECODER_BAD_COMPONENT_COUNT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline const char *err_str(enum jpeg_decoding_error_t err)
+{
+ switch (err) {
+ case JPEG_DECODER_NO_ERROR:
+ return "JPEG_DECODER_NO_ERROR";
+ case JPEG_DECODER_UNDEFINED_HUFF_TABLE:
+ return "JPEG_DECODER_UNDEFINED_HUFF_TABLE";
+ case JPEG_DECODER_UNSUPPORTED_MARKER:
+ return "JPEG_DECODER_UNSUPPORTED_MARKER";
+ case JPEG_DECODER_UNABLE_ALLOCATE_MEMORY:
+ return "JPEG_DECODER_UNABLE_ALLOCATE_MEMORY";
+ case JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS:
+ return "JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS";
+ case JPEG_DECODER_BAD_PARAMETER:
+ return "JPEG_DECODER_BAD_PARAMETER";
+ case JPEG_DECODER_DECODE_ERROR:
+ return "JPEG_DECODER_DECODE_ERROR";
+ case JPEG_DECODER_BAD_RESTART_MARKER:
+ return "JPEG_DECODER_BAD_RESTART_MARKER";
+ case JPEG_DECODER_UNSUPPORTED_COLORSPACE:
+ return "JPEG_DECODER_UNSUPPORTED_COLORSPACE";
+ case JPEG_DECODER_BAD_SOS_SPECTRAL:
+ return "JPEG_DECODER_BAD_SOS_SPECTRAL";
+ case JPEG_DECODER_BAD_SOS_SUCCESSIVE:
+ return "JPEG_DECODER_BAD_SOS_SUCCESSIVE";
+ case JPEG_DECODER_BAD_HEADER_LENGTH:
+ return "JPEG_DECODER_BAD_HEADER_LENGTH";
+ case JPEG_DECODER_BAD_COUNT_VALUE:
+ return "JPEG_DECODER_BAD_COUNT_VALUE";
+ case JPEG_DECODER_BAD_DHT_MARKER:
+ return "JPEG_DECODER_BAD_DHT_MARKER";
+ case JPEG_DECODER_BAD_INDEX_VALUE:
+ return "JPEG_DECODER_BAD_INDEX_VALUE";
+ case JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES:
+ return "JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES";
+ case JPEG_DECODER_BAD_QUANT_TABLE_LENGTH:
+ return "JPEG_DECODER_BAD_QUANT_TABLE_LENGTH";
+ case JPEG_DECODER_BAD_NUMBER_QUANT_TABLES:
+ return "JPEG_DECODER_BAD_NUMBER_QUANT_TABLES";
+ case JPEG_DECODER_BAD_COMPONENT_COUNT:
+ return "JPEG_DECODER_BAD_COMPONENT_COUNT";
+ case JPEG_DECODER_DIVIDE_BY_ZERO_ERROR:
+ return "JPEG_DECODER_DIVIDE_BY_ZERO_ERROR";
+ case JPEG_DECODER_NOT_JPG_IMAGE:
+ return "JPEG_DECODER_NOT_JPG_IMAGE";
+ case JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE:
+ return "JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE";
+ case JPEG_DECODER_UNSUPPORTED_SCALING:
+ return "JPEG_DECODER_UNSUPPORTED_SCALING";
+ case JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE:
+ return "JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE";
+ case JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE:
+ return "JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE";
+ case JPEG_DECODER_BAD_VALUE_FROM_RED:
+ return "JPEG_DECODER_BAD_VALUE_FROM_RED";
+ case JPEG_DECODER_BAD_SUBREGION_PARAMETERS:
+ return "JPEG_DECODER_BAD_SUBREGION_PARAMETERS";
+ case JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED:
+ return "JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED";
+ case JPEG_DECODER_ERROR_TASK_TIMEOUT:
+ return "JPEG_DECODER_ERROR_TASK_TIMEOUT";
+ case JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED:
+ return "JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED";
+ default:
+ return "!unknown MJPEG error!";
+ }
+}
+
+static bool delta_mjpeg_check_status(struct delta_ctx *pctx,
+ struct jpeg_decode_return_params_t *status)
+{
+ struct delta_dev *delta = pctx->dev;
+ bool dump = false;
+
+ if (status->error_code == JPEG_DECODER_NO_ERROR)
+ goto out;
+
+ if (is_stream_error(status->error_code)) {
+ dev_warn_ratelimited(delta->dev,
+ "%s firmware: stream error @ frame %d (%s)\n",
+ pctx->name, pctx->decoded_frames,
+ err_str(status->error_code));
+ pctx->stream_errors++;
+ } else {
+ dev_warn_ratelimited(delta->dev,
+ "%s firmware: decode error @ frame %d (%s)\n",
+ pctx->name, pctx->decoded_frames,
+ err_str(status->error_code));
+ pctx->decode_errors++;
+ dump = true;
+ }
+
+out:
+ dev_dbg(delta->dev,
+ "%s firmware: decoding time(us)=%d\n", pctx->name,
+ status->decode_time_in_us);
+
+ return dump;
+}
+
+static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret = 0;
+ struct jpeg_video_decode_init_params_t params_struct;
+ struct jpeg_video_decode_init_params_t *params = &params_struct;
+ struct delta_buf *ipc_buf;
+ u32 ipc_buf_size;
+ struct delta_ipc_param ipc_param;
+ void *hdl;
+
+ memset(params, 0, sizeof(*params));
+ params->circular_buffer_begin_addr_p = 0x00000000;
+ params->circular_buffer_end_addr_p = 0xffffffff;
+
+ dev_vdbg(delta->dev,
+ "%s %s\n", pctx->name,
+ ipc_open_param_str(params, ctx->str, sizeof(ctx->str)));
+
+ ipc_param.size = sizeof(*params);
+ ipc_param.data = params;
+ ipc_buf_size = sizeof(struct jpeg_decode_params_t) +
+ sizeof(struct jpeg_decode_return_params_t);
+ ret = delta_ipc_open(pctx, "JPEG_DECODER_HW0", &ipc_param,
+ ipc_buf_size, &ipc_buf, &hdl);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_open_param_str(params, ctx->str, sizeof(ctx->str)));
+ return ret;
+ }
+
+ ctx->ipc_buf = ipc_buf;
+ ctx->ipc_hdl = hdl;
+
+ return 0;
+}
+
+static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret = 0;
+ struct jpeg_decode_params_t *params = ctx->ipc_buf->vaddr;
+ struct jpeg_decode_return_params_t *status =
+ ctx->ipc_buf->vaddr + sizeof(*params);
+ struct delta_frame *frame;
+ struct delta_ipc_param ipc_param, ipc_status;
+
+ ret = delta_get_free_frame(pctx, &frame);
+ if (ret)
+ return ret;
+
+ memset(params, 0, sizeof(*params));
+
+ params->picture_start_addr_p = (u32)(au->paddr);
+ params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
+
+	/*
+	 * !WARNING!
+	 * the NV12 decoded frame is only available on the decimated
+	 * output when the "JPEG_ADDITIONAL_FLAG_420MB" flag is enabled;
+	 * the non-decimated output gives YUV422SP
+	 */
+ params->main_aux_enable = JPEG_DISP_AUX_EN;
+ params->additional_flags = JPEG_ADDITIONAL_FLAG_420MB;
+ params->horizontal_decimation_factor = JPEG_HDEC_1;
+ params->vertical_decimation_factor = JPEG_VDEC_1;
+ params->decoding_mode = JPEG_NORMAL_DECODE;
+
+ params->display_buffer_addr.struct_size =
+ sizeof(struct jpeg_display_buffer_address_t);
+ params->display_buffer_addr.display_decimated_luma_p =
+ (u32)frame->paddr;
+ params->display_buffer_addr.display_decimated_chroma_p =
+ (u32)(frame->paddr
+ + frame->info.aligned_width * frame->info.aligned_height);
+
+ dev_vdbg(delta->dev,
+ "%s %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str, sizeof(ctx->str)));
+
+ /* status */
+ memset(status, 0, sizeof(*status));
+ status->error_code = JPEG_DECODER_NO_ERROR;
+
+ ipc_param.size = sizeof(*params);
+ ipc_param.data = params;
+ ipc_status.size = sizeof(*status);
+ ipc_status.data = status;
+ ret = delta_ipc_decode(ctx->ipc_hdl, &ipc_param, &ipc_status);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str,
+ sizeof(ctx->str)));
+ return ret;
+ }
+
+ pctx->decoded_frames++;
+
+ /* check firmware decoding status */
+ if (delta_mjpeg_check_status(pctx, status)) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str,
+ sizeof(ctx->str)));
+ }
+
+ frame->field = V4L2_FIELD_NONE;
+ frame->flags = V4L2_BUF_FLAG_KEYFRAME;
+ frame->state |= DELTA_FRAME_DEC;
+
+ ctx->out_frame = frame;
+
+ return 0;
+}
+
+static int delta_mjpeg_open(struct delta_ctx *pctx)
+{
+ struct delta_mjpeg_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ pctx->priv = ctx;
+
+ return 0;
+}
+
+static int delta_mjpeg_close(struct delta_ctx *pctx)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (ctx->ipc_hdl) {
+ delta_ipc_close(ctx->ipc_hdl);
+ ctx->ipc_hdl = NULL;
+ }
+
+ kfree(ctx);
+
+ return 0;
+}
+
+static int delta_mjpeg_get_streaminfo(struct delta_ctx *pctx,
+ struct delta_streaminfo *streaminfo)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (!ctx->header)
+ goto nodata;
+
+ streaminfo->streamformat = V4L2_PIX_FMT_MJPEG;
+ streaminfo->width = ctx->header->frame_width;
+ streaminfo->height = ctx->header->frame_height;
+
+ /* progressive stream */
+ streaminfo->field = V4L2_FIELD_NONE;
+
+ streaminfo->dpb = 1;
+
+ return 0;
+
+nodata:
+ return -ENODATA;
+}
+
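+/*
+ * Decode entry point: the very first access unit is only parsed to
+ * capture the MJPEG header and to check the resolution budget; firmware
+ * decoding starts from the following access units, once the IPC channel
+ * to the firmware has been opened.
+ */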
+static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret;
+ struct delta_au au = *pau;
+ unsigned int data_offset = 0;
+ struct mjpeg_header *header = &ctx->header_struct;
+
+ if (!ctx->header) {
+ ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ header, &data_offset);
+ if (ret) {
+ pctx->stream_errors++;
+ goto err;
+ }
+ if (header->frame_width * header->frame_height >
+ DELTA_MJPEG_MAX_RESO) {
+ dev_err(delta->dev,
+ "%s stream resolution too large: %dx%d > %d pixels budget\n",
+ pctx->name,
+ header->frame_width,
+ header->frame_height, DELTA_MJPEG_MAX_RESO);
+ ret = -EINVAL;
+ goto err;
+ }
+ ctx->header = header;
+ goto out;
+ }
+
+ if (!ctx->ipc_hdl) {
+ ret = delta_mjpeg_ipc_open(pctx);
+ if (ret)
+ goto err;
+ }
+
+ ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ctx->header, &data_offset);
+ if (ret) {
+ pctx->stream_errors++;
+ goto err;
+ }
+
+ au.paddr += data_offset;
+ au.vaddr += data_offset;
+
+ ret = delta_mjpeg_ipc_decode(pctx, &au);
+ if (ret)
+ goto err;
+
+out:
+ return 0;
+
+err:
+ return ret;
+}
+
+static int delta_mjpeg_get_frame(struct delta_ctx *pctx,
+ struct delta_frame **frame)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (!ctx->out_frame)
+ return -ENODATA;
+
+ *frame = ctx->out_frame;
+
+ ctx->out_frame = NULL;
+
+ return 0;
+}
+
+const struct delta_dec mjpegdec = {
+ .name = "MJPEG",
+ .streamformat = V4L2_PIX_FMT_MJPEG,
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .open = delta_mjpeg_open,
+ .close = delta_mjpeg_close,
+ .get_streaminfo = delta_mjpeg_get_streaminfo,
+ .get_frameinfo = delta_get_frameinfo_default,
+ .decode = delta_mjpeg_decode,
+ .get_frame = delta_mjpeg_get_frame,
+ .recycle = delta_recycle_default,
+};
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-fw.h b/drivers/media/platform/sti/delta/delta-mjpeg-fw.h
new file mode 100644
index 000000000..5a9404f4d
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg-fw.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_MJPEG_FW_H
+#define DELTA_MJPEG_FW_H
+
+/*
+ * struct jpeg_decoded_buffer_address_t
+ *
+ * defines the addresses where the decoded picture/additional
+ * info related to the block structures will be stored
+ *
+ * @luma_p: address of the luma buffer
+ * @chroma_p: address of the chroma buffer
+ */
+struct jpeg_decoded_buffer_address_t {
+ u32 luma_p;
+ u32 chroma_p;
+};
+
+/*
+ * struct jpeg_display_buffer_address_t
+ *
+ * defines the addresses (used by the Display Reconstruction block)
+ * where the pictures to be displayed will be stored
+ *
+ * @struct_size: size of the structure in bytes
+ * @display_luma_p: address of the luma buffer
+ * @display_chroma_p: address of the chroma buffer
+ * @display_decimated_luma_p: address of the decimated luma buffer
+ * @display_decimated_chroma_p: address of the decimated chroma buffer
+ */
+struct jpeg_display_buffer_address_t {
+ u32 struct_size;
+ u32 display_luma_p;
+ u32 display_chroma_p;
+ u32 display_decimated_luma_p;
+ u32 display_decimated_chroma_p;
+};
+
+/*
+ * used for enabling main/aux outputs for both display &
+ * reference reconstruction blocks
+ */
+enum jpeg_rcn_ref_disp_enable_t {
+ /* enable decimated (for display) reconstruction */
+ JPEG_DISP_AUX_EN = 0x00000010,
+ /* enable main (for display) reconstruction */
+ JPEG_DISP_MAIN_EN = 0x00000020,
+ /* enable both main & decimated (for display) reconstruction */
+ JPEG_DISP_AUX_MAIN_EN = 0x00000030,
+	/* enable only reference output (e.g. for trick modes) */
+ JPEG_REF_MAIN_EN = 0x00000100,
+ /*
+ * enable reference output with decimated
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_AUX_EN = 0x00000110,
+ /*
+ * enable reference output with main
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_MAIN_EN = 0x00000120,
+ /*
+ * enable reference output with main & decimated
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_MAIN_AUX_EN = 0x00000130
+};
+
+/* identifies the horizontal decimation factor */
+enum jpeg_horizontal_deci_factor_t {
+ /* no resize */
+ JPEG_HDEC_1 = 0x00000000,
+ /* Advanced H/2 resize using improved 8-tap filters */
+ JPEG_HDEC_ADVANCED_2 = 0x00000101,
+ /* Advanced H/4 resize using improved 8-tap filters */
+ JPEG_HDEC_ADVANCED_4 = 0x00000102
+};
+
+/* identifies the vertical decimation factor */
+enum jpeg_vertical_deci_factor_t {
+ /* no resize */
+ JPEG_VDEC_1 = 0x00000000,
+ /* V/2 , progressive resize */
+ JPEG_VDEC_ADVANCED_2_PROG = 0x00000204,
+ /* V/2 , interlaced resize */
+	JPEG_VDEC_ADVANCED_2_INT = 0x00000208
+};
+
+/* status of the decoding process */
+enum jpeg_decoding_error_t {
+ JPEG_DECODER_NO_ERROR = 0,
+ JPEG_DECODER_UNDEFINED_HUFF_TABLE = 1,
+ JPEG_DECODER_UNSUPPORTED_MARKER = 2,
+ JPEG_DECODER_UNABLE_ALLOCATE_MEMORY = 3,
+ JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS = 4,
+ JPEG_DECODER_BAD_PARAMETER = 5,
+ JPEG_DECODER_DECODE_ERROR = 6,
+ JPEG_DECODER_BAD_RESTART_MARKER = 7,
+ JPEG_DECODER_UNSUPPORTED_COLORSPACE = 8,
+ JPEG_DECODER_BAD_SOS_SPECTRAL = 9,
+ JPEG_DECODER_BAD_SOS_SUCCESSIVE = 10,
+ JPEG_DECODER_BAD_HEADER_LENGTH = 11,
+ JPEG_DECODER_BAD_COUNT_VALUE = 12,
+ JPEG_DECODER_BAD_DHT_MARKER = 13,
+ JPEG_DECODER_BAD_INDEX_VALUE = 14,
+ JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES = 15,
+ JPEG_DECODER_BAD_QUANT_TABLE_LENGTH = 16,
+ JPEG_DECODER_BAD_NUMBER_QUANT_TABLES = 17,
+ JPEG_DECODER_BAD_COMPONENT_COUNT = 18,
+ JPEG_DECODER_DIVIDE_BY_ZERO_ERROR = 19,
+ JPEG_DECODER_NOT_JPG_IMAGE = 20,
+ JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE = 21,
+ JPEG_DECODER_UNSUPPORTED_SCALING = 22,
+ JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE = 23,
+ JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE = 24,
+ JPEG_DECODER_BAD_VALUE_FROM_RED = 25,
+ JPEG_DECODER_BAD_SUBREGION_PARAMETERS = 26,
+ JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED = 27,
+ JPEG_DECODER_ERROR_TASK_TIMEOUT = 28,
+ JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED = 29
+};
+
+/* identifies the decoding mode */
+enum jpeg_decoding_mode_t {
+ JPEG_NORMAL_DECODE = 0,
+};
+
+enum jpeg_additional_flags_t {
+ JPEG_ADDITIONAL_FLAG_NONE = 0,
+ /* request firmware to return values of the CEH registers */
+ JPEG_ADDITIONAL_FLAG_CEH = 1,
+ /* output storage of auxiliary reconstruction in Raster format. */
+ JPEG_ADDITIONAL_FLAG_RASTER = 64,
+ /* output storage of auxiliary reconstruction in 420MB format. */
+ JPEG_ADDITIONAL_FLAG_420MB = 128
+};
+
+/*
+ * struct jpeg_video_decode_init_params_t - initialization command parameters
+ *
+ * @circular_buffer_begin_addr_p: start address of fw circular buffer
+ * @circular_buffer_end_addr_p: end address of fw circular buffer
+ */
+struct jpeg_video_decode_init_params_t {
+ u32 circular_buffer_begin_addr_p;
+ u32 circular_buffer_end_addr_p;
+ u32 reserved;
+};
+
+/*
+ * struct jpeg_decode_params_t - decode command parameters
+ *
+ * @picture_start_addr_p: start address of jpeg picture
+ * @picture_end_addr_p: end address of jpeg picture
+ * @decoded_buffer_addr: decoded picture buffer
+ * @display_buffer_addr: display picture buffer
+ * @main_aux_enable: enable main and/or aux outputs
+ * @horizontal_decimation_factor: horizontal decimation factor
+ * @vertical_decimation_factor: vertical decimation factor
+ * @xvalue0: the x(0) coordinate for subregion decoding
+ * @xvalue1: the x(1) coordinate for subregion decoding
+ * @yvalue0: the y(0) coordinate for subregion decoding
+ * @yvalue1: the y(1) coordinate for subregion decoding
+ * @decoding_mode: decoding mode
+ * @additional_flags: additional flags
+ * @field_flag: determines frame/field scan
+ * @is_jpeg_image: 1 = still jpeg, 0 = motion jpeg
+ */
+struct jpeg_decode_params_t {
+ u32 picture_start_addr_p;
+ u32 picture_end_addr_p;
+ struct jpeg_decoded_buffer_address_t decoded_buffer_addr;
+ struct jpeg_display_buffer_address_t display_buffer_addr;
+ enum jpeg_rcn_ref_disp_enable_t main_aux_enable;
+ enum jpeg_horizontal_deci_factor_t horizontal_decimation_factor;
+ enum jpeg_vertical_deci_factor_t vertical_decimation_factor;
+ u32 xvalue0;
+ u32 xvalue1;
+ u32 yvalue0;
+ u32 yvalue1;
+ enum jpeg_decoding_mode_t decoding_mode;
+ u32 additional_flags;
+ u32 field_flag;
+ u32 reserved;
+ u32 is_jpeg_image;
+};
+
+/*
+ * struct jpeg_decode_return_params_t
+ *
+ * status returned by firmware after decoding
+ *
+ * @decode_time_in_us: decoding time in microseconds
+ * @pm_cycles: profiling information
+ * @pm_dmiss: profiling information
+ * @pm_imiss: profiling information
+ * @pm_bundles: profiling information
+ * @pm_pft: profiling information
+ * @error_code: status of the decoding process
+ * @ceh_registers: array where values of the Contrast Enhancement
+ * Histogram (CEH) registers will be stored.
+ *			ceh_registers[0] corresponds to register MBE_CEH_0_7,
+ *			ceh_registers[1] corresponds to register MBE_CEH_8_15,
+ *			ceh_registers[2] corresponds to register MBE_CEH_16_23.
+ * Note that elements of this array will be updated only
+ * if additional_flags has JPEG_ADDITIONAL_FLAG_CEH set.
+ */
+struct jpeg_decode_return_params_t {
+ /* profiling info */
+ u32 decode_time_in_us;
+ u32 pm_cycles;
+ u32 pm_dmiss;
+ u32 pm_imiss;
+ u32 pm_bundles;
+ u32 pm_pft;
+ enum jpeg_decoding_error_t error_code;
+ u32 ceh_registers[32];
+};
+
+#endif /* DELTA_MJPEG_FW_H */
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c b/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c
new file mode 100644
index 000000000..90e5b2f72
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include "delta.h"
+#include "delta-mjpeg.h"
+
+#define MJPEG_SOF_0 0xc0
+#define MJPEG_SOF_1 0xc1
+#define MJPEG_SOI 0xd8
+#define MJPEG_MARKER 0xff
+
+static char *header_str(struct mjpeg_header *header,
+ char *str,
+ unsigned int len)
+{
+ char *cur = str;
+ unsigned int left = len;
+
+ if (!header)
+ return "";
+
+ snprintf(cur, left, "[MJPEG header]\n"
+ "|- length = %d\n"
+ "|- precision = %d\n"
+ "|- width = %d\n"
+ "|- height = %d\n"
+ "|- components = %d\n",
+ header->length,
+ header->sample_precision,
+ header->frame_width,
+ header->frame_height,
+ header->nb_of_components);
+
+ return str;
+}
+
+static int delta_mjpeg_read_sof(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header)
+{
+ struct delta_dev *delta = pctx->dev;
+ unsigned int offset = 0;
+
+ if (size < 64)
+ goto err_no_more;
+
+ memset(header, 0, sizeof(*header));
+ header->length = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->sample_precision = *(u8 *)(data + offset);
+ offset += sizeof(u8);
+ header->frame_height = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->frame_width = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->nb_of_components = *(u8 *)(data + offset);
+ offset += sizeof(u8);
+
+ if (header->nb_of_components >= MJPEG_MAX_COMPONENTS) {
+ dev_err(delta->dev,
+ "%s unsupported number of components (%d > %d)\n",
+ pctx->name, header->nb_of_components,
+ MJPEG_MAX_COMPONENTS);
+ return -EINVAL;
+ }
+
+ if ((offset + header->nb_of_components *
+ sizeof(header->components[0])) > size)
+ goto err_no_more;
+
+ return 0;
+
+err_no_more:
+ dev_err(delta->dev,
+ "%s sof: reached end of %d size input stream\n",
+ pctx->name, size);
+ return -ENODATA;
+}
+
+int delta_mjpeg_read_header(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header,
+ unsigned int *data_offset)
+{
+ struct delta_dev *delta = pctx->dev;
+ unsigned char str[200];
+
+	int ret = 0;
+ unsigned int offset = 0;
+ unsigned int soi = 0;
+
+ if (size < 2)
+ goto err_no_more;
+
+ offset = 0;
+ while (1) {
+ if (data[offset] == MJPEG_MARKER)
+ switch (data[offset + 1]) {
+ case MJPEG_SOI:
+ soi = 1;
+ *data_offset = offset;
+ break;
+
+ case MJPEG_SOF_0:
+ case MJPEG_SOF_1:
+ if (!soi) {
+ dev_err(delta->dev,
+ "%s wrong sequence, got SOF while SOI not seen\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ ret = delta_mjpeg_read_sof(pctx,
+ &data[offset + 2],
+ size - (offset + 2),
+ header);
+ if (ret)
+ goto err;
+
+ goto done;
+
+ default:
+ break;
+ }
+
+ offset++;
+ if ((offset + 2) >= size)
+ goto err_no_more;
+ }
+
+done:
+ dev_dbg(delta->dev,
+ "%s found header @ offset %d:\n%s", pctx->name,
+ *data_offset,
+ header_str(header, str, sizeof(str)));
+ return 0;
+
+err_no_more:
+ dev_err(delta->dev,
+ "%s no header found within %d bytes input stream\n",
+ pctx->name, size);
+ return -ENODATA;
+
+err:
+ return ret;
+}
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg.h b/drivers/media/platform/sti/delta/delta-mjpeg.h
new file mode 100644
index 000000000..43f7a88b6
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_MJPEG_H
+#define DELTA_MJPEG_H
+
+#include "delta.h"
+
+struct mjpeg_component {
+	unsigned int id; /* 1=Y, 2=Cb, 3=Cr, 4=L, 5=Q */
+ unsigned int h_sampling_factor;
+ unsigned int v_sampling_factor;
+ unsigned int quant_table_index;
+};
+
+#define MJPEG_MAX_COMPONENTS 5
+
+struct mjpeg_header {
+ unsigned int length;
+ unsigned int sample_precision;
+ unsigned int frame_width;
+ unsigned int frame_height;
+ unsigned int nb_of_components;
+ struct mjpeg_component components[MJPEG_MAX_COMPONENTS];
+};
+
+int delta_mjpeg_read_header(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header,
+ unsigned int *data_offset);
+
+#endif /* DELTA_MJPEG_H */
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
new file mode 100644
index 000000000..eb5e88679
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -0,0 +1,1979 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "delta.h"
+#include "delta-debug.h"
+#include "delta-ipc.h"
+
+#define DELTA_NAME "st-delta"
+
+#define DELTA_PREFIX "[---:----]"
+
+#define to_ctx(__fh) container_of(__fh, struct delta_ctx, fh)
+#define to_au(__vbuf) container_of(__vbuf, struct delta_au, vbuf)
+#define to_frame(__vbuf) container_of(__vbuf, struct delta_frame, vbuf)
+
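+/* call a decoder callback only if it is implemented; a missing op is a no-op returning 0 */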
+#define call_dec_op(dec, op, args...)\
+ ((dec && (dec)->op) ? (dec)->op(args) : 0)
+
+/* registry of available decoders */
+static const struct delta_dec *delta_decoders[] = {
+#ifdef CONFIG_VIDEO_STI_DELTA_MJPEG
+ &mjpegdec,
+#endif
+};
+
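+/* NV12 frame size: w * h bytes of luma plus w * h / 2 bytes of interleaved CbCr */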
+static inline int frame_size(u32 w, u32 h, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return (w * h * 3) / 2;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_stride(u32 w, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return w;
+ default:
+ return 0;
+ }
+}
+
+static void dump_au(struct delta_ctx *ctx, struct delta_au *au)
+{
+ struct delta_dev *delta = ctx->dev;
+ u32 size = 10; /* dump first & last 10 bytes */
+ u8 *data = (u8 *)(au->vaddr);
+
+ if (au->size <= (size * 2))
+ dev_dbg(delta->dev, "%s dump au[%d] dts=%lld size=%d data=%*ph\n",
+ ctx->name, au->vbuf.vb2_buf.index, au->dts, au->size,
+ au->size, data);
+ else
+ dev_dbg(delta->dev, "%s dump au[%d] dts=%lld size=%d data=%*ph..%*ph\n",
+ ctx->name, au->vbuf.vb2_buf.index, au->dts, au->size,
+ size, data, size, data + au->size - size);
+}
+
+static void dump_frame(struct delta_ctx *ctx, struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ u32 size = 10; /* dump first 10 bytes */
+ u8 *data = (u8 *)(frame->vaddr);
+
+ dev_dbg(delta->dev, "%s dump frame[%d] dts=%lld type=%s field=%s data=%*ph\n",
+ ctx->name, frame->index, frame->dts,
+ frame_type_str(frame->flags),
+ frame_field_str(frame->field),
+ size, data);
+}
+
+static void delta_au_done(struct delta_ctx *ctx, struct delta_au *au, int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ vbuf = &au->vbuf;
+ vbuf->sequence = ctx->au_num++;
+ v4l2_m2m_buf_done(vbuf, err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+}
+
+static void delta_frame_done(struct delta_ctx *ctx, struct delta_frame *frame,
+ int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ dump_frame(ctx, frame);
+
+ /* decoded frame is now output to user */
+ frame->state |= DELTA_FRAME_OUT;
+
+ vbuf = &frame->vbuf;
+ vbuf->sequence = ctx->frame_num++;
+ v4l2_m2m_buf_done(vbuf, err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ if (frame->info.size) /* ignore EOS */
+ ctx->output_frames++;
+}
+
+static void requeue_free_frames(struct delta_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+ unsigned int i;
+
+ /* requeue all free frames */
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ if (frame->state == DELTA_FRAME_FREE) {
+ vbuf = &frame->vbuf;
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ frame->state = DELTA_FRAME_M2M;
+ }
+ }
+}
+
+static int delta_recycle(struct delta_ctx *ctx, struct delta_frame *frame)
+{
+ const struct delta_dec *dec = ctx->dec;
+
+ /* recycle frame on decoder side */
+ call_dec_op(dec, recycle, ctx, frame);
+
+ /* this frame is no more output */
+ frame->state &= ~DELTA_FRAME_OUT;
+
+ /* requeue free frame */
+ if (frame->state == DELTA_FRAME_FREE) {
+ struct vb2_v4l2_buffer *vbuf = &frame->vbuf;
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ frame->state = DELTA_FRAME_M2M;
+ }
+
+ /* reset other frame fields */
+ frame->flags = 0;
+ frame->dts = 0;
+
+ return 0;
+}
+
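+/*
+ * Decode timestamps (dts) are kept in a FIFO list: a timestamp is pushed
+ * when an access unit is sent to the decoder and popped back into the
+ * corresponding decoded frame when it is returned to the user.
+ */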
+static void delta_push_dts(struct delta_ctx *ctx, u64 val)
+{
+ struct delta_dts *dts;
+
+ dts = kzalloc(sizeof(*dts), GFP_KERNEL);
+ if (!dts)
+ return;
+
+ INIT_LIST_HEAD(&dts->list);
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+ dts->val = val;
+ list_add_tail(&dts->list, &ctx->dts);
+}
+
+static void delta_pop_dts(struct delta_ctx *ctx, u64 *val)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct delta_dts *dts;
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+ if (list_empty(&ctx->dts)) {
+ dev_warn(delta->dev, "%s no dts to pop ... output dts = 0\n",
+ ctx->name);
+ *val = 0;
+ return;
+ }
+
+ dts = list_first_entry(&ctx->dts, struct delta_dts, list);
+ list_del(&dts->list);
+
+ *val = dts->val;
+
+ kfree(dts);
+}
+
+static void delta_flush_dts(struct delta_ctx *ctx)
+{
+ struct delta_dts *dts;
+ struct delta_dts *next;
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+
+ /* free all pending dts */
+ list_for_each_entry_safe(dts, next, &ctx->dts, list)
+ kfree(dts);
+
+ /* reset list */
+ INIT_LIST_HEAD(&ctx->dts);
+}
+
+static inline int frame_alignment(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ /* multiple of 2 */
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static inline int estimated_au_size(u32 w, u32 h)
+{
+ /*
+ * for a MJPEG stream encoded from YUV422 pixel format,
+ * assuming a compression ratio of 2, the maximum size
+ * of an access unit is (width x height x 2) / 2,
+ * so (width x height)
+ */
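+	 * (e.g. a 1920x1080 stream gets a budget of about 2 MB per access unit)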
+ return (w * h);
+}
+
+static void set_default_params(struct delta_ctx *ctx)
+{
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+
+ memset(frameinfo, 0, sizeof(*frameinfo));
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = DELTA_DEFAULT_WIDTH;
+ frameinfo->height = DELTA_DEFAULT_HEIGHT;
+ frameinfo->aligned_width = ALIGN(frameinfo->width,
+ DELTA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(frameinfo->height,
+ DELTA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+ frameinfo->field = V4L2_FIELD_NONE;
+ frameinfo->colorspace = V4L2_COLORSPACE_REC709;
+ frameinfo->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ frameinfo->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ frameinfo->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ memset(streaminfo, 0, sizeof(*streaminfo));
+ streaminfo->streamformat = DELTA_DEFAULT_STREAMFORMAT;
+ streaminfo->width = DELTA_DEFAULT_WIDTH;
+ streaminfo->height = DELTA_DEFAULT_HEIGHT;
+ streaminfo->field = V4L2_FIELD_NONE;
+ streaminfo->colorspace = V4L2_COLORSPACE_REC709;
+ streaminfo->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ streaminfo->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ streaminfo->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ ctx->max_au_size = estimated_au_size(streaminfo->width,
+ streaminfo->height);
+}
+
+static const struct delta_dec *delta_find_decoder(struct delta_ctx *ctx,
+ u32 streamformat,
+ u32 pixelformat)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec;
+ unsigned int i;
+
+ for (i = 0; i < delta->nb_of_decoders; i++) {
+ dec = delta->decoders[i];
+ if ((dec->pixelformat == pixelformat) &&
+ (dec->streamformat == streamformat))
+ return dec;
+ }
+
+ return NULL;
+}
+
+static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
+{
+ u32 i;
+
+ for (i = 0; i < *nb_of_formats; i++) {
+ if (format == formats[i])
+ return;
+ }
+
+ formats[(*nb_of_formats)++] = format;
+}
+
+static void register_formats(struct delta_dev *delta)
+{
+ unsigned int i;
+
+ for (i = 0; i < delta->nb_of_decoders; i++) {
+ register_format(delta->decoders[i]->pixelformat,
+ delta->pixelformats,
+ &delta->nb_of_pixelformats);
+
+ register_format(delta->decoders[i]->streamformat,
+ delta->streamformats,
+ &delta->nb_of_streamformats);
+ }
+}
+
+static void register_decoders(struct delta_dev *delta)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(delta_decoders); i++) {
+ if (delta->nb_of_decoders >= DELTA_MAX_DECODERS) {
+ dev_dbg(delta->dev,
+ "%s failed to register %s decoder (%d maximum reached)\n",
+ DELTA_PREFIX, delta_decoders[i]->name,
+ DELTA_MAX_DECODERS);
+ return;
+ }
+
+ delta->decoders[delta->nb_of_decoders++] = delta_decoders[i];
+ dev_info(delta->dev, "%s %s decoder registered\n",
+ DELTA_PREFIX, delta_decoders[i]->name);
+ }
+}
+
+static int delta_open_decoder(struct delta_ctx *ctx, u32 streamformat,
+ u32 pixelformat, const struct delta_dec **pdec)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec;
+ int ret;
+
+ dec = delta_find_decoder(ctx, streamformat, ctx->frameinfo.pixelformat);
+ if (!dec) {
+ dev_err(delta->dev, "%s no decoder found matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&streamformat, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(delta->dev, "%s one decoder matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&streamformat, (char *)&pixelformat);
+
+ /* update instance name */
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
+ delta->instance_id, (char *)&streamformat);
+
+ /* open decoder instance */
+ ret = call_dec_op(dec, open, ctx);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to open decoder instance (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+
+ dev_dbg(delta->dev, "%s %s decoder opened\n", ctx->name, dec->name);
+
+ *pdec = dec;
+
+ return ret;
+}
+
+/*
+ * V4L2 ioctl operations
+ */
+
+static int delta_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ strlcpy(cap->driver, DELTA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, delta->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ delta->pdev->name);
+
+ return 0;
+}
+
+static int delta_enum_fmt_stream(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ if (unlikely(f->index >= delta->nb_of_streamformats))
+ return -EINVAL;
+
+ f->pixelformat = delta->streamformats[f->index];
+
+ return 0;
+}
+
+static int delta_enum_fmt_frame(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ if (unlikely(f->index >= delta->nb_of_pixelformats))
+ return -EINVAL;
+
+ f->pixelformat = delta->pixelformats[f->index];
+
+ return 0;
+}
+
+static int delta_g_fmt_stream(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_STREAMINFO))
+ dev_dbg(delta->dev,
+ "%s V4L2 GET_FMT (OUTPUT): no stream information available, default to %s\n",
+ ctx->name,
+ delta_streaminfo_str(streaminfo, str, sizeof(str)));
+
+ pix->pixelformat = streaminfo->streamformat;
+ pix->width = streaminfo->width;
+ pix->height = streaminfo->height;
+ pix->field = streaminfo->field;
+ pix->bytesperline = 0;
+ pix->sizeimage = ctx->max_au_size;
+ pix->colorspace = streaminfo->colorspace;
+ pix->xfer_func = streaminfo->xfer_func;
+ pix->ycbcr_enc = streaminfo->ycbcr_enc;
+ pix->quantization = streaminfo->quantization;
+
+ return 0;
+}
+
+static int delta_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_FRAMEINFO))
+ dev_dbg(delta->dev,
+ "%s V4L2 GET_FMT (CAPTURE): no frame information available, default to %s\n",
+ ctx->name,
+ delta_frameinfo_str(frameinfo, str, sizeof(str)));
+
+ pix->pixelformat = frameinfo->pixelformat;
+ pix->width = frameinfo->aligned_width;
+ pix->height = frameinfo->aligned_height;
+ pix->field = frameinfo->field;
+ pix->bytesperline = frame_stride(frameinfo->aligned_width,
+ frameinfo->pixelformat);
+ pix->sizeimage = frameinfo->size;
+
+ if (ctx->flags & DELTA_FLAG_STREAMINFO) {
+ /* align colorspace & friends on stream ones if any set */
+ frameinfo->colorspace = streaminfo->colorspace;
+ frameinfo->xfer_func = streaminfo->xfer_func;
+ frameinfo->ycbcr_enc = streaminfo->ycbcr_enc;
+ frameinfo->quantization = streaminfo->quantization;
+ }
+ pix->colorspace = frameinfo->colorspace;
+ pix->xfer_func = frameinfo->xfer_func;
+ pix->ycbcr_enc = frameinfo->ycbcr_enc;
+ pix->quantization = frameinfo->quantization;
+
+ return 0;
+}
+
+static int delta_try_fmt_stream(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 streamformat = pix->pixelformat;
+ const struct delta_dec *dec;
+ u32 width, height;
+ u32 au_size;
+
+ dec = delta_find_decoder(ctx, streamformat, ctx->frameinfo.pixelformat);
+ if (!dec) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image
+ (&pix->width,
+ DELTA_MIN_WIDTH,
+ dec->max_width ? dec->max_width : DELTA_MAX_WIDTH,
+ 0,
+ &pix->height,
+ DELTA_MIN_HEIGHT,
+ dec->max_height ? dec->max_height : DELTA_MAX_HEIGHT,
+ 0, 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+
+ au_size = estimated_au_size(pix->width, pix->height);
+ if (pix->sizeimage < au_size) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): size updated %d -> %d to fit estimated size\n",
+ ctx->name, pix->sizeimage, au_size);
+ pix->sizeimage = au_size;
+ }
+
+ pix->bytesperline = 0;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_try_fmt_frame(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 pixelformat = pix->pixelformat;
+ const struct delta_dec *dec;
+ u32 width, height;
+
+ dec = delta_find_decoder(ctx, ctx->streaminfo.streamformat,
+ pixelformat);
+ if (!dec) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): unsupported format %4.4s\n",
+ ctx->name, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image(&pix->width,
+ DELTA_MIN_WIDTH, DELTA_MAX_WIDTH,
+ frame_alignment(pixelformat) - 1,
+ &pix->height,
+ DELTA_MIN_HEIGHT, DELTA_MAX_HEIGHT,
+ frame_alignment(pixelformat) - 1, 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
+ /* default decoder alignment constraint */
+ width = ALIGN(pix->width, DELTA_WIDTH_ALIGNMENT);
+ height = ALIGN(pix->height, DELTA_HEIGHT_ALIGNMENT);
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit decoder alignment\n",
+			ctx->name, pix->width, pix->height, width, height);
+
+ if (!pix->colorspace) {
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ pix->width = width;
+ pix->height = height;
+ pix->bytesperline = frame_stride(pix->width, pixelformat);
+ pix->sizeimage = frame_size(pix->width, pix->height, pixelformat);
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_s_fmt_stream(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_queue *vq;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ ret = delta_try_fmt_stream(file, fh, f);
+ if (ret) {
+ dev_dbg(delta->dev,
+ "%s V4L2 S_FMT (OUTPUT): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(delta->dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->max_au_size = pix->sizeimage;
+ ctx->streaminfo.width = pix->width;
+ ctx->streaminfo.height = pix->height;
+ ctx->streaminfo.streamformat = pix->pixelformat;
+ ctx->streaminfo.colorspace = pix->colorspace;
+ ctx->streaminfo.xfer_func = pix->xfer_func;
+ ctx->streaminfo.ycbcr_enc = pix->ycbcr_enc;
+ ctx->streaminfo.quantization = pix->quantization;
+ ctx->flags |= DELTA_FLAG_STREAMINFO;
+
+ return 0;
+}
+
+static int delta_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_frameinfo frameinfo;
+ unsigned char str[100] = "";
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(delta->dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ if (ctx->state < DELTA_STATE_READY) {
+ /*
+ * decoder not yet opened and valid stream header not found,
+ * could not negotiate format with decoder, check at least
+ * pixel format & negotiate resolution boundaries
+ * and alignment...
+ */
+ ret = delta_try_fmt_frame(file, fh, f);
+ if (ret) {
+ dev_dbg(delta->dev,
+ "%s V4L2 S_FMT (CAPTURE): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* set frame information to decoder */
+ memset(&frameinfo, 0, sizeof(frameinfo));
+ frameinfo.pixelformat = pix->pixelformat;
+ frameinfo.width = pix->width;
+ frameinfo.height = pix->height;
+ frameinfo.aligned_width = pix->width;
+ frameinfo.aligned_height = pix->height;
+ frameinfo.size = pix->sizeimage;
+ frameinfo.field = pix->field;
+ frameinfo.colorspace = pix->colorspace;
+ frameinfo.xfer_func = pix->xfer_func;
+ frameinfo.ycbcr_enc = pix->ycbcr_enc;
+ frameinfo.quantization = pix->quantization;
+ ret = call_dec_op(dec, set_frameinfo, ctx, &frameinfo);
+ if (ret)
+ return ret;
+
+ /* then get what decoder can really do */
+ ret = call_dec_op(dec, get_frameinfo, ctx, &frameinfo);
+ if (ret)
+ return ret;
+
+ ctx->flags |= DELTA_FLAG_FRAMEINFO;
+ ctx->frameinfo = frameinfo;
+ dev_dbg(delta->dev,
+ "%s V4L2 SET_FMT (CAPTURE): frameinfo updated to %s\n",
+ ctx->name,
+ delta_frameinfo_str(&frameinfo, str, sizeof(str)));
+
+ pix->pixelformat = frameinfo.pixelformat;
+ pix->width = frameinfo.aligned_width;
+ pix->height = frameinfo.aligned_height;
+ pix->bytesperline = frame_stride(pix->width, pix->pixelformat);
+ pix->sizeimage = frameinfo.size;
+ pix->field = frameinfo.field;
+ pix->colorspace = frameinfo.colorspace;
+ pix->xfer_func = frameinfo.xfer_func;
+ pix->ycbcr_enc = frameinfo.ycbcr_enc;
+ pix->quantization = frameinfo.quantization;
+
+ return 0;
+}
+
+static int delta_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct delta_ctx *ctx = to_ctx(fh);
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct v4l2_rect crop;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if ((ctx->flags & DELTA_FLAG_FRAMEINFO) &&
+ (frameinfo->flags & DELTA_FRAMEINFO_FLAG_CROP)) {
+ crop = frameinfo->crop;
+ } else {
+ /* default to video dimensions */
+ crop.left = 0;
+ crop.top = 0;
+ crop.width = frameinfo->width;
+ crop.height = frameinfo->height;
+ }
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ /* visible area inside video */
+ s->r = crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* up to aligned dimensions */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frameinfo->aligned_width;
+ s->r.height = frameinfo->aligned_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void delta_complete_eos(struct delta_ctx *ctx,
+ struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct v4l2_event ev = {.type = V4L2_EVENT_EOS};
+
+ /*
+ * Send EOS to user:
+ * - by returning an empty frame flagged to V4L2_BUF_FLAG_LAST
+ * - and then send EOS event
+ */
+
+ /* empty frame */
+ frame->info.size = 0;
+
+ /* set the last buffer flag */
+ frame->flags |= V4L2_BUF_FLAG_LAST;
+
+ /* release frame to user */
+ delta_frame_done(ctx, frame, 0);
+
+ /* send EOS event */
+ v4l2_event_queue_fh(&ctx->fh, &ev);
+
+ dev_dbg(delta->dev, "%s EOS completed\n", ctx->name);
+}
+
+static int delta_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *cmd)
+{
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(cmd->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) &&
+ (cmd->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int delta_decoder_stop_cmd(struct delta_ctx *ctx, void *fh)
+{
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_dev *delta = ctx->dev;
+ struct delta_frame *frame = NULL;
+ int ret = 0;
+
+ dev_dbg(delta->dev, "%s EOS received\n", ctx->name);
+
+ if (ctx->state != DELTA_STATE_READY)
+ return 0;
+
+ /* drain the decoder */
+ call_dec_op(dec, drain, ctx);
+
+ /* release to user drained frames */
+ while (1) {
+ frame = NULL;
+ ret = call_dec_op(dec, get_frame, ctx, &frame);
+ if (ret == -ENODATA) {
+ /* no more decoded frames */
+ break;
+ }
+ if (frame) {
+ dev_dbg(delta->dev, "%s drain frame[%d]\n",
+ ctx->name, frame->index);
+
+ /* pop timestamp and mark frame with it */
+ delta_pop_dts(ctx, &frame->dts);
+
+ /* release decoded frame to user */
+ delta_frame_done(ctx, frame, 0);
+ }
+ }
+
+ /* try to complete EOS */
+ ret = delta_get_free_frame(ctx, &frame);
+ if (ret)
+ goto delay_eos;
+
+ /* new frame available, EOS can now be completed */
+ delta_complete_eos(ctx, frame);
+
+ ctx->state = DELTA_STATE_EOS;
+
+ return 0;
+
+delay_eos:
+ /*
+ * EOS completion from driver is delayed because
+ * we don't have a free empty frame available.
+ * EOS completion is so delayed till next frame_queue() call
+ * to be sure to have a free empty frame available.
+ */
+ ctx->state = DELTA_STATE_WF_EOS;
+ dev_dbg(delta->dev, "%s EOS delayed\n", ctx->name);
+
+ return 0;
+}
+
+static int delta_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *cmd)
+{
+ struct delta_ctx *ctx = to_ctx(fh);
+ int ret = 0;
+
+ ret = delta_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ return delta_decoder_stop_cmd(ctx, fh);
+}
+
+static int delta_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* v4l2 ioctl ops */
+static const struct v4l2_ioctl_ops delta_ioctl_ops = {
+ .vidioc_querycap = delta_querycap,
+ .vidioc_enum_fmt_vid_cap = delta_enum_fmt_frame,
+ .vidioc_g_fmt_vid_cap = delta_g_fmt_frame,
+ .vidioc_try_fmt_vid_cap = delta_try_fmt_frame,
+ .vidioc_s_fmt_vid_cap = delta_s_fmt_frame,
+ .vidioc_enum_fmt_vid_out = delta_enum_fmt_stream,
+ .vidioc_g_fmt_vid_out = delta_g_fmt_stream,
+ .vidioc_try_fmt_vid_out = delta_try_fmt_stream,
+ .vidioc_s_fmt_vid_out = delta_s_fmt_stream,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = delta_g_selection,
+ .vidioc_try_decoder_cmd = delta_try_decoder_cmd,
+ .vidioc_decoder_cmd = delta_decoder_cmd,
+ .vidioc_subscribe_event = delta_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * mem-to-mem operations
+ */
+
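+/*
+ * Decode work: dequeue one access unit from the m2m source queue, decode
+ * it on the hardware, push its timestamp, then return to the user every
+ * decoded frame the decoder can deliver.
+ */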
+static void delta_run_work(struct work_struct *work)
+{
+ struct delta_ctx *ctx = container_of(work, struct delta_ctx, run_work);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_au *au;
+ struct delta_frame *frame = NULL;
+ int ret = 0;
+ bool discard = false;
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!dec) {
+ dev_err(delta->dev, "%s no decoder opened yet\n", ctx->name);
+ return;
+ }
+
+ /* protect instance against reentrancy */
+ mutex_lock(&ctx->lock);
+
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s no buffer to decode\n", ctx->name);
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+ au = to_au(vbuf);
+ au->size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ au->dts = vbuf->vb2_buf.timestamp;
+
+ /* dump access unit */
+ dump_au(ctx, au);
+
+ /* enable the hardware */
+ if (!dec->pm) {
+ ret = delta_get_sync(ctx);
+ if (ret) {
+ delta_put_autosuspend(ctx);
+ goto err;
+ }
+ }
+
+ /* decode this access unit */
+ ret = call_dec_op(dec, decode, ctx, au);
+
+ /*
+ * if the (-ENODATA) value is returned, it refers to the interlaced
+ * stream case for which 2 access units are needed to get 1 frame.
+ * So, this returned value doesn't mean that the decoding fails, but
+ * indicates that the timestamp information of the access unit shall
+ * not be taken into account, and that the V4L2 buffer associated with
+ * the access unit shall be flagged with V4L2_BUF_FLAG_ERROR to inform
+ * the user of this situation
+ */
+ if (ret == -ENODATA) {
+ discard = true;
+ } else if (ret) {
+ dev_err(delta->dev, "%s decoding failed (%d)\n",
+ ctx->name, ret);
+
+ /* disable the hardware */
+ if (!dec->pm)
+ delta_put_autosuspend(ctx);
+
+ goto err;
+ }
+
+ /* disable the hardware */
+ if (!dec->pm)
+ delta_put_autosuspend(ctx);
+
+ /* push au timestamp in FIFO */
+ if (!discard)
+ delta_push_dts(ctx, au->dts);
+
+ /* get available decoded frames */
+ while (1) {
+ ret = call_dec_op(dec, get_frame, ctx, &frame);
+ if (ret == -ENODATA) {
+ /* no more decoded frames */
+ goto out;
+ }
+ if (ret) {
+ dev_err(delta->dev, "%s cannot get decoded frame (%d)\n",
+ ctx->name, ret);
+ goto out;
+ }
+ if (!frame) {
+ dev_err(delta->dev,
+ "%s NULL decoded frame\n",
+ ctx->name);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* pop timestamp and mark frame with it */
+ delta_pop_dts(ctx, &frame->dts);
+
+ /* release decoded frame to user */
+ delta_frame_done(ctx, frame, 0);
+ }
+
+out:
+ requeue_free_frames(ctx);
+ delta_au_done(ctx, au, (discard ? -ENODATA : 0));
+ mutex_unlock(&ctx->lock);
+ v4l2_m2m_job_finish(delta->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+
+err:
+ requeue_free_frames(ctx);
+ delta_au_done(ctx, au, ret);
+ mutex_unlock(&ctx->lock);
+ v4l2_m2m_job_finish(delta->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void delta_device_run(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ queue_work(delta->work_queue, &ctx->run_work);
+}
+
+static void delta_job_abort(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ dev_dbg(delta->dev, "%s aborting job\n", ctx->name);
+
+ ctx->aborting = true;
+}
+
+static int delta_job_ready(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+ int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
+
+ if (!src_bufs) {
+ dev_dbg(delta->dev, "%s not ready: not enough video buffers.\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(delta->dev, "%s not ready: not enough video capture buffers.\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ dev_dbg(delta->dev, "%s job not ready: aborting\n", ctx->name);
+ return 0;
+ }
+
+ dev_dbg(delta->dev, "%s job ready\n", ctx->name);
+
+ return 1;
+}
+
+/* mem-to-mem ops */
+static const struct v4l2_m2m_ops delta_m2m_ops = {
+ .device_run = delta_device_run,
+ .job_ready = delta_job_ready,
+ .job_abort = delta_job_abort,
+};
+
+/*
+ * VB2 queue operations
+ */
+
+static int delta_vb2_au_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(vq);
+ unsigned int size = ctx->max_au_size;
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *num_planes = 1;
+ if (*num_buffers < 1)
+ *num_buffers = 1;
+ if (*num_buffers > DELTA_MAX_AUS)
+ *num_buffers = DELTA_MAX_AUS;
+
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int delta_vb2_au_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_au *au = to_au(vbuf);
+
+ if (!au->prepared) {
+ /* get memory addresses */
+ au->vaddr = vb2_plane_vaddr(&au->vbuf.vb2_buf, 0);
+ au->paddr = vb2_dma_contig_plane_dma_addr
+ (&au->vbuf.vb2_buf, 0);
+ au->prepared = true;
+ dev_dbg(delta->dev, "%s au[%d] prepared; virt=0x%p, phy=0x%pad\n",
+ ctx->name, vb->index, au->vaddr, &au->paddr);
+ }
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_setup_frame(struct delta_ctx *ctx,
+ struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+
+ if (frame->index >= DELTA_MAX_FRAMES) {
+ dev_err(delta->dev,
+ "%s frame index=%d exceeds output frame count (%d)\n",
+ ctx->name, frame->index, DELTA_MAX_FRAMES);
+ return -EINVAL;
+ }
+
+ if (ctx->nb_of_frames >= DELTA_MAX_FRAMES) {
+ dev_err(delta->dev,
+ "%s number of frames exceeds output frame count (%d > %d)\n",
+ ctx->name, ctx->nb_of_frames, DELTA_MAX_FRAMES);
+ return -EINVAL;
+ }
+
+ if (frame->index != ctx->nb_of_frames) {
+ dev_warn(delta->dev,
+ "%s frame index discontinuity detected, expected %d, got %d\n",
+ ctx->name, ctx->nb_of_frames, frame->index);
+ }
+
+ frame->state = DELTA_FRAME_FREE;
+ ctx->frames[ctx->nb_of_frames] = frame;
+ ctx->nb_of_frames++;
+
+ /* setup frame on decoder side */
+ return call_dec_op(dec, setup_frame, ctx, frame);
+}
+
+/*
+ * default implementation of get_frameinfo decoder ops
+ * deriving frame information from the stream information,
+ * with the default pixel format and default alignment.
+ */
+int delta_get_frameinfo_default(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo)
+{
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+
+ memset(frameinfo, 0, sizeof(*frameinfo));
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = streaminfo->width;
+ frameinfo->height = streaminfo->height;
+ frameinfo->aligned_width = ALIGN(streaminfo->width,
+ DELTA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(streaminfo->height,
+ DELTA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+ if (streaminfo->flags & DELTA_STREAMINFO_FLAG_CROP) {
+ frameinfo->flags |= DELTA_FRAMEINFO_FLAG_CROP;
+ frameinfo->crop = streaminfo->crop;
+ }
+ if (streaminfo->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT) {
+ frameinfo->flags |= DELTA_FRAMEINFO_FLAG_PIXELASPECT;
+ frameinfo->pixelaspect = streaminfo->pixelaspect;
+ }
+ frameinfo->field = streaminfo->field;
+
+ return 0;
+}
+
+/*
+ * default implementation of recycle decoder ops
+ * which simply clears the "decoded" frame state
+ */
+int delta_recycle_default(struct delta_ctx *pctx,
+ struct delta_frame *frame)
+{
+ frame->state &= ~DELTA_FRAME_DEC;
+
+ return 0;
+}
+
+static void dump_frames_status(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ unsigned int i;
+ struct delta_frame *frame;
+ unsigned char str[100] = "";
+
+ dev_info(delta->dev,
+ "%s dumping frames status...\n", ctx->name);
+
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ dev_info(delta->dev,
+ "%s frame[%d] %s\n",
+ ctx->name, frame->index,
+ frame_state_str(frame->state,
+ str, sizeof(str)));
+ }
+}
+
+int delta_get_free_frame(struct delta_ctx *ctx,
+ struct delta_frame **pframe)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+
+ *pframe = NULL;
+
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+		dev_err(delta->dev, "%s no frame available\n",
+ ctx->name);
+ return -EIO;
+ }
+
+ frame = to_frame(vbuf);
+ frame->state &= ~DELTA_FRAME_M2M;
+ if (frame->state != DELTA_FRAME_FREE) {
+ dev_err(delta->dev,
+ "%s frame[%d] is not free\n",
+ ctx->name, frame->index);
+ dump_frames_status(ctx);
+ return -ENODATA;
+ }
+
+ dev_dbg(delta->dev,
+ "%s get free frame[%d]\n", ctx->name, frame->index);
+
+ *pframe = frame;
+ return 0;
+}
+
+int delta_get_sync(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ int ret = 0;
+
+ /* enable the hardware */
+ ret = pm_runtime_get_sync(delta->dev);
+ if (ret < 0) {
+ dev_err(delta->dev, "%s pm_runtime_get_sync failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void delta_put_autosuspend(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+
+ pm_runtime_put_autosuspend(delta->dev);
+}
+
+static void delta_vb2_au_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int delta_vb2_au_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_au *au;
+ int ret = 0;
+ struct vb2_v4l2_buffer *vbuf = NULL;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ unsigned char str1[100] = "";
+ unsigned char str2[100] = "";
+
+ if ((ctx->state != DELTA_STATE_WF_FORMAT) &&
+ (ctx->state != DELTA_STATE_WF_STREAMINFO))
+ return 0;
+
+ if (ctx->state == DELTA_STATE_WF_FORMAT) {
+ /* open decoder if not yet done */
+ ret = delta_open_decoder(ctx,
+ ctx->streaminfo.streamformat,
+ ctx->frameinfo.pixelformat, &dec);
+ if (ret)
+ goto err;
+ ctx->dec = dec;
+ ctx->state = DELTA_STATE_WF_STREAMINFO;
+ }
+
+ /*
+ * first buffer should contain stream header,
+ * decode it to get the infos related to stream
+ * such as width, height, dpb, ...
+ */
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s failed to start streaming, no stream header buffer enqueued\n",
+ ctx->name);
+ ret = -EINVAL;
+ goto err;
+ }
+ au = to_au(vbuf);
+ au->size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ au->dts = vbuf->vb2_buf.timestamp;
+
+ delta_push_dts(ctx, au->dts);
+
+ /* dump access unit */
+ dump_au(ctx, au);
+
+ /* decode this access unit */
+ ret = call_dec_op(dec, decode, ctx, au);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to start streaming, header decoding failed (%d)\n",
+ ctx->name, ret);
+ goto err;
+ }
+
+ ret = call_dec_op(dec, get_streaminfo, ctx, streaminfo);
+ if (ret) {
+ dev_dbg_ratelimited(delta->dev,
+ "%s failed to start streaming, valid stream header not yet decoded\n",
+ ctx->name);
+ goto err;
+ }
+ ctx->flags |= DELTA_FLAG_STREAMINFO;
+
+ ret = call_dec_op(dec, get_frameinfo, ctx, frameinfo);
+ if (ret)
+ goto err;
+ ctx->flags |= DELTA_FLAG_FRAMEINFO;
+
+ ctx->state = DELTA_STATE_READY;
+
+ dev_dbg(delta->dev, "%s %s => %s\n", ctx->name,
+ delta_streaminfo_str(streaminfo, str1, sizeof(str1)),
+ delta_frameinfo_str(frameinfo, str2, sizeof(str2)));
+
+ delta_au_done(ctx, au, ret);
+ return 0;
+
+err:
+ /*
+ * return all buffers to vb2 in QUEUED state.
+ * This will give ownership back to userspace
+ */
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ delta_flush_dts(ctx);
+
+ /* return all buffers to vb2 in ERROR state */
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+
+ ctx->au_num = 0;
+
+ ctx->aborting = false;
+}
+
+static int delta_vb2_frame_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(vq);
+ struct delta_dev *delta = ctx->dev;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ unsigned int size = frameinfo->size;
+
+ /*
+ * the number of output buffers needed for decoding =
+ * user need (*num_buffers given, usually for display pipeline) +
+ * stream need (streaminfo->dpb) +
+ * decoding peak smoothing (depends on DELTA IP perf)
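+	 * (for MJPEG, streaminfo->dpb is 1, so one extra buffer plus the
+	 * smoothing margin is added on top of the user need)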
+ */
+ if (*num_buffers < DELTA_MIN_FRAME_USER) {
+ dev_dbg(delta->dev,
+ "%s num_buffers too low (%d), increasing to %d\n",
+ ctx->name, *num_buffers, DELTA_MIN_FRAME_USER);
+ *num_buffers = DELTA_MIN_FRAME_USER;
+ }
+
+ *num_buffers += streaminfo->dpb + DELTA_PEAK_FRAME_SMOOTHING;
+
+ if (*num_buffers > DELTA_MAX_FRAMES) {
+ dev_dbg(delta->dev,
+ "%s output frame count too high (%d), cut to %d\n",
+ ctx->name, *num_buffers, DELTA_MAX_FRAMES);
+ *num_buffers = DELTA_MAX_FRAMES;
+ }
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ /* single plane for Y and CbCr */
+ *num_planes = 1;
+
+ sizes[0] = size;
+
+ ctx->nb_of_frames = 0;
+
+ return 0;
+}
+
+static int delta_vb2_frame_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+ int ret = 0;
+
+ if (!frame->prepared) {
+ frame->index = vbuf->vb2_buf.index;
+ frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ frame->paddr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ frame->info = ctx->frameinfo;
+
+ ret = delta_setup_frame(ctx, frame);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s setup_frame() failed (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+ frame->prepared = true;
+ dev_dbg(delta->dev,
+ "%s frame[%d] prepared; virt=0x%p, phy=0x%pad\n",
+ ctx->name, vb->index, frame->vaddr,
+ &frame->paddr);
+ }
+
+ frame->flags = vbuf->flags;
+
+ return 0;
+}
+
+static void delta_vb2_frame_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+
+ /* update V4L2 fields for user */
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, frame->info.size);
+ vb->timestamp = frame->dts;
+ vbuf->field = frame->field;
+ vbuf->flags = frame->flags;
+}
+
+static void delta_vb2_frame_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+
+ if (ctx->state == DELTA_STATE_WF_EOS) {
+ /* new frame available, EOS can now be completed */
+ delta_complete_eos(ctx, frame);
+
+ ctx->state = DELTA_STATE_EOS;
+
+ /* return, no need to recycle this buffer to decoder */
+ return;
+ }
+
+ /* recycle this frame */
+ delta_recycle(ctx, frame);
+}
+
+static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+ const struct delta_dec *dec = ctx->dec;
+ unsigned int i;
+
+ delta_flush_dts(ctx);
+
+ call_dec_op(dec, flush, ctx);
+
+ /*
+ * return all buffers to vb2 in ERROR state
+ * & reset each frame state to OUT
+ */
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ if (!(frame->state & DELTA_FRAME_OUT)) {
+ vbuf = &frame->vbuf;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+ frame->state = DELTA_FRAME_OUT;
+ }
+
+ ctx->frame_num = 0;
+
+ ctx->aborting = false;
+}
+
+/* VB2 queue ops */
+static const struct vb2_ops delta_vb2_au_ops = {
+ .queue_setup = delta_vb2_au_queue_setup,
+ .buf_prepare = delta_vb2_au_prepare,
+ .buf_queue = delta_vb2_au_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = delta_vb2_au_start_streaming,
+ .stop_streaming = delta_vb2_au_stop_streaming,
+};
+
+static const struct vb2_ops delta_vb2_frame_ops = {
+ .queue_setup = delta_vb2_frame_queue_setup,
+ .buf_prepare = delta_vb2_frame_prepare,
+ .buf_finish = delta_vb2_frame_finish,
+ .buf_queue = delta_vb2_frame_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = delta_vb2_frame_stop_streaming,
+};
+
+/*
+ * V4L2 file operations
+ */
+
+static int queue_init(void *priv,
+ struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct vb2_queue *q;
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+ int ret;
+
+ /* setup vb2 queue for stream input */
+ q = src_vq;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ctx;
+ /* overload vb2 buf with private au struct */
+ q->buf_struct_size = sizeof(struct delta_au);
+ q->ops = &delta_vb2_au_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->lock = &delta->lock;
+ q->dev = delta->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /* setup vb2 queue for frame output */
+ q = dst_vq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ctx;
+ /* overload vb2 buf with private frame struct */
+ q->buf_struct_size = sizeof(struct delta_frame)
+ + DELTA_MAX_FRAME_PRIV_SIZE;
+ q->ops = &delta_vb2_frame_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->lock = &delta->lock;
+ q->dev = delta->dev;
+
+ return vb2_queue_init(q);
+}
+
+static int delta_open(struct file *file)
+{
+ struct delta_dev *delta = video_drvdata(file);
+ struct delta_ctx *ctx = NULL;
+ int ret = 0;
+
+ mutex_lock(&delta->lock);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ctx->dev = delta;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ INIT_WORK(&ctx->run_work, delta_run_work);
+ mutex_init(&ctx->lock);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(delta->m2m_dev, ctx,
+ queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ dev_err(delta->dev, "%s failed to initialize m2m context (%d)\n",
+ DELTA_PREFIX, ret);
+ goto err_fh_del;
+ }
+
+ /*
+	 * wait for the stream format to determine which
+	 * decoder to open
+ */
+ ctx->state = DELTA_STATE_WF_FORMAT;
+
+ INIT_LIST_HEAD(&ctx->dts);
+
+ /* set the instance name */
+ delta->instance_id++;
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
+ delta->instance_id);
+
+ /* default parameters for frame and stream */
+ set_default_params(ctx);
+
+ /* enable ST231 clocks */
+ if (delta->clk_st231)
+ if (clk_prepare_enable(delta->clk_st231))
+ dev_warn(delta->dev, "failed to enable st231 clk\n");
+
+ /* enable FLASH_PROMIP clock */
+ if (delta->clk_flash_promip)
+ if (clk_prepare_enable(delta->clk_flash_promip))
+ dev_warn(delta->dev, "failed to enable delta promip clk\n");
+
+ mutex_unlock(&delta->lock);
+
+ dev_dbg(delta->dev, "%s decoder instance created\n", ctx->name);
+
+ return 0;
+
+err_fh_del:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+err:
+ mutex_unlock(&delta->lock);
+
+ return ret;
+}
+
+static int delta_release(struct file *file)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+
+ mutex_lock(&delta->lock);
+
+ /* close decoder */
+ call_dec_op(dec, close, ctx);
+
+ /*
+ * trace a summary of instance
+ * before closing (debug purpose)
+ */
+ delta_trace_summary(ctx);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ /* disable ST231 clocks */
+ if (delta->clk_st231)
+ clk_disable_unprepare(delta->clk_st231);
+
+ /* disable FLASH_PROMIP clock */
+ if (delta->clk_flash_promip)
+ clk_disable_unprepare(delta->clk_flash_promip);
+
+ dev_dbg(delta->dev, "%s decoder instance released\n", ctx->name);
+
+ kfree(ctx);
+
+ mutex_unlock(&delta->lock);
+ return 0;
+}
+
+/* V4L2 file ops */
+static const struct v4l2_file_operations delta_fops = {
+ .owner = THIS_MODULE,
+ .open = delta_open,
+ .release = delta_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .poll = v4l2_m2m_fop_poll,
+};
+
+/*
+ * Platform device operations
+ */
+
+static int delta_register_device(struct delta_dev *delta)
+{
+ int ret;
+ struct video_device *vdev;
+
+ if (!delta)
+ return -ENODEV;
+
+ delta->m2m_dev = v4l2_m2m_init(&delta_m2m_ops);
+ if (IS_ERR(delta->m2m_dev)) {
+ dev_err(delta->dev, "%s failed to initialize v4l2-m2m device\n",
+ DELTA_PREFIX);
+ ret = PTR_ERR(delta->m2m_dev);
+ goto err;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ dev_err(delta->dev, "%s failed to allocate video device\n",
+ DELTA_PREFIX);
+ ret = -ENOMEM;
+ goto err_m2m_release;
+ }
+
+ vdev->fops = &delta_fops;
+ vdev->ioctl_ops = &delta_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->lock = &delta->lock;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ vdev->v4l2_dev = &delta->v4l2_dev;
+ snprintf(vdev->name, sizeof(vdev->name), "%s-%s",
+ DELTA_NAME, DELTA_FW_VERSION);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to register video device\n",
+ DELTA_PREFIX);
+ goto err_vdev_release;
+ }
+
+ delta->vdev = vdev;
+ video_set_drvdata(vdev, delta);
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+err_m2m_release:
+ v4l2_m2m_release(delta->m2m_dev);
+err:
+ return ret;
+}
+
+static void delta_unregister_device(struct delta_dev *delta)
+{
+ if (!delta)
+ return;
+
+ if (delta->m2m_dev)
+ v4l2_m2m_release(delta->m2m_dev);
+
+ video_unregister_device(delta->vdev);
+}
+
+static int delta_probe(struct platform_device *pdev)
+{
+ struct delta_dev *delta;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ delta = devm_kzalloc(dev, sizeof(*delta), GFP_KERNEL);
+ if (!delta) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ delta->dev = dev;
+ delta->pdev = pdev;
+ platform_set_drvdata(pdev, delta);
+
+ mutex_init(&delta->lock);
+
+ /* get clock resources */
+ delta->clk_delta = devm_clk_get(dev, "delta");
+ if (IS_ERR(delta->clk_delta)) {
+ dev_dbg(dev, "%s can't get delta clock\n", DELTA_PREFIX);
+ delta->clk_delta = NULL;
+ }
+
+ delta->clk_st231 = devm_clk_get(dev, "delta-st231");
+ if (IS_ERR(delta->clk_st231)) {
+ dev_dbg(dev, "%s can't get delta-st231 clock\n", DELTA_PREFIX);
+ delta->clk_st231 = NULL;
+ }
+
+ delta->clk_flash_promip = devm_clk_get(dev, "delta-flash-promip");
+ if (IS_ERR(delta->clk_flash_promip)) {
+ dev_dbg(dev, "%s can't get delta-flash-promip clock\n",
+ DELTA_PREFIX);
+ delta->clk_flash_promip = NULL;
+ }
+
+ /* init pm_runtime used for power management */
+ pm_runtime_set_autosuspend_delay(dev, DELTA_HW_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ /* init firmware ipc channel */
+ ret = delta_ipc_init(delta);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to initialize firmware ipc channel\n",
+ DELTA_PREFIX);
+ goto err_pm_disable;
+ }
+
+ /* register all available decoders */
+ register_decoders(delta);
+
+ /* register all supported formats */
+ register_formats(delta);
+
+ /* register on V4L2 */
+ ret = v4l2_device_register(dev, &delta->v4l2_dev);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to register V4L2 device\n",
+ DELTA_PREFIX);
+ goto err_pm_disable;
+ }
+
+ delta->work_queue = create_workqueue(DELTA_NAME);
+ if (!delta->work_queue) {
+ dev_err(delta->dev, "%s failed to allocate work queue\n",
+ DELTA_PREFIX);
+ ret = -ENOMEM;
+ goto err_v4l2;
+ }
+
+ /* register device */
+ ret = delta_register_device(delta);
+ if (ret)
+ goto err_work_queue;
+
+ dev_info(dev, "%s %s registered as /dev/video%d\n",
+ DELTA_PREFIX, delta->vdev->name, delta->vdev->num);
+
+ return 0;
+
+err_work_queue:
+ destroy_workqueue(delta->work_queue);
+err_v4l2:
+ v4l2_device_unregister(&delta->v4l2_dev);
+err_pm_disable:
+ pm_runtime_disable(dev);
+err:
+ return ret;
+}
+
+static int delta_remove(struct platform_device *pdev)
+{
+ struct delta_dev *delta = platform_get_drvdata(pdev);
+
+ delta_ipc_exit(delta);
+
+ delta_unregister_device(delta);
+
+ destroy_workqueue(delta->work_queue);
+
+ pm_runtime_put_autosuspend(delta->dev);
+ pm_runtime_disable(delta->dev);
+
+ v4l2_device_unregister(&delta->v4l2_dev);
+
+ return 0;
+}
+
+static int delta_runtime_suspend(struct device *dev)
+{
+ struct delta_dev *delta = dev_get_drvdata(dev);
+
+ if (delta->clk_delta)
+ clk_disable_unprepare(delta->clk_delta);
+
+ return 0;
+}
+
+static int delta_runtime_resume(struct device *dev)
+{
+ struct delta_dev *delta = dev_get_drvdata(dev);
+
+ if (delta->clk_delta)
+ if (clk_prepare_enable(delta->clk_delta))
+ dev_warn(dev, "failed to prepare/enable delta clk\n");
+
+ return 0;
+}
+
+/* PM ops */
+static const struct dev_pm_ops delta_pm_ops = {
+ .runtime_suspend = delta_runtime_suspend,
+ .runtime_resume = delta_runtime_resume,
+};
+
+static const struct of_device_id delta_match_types[] = {
+ {
+ .compatible = "st,st-delta",
+ },
+ {
+ /* end node */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, delta_match_types);
+
+static struct platform_driver delta_driver = {
+ .probe = delta_probe,
+ .remove = delta_remove,
+ .driver = {
+ .name = DELTA_NAME,
+ .of_match_table = delta_match_types,
+ .pm = &delta_pm_ops},
+};
+
+module_platform_driver(delta_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DELTA video decoder V4L2 driver");
diff --git a/drivers/media/platform/sti/delta/delta.h b/drivers/media/platform/sti/delta/delta.h
new file mode 100644
index 000000000..2ba99922c
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta.h
@@ -0,0 +1,566 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_H
+#define DELTA_H
+
+#include <linux/rpmsg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "delta-cfg.h"
+
+/*
+ * enum delta_state - state of decoding instance
+ *
+ *@DELTA_STATE_WF_FORMAT:
+ * Wait for compressed format to be set by V4L2 client in order
+ *	Wait for compressed format to be set by V4L2 client in order
+ *	to know which decoder to open.
+ *@DELTA_STATE_WF_STREAMINFO:
+ * Wait for stream information to be available (bitstream
+ * header parsing is done).
+ *
+ *@DELTA_STATE_READY:
+ * Decoding instance is ready to decode compressed access unit.
+ *
+ *@DELTA_STATE_WF_EOS:
+ * Decoding instance is waiting for EOS (End Of Stream) completion.
+ *
+ *@DELTA_STATE_EOS:
+ * EOS (End Of Stream) is completed (signaled to user). Decoding instance
+ * should then be closed.
+ */
+enum delta_state {
+ DELTA_STATE_WF_FORMAT,
+ DELTA_STATE_WF_STREAMINFO,
+ DELTA_STATE_READY,
+ DELTA_STATE_WF_EOS,
+ DELTA_STATE_EOS
+};
+
+/*
+ * struct delta_streaminfo - information about stream to decode
+ *
+ * @flags: validity of fields (crop, pixelaspect, other)
+ * @width: width of video stream
+ * @height:		height of video stream
+ * @streamformat: fourcc compressed format of video (MJPEG, MPEG2, ...)
+ * @dpb: number of frames needed to decode a single frame
+ * (h264 dpb, up to 16)
+ * @crop: cropping window inside decoded frame (1920x1080@0,0
+ * inside 1920x1088 frame for ex.)
+ * @pixelaspect: pixel aspect ratio of video (4/3, 5/4)
+ * @field: interlaced or not
+ * @profile: profile string
+ * @level: level string
+ * @other: other string information from codec
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ */
+struct delta_streaminfo {
+ u32 flags;
+ u32 streamformat;
+ u32 width;
+ u32 height;
+ u32 dpb;
+ struct v4l2_rect crop;
+ struct v4l2_fract pixelaspect;
+ enum v4l2_field field;
+ u8 profile[32];
+ u8 level[32];
+ u8 other[32];
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+};
+
+#define DELTA_STREAMINFO_FLAG_CROP 0x0001
+#define DELTA_STREAMINFO_FLAG_PIXELASPECT 0x0002
+#define DELTA_STREAMINFO_FLAG_OTHER 0x0004
+
+/*
+ * struct delta_au - access unit structure.
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @prepared: if set vaddr/paddr are resolved
+ * @vaddr: virtual address (kernel can read/write)
+ * @paddr: physical address (for hardware)
+ * @flags: access unit type (V4L2_BUF_FLAG_KEYFRAME/PFRAME/BFRAME)
+ * @dts: decoding timestamp of this access unit
+ */
+struct delta_au {
+ struct vb2_v4l2_buffer vbuf; /* keep first */
+ struct list_head list; /* keep second */
+
+ bool prepared;
+ u32 size;
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 flags;
+ u64 dts;
+};
+
+/*
+ * struct delta_frameinfo - information about decoded frame
+ *
+ * @flags: validity of fields (crop, pixelaspect)
+ * @pixelformat: fourcc code for uncompressed video format
+ * @width: width of frame
+ * @height: height of frame
+ * @aligned_width: width of frame (with encoder or decoder alignment
+ * constraint)
+ * @aligned_height: height of frame (with encoder or decoder alignment
+ * constraint)
+ * @size: maximum size in bytes required for data
+ * @crop: cropping window inside frame (1920x1080@0,0
+ * inside 1920x1088 frame for ex.)
+ * @pixelaspect: pixel aspect ratio of video (4/3, 5/4)
+ * @field: interlaced mode
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ */
+struct delta_frameinfo {
+ u32 flags;
+ u32 pixelformat;
+ u32 width;
+ u32 height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 size;
+ struct v4l2_rect crop;
+ struct v4l2_fract pixelaspect;
+ enum v4l2_field field;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+};
+
+#define DELTA_FRAMEINFO_FLAG_CROP 0x0001
+#define DELTA_FRAMEINFO_FLAG_PIXELASPECT 0x0002
+
+/*
+ * struct delta_frame - frame structure.
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @info: frame information (width, height, format, alignment...)
+ * @prepared: if set pix/vaddr/paddr are resolved
+ * @index:		frame index (matches the V4L2 buffer index)
+ * @vaddr: virtual address (kernel can read/write)
+ * @paddr: physical address (for hardware)
+ * @state: frame state for frame lifecycle tracking
+ * (DELTA_FRAME_FREE/DEC/OUT/REC/...)
+ * @flags: frame type (V4L2_BUF_FLAG_KEYFRAME/PFRAME/BFRAME)
+ * @dts: decoding timestamp of this frame
+ * @field: field order for interlaced frame
+ */
+struct delta_frame {
+ struct vb2_v4l2_buffer vbuf; /* keep first */
+ struct list_head list; /* keep second */
+
+ struct delta_frameinfo info;
+ bool prepared;
+ u32 index;
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 state;
+ u32 flags;
+ u64 dts;
+ enum v4l2_field field;
+};
+
+/* frame state for frame lifecycle tracking */
+#define DELTA_FRAME_FREE 0x00 /* is free and can be used for decoding */
+#define DELTA_FRAME_REF 0x01 /* is a reference frame */
+#define DELTA_FRAME_BSY 0x02 /* is owned by decoder and busy */
+#define DELTA_FRAME_DEC 0x04 /* contains decoded content */
+#define DELTA_FRAME_OUT 0x08 /* has been given to user */
+#define DELTA_FRAME_RDY 0x10 /* is ready but still held by decoder */
+#define DELTA_FRAME_M2M 0x20 /* is owned by mem2mem framework */
+
+/*
+ * struct delta_dts - decoding timestamp.
+ *
+ * @list: list to chain timestamps
+ * @val: timestamp in microseconds
+ */
+struct delta_dts {
+ struct list_head list;
+ u64 val;
+};
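+
+/*
+ * Earlier in this patch, delta_push_dts() queues the access unit timestamp
+ * when the access unit is sent for decoding and delta_flush_dts() empties
+ * the FIFO when streaming stops; output frames are then stamped from this
+ * FIFO (see @dts in struct delta_ctx below).
+ */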
+
+struct delta_buf {
+ u32 size;
+ void *vaddr;
+ dma_addr_t paddr;
+ const char *name;
+ unsigned long attrs;
+};
+
+struct delta_ipc_ctx {
+ int cb_err;
+ u32 copro_hdl;
+ struct completion done;
+ struct delta_buf ipc_buf_struct;
+ struct delta_buf *ipc_buf;
+};
+
+struct delta_ipc_param {
+ u32 size;
+ void *data;
+};
+
+struct delta_ctx;
+
+/*
+ * struct delta_dec - decoder structure.
+ *
+ * @name: name of this decoder
+ * @streamformat:	input stream format that this decoder supports
+ * @pixelformat:	pixel format of decoded frames that this decoder supports
+ * @max_width:	(optional) maximum width that this decoder can decode;
+ *		if not set, maximum width is DELTA_MAX_WIDTH
+ * @max_height:	(optional) maximum height that this decoder can decode;
+ *		if not set, maximum height is DELTA_MAX_HEIGHT
+ * @pm: (optional) if set, decoder will manage power on its own
+ * @open: open this decoder
+ * @close: close this decoder
+ * @setup_frame: setup frame to be used by decoder, see below
+ * @get_streaminfo: get stream related infos, see below
+ * @get_frameinfo: get decoded frame related infos, see below
+ * @set_frameinfo: (optional) set decoded frame related infos, see below
+ * @decode: decode a single access unit, see below
+ * @get_frame: get the next decoded frame available, see below
+ * @recycle: recycle the given frame, see below
+ * @flush: (optional) flush decoder, see below
+ * @drain: (optional) drain decoder, see below
+ */
+struct delta_dec {
+ const char *name;
+ u32 streamformat;
+ u32 pixelformat;
+ u32 max_width;
+ u32 max_height;
+ bool pm;
+
+ /*
+ * decoder ops
+ */
+ int (*open)(struct delta_ctx *ctx);
+ int (*close)(struct delta_ctx *ctx);
+
+ /*
+ * setup_frame() - setup frame to be used by decoder
+ * @ctx: (in) instance
+ * @frame: (in) frame to use
+ * @frame.index (in) identifier of frame
+ * @frame.vaddr (in) virtual address (kernel can read/write)
+ * @frame.paddr (in) physical address (for hardware)
+ *
+ * Frame is to be allocated by caller, then given
+ * to decoder through this call.
+ * Several frames must be given to decoder (dpb),
+ * each frame is identified using its index.
+ */
+ int (*setup_frame)(struct delta_ctx *ctx, struct delta_frame *frame);
+
+ /*
+ * get_streaminfo() - get stream related infos
+ * @ctx: (in) instance
+ * @streaminfo: (out) width, height, dpb,...
+ *
+ * Precondition: stream header must have been successfully
+ * parsed to have this call successful & @streaminfo valid.
+ * Header parsing must be done using decode(), giving
+ * explicitly header access unit or first access unit of bitstream.
+	 * If no valid header is found, get_streaminfo will return -ENODATA;
+	 * in this case the next bitstream access units must be decoded until
+	 * get_streaminfo succeeds.
+ */
+ int (*get_streaminfo)(struct delta_ctx *ctx,
+ struct delta_streaminfo *streaminfo);
+
+ /*
+ * get_frameinfo() - get decoded frame related infos
+ * @ctx: (in) instance
+ * @frameinfo: (out) width, height, alignment, crop, ...
+ *
+ * Precondition: get_streaminfo() must be successful
+ */
+ int (*get_frameinfo)(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+
+ /*
+ * set_frameinfo() - set decoded frame related infos
+ * @ctx: (in) instance
+	 * @frameinfo:	(in) width, height, alignment, crop, ...
+	 *
+	 * Optional.
+	 * Typically used to negotiate the output frame format with the
+	 * decoder when it can do post-processing.
+ */
+ int (*set_frameinfo)(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+
+ /*
+ * decode() - decode a single access unit
+ * @ctx: (in) instance
+ * @au: (in/out) access unit
+ * @au.size (in) size of au to decode
+ * @au.vaddr (in) virtual address (kernel can read/write)
+ * @au.paddr (in) physical address (for hardware)
+ * @au.flags (out) au type (V4L2_BUF_FLAG_KEYFRAME/
+ * PFRAME/BFRAME)
+ *
+	 * Decode the given access unit. Decoding is synchronous;
+	 * the access unit memory is no longer needed after this call.
+ * After this call, none, one or several frames could
+ * have been decoded, which can be retrieved using
+ * get_frame().
+ */
+ int (*decode)(struct delta_ctx *ctx, struct delta_au *au);
+
+ /*
+ * get_frame() - get the next decoded frame available
+ * @ctx: (in) instance
+ * @frame: (out) frame with decoded data:
+ * @frame.index (out) identifier of frame
+ * @frame.field (out) field order for interlaced frame
+ * @frame.state (out) frame state for frame lifecycle tracking
+ * @frame.flags (out) frame type (V4L2_BUF_FLAG_KEYFRAME/
+ * PFRAME/BFRAME)
+ *
+ * Get the next available decoded frame.
+ * If no frame is available, -ENODATA is returned.
+ * If a frame is available, frame structure is filled with
+ * relevant data, frame.index identifying this exact frame.
+	 * When this frame is no longer needed by upper layers,
+	 * recycle() must be called with this frame identifier.
+ */
+ int (*get_frame)(struct delta_ctx *ctx, struct delta_frame **frame);
+
+ /*
+ * recycle() - recycle the given frame
+ * @ctx: (in) instance
+ * @frame: (in) frame to recycle:
+ * @frame.index (in) identifier of frame
+ *
+	 * recycle() is to be called by the user when the decoded frame
+	 * is no longer needed (composition/display done).
+ * This frame will then be reused by decoder to proceed
+ * with next frame decoding.
+ * If not enough frames have been provided through setup_frame(),
+ * or recycle() is not called fast enough, the decoder can run out
+ * of available frames to proceed with decoding (starvation).
+ * This case is guarded by wq_recycle wait queue which ensures that
+ * decoder is called only if at least one frame is available.
+ */
+ int (*recycle)(struct delta_ctx *ctx, struct delta_frame *frame);
+
+ /*
+ * flush() - flush decoder
+ * @ctx: (in) instance
+ *
+ * Optional.
+ * Reset decoder context and discard all internal buffers.
+ * This allows implementation of seek, which leads to discontinuity
+ * of input bitstream that decoder must know to restart its internal
+ * decoding logic.
+ */
+ int (*flush)(struct delta_ctx *ctx);
+
+ /*
+ * drain() - drain decoder
+ * @ctx: (in) instance
+ *
+ * Optional.
+ * Mark decoder pending frames (decoded but not yet output) as ready
+ * so that they can be output to client at EOS (End Of Stream).
+ * get_frame() is to be called in a loop right after drain() to
+ * get all those pending frames.
+ */
+ int (*drain)(struct delta_ctx *ctx);
+};
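+
+/*
+ * Illustrative call order implied by the ops documented above (a sketch
+ * of typical usage, not an additional interface):
+ *
+ *	open()
+ *	decode(au)        - repeated until get_streaminfo() != -ENODATA
+ *	get_streaminfo(), get_frameinfo()
+ *	setup_frame()     - for each output frame allocated by the caller
+ *	decode(au), then get_frame()/recycle() for every decoded frame
+ *	flush()           - on seek/discontinuity (optional op)
+ *	drain() + get_frame() loop at end of stream (optional op)
+ *	close()
+ */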
+
+struct delta_dev;
+
+/*
+ * struct delta_ctx - instance structure.
+ *
+ * @flags: validity of fields (streaminfo)
+ * @fh: V4L2 file handle
+ * @dev: device context
+ * @dec: selected decoder context for this instance
+ * @ipc_ctx: context of IPC communication with firmware
+ * @state: instance state
+ * @frame_num: frame number
+ * @au_num: access unit number
+ * @max_au_size: max size of an access unit
+ * @streaminfo: stream information (width, height, dpb, interlacing...)
+ * @frameinfo: frame information (width, height, format, alignment...)
+ * @nb_of_frames: number of frames available for decoding
+ * @frames: array of decoding frames to keep track of frame
+ * state and manage frame recycling
+ * @decoded_frames:	nb of frames decoded since opening
+ * @output_frames:	nb of frames output since opening
+ * @dropped_frames:	nb of frames dropped (i.e. access unit not parsed
+ * or frame decoded but not output)
+ * @stream_errors: nb of stream errors (corrupted, not supported, ...)
+ * @decode_errors: nb of decode errors (firmware error)
+ * @sys_errors: nb of system errors (memory, ipc, ...)
+ * @dts: FIFO of decoding timestamp.
+ * output frames are timestamped with incoming access
+ * unit timestamps using this fifo.
+ * @name: string naming this instance (debug purpose)
+ * @run_work: decoding work
+ * @lock: lock for decoding work serialization
+ * @aborting: true if current job aborted
+ * @priv: private decoder context for this instance, allocated
+ * by decoder @open time.
+ */
+struct delta_ctx {
+ u32 flags;
+ struct v4l2_fh fh;
+ struct delta_dev *dev;
+ const struct delta_dec *dec;
+ struct delta_ipc_ctx ipc_ctx;
+
+ enum delta_state state;
+ u32 frame_num;
+ u32 au_num;
+ size_t max_au_size;
+ struct delta_streaminfo streaminfo;
+ struct delta_frameinfo frameinfo;
+ u32 nb_of_frames;
+ struct delta_frame *frames[DELTA_MAX_FRAMES];
+ u32 decoded_frames;
+ u32 output_frames;
+ u32 dropped_frames;
+ u32 stream_errors;
+ u32 decode_errors;
+ u32 sys_errors;
+ struct list_head dts;
+ char name[100];
+ struct work_struct run_work;
+ struct mutex lock;
+ bool aborting;
+ void *priv;
+};
+
+#define DELTA_FLAG_STREAMINFO 0x0001
+#define DELTA_FLAG_FRAMEINFO 0x0002
+
+#define DELTA_MAX_FORMATS DELTA_MAX_DECODERS
+
+/*
+ * struct delta_dev - device struct, 1 per probe (so single one for
+ * all platform life)
+ *
+ * @v4l2_dev: v4l2 device
+ * @vdev: v4l2 video device
+ * @pdev: platform device
+ * @dev: device
+ * @m2m_dev: memory-to-memory V4L2 device
+ * @lock: device lock, for crit section & V4L2 ops serialization.
+ * @clk_delta: delta main clock
+ * @clk_st231: st231 coprocessor main clock
+ * @clk_flash_promip: flash promip clock
+ * @decoders: list of registered decoders
+ * @nb_of_decoders: nb of registered decoders
+ * @pixelformats: supported uncompressed video formats
+ * @nb_of_pixelformats:	number of supported uncompressed video formats
+ * @streamformats:	supported compressed video formats
+ * @nb_of_streamformats: number of supported compressed video formats
+ * @instance_id: rolling counter identifying an instance (debug purpose)
+ * @work_queue: decoding job work queue
+ * @rpmsg_driver: rpmsg IPC driver
+ * @rpmsg_device: rpmsg IPC device
+ */
+struct delta_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct mutex lock;
+ struct clk *clk_delta;
+ struct clk *clk_st231;
+ struct clk *clk_flash_promip;
+ const struct delta_dec *decoders[DELTA_MAX_DECODERS];
+ u32 nb_of_decoders;
+ u32 pixelformats[DELTA_MAX_FORMATS];
+ u32 nb_of_pixelformats;
+ u32 streamformats[DELTA_MAX_FORMATS];
+ u32 nb_of_streamformats;
+ u8 instance_id;
+ struct workqueue_struct *work_queue;
+ struct rpmsg_driver rpmsg_driver;
+ struct rpmsg_device *rpmsg_device;
+};
+
+static inline char *frame_type_str(u32 flags)
+{
+ if (flags & V4L2_BUF_FLAG_KEYFRAME)
+ return "I";
+ if (flags & V4L2_BUF_FLAG_PFRAME)
+ return "P";
+ if (flags & V4L2_BUF_FLAG_BFRAME)
+ return "B";
+ if (flags & V4L2_BUF_FLAG_LAST)
+ return "EOS";
+ return "?";
+}
+
+static inline char *frame_field_str(enum v4l2_field field)
+{
+ if (field == V4L2_FIELD_NONE)
+ return "-";
+ if (field == V4L2_FIELD_TOP)
+ return "T";
+ if (field == V4L2_FIELD_BOTTOM)
+ return "B";
+ if (field == V4L2_FIELD_INTERLACED)
+ return "I";
+ if (field == V4L2_FIELD_INTERLACED_TB)
+ return "TB";
+ if (field == V4L2_FIELD_INTERLACED_BT)
+ return "BT";
+ return "?";
+}
+
+static inline char *frame_state_str(u32 state, char *str, unsigned int len)
+{
+ snprintf(str, len, "%s %s %s %s %s %s",
+ (state & DELTA_FRAME_REF) ? "ref" : " ",
+ (state & DELTA_FRAME_BSY) ? "bsy" : " ",
+ (state & DELTA_FRAME_DEC) ? "dec" : " ",
+ (state & DELTA_FRAME_OUT) ? "out" : " ",
+ (state & DELTA_FRAME_M2M) ? "m2m" : " ",
+ (state & DELTA_FRAME_RDY) ? "rdy" : " ");
+ return str;
+}
+
+int delta_get_frameinfo_default(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+int delta_recycle_default(struct delta_ctx *pctx,
+ struct delta_frame *frame);
+
+int delta_get_free_frame(struct delta_ctx *ctx,
+ struct delta_frame **pframe);
+
+int delta_get_sync(struct delta_ctx *ctx);
+void delta_put_autosuspend(struct delta_ctx *ctx);
+
+#endif /* DELTA_H */
diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
new file mode 100644
index 000000000..e3ebe9684
--- /dev/null
+++ b/drivers/media/platform/sti/hva/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
+st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
+st-hva-$(CONFIG_VIDEO_STI_HVA_DEBUGFS) += hva-debugfs.o
diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/sti/hva/hva-debugfs.c
new file mode 100644
index 000000000..9f7e8ac87
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-debugfs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+static void format_ctx(struct seq_file *s, struct hva_ctx *ctx)
+{
+ struct hva_streaminfo *stream = &ctx->streaminfo;
+ struct hva_frameinfo *frame = &ctx->frameinfo;
+ struct hva_controls *ctrls = &ctx->ctrls;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ u32 bitrate_mode, aspect, entropy, vui_sar, sei_fp;
+
+ seq_printf(s, "|-%s\n |\n", ctx->name);
+
+ seq_printf(s, " |-[%sframe info]\n",
+ ctx->flags & HVA_FLAG_FRAMEINFO ? "" : "default ");
+ seq_printf(s, " | |- pixel format=%4.4s\n"
+ " | |- wxh=%dx%d\n"
+ " | |- wxh (w/ encoder alignment constraint)=%dx%d\n"
+ " |\n",
+ (char *)&frame->pixelformat,
+ frame->width, frame->height,
+ frame->aligned_width, frame->aligned_height);
+
+ seq_printf(s, " |-[%sstream info]\n",
+ ctx->flags & HVA_FLAG_STREAMINFO ? "" : "default ");
+ seq_printf(s, " | |- stream format=%4.4s\n"
+ " | |- wxh=%dx%d\n"
+ " | |- %s\n"
+ " | |- %s\n"
+ " |\n",
+ (char *)&stream->streamformat,
+ stream->width, stream->height,
+ stream->profile, stream->level);
+
+ bitrate_mode = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
+ aspect = V4L2_CID_MPEG_VIDEO_ASPECT;
+ seq_puts(s, " |-[parameters]\n");
+ seq_printf(s, " | |- %s\n"
+ " | |- bitrate=%d bps\n"
+ " | |- GOP size=%d\n"
+ " | |- video aspect=%s\n"
+ " | |- framerate=%d/%d\n",
+ v4l2_ctrl_get_menu(bitrate_mode)[ctrls->bitrate_mode],
+ ctrls->bitrate,
+ ctrls->gop_size,
+ v4l2_ctrl_get_menu(aspect)[ctrls->aspect],
+ ctrls->time_per_frame.denominator,
+ ctrls->time_per_frame.numerator);
+
+ entropy = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
+ vui_sar = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC;
+ sei_fp = V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE;
+ if (stream->streamformat == V4L2_PIX_FMT_H264) {
+ seq_printf(s, " | |- %s entropy mode\n"
+ " | |- CPB size=%d kB\n"
+ " | |- DCT8x8 enable=%s\n"
+ " | |- qpmin=%d\n"
+ " | |- qpmax=%d\n"
+ " | |- PAR enable=%s\n"
+ " | |- PAR id=%s\n"
+ " | |- SEI frame packing enable=%s\n"
+ " | |- SEI frame packing type=%s\n",
+ v4l2_ctrl_get_menu(entropy)[ctrls->entropy_mode],
+ ctrls->cpb_size,
+ ctrls->dct8x8 ? "true" : "false",
+ ctrls->qpmin,
+ ctrls->qpmax,
+ ctrls->vui_sar ? "true" : "false",
+ v4l2_ctrl_get_menu(vui_sar)[ctrls->vui_sar_idc],
+ ctrls->sei_fp ? "true" : "false",
+ v4l2_ctrl_get_menu(sei_fp)[ctrls->sei_fp_type]);
+ }
+
+ if (ctx->sys_errors || ctx->encode_errors || ctx->frame_errors) {
+ seq_puts(s, " |\n |-[errors]\n");
+ seq_printf(s, " | |- system=%d\n"
+ " | |- encoding=%d\n"
+ " | |- frame=%d\n",
+ ctx->sys_errors,
+ ctx->encode_errors,
+ ctx->frame_errors);
+ }
+
+ seq_puts(s, " |\n |-[performances]\n");
+ seq_printf(s, " | |- frames encoded=%d\n"
+ " | |- avg HW processing duration (0.1ms)=%d [min=%d, max=%d]\n"
+ " | |- avg encoding period (0.1ms)=%d [min=%d, max=%d]\n"
+ " | |- avg fps (0.1Hz)=%d\n"
+ " | |- max reachable fps (0.1Hz)=%d\n"
+ " | |- avg bitrate (kbps)=%d [min=%d, max=%d]\n"
+ " | |- last bitrate (kbps)=%d\n",
+ dbg->cnt_duration,
+ dbg->avg_duration,
+ dbg->min_duration,
+ dbg->max_duration,
+ dbg->avg_period,
+ dbg->min_period,
+ dbg->max_period,
+ dbg->avg_fps,
+ dbg->max_fps,
+ dbg->avg_bitrate,
+ dbg->min_bitrate,
+ dbg->max_bitrate,
+ dbg->last_bitrate);
+}
+
+/*
+ * performance debug info
+ */
+void hva_dbg_perf_begin(struct hva_ctx *ctx)
+{
+ u64 div;
+ u32 period;
+ u32 bitrate;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ ktime_t prev = dbg->begin;
+
+ dbg->begin = ktime_get();
+
+ if (dbg->is_valid_period) {
+ /* encoding period */
+ div = (u64)ktime_us_delta(dbg->begin, prev);
+ do_div(div, 100);
+ period = (u32)div;
+ dbg->min_period = min(period, dbg->min_period);
+ dbg->max_period = max(period, dbg->max_period);
+ dbg->total_period += period;
+ dbg->cnt_period++;
+
+ /*
+ * minimum and maximum bitrates are based on the
+ * encoding period values upon a window of 32 samples
+ */
+ dbg->window_duration += period;
+ dbg->cnt_window++;
+ if (dbg->cnt_window >= 32) {
+ /*
+ * bitrate in kbps = (size * 8 / 1000) /
+ * (duration / 10000)
+ * = size * 80 / duration
+ */
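+			/*
+			 * e.g. (illustrative numbers): 100000 bytes produced
+			 * over a 2 s window (20000 units of 0.1 ms) give
+			 * 100000 * 80 / 20000 = 400 kbps.
+			 */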
+ if (dbg->window_duration > 0) {
+ div = (u64)dbg->window_stream_size * 80;
+ do_div(div, dbg->window_duration);
+ bitrate = (u32)div;
+ dbg->last_bitrate = bitrate;
+ dbg->min_bitrate = min(bitrate,
+ dbg->min_bitrate);
+ dbg->max_bitrate = max(bitrate,
+ dbg->max_bitrate);
+ }
+ dbg->window_stream_size = 0;
+ dbg->window_duration = 0;
+ dbg->cnt_window = 0;
+ }
+ }
+
+ /*
+ * filter sequences valid for performance:
+ * - begin/begin (no stream available) is an invalid sequence
+ * - begin/end is a valid sequence
+ */
+ dbg->is_valid_period = false;
+}
+
+void hva_dbg_perf_end(struct hva_ctx *ctx, struct hva_stream *stream)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ u64 div;
+ u32 duration;
+ u32 bytesused;
+ u32 timestamp;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ ktime_t end = ktime_get();
+
+ /* stream bytesused and timestamp in us */
+ bytesused = vb2_get_plane_payload(&stream->vbuf.vb2_buf, 0);
+ div = stream->vbuf.vb2_buf.timestamp;
+ do_div(div, 1000);
+ timestamp = (u32)div;
+
+ /* encoding duration */
+ div = (u64)ktime_us_delta(end, dbg->begin);
+
+ dev_dbg(dev,
+ "%s perf stream[%d] dts=%d encoded using %d bytes in %d us",
+ ctx->name,
+ stream->vbuf.sequence,
+ timestamp,
+ bytesused, (u32)div);
+
+ do_div(div, 100);
+ duration = (u32)div;
+
+ dbg->min_duration = min(duration, dbg->min_duration);
+ dbg->max_duration = max(duration, dbg->max_duration);
+ dbg->total_duration += duration;
+ dbg->cnt_duration++;
+
+ /*
+ * the average bitrate is based on the total stream size
+ * and the total encoding periods
+ */
+ dbg->total_stream_size += bytesused;
+ dbg->window_stream_size += bytesused;
+
+ dbg->is_valid_period = true;
+}
+
+static void hva_dbg_perf_compute(struct hva_ctx *ctx)
+{
+ u64 div;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+
+ if (dbg->cnt_duration > 0) {
+ div = (u64)dbg->total_duration;
+ do_div(div, dbg->cnt_duration);
+ dbg->avg_duration = (u32)div;
+ } else {
+ dbg->avg_duration = 0;
+ }
+
+ if (dbg->total_duration > 0) {
+ div = (u64)dbg->cnt_duration * 100000;
+ do_div(div, dbg->total_duration);
+ dbg->max_fps = (u32)div;
+ } else {
+ dbg->max_fps = 0;
+ }
+
+ if (dbg->cnt_period > 0) {
+ div = (u64)dbg->total_period;
+ do_div(div, dbg->cnt_period);
+ dbg->avg_period = (u32)div;
+ } else {
+ dbg->avg_period = 0;
+ }
+
+ if (dbg->total_period > 0) {
+ div = (u64)dbg->cnt_period * 100000;
+ do_div(div, dbg->total_period);
+ dbg->avg_fps = (u32)div;
+ } else {
+ dbg->avg_fps = 0;
+ }
+
+ if (dbg->total_period > 0) {
+ /*
+ * bitrate in kbps = (video size * 8 / 1000) /
+ * (video duration / 10000)
+ * = video size * 80 / video duration
+ */
+ div = (u64)dbg->total_stream_size * 80;
+ do_div(div, dbg->total_period);
+ dbg->avg_bitrate = (u32)div;
+ } else {
+ dbg->avg_bitrate = 0;
+ }
+}
+
+/*
+ * device debug info
+ */
+
+static int hva_dbg_device(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+
+ seq_printf(s, "[%s]\n", hva->v4l2_dev.name);
+ seq_printf(s, "registered as /dev/video%d\n", hva->vdev->num);
+
+ return 0;
+}
+
+static int hva_dbg_encoders(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+ unsigned int i = 0;
+
+ seq_printf(s, "[encoders]\n|- %d registered encoders:\n",
+ hva->nb_of_encoders);
+
+ while (hva->encoders[i]) {
+ seq_printf(s, "|- %s: %4.4s => %4.4s\n", hva->encoders[i]->name,
+ (char *)&hva->encoders[i]->pixelformat,
+ (char *)&hva->encoders[i]->streamformat);
+ i++;
+ }
+
+ return 0;
+}
+
+static int hva_dbg_last(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+ struct hva_ctx *last_ctx = &hva->dbg.last_ctx;
+
+ if (last_ctx->flags & HVA_FLAG_STREAMINFO) {
+ seq_puts(s, "[last encoding]\n");
+
+ hva_dbg_perf_compute(last_ctx);
+ format_ctx(s, last_ctx);
+ } else {
+ seq_puts(s, "[no information recorded about last encoding]\n");
+ }
+
+ return 0;
+}
+
+static int hva_dbg_regs(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+
+ hva_hw_dump_regs(hva, s);
+
+ return 0;
+}
+
+#define hva_dbg_declare(name) \
+ static int hva_dbg_##name##_open(struct inode *i, struct file *f) \
+ { \
+ return single_open(f, hva_dbg_##name, i->i_private); \
+ } \
+ static const struct file_operations hva_dbg_##name##_fops = { \
+ .open = hva_dbg_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define hva_dbg_create_entry(name) \
+ debugfs_create_file(#name, 0444, hva->dbg.debugfs_entry, hva, \
+ &hva_dbg_##name##_fops)
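+
+/*
+ * For example, hva_dbg_declare(device) expands to hva_dbg_device_open()
+ * plus hva_dbg_device_fops, and hva_dbg_create_entry(device) then creates
+ * a read-only "device" debugfs file backed by hva_dbg_device().
+ */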
+
+hva_dbg_declare(device);
+hva_dbg_declare(encoders);
+hva_dbg_declare(last);
+hva_dbg_declare(regs);
+
+void hva_debugfs_create(struct hva_dev *hva)
+{
+ hva->dbg.debugfs_entry = debugfs_create_dir(HVA_NAME, NULL);
+ if (!hva->dbg.debugfs_entry)
+ goto err;
+
+ if (!hva_dbg_create_entry(device))
+ goto err;
+
+ if (!hva_dbg_create_entry(encoders))
+ goto err;
+
+ if (!hva_dbg_create_entry(last))
+ goto err;
+
+ if (!hva_dbg_create_entry(regs))
+ goto err;
+
+ return;
+
+err:
+ hva_debugfs_remove(hva);
+}
+
+void hva_debugfs_remove(struct hva_dev *hva)
+{
+ debugfs_remove_recursive(hva->dbg.debugfs_entry);
+ hva->dbg.debugfs_entry = NULL;
+}
+
+/*
+ * context (instance) debug info
+ */
+
+static int hva_dbg_ctx(struct seq_file *s, void *data)
+{
+ struct hva_ctx *ctx = s->private;
+
+ seq_printf(s, "[running encoding %d]\n", ctx->id);
+
+ hva_dbg_perf_compute(ctx);
+ format_ctx(s, ctx);
+
+ return 0;
+}
+
+hva_dbg_declare(ctx);
+
+void hva_dbg_ctx_create(struct hva_ctx *ctx)
+{
+ struct hva_dev *hva = ctx->hva_dev;
+ char name[4] = "";
+
+ ctx->dbg.min_duration = UINT_MAX;
+ ctx->dbg.min_period = UINT_MAX;
+ ctx->dbg.min_bitrate = UINT_MAX;
+
+ snprintf(name, sizeof(name), "%d", hva->instance_id);
+
+ ctx->dbg.debugfs_entry = debugfs_create_file(name, 0444,
+ hva->dbg.debugfs_entry,
+ ctx, &hva_dbg_ctx_fops);
+}
+
+void hva_dbg_ctx_remove(struct hva_ctx *ctx)
+{
+ struct hva_dev *hva = ctx->hva_dev;
+
+ if (ctx->flags & HVA_FLAG_STREAMINFO)
+ /* save context before removing */
+ memcpy(&hva->dbg.last_ctx, ctx, sizeof(*ctx));
+
+ debugfs_remove(ctx->dbg.debugfs_entry);
+}
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
new file mode 100644
index 000000000..b61a5d337
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -0,0 +1,1061 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define MAX_SPS_PPS_SIZE 128
+
+#define BITSTREAM_OFFSET_MASK 0x7F
+
+/* video max size */
+#define H264_MAX_SIZE_W 1920
+#define H264_MAX_SIZE_H 1920
+
+/* number of macroblocks (width & height) */
+#define MB_W(w) ((w + 0xF) / 0x10)
+#define MB_H(h) ((h + 0xF) / 0x10)
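+/* e.g. a 1920x1080 frame gives MB_W(1920) x MB_H(1080) = 120 x 68 macroblocks */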
+
+/* formula to get temporal or spatial data size */
+#define DATA_SIZE(w, h) (MB_W(w) * MB_H(h) * 16)
+
+#define SEARCH_WINDOW_BUFFER_MAX_SIZE(w) ((4 * MB_W(w) + 42) * 256 * 3 / 2)
+#define CABAC_CONTEXT_BUFFER_MAX_SIZE(w) (MB_W(w) * 16)
+#define CTX_MB_BUFFER_MAX_SIZE(w) (MB_W(w) * 16 * 8)
+#define SLICE_HEADER_SIZE (4 * 16)
+#define BRC_DATA_SIZE (5 * 16)
+
+/* source buffer copy in YUV 420 MB-tiled format with size=16*256*3/2 */
+#define CURRENT_WINDOW_BUFFER_MAX_SIZE (16 * 256 * 3 / 2)
+
+/*
+ * 4 lines of pixels (in Luma, Chroma blue and Chroma red) of top MB
+ * for deblocking with size=4*16*MBx*2
+ */
+#define LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(w) (4 * 16 * MB_W(w) * 2)
+
+/* factor for bitrate and cpb buffer size max values if profile >= high */
+#define H264_FACTOR_HIGH 1200
+
+/* factor for bitrate and cpb buffer size max values if profile < high */
+#define H264_FACTOR_BASELINE 1000
+
+/* number of bytes for NALU_TYPE_FILLER_DATA header and footer */
+#define H264_FILLER_DATA_SIZE 6
+
+struct h264_profile {
+ enum v4l2_mpeg_video_h264_level level;
+ u32 max_mb_per_seconds;
+ u32 max_frame_size;
+ u32 max_bitrate;
+ u32 max_cpb_size;
+ u32 min_comp_ratio;
+};
+
+static const struct h264_profile h264_infos_list[] = {
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_0, 1485, 99, 64, 175, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1B, 1485, 99, 128, 350, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_1, 3000, 396, 192, 500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_2, 6000, 396, 384, 1000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_3, 11880, 396, 768, 2000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_0, 11880, 396, 2000, 2000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_1, 19800, 792, 4000, 4000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_2, 20250, 1620, 4000, 4000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_0, 40500, 1620, 10000, 10000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_1, 108000, 3600, 14000, 14000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_2, 216000, 5120, 20000, 20000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 245760, 8192, 20000, 25000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_1, 245760, 8192, 50000, 62500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_2, 522240, 8704, 50000, 62500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 589824, 22080, 135000, 135000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 983040, 36864, 240000, 240000, 2}
+};
+
+enum hva_brc_type {
+ BRC_TYPE_NONE = 0,
+ BRC_TYPE_CBR = 1,
+ BRC_TYPE_VBR = 2,
+ BRC_TYPE_VBR_LOW_DELAY = 3
+};
+
+enum hva_entropy_coding_mode {
+ CAVLC = 0,
+ CABAC = 1
+};
+
+enum hva_picture_coding_type {
+ PICTURE_CODING_TYPE_I = 0,
+ PICTURE_CODING_TYPE_P = 1,
+ PICTURE_CODING_TYPE_B = 2
+};
+
+enum hva_h264_sampling_mode {
+ SAMPLING_MODE_NV12 = 0,
+ SAMPLING_MODE_UYVY = 1,
+ SAMPLING_MODE_RGB3 = 3,
+ SAMPLING_MODE_XRGB4 = 4,
+ SAMPLING_MODE_NV21 = 8,
+ SAMPLING_MODE_VYUY = 9,
+ SAMPLING_MODE_BGR3 = 11,
+ SAMPLING_MODE_XBGR4 = 12,
+ SAMPLING_MODE_RGBX4 = 20,
+ SAMPLING_MODE_BGRX4 = 28
+};
+
+enum hva_h264_nalu_type {
+ NALU_TYPE_UNKNOWN = 0,
+ NALU_TYPE_SLICE = 1,
+ NALU_TYPE_SLICE_DPA = 2,
+ NALU_TYPE_SLICE_DPB = 3,
+ NALU_TYPE_SLICE_DPC = 4,
+ NALU_TYPE_SLICE_IDR = 5,
+ NALU_TYPE_SEI = 6,
+ NALU_TYPE_SPS = 7,
+ NALU_TYPE_PPS = 8,
+ NALU_TYPE_AU_DELIMITER = 9,
+ NALU_TYPE_SEQ_END = 10,
+ NALU_TYPE_STREAM_END = 11,
+ NALU_TYPE_FILLER_DATA = 12,
+ NALU_TYPE_SPS_EXT = 13,
+ NALU_TYPE_PREFIX_UNIT = 14,
+ NALU_TYPE_SUBSET_SPS = 15,
+ NALU_TYPE_SLICE_AUX = 19,
+ NALU_TYPE_SLICE_EXT = 20
+};
+
+enum hva_h264_sei_payload_type {
+ SEI_BUFFERING_PERIOD = 0,
+ SEI_PICTURE_TIMING = 1,
+ SEI_STEREO_VIDEO_INFO = 21,
+ SEI_FRAME_PACKING_ARRANGEMENT = 45
+};
+
+/*
+ * stereo Video Info struct
+ */
+struct hva_h264_stereo_video_sei {
+ u8 field_views_flag;
+ u8 top_field_is_left_view_flag;
+ u8 current_frame_is_left_view_flag;
+ u8 next_frame_is_second_view_flag;
+ u8 left_view_self_contained_flag;
+ u8 right_view_self_contained_flag;
+};
+
+/*
+ * struct hva_h264_td
+ *
+ * @frame_width: width in pixels of the buffer containing the input frame
+ * @frame_height: height in pixels of the buffer containing the input frame
+ * @frame_num: the parameter to be written in the slice header
+ * @picture_coding_type: type I, P or B
+ * @pic_order_cnt_type: POC mode, as defined in H264 std : can be 0,1,2
+ * @first_picture_in_sequence: flag telling to encoder that this is the
+ * first picture in a video sequence.
+ * Used for VBR
+ * @slice_size_type: 0 = no constraint to close the slice
+ * 1= a slice is closed as soon as the slice_mb_size limit
+ * is reached
+ * 2= a slice is closed as soon as the slice_byte_size limit
+ * is reached
+ * 3= a slice is closed as soon as either the slice_byte_size
+ * limit or the slice_mb_size limit is reached
+ * @slice_mb_size: defines the slice size in number of macroblocks
+ * (used when slice_size_type=1 or slice_size_type=3)
+ * @ir_param_option: defines the number of macroblocks per frame to be
+ * refreshed by AIR algorithm OR the refresh period
+ * by CIR algorithm
+ * @intra_refresh_type: enables the adaptive intra refresh algorithm.
+ *			Disable=0 / Adaptive=1 / Cycle=2 as intra refresh
+ * @use_constrained_intra_flag: constrained_intra_pred_flag from PPS
+ * @transform_mode: controls the use of 4x4/8x8 transform mode
+ * @disable_deblocking_filter_idc:
+ * 0: specifies that all luma and chroma block edges of
+ * the slice are filtered.
+ * 1: specifies that deblocking is disabled for all block
+ * edges of the slice.
+ * 2: specifies that all luma and chroma block edges of
+ * the slice are filtered with exception of the block edges
+ * that coincide with slice boundaries
+ * @slice_alpha_c0_offset_div2: to be written in slice header,
+ * controls deblocking
+ * @slice_beta_offset_div2: to be written in slice header,
+ * controls deblocking
+ * @encoder_complexity: encoder complexity control (IME).
+ * 0 = I_16x16, P_16x16, Full ME Complexity
+ * 1 = I_16x16, I_NxN, P_16x16, Full ME Complexity
+ * 2 = I_16x16, I_NXN, P_16x16, P_WxH, Full ME Complexity
+ * 4 = I_16x16, P_16x16, Reduced ME Complexity
+ * 5 = I_16x16, I_NxN, P_16x16, Reduced ME Complexity
+ * 6 = I_16x16, I_NXN, P_16x16, P_WxH, Reduced ME Complexity
+ * @chroma_qp_index_offset: coming from picture parameter set
+ * (PPS see [H.264 STD] 7.4.2.2)
+ * @entropy_coding_mode: entropy coding mode.
+ * 0 = CAVLC
+ * 1 = CABAC
+ * @brc_type: selects the bit-rate control algorithm
+ * 0 = constant Qp, (no BRC)
+ * 1 = CBR
+ * 2 = VBR
+ * @quant: Quantization param used in case of fixed QP encoding (no BRC)
+ * @non_VCL_NALU_Size: size of non-VCL NALUs (SPS, PPS, filler),
+ * used by BRC
+ * @cpb_buffer_size: size of Coded Picture Buffer, used by BRC
+ * @bit_rate: target bitrate, for BRC
+ * @qp_min: min QP threshold
+ * @qp_max: max QP threshold
+ * @framerate_num: target framerate numerator, used by BRC
+ * @framerate_den: target framerate denominator, used by BRC
+ * @delay: End-to-End Initial Delay
+ * @strict_HRD_compliancy: flag for strict HRD compliance (1);
+ *                         may impact encoding quality
+ * @addr_source_buffer: address of input frame buffer for current frame
+ * @addr_fwd_Ref_Buffer: address of reference frame buffer
+ * @addr_rec_buffer: address of reconstructed frame buffer
+ * @addr_output_bitstream_start: output bitstream start address
+ * @addr_output_bitstream_end: output bitstream end address
+ * @addr_external_sw: address of external search window
+ * @addr_lctx: address of context picture buffer
+ * @addr_local_rec_buffer: address of local reconstructed buffer
+ * @addr_spatial_context: address of spatial context buffer
+ * @bitstream_offset: offset in bits between aligned bitstream start
+ * address and first bit to be written by HVA.
+ * Range value is [0..63]
+ * @sampling_mode: Input picture format.
+ * 0: YUV420 semi_planar Interleaved
+ * 1: YUV422 raster Interleaved
+ * @addr_param_out: address of output parameters structure
+ * @addr_scaling_matrix: address to the coefficient of
+ * the inverse scaling matrix
+ * @addr_scaling_matrix_dir: address to the coefficient of
+ * the direct scaling matrix
+ * @addr_cabac_context_buffer: address of cabac context buffer
+ * @GmvX: Input information about the horizontal global displacement of
+ * the encoded frame versus the previous one
+ * @GmvY: Input information about the vertical global displacement of
+ * the encoded frame versus the previous one
+ * @window_width: width in pixels of the window to be encoded inside
+ * the input frame
+ * @window_height: height in pixels of the window to be encoded inside
+ * the input frame
+ * @window_horizontal_offset: horizontal offset in pels for input window
+ * within input frame
+ * @window_vertical_offset: vertical offset in pels for input window
+ * within input frame
+ * @addr_roi: Map of QP offset for the Region of Interest algorithm and
+ * also used for Error map.
+ * Bit 0-6 used for qp offset (value -64 to 63).
+ * Bit 7 used to force intra
+ * @addr_slice_header: address to slice header
+ * @slice_header_size_in_bits: size in bits of the Slice header
+ * @slice_header_offset0: Slice header offset where to insert
+ * first_Mb_in_slice
+ * @slice_header_offset1: Slice header offset where to insert
+ * slice_qp_delta
+ * @slice_header_offset2: Slice header offset where to insert
+ * num_MBs_in_slice
+ * @slice_synchro_enable: enable "slice ready" interrupt after each slice
+ * @max_slice_number: Maximum number of slice in a frame
+ * (0 is strictly forbidden)
+ * @rgb2_yuv_y_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ *                    YUV for the Y component.
+ *                    Y = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_u_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ *                    YUV for the U (Cb) component.
+ *                    U = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_v_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ *                    YUV for the V (Cr) component.
+ *                    V = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @slice_byte_size: maximum slice size in bytes
+ * (used when slice_size_type=2 or slice_size_type=3)
+ * @max_air_intra_mb_nb: Maximum number of intra macroblock in a frame
+ * for the AIR algorithm
+ * @brc_no_skip: Disable skipping in the Bitrate Controller
+ * @addr_brc_in_out_parameter: address of static buffer for BRC parameters
+ */
+struct hva_h264_td {
+ u16 frame_width;
+ u16 frame_height;
+ u32 frame_num;
+ u16 picture_coding_type;
+ u16 reserved1;
+ u16 pic_order_cnt_type;
+ u16 first_picture_in_sequence;
+ u16 slice_size_type;
+ u16 reserved2;
+ u32 slice_mb_size;
+ u16 ir_param_option;
+ u16 intra_refresh_type;
+ u16 use_constrained_intra_flag;
+ u16 transform_mode;
+ u16 disable_deblocking_filter_idc;
+ s16 slice_alpha_c0_offset_div2;
+ s16 slice_beta_offset_div2;
+ u16 encoder_complexity;
+ s16 chroma_qp_index_offset;
+ u16 entropy_coding_mode;
+ u16 brc_type;
+ u16 quant;
+ u32 non_vcl_nalu_size;
+ u32 cpb_buffer_size;
+ u32 bit_rate;
+ u16 qp_min;
+ u16 qp_max;
+ u16 framerate_num;
+ u16 framerate_den;
+ u16 delay;
+ u16 strict_hrd_compliancy;
+ u32 addr_source_buffer;
+ u32 addr_fwd_ref_buffer;
+ u32 addr_rec_buffer;
+ u32 addr_output_bitstream_start;
+ u32 addr_output_bitstream_end;
+ u32 addr_external_sw;
+ u32 addr_lctx;
+ u32 addr_local_rec_buffer;
+ u32 addr_spatial_context;
+ u16 bitstream_offset;
+ u16 sampling_mode;
+ u32 addr_param_out;
+ u32 addr_scaling_matrix;
+ u32 addr_scaling_matrix_dir;
+ u32 addr_cabac_context_buffer;
+ u32 reserved3;
+ u32 reserved4;
+ s16 gmv_x;
+ s16 gmv_y;
+ u16 window_width;
+ u16 window_height;
+ u16 window_horizontal_offset;
+ u16 window_vertical_offset;
+ u32 addr_roi;
+ u32 addr_slice_header;
+ u16 slice_header_size_in_bits;
+ u16 slice_header_offset0;
+ u16 slice_header_offset1;
+ u16 slice_header_offset2;
+ u32 reserved5;
+ u32 reserved6;
+ u16 reserved7;
+ u16 reserved8;
+ u16 slice_synchro_enable;
+ u16 max_slice_number;
+ u32 rgb2_yuv_y_coeff;
+ u32 rgb2_yuv_u_coeff;
+ u32 rgb2_yuv_v_coeff;
+ u32 slice_byte_size;
+ u16 max_air_intra_mb_nb;
+ u16 brc_no_skip;
+ u32 addr_temporal_context;
+ u32 addr_brc_in_out_parameter;
+};
+
+/*
+ * struct hva_h264_slice_po
+ *
+ * @slice_size: slice size
+ * @slice_start_time: start time
+ * @slice_end_time: end time
+ * @slice_num: slice number
+ */
+struct hva_h264_slice_po {
+ u32 slice_size;
+ u32 slice_start_time;
+ u32 slice_end_time;
+ u32 slice_num;
+};
+
+/*
+ * struct hva_h264_po
+ *
+ * @bitstream_size: bitstream size
+ * @dct_bitstream_size: dct bitstream size
+ * @stuffing_bits: number of stuffing bits inserted by the encoder
+ * @removal_time: removal time of current frame (nb of ticks 1/framerate)
+ * @hvc_start_time: hvc start time
+ * @hvc_stop_time: hvc stop time
+ * @slice_count: slice count
+ */
+struct hva_h264_po {
+ u32 bitstream_size;
+ u32 dct_bitstream_size;
+ u32 stuffing_bits;
+ u32 removal_time;
+ u32 hvc_start_time;
+ u32 hvc_stop_time;
+ u32 slice_count;
+ u32 reserved0;
+ struct hva_h264_slice_po slice_params[16];
+};
+
+struct hva_h264_task {
+ struct hva_h264_td td;
+ struct hva_h264_po po;
+};
+
+/*
+ * struct hva_h264_ctx
+ *
+ * @seq_info: sequence information buffer
+ * @ref_frame: reference frame buffer
+ * @rec_frame: reconstructed frame buffer
+ * @task: task descriptor
+ */
+struct hva_h264_ctx {
+ struct hva_buffer *seq_info;
+ struct hva_buffer *ref_frame;
+ struct hva_buffer *rec_frame;
+ struct hva_buffer *task;
+};
+
+static int hva_h264_fill_slice_header(struct hva_ctx *pctx,
+ u8 *slice_header_addr,
+ struct hva_controls *ctrls,
+ int frame_num,
+ u16 *header_size,
+ u16 *header_offset0,
+ u16 *header_offset1,
+ u16 *header_offset2)
+{
+ /*
+	 * with this HVA hardware version, part of the slice header is computed
+	 * on the host and part by the hardware.
+	 * The host part is precomputed and available through this array.
+ */
+ struct device *dev = ctx_to_dev(pctx);
+ int cabac = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC;
+ const unsigned char slice_header[] = { 0x00, 0x00, 0x00, 0x01,
+ 0x41, 0x34, 0x07, 0x00};
+ int idr_pic_id = frame_num % 2;
+ enum hva_picture_coding_type type;
+ u32 frame_order = frame_num % ctrls->gop_size;
+
+ if (!(frame_num % ctrls->gop_size))
+ type = PICTURE_CODING_TYPE_I;
+ else
+ type = PICTURE_CODING_TYPE_P;
+
+ memcpy(slice_header_addr, slice_header, sizeof(slice_header));
+
+ *header_size = 56;
+ *header_offset0 = 40;
+ *header_offset1 = 13;
+ *header_offset2 = 0;
+
+ if (type == PICTURE_CODING_TYPE_I) {
+ slice_header_addr[4] = 0x65;
+ slice_header_addr[5] = 0x11;
+
+ /* toggle the I frame */
+ if ((frame_num / ctrls->gop_size) % 2) {
+ *header_size += 4;
+ *header_offset1 += 4;
+ slice_header_addr[6] = 0x04;
+ slice_header_addr[7] = 0x70;
+
+ } else {
+ *header_size += 2;
+ *header_offset1 += 2;
+ slice_header_addr[6] = 0x09;
+ slice_header_addr[7] = 0xC0;
+ }
+ } else {
+ if (ctrls->entropy_mode == cabac) {
+ *header_size += 1;
+ *header_offset1 += 1;
+ slice_header_addr[7] = 0x80;
+ }
+ /*
+ * update slice header with P frame order
+ * frame order is limited to 16 (coded on 4bits only)
+ */
+ slice_header_addr[5] += ((frame_order & 0x0C) >> 2);
+ slice_header_addr[6] += ((frame_order & 0x03) << 6);
+ }
+
+ dev_dbg(dev,
+ "%s %s slice header order %d idrPicId %d header size %d\n",
+ pctx->name, __func__, frame_order, idr_pic_id, *header_size);
+ return 0;
+}
+
+static int hva_h264_fill_data_nal(struct hva_ctx *pctx,
+ unsigned int stuffing_bytes, u8 *addr,
+ unsigned int stream_size, unsigned int *size)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+
+ dev_dbg(dev, "%s %s stuffing bytes %d\n", pctx->name, __func__,
+ stuffing_bytes);
+
+ if ((*size + stuffing_bytes + H264_FILLER_DATA_SIZE) > stream_size) {
+ dev_dbg(dev, "%s %s too many stuffing bytes %d\n",
+ pctx->name, __func__, stuffing_bytes);
+ return 0;
+ }
+
+ /* start code */
+ memcpy(addr + *size, start, sizeof(start));
+ *size += sizeof(start);
+
+ /* nal_unit_type */
+ addr[*size] = NALU_TYPE_FILLER_DATA;
+ *size += 1;
+
+ memset(addr + *size, 0xff, stuffing_bytes);
+ *size += stuffing_bytes;
+
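+ /* rbsp_trailing_bits: stop bit (0x80) closing the NAL unit */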
+ addr[*size] = 0x80;
+ *size += 1;
+
+ return 0;
+}
+
+static int hva_h264_fill_sei_nal(struct hva_ctx *pctx,
+ enum hva_h264_sei_payload_type type,
+ u8 *addr, u32 *size)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+ struct hva_h264_stereo_video_sei info;
+ u8 offset = 7;
+ u8 msg = 0;
+
+ /* start code */
+ memcpy(addr + *size, start, sizeof(start));
+ *size += sizeof(start);
+
+ /* nal_unit_type */
+ addr[*size] = NALU_TYPE_SEI;
+ *size += 1;
+
+ /* payload type */
+ addr[*size] = type;
+ *size += 1;
+
+ switch (type) {
+ case SEI_STEREO_VIDEO_INFO:
+ memset(&info, 0, sizeof(info));
+
+ /* set to top/bottom frame packing arrangement */
+ info.field_views_flag = 1;
+ info.top_field_is_left_view_flag = 1;
+
+ /* payload size */
+ addr[*size] = 1;
+ *size += 1;
+
+ /* payload */
+ msg = info.field_views_flag << offset--;
+
+ if (info.field_views_flag) {
+ msg |= info.top_field_is_left_view_flag <<
+ offset--;
+ } else {
+ msg |= info.current_frame_is_left_view_flag <<
+ offset--;
+ msg |= info.next_frame_is_second_view_flag <<
+ offset--;
+ }
+ msg |= info.left_view_self_contained_flag << offset--;
+ msg |= info.right_view_self_contained_flag << offset--;
+
+ addr[*size] = msg;
+ *size += 1;
+
+ addr[*size] = 0x80;
+ *size += 1;
+
+ return 0;
+ case SEI_BUFFERING_PERIOD:
+ case SEI_PICTURE_TIMING:
+ case SEI_FRAME_PACKING_ARRANGEMENT:
+ default:
+ dev_err(dev, "%s sei nal type not supported %d\n",
+ pctx->name, type);
+ return -EINVAL;
+ }
+}
+
+static int hva_h264_prepare_task(struct hva_ctx *pctx,
+ struct hva_h264_task *task,
+ struct hva_frame *frame,
+ struct hva_stream *stream)
+{
+ struct hva_dev *hva = ctx_to_hdev(pctx);
+ struct device *dev = ctx_to_dev(pctx);
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_buffer *seq_info = ctx->seq_info;
+ struct hva_buffer *fwd_ref_frame = ctx->ref_frame;
+ struct hva_buffer *loc_rec_frame = ctx->rec_frame;
+ struct hva_h264_td *td = &task->td;
+ struct hva_controls *ctrls = &pctx->ctrls;
+ struct v4l2_fract *time_per_frame = &pctx->ctrls.time_per_frame;
+ int cavlc = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
+ u32 frame_num = pctx->stream_num;
+ u32 addr_esram = hva->esram_addr;
+ enum v4l2_mpeg_video_h264_level level;
+ dma_addr_t paddr = 0;
+ u8 *slice_header_vaddr;
+ u32 frame_width = frame->info.aligned_width;
+ u32 frame_height = frame->info.aligned_height;
+ u32 max_cpb_buffer_size;
+ unsigned int payload = stream->bytesused;
+ u32 max_bitrate;
+
+ /* check width and height parameters */
+ if ((frame_width > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H)) ||
+ (frame_height > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H))) {
+ dev_err(dev,
+ "%s width(%d) or height(%d) exceeds limits (%dx%d)\n",
+ pctx->name, frame_width, frame_height,
+ H264_MAX_SIZE_W, H264_MAX_SIZE_H);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ level = ctrls->level;
+
+ memset(td, 0, sizeof(struct hva_h264_td));
+
+ td->frame_width = frame_width;
+ td->frame_height = frame_height;
+
+ /* set frame alignment */
+ td->window_width = frame_width;
+ td->window_height = frame_height;
+ td->window_horizontal_offset = 0;
+ td->window_vertical_offset = 0;
+
+ td->first_picture_in_sequence = (!frame_num) ? 1 : 0;
+
+ /* pic_order_cnt_type hard coded to '2' as only I & P frames */
+ td->pic_order_cnt_type = 2;
+
+ /* useConstrainedIntraFlag set to false for better coding efficiency */
+ td->use_constrained_intra_flag = false;
+ td->brc_type = (ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ ? BRC_TYPE_CBR : BRC_TYPE_VBR;
+
+ td->entropy_coding_mode = (ctrls->entropy_mode == cavlc) ? CAVLC :
+ CABAC;
+
+ td->bit_rate = ctrls->bitrate;
+
+ /* set framerate: framerate = 1 / time_per_frame */
+ if (time_per_frame->numerator >= 536) {
+ /*
+ * due to a hardware bug, framerate denominator can't exceed
+ * 536 (BRC overflow). Compute nearest framerate
+ */
+ td->framerate_den = 1;
+ td->framerate_num = (time_per_frame->denominator +
+ (time_per_frame->numerator >> 1) - 1) /
+ time_per_frame->numerator;
+
+ /*
+ * update bitrate to introduce a correction due to
+ * the new framerate
+ * new bitrate = (old bitrate * new framerate) / old framerate
+ */
+ td->bit_rate /= time_per_frame->numerator;
+ td->bit_rate *= time_per_frame->denominator;
+ td->bit_rate /= td->framerate_num;
+ } else {
+ td->framerate_den = time_per_frame->numerator;
+ td->framerate_num = time_per_frame->denominator;
+ }
+
+ /* compute maximum bitrate depending on profile */
+ if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ max_bitrate = h264_infos_list[level].max_bitrate *
+ H264_FACTOR_HIGH;
+ else
+ max_bitrate = h264_infos_list[level].max_bitrate *
+ H264_FACTOR_BASELINE;
+
+ /* check if bitrate doesn't exceed max size */
+ if (td->bit_rate > max_bitrate) {
+ dev_dbg(dev,
+ "%s bitrate (%d) larger than level and profile allow, clip to %d\n",
+ pctx->name, td->bit_rate, max_bitrate);
+ td->bit_rate = max_bitrate;
+ }
+
+ /* convert cpb_buffer_size from kilobytes (V4L2 unit) to bits */
+ td->cpb_buffer_size = ctrls->cpb_size * 8000;
+
+ /* compute maximum cpb buffer size depending on profile */
+ if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ max_cpb_buffer_size =
+ h264_infos_list[level].max_cpb_size * H264_FACTOR_HIGH;
+ else
+ max_cpb_buffer_size =
+ h264_infos_list[level].max_cpb_size * H264_FACTOR_BASELINE;
+
+ /* check if cpb buffer size doesn't exceed max size */
+ if (td->cpb_buffer_size > max_cpb_buffer_size) {
+ dev_dbg(dev,
+ "%s cpb size larger than level %d allows, clip to %d\n",
+ pctx->name, td->cpb_buffer_size, max_cpb_buffer_size);
+ td->cpb_buffer_size = max_cpb_buffer_size;
+ }
+
+ /* enable skipping in the Bitrate Controller */
+ td->brc_no_skip = 0;
+
+ /* initial delay (ms): time to fill the CPB at the target bitrate (CBR only) */
+ if ((ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) &&
+ td->bit_rate)
+ td->delay = 1000 * (td->cpb_buffer_size / td->bit_rate);
+ else
+ td->delay = 0;
+
+ switch (frame->info.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ td->sampling_mode = SAMPLING_MODE_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ td->sampling_mode = SAMPLING_MODE_NV21;
+ break;
+ default:
+ dev_err(dev, "%s invalid source pixel format\n",
+ pctx->name);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ /*
+ * fill matrix color converter (RGB to YUV)
+ * Y = 0.299 R + 0.587 G + 0.114 B
+ * Cb = -0.1687 R - 0.3313 G + 0.5 B + 128
+ * Cr = 0.5 R - 0.4187 G - 0.0813 B + 128
+ */
+ td->rgb2_yuv_y_coeff = 0x12031008;
+ td->rgb2_yuv_u_coeff = 0x800EF7FB;
+ td->rgb2_yuv_v_coeff = 0x80FEF40E;
+
+ /* enable/disable transform mode */
+ td->transform_mode = ctrls->dct8x8;
+
+ /* encoder complexity fixed to 2: ENCODE_I_16x16_I_NxN_P_16x16_P_WxH */
+ td->encoder_complexity = 2;
+
+ /* quant fixed to 28, the default VBR value */
+ td->quant = 28;
+
+ if (td->framerate_den == 0) {
+ dev_err(dev, "%s invalid framerate\n", pctx->name);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ /* if automatic framerate, deactivate bitrate controller */
+ if (td->framerate_num == 0)
+ td->brc_type = 0;
+
+ /* strict HRD compliance fixed to true */
+ td->strict_hrd_compliancy = 1;
+
+ /* set minimum & maximum quantizers */
+ td->qp_min = clamp_val(ctrls->qpmin, 0, 51);
+ td->qp_max = clamp_val(ctrls->qpmax, 0, 51);
+
+ td->addr_source_buffer = frame->paddr;
+ td->addr_fwd_ref_buffer = fwd_ref_frame->paddr;
+ td->addr_rec_buffer = loc_rec_frame->paddr;
+
+ td->addr_output_bitstream_end = (u32)stream->paddr + stream->size;
+
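+ /*
+ * the low 4 bits of the bitstream start address are also passed to the
+ * hardware as a bit offset (1 byte = 8 bits)
+ */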
+ td->addr_output_bitstream_start = (u32)stream->paddr;
+ td->bitstream_offset = (((u32)stream->paddr & 0xF) << 3) &
+ BITSTREAM_OFFSET_MASK;
+
+ td->addr_param_out = (u32)ctx->task->paddr +
+ offsetof(struct hva_h264_task, po);
+
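+ /*
+ * the sequence info buffer is partitioned into the spatial context,
+ * the temporal context, the BRC in/out parameters and the slice
+ * header, each area aligned on a 256-byte boundary
+ */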
+ /* swap spatial and temporal context */
+ if (frame_num % 2) {
+ paddr = seq_info->paddr;
+ td->addr_spatial_context = ALIGN(paddr, 0x100);
+ paddr = seq_info->paddr + DATA_SIZE(frame_width,
+ frame_height);
+ td->addr_temporal_context = ALIGN(paddr, 0x100);
+ } else {
+ paddr = seq_info->paddr;
+ td->addr_temporal_context = ALIGN(paddr, 0x100);
+ paddr = seq_info->paddr + DATA_SIZE(frame_width,
+ frame_height);
+ td->addr_spatial_context = ALIGN(paddr, 0x100);
+ }
+
+ paddr = seq_info->paddr + 2 * DATA_SIZE(frame_width, frame_height);
+
+ td->addr_brc_in_out_parameter = ALIGN(paddr, 0x100);
+
+ paddr = td->addr_brc_in_out_parameter + BRC_DATA_SIZE;
+ td->addr_slice_header = ALIGN(paddr, 0x100);
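+
+ /*
+ * carve the on-chip esram into the search window, the local
+ * reconstructed buffer, the macroblock context and the CABAC
+ * context buffers (total size is checked in hva_h264_open())
+ */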
+ td->addr_external_sw = ALIGN(addr_esram, 0x100);
+
+ addr_esram += SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width);
+ td->addr_local_rec_buffer = ALIGN(addr_esram, 0x100);
+
+ addr_esram += LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width);
+ td->addr_lctx = ALIGN(addr_esram, 0x100);
+
+ addr_esram += CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height));
+ td->addr_cabac_context_buffer = ALIGN(addr_esram, 0x100);
+
+ if (!(frame_num % ctrls->gop_size)) {
+ td->picture_coding_type = PICTURE_CODING_TYPE_I;
+ stream->vbuf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ } else {
+ td->picture_coding_type = PICTURE_CODING_TYPE_P;
+ stream->vbuf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ /* fill the slice header part */
+ slice_header_vaddr = seq_info->vaddr + (td->addr_slice_header -
+ seq_info->paddr);
+
+ hva_h264_fill_slice_header(pctx, slice_header_vaddr, ctrls, frame_num,
+ &td->slice_header_size_in_bits,
+ &td->slice_header_offset0,
+ &td->slice_header_offset1,
+ &td->slice_header_offset2);
+
+ td->chroma_qp_index_offset = 2;
+ td->slice_synchro_enable = 0;
+ td->max_slice_number = 1;
+
+ /*
+ * check the sps/pps header size for key frames only;
+ * the sps/pps header was previously filled in by libv4l
+ * during qbuf of the stream buffer
+ */
+ if ((stream->vbuf.flags == V4L2_BUF_FLAG_KEYFRAME) &&
+ (payload > MAX_SPS_PPS_SIZE)) {
+ dev_err(dev, "%s invalid sps/pps size %d\n", pctx->name,
+ payload);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ if (stream->vbuf.flags != V4L2_BUF_FLAG_KEYFRAME)
+ payload = 0;
+
+ /* add SEI nal (video stereo info) */
+ if (ctrls->sei_fp && hva_h264_fill_sei_nal(pctx, SEI_STEREO_VIDEO_INFO,
+ (u8 *)stream->vaddr,
+ &payload)) {
+ dev_err(dev, "%s fail to get SEI nal\n", pctx->name);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ /* fill size of non-VCL NAL units (SPS, PPS, filler and SEI) */
+ td->non_vcl_nalu_size = payload * 8;
+
+ /*
+ * compute new bitstream start address & offset: skip the 16-byte
+ * aligned part of the already written payload and express the
+ * remainder as an additional bit offset
+ */
+ td->addr_output_bitstream_start += ((payload >> 4) << 4);
+ td->bitstream_offset += (payload - ((payload >> 4) << 4)) * 8;
+
+ stream->bytesused = payload;
+
+ return 0;
+}
+
+static unsigned int hva_h264_get_stream_size(struct hva_h264_task *task)
+{
+ struct hva_h264_po *po = &task->po;
+
+ return po->bitstream_size;
+}
+
+static u32 hva_h264_get_stuffing_bytes(struct hva_h264_task *task)
+{
+ struct hva_h264_po *po = &task->po;
+
+ return po->stuffing_bits >> 3;
+}
+
+static int hva_h264_open(struct hva_ctx *pctx)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ struct hva_h264_ctx *ctx;
+ struct hva_dev *hva = ctx_to_hdev(pctx);
+ u32 frame_width = pctx->frameinfo.aligned_width;
+ u32 frame_height = pctx->frameinfo.aligned_height;
+ u32 size;
+ int ret;
+
+ /* check esram size necessary to encode a frame */
+ size = SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width) +
+ LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width) +
+ CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height)) +
+ CABAC_CONTEXT_BUFFER_MAX_SIZE(frame_width);
+
+ if (hva->esram_size < size) {
+ dev_err(dev, "%s not enough esram (max:%d request:%d)\n",
+ pctx->name, hva->esram_size, size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* allocate context for codec */
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* allocate sequence info buffer */
+ ret = hva_mem_alloc(pctx,
+ 2 * DATA_SIZE(frame_width, frame_height) +
+ SLICE_HEADER_SIZE +
+ BRC_DATA_SIZE,
+ "hva sequence info",
+ &ctx->seq_info);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate sequence info buffer\n",
+ pctx->name);
+ goto err_ctx;
+ }
+
+ /* allocate reference frame buffer */
+ ret = hva_mem_alloc(pctx,
+ frame_width * frame_height * 3 / 2,
+ "hva reference frame",
+ &ctx->ref_frame);
+ if (ret) {
+ dev_err(dev, "%s failed to allocate reference frame buffer\n",
+ pctx->name);
+ goto err_seq_info;
+ }
+
+ /* allocate reconstructed frame buffer */
+ ret = hva_mem_alloc(pctx,
+ frame_width * frame_height * 3 / 2,
+ "hva reconstructed frame",
+ &ctx->rec_frame);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate reconstructed frame buffer\n",
+ pctx->name);
+ goto err_ref_frame;
+ }
+
+ /* allocate task descriptor */
+ ret = hva_mem_alloc(pctx,
+ sizeof(struct hva_h264_task),
+ "hva task descriptor",
+ &ctx->task);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate task descriptor\n",
+ pctx->name);
+ goto err_rec_frame;
+ }
+
+ pctx->priv = (void *)ctx;
+
+ return 0;
+
+err_rec_frame:
+ hva_mem_free(pctx, ctx->rec_frame);
+err_ref_frame:
+ hva_mem_free(pctx, ctx->ref_frame);
+err_seq_info:
+ hva_mem_free(pctx, ctx->seq_info);
+err_ctx:
+ devm_kfree(dev, ctx);
+err:
+ pctx->sys_errors++;
+ return ret;
+}
+
+static int hva_h264_close(struct hva_ctx *pctx)
+{
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct device *dev = ctx_to_dev(pctx);
+
+ if (ctx->seq_info)
+ hva_mem_free(pctx, ctx->seq_info);
+
+ if (ctx->ref_frame)
+ hva_mem_free(pctx, ctx->ref_frame);
+
+ if (ctx->rec_frame)
+ hva_mem_free(pctx, ctx->rec_frame);
+
+ if (ctx->task)
+ hva_mem_free(pctx, ctx->task);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
+ struct hva_stream *stream)
+{
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
+ u32 stuffing_bytes = 0;
+ int ret = 0;
+
+ ret = hva_h264_prepare_task(pctx, task, frame, stream);
+ if (ret)
+ goto err;
+
+ ret = hva_hw_execute_task(pctx, H264_ENC, ctx->task);
+ if (ret)
+ goto err;
+
+ pctx->stream_num++;
+ stream->bytesused += hva_h264_get_stream_size(task);
+
+ stuffing_bytes = hva_h264_get_stuffing_bytes(task);
+
+ if (stuffing_bytes)
+ hva_h264_fill_data_nal(pctx, stuffing_bytes,
+ (u8 *)stream->vaddr,
+ stream->size,
+ &stream->bytesused);
+
+ /* switch reference & reconstructed frame */
+ swap(ctx->ref_frame, ctx->rec_frame);
+
+ return 0;
+err:
+ stream->bytesused = 0;
+ return ret;
+}
+
+const struct hva_enc nv12h264enc = {
+ .name = "H264(NV12)",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .streamformat = V4L2_PIX_FMT_H264,
+ .max_width = H264_MAX_SIZE_W,
+ .max_height = H264_MAX_SIZE_H,
+ .open = hva_h264_open,
+ .close = hva_h264_close,
+ .encode = hva_h264_encode,
+};
+
+const struct hva_enc nv21h264enc = {
+ .name = "H264(NV21)",
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .streamformat = V4L2_PIX_FMT_H264,
+ .max_width = H264_MAX_SIZE_W,
+ .max_height = H264_MAX_SIZE_H,
+ .open = hva_h264_open,
+ .close = hva_h264_close,
+ .encode = hva_h264_encode,
+};
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
new file mode 100644
index 000000000..6b852b0bb
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+#include <linux/seq_file.h>
+#endif
+
+#include "hva.h"
+#include "hva-hw.h"
+
+/* HVA register offsets */
+#define HVA_HIF_REG_RST 0x0100U
+#define HVA_HIF_REG_RST_ACK 0x0104U
+#define HVA_HIF_REG_MIF_CFG 0x0108U
+#define HVA_HIF_REG_HEC_MIF_CFG 0x010CU
+#define HVA_HIF_REG_CFL 0x0110U
+#define HVA_HIF_FIFO_CMD 0x0114U
+#define HVA_HIF_FIFO_STS 0x0118U
+#define HVA_HIF_REG_SFL 0x011CU
+#define HVA_HIF_REG_IT_ACK 0x0120U
+#define HVA_HIF_REG_ERR_IT_ACK 0x0124U
+#define HVA_HIF_REG_LMI_ERR 0x0128U
+#define HVA_HIF_REG_EMI_ERR 0x012CU
+#define HVA_HIF_REG_HEC_MIF_ERR 0x0130U
+#define HVA_HIF_REG_HEC_STS 0x0134U
+#define HVA_HIF_REG_HVC_STS 0x0138U
+#define HVA_HIF_REG_HJE_STS 0x013CU
+#define HVA_HIF_REG_CNT 0x0140U
+#define HVA_HIF_REG_HEC_CHKSYN_DIS 0x0144U
+#define HVA_HIF_REG_CLK_GATING 0x0148U
+#define HVA_HIF_REG_VERSION 0x014CU
+#define HVA_HIF_REG_BSM 0x0150U
+
+/* define value for version id register (HVA_HIF_REG_VERSION) */
+#define VERSION_ID_MASK 0x0000FFFF
+
+/* define values for BSM register (HVA_HIF_REG_BSM) */
+#define BSM_CFG_VAL1 0x0003F000
+#define BSM_CFG_VAL2 0x003F0000
+
+/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
+#define MIF_CFG_VAL1 0x04460446
+#define MIF_CFG_VAL2 0x04460806
+#define MIF_CFG_VAL3 0x00000000
+
+/* define value for HEC memory interface register (HVA_HIF_REG_HEC_MIF_CFG) */
+#define HEC_MIF_CFG_VAL 0x000000C4
+
+/* Bits definition for clock gating register (HVA_HIF_REG_CLK_GATING) */
+#define CLK_GATING_HVC BIT(0)
+#define CLK_GATING_HEC BIT(1)
+#define CLK_GATING_HJE BIT(2)
+
+/* fixed hva clock rate (Hz) */
+#define CLK_RATE 300000000
+
+/* autosuspend delay for runtime PM (ms) */
+#define AUTOSUSPEND_DELAY_MS 3
+
+/*
+ * hw encode error values
+ * NO_ERROR: Success, Task OK
+ * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
+ * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
+ * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
+ * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
+ * H264_SLICE_READY: VECH264 Slice ready
+ * TASK_LIST_FULL: HVA/FPC task list full
+ * (discard latest transform command)
+ * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
+ * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
+ * NO_INT_COMPLETION: Time-out on interrupt completion
+ * LMI_ERR: Local Memory Interface Error
+ * EMI_ERR: External Memory Interface Error
+ * HECMI_ERR: HEC Memory Interface Error
+ */
+enum hva_hw_error {
+ NO_ERROR = 0x0,
+ H264_BITSTREAM_OVERSIZE = 0x2,
+ H264_FRAME_SKIPPED = 0x4,
+ H264_SLICE_LIMIT_SIZE = 0x5,
+ H264_MAX_SLICE_NUMBER = 0x7,
+ H264_SLICE_READY = 0x8,
+ TASK_LIST_FULL = 0xF0,
+ UNKNOWN_COMMAND = 0xF1,
+ WRONG_CODEC_OR_RESOLUTION = 0xF4,
+ NO_INT_COMPLETION = 0x100,
+ LMI_ERR = 0x101,
+ EMI_ERR = 0x102,
+ HECMI_ERR = 0x103,
+};
+
+static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
+{
+ struct hva_dev *hva = data;
+
+ /* read status registers */
+ hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+ hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+ /* acknowledge interrupt */
+ writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
+{
+ struct hva_dev *hva = arg;
+ struct device *dev = hva_to_dev(hva);
+ u32 status = hva->sts_reg & 0xFF;
+ u8 ctx_id = 0;
+ struct hva_ctx *ctx = NULL;
+
+ dev_dbg(dev, "%s %s: status: 0x%02x fifo level: 0x%02x\n",
+ HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+ /*
+ * status: task_id[31:16] client_id[15:8] status[7:0]
+ * the context identifier is retrieved from the client identifier
+ */
+ ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+ if (ctx_id >= HVA_MAX_INSTANCES) {
+ dev_err(dev, "%s %s: bad context identifier: %d\n",
+ HVA_PREFIX, __func__, ctx_id);
+ goto out;
+ }
+
+ ctx = hva->instances[ctx_id];
+ if (!ctx)
+ goto out;
+
+ switch (status) {
+ case NO_ERROR:
+ dev_dbg(dev, "%s %s: no error\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_SLICE_READY:
+ dev_dbg(dev, "%s %s: h264 slice ready\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_FRAME_SKIPPED:
+ dev_dbg(dev, "%s %s: h264 frame skipped\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_BITSTREAM_OVERSIZE:
+ dev_err(dev, "%s %s:h264 bitstream oversize\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case H264_SLICE_LIMIT_SIZE:
+ dev_err(dev, "%s %s: h264 slice limit size is reached\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case H264_MAX_SLICE_NUMBER:
+ dev_err(dev, "%s %s: h264 max slice number is reached\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case TASK_LIST_FULL:
+ dev_err(dev, "%s %s:task list full\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case UNKNOWN_COMMAND:
+ dev_err(dev, "%s %s: command not known\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case WRONG_CODEC_OR_RESOLUTION:
+ dev_err(dev, "%s %s: wrong codec or resolution\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ default:
+ dev_err(dev, "%s %s: status not recognized\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ }
+out:
+ complete(&hva->interrupt);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
+{
+ struct hva_dev *hva = data;
+
+ /* read status registers */
+ hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+ hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+ /* read error registers */
+ hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
+ hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
+ hva->hec_mif_err_reg = readl_relaxed(hva->regs +
+ HVA_HIF_REG_HEC_MIF_ERR);
+
+ /* acknowledge interrupt */
+ writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
+{
+ struct hva_dev *hva = arg;
+ struct device *dev = hva_to_dev(hva);
+ u8 ctx_id = 0;
+ struct hva_ctx *ctx;
+
+ dev_dbg(dev, "%s status: 0x%02x fifo level: 0x%02x\n",
+ HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+ /*
+ * status: task_id[31:16] client_id[15:8] status[7:0]
+ * the context identifier is retrieved from the client identifier
+ */
+ ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+ if (ctx_id >= HVA_MAX_INSTANCES) {
+ dev_err(dev, "%s bad context identifier: %d\n", HVA_PREFIX,
+ ctx_id);
+ goto out;
+ }
+
+ ctx = hva->instances[ctx_id];
+ if (!ctx)
+ goto out;
+
+ if (hva->lmi_err_reg) {
+ dev_err(dev, "%s local memory interface error: 0x%08x\n",
+ ctx->name, hva->lmi_err_reg);
+ ctx->hw_err = true;
+ }
+
+ if (hva->emi_err_reg) {
+ dev_err(dev, "%s external memory interface error: 0x%08x\n",
+ ctx->name, hva->emi_err_reg);
+ ctx->hw_err = true;
+ }
+
+ if (hva->hec_mif_err_reg) {
+ dev_err(dev, "%s hec memory interface error: 0x%08x\n",
+ ctx->name, hva->hec_mif_err_reg);
+ ctx->hw_err = true;
+ }
+out:
+ complete(&hva->interrupt);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+ unsigned long int version;
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
+ pm_runtime_put_noidle(dev);
+ return -EFAULT;
+ }
+
+ version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
+ VERSION_ID_MASK;
+
+ pm_runtime_put_autosuspend(dev);
+
+ switch (version) {
+ case HVA_VERSION_V400:
+ dev_dbg(dev, "%s IP hardware version 0x%lx\n",
+ HVA_PREFIX, version);
+ break;
+ default:
+ dev_err(dev, "%s unknown IP hardware version 0x%lx\n",
+ HVA_PREFIX, version);
+ version = HVA_VERSION_UNKNOWN;
+ break;
+ }
+
+ return version;
+}
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *regs;
+ struct resource *esram;
+ int ret;
+
+ WARN_ON(!hva);
+
+ /* get memory for registers */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hva->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(hva->regs)) {
+ dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
+ return PTR_ERR(hva->regs);
+ }
+
+ /* get memory for esram */
+ esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!esram) {
+ dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
+ return -ENODEV;
+ }
+ hva->esram_addr = esram->start;
+ hva->esram_size = resource_size(esram);
+
+ dev_info(dev, "%s esram reserved for address: 0x%x size:%d\n",
+ HVA_PREFIX, hva->esram_addr, hva->esram_size);
+
+ /* get clock resource */
+ hva->clk = devm_clk_get(dev, "clk_hva");
+ if (IS_ERR(hva->clk)) {
+ dev_err(dev, "%s failed to get clock\n", HVA_PREFIX);
+ return PTR_ERR(hva->clk);
+ }
+
+ ret = clk_prepare(hva->clk);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to prepare clock\n", HVA_PREFIX);
+ hva->clk = ERR_PTR(-EINVAL);
+ return ret;
+ }
+
+ /* get status interrupt resource */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to get status IRQ\n", HVA_PREFIX);
+ goto err_clk;
+ }
+ hva->irq_its = ret;
+
+ ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
+ hva_hw_its_irq_thread,
+ IRQF_ONESHOT,
+ "hva_its_irq", hva);
+ if (ret) {
+ dev_err(dev, "%s failed to install status IRQ 0x%x\n",
+ HVA_PREFIX, hva->irq_its);
+ goto err_clk;
+ }
+ disable_irq(hva->irq_its);
+
+ /* get error interrupt resource */
+ ret = platform_get_irq(pdev, 1);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to get error IRQ\n", HVA_PREFIX);
+ goto err_clk;
+ }
+ hva->irq_err = ret;
+
+ ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
+ hva_hw_err_irq_thread,
+ IRQF_ONESHOT,
+ "hva_err_irq", hva);
+ if (ret) {
+ dev_err(dev, "%s failed to install error IRQ 0x%x\n",
+ HVA_PREFIX, hva->irq_err);
+ goto err_clk;
+ }
+ disable_irq(hva->irq_err);
+
+ /* initialise protection mutex */
+ mutex_init(&hva->protect_mutex);
+
+ /* initialise completion signal */
+ init_completion(&hva->interrupt);
+
+ /* initialise runtime power management */
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
+ goto err_pm;
+ }
+
+ /* check IP hardware version */
+ hva->ip_version = hva_hw_get_ip_version(hva);
+
+ if (hva->ip_version == HVA_VERSION_UNKNOWN) {
+ ret = -EINVAL;
+ goto err_pm;
+ }
+
+ dev_info(dev, "%s found hva device (version 0x%lx)\n", HVA_PREFIX,
+ hva->ip_version);
+
+ return 0;
+
+err_pm:
+ pm_runtime_put(dev);
+err_clk:
+ if (hva->clk)
+ clk_unprepare(hva->clk);
+
+ return ret;
+}
+
+void hva_hw_remove(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+
+ disable_irq(hva->irq_its);
+ disable_irq(hva->irq_err);
+
+ pm_runtime_put_autosuspend(dev);
+ pm_runtime_disable(dev);
+}
+
+int hva_hw_runtime_suspend(struct device *dev)
+{
+ struct hva_dev *hva = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hva->clk);
+
+ return 0;
+}
+
+int hva_hw_runtime_resume(struct device *dev)
+{
+ struct hva_dev *hva = dev_get_drvdata(dev);
+
+ if (clk_prepare_enable(hva->clk)) {
+ dev_err(hva->dev, "%s failed to prepare hva clk\n",
+ HVA_PREFIX);
+ return -EINVAL;
+ }
+
+ if (clk_set_rate(hva->clk, CLK_RATE)) {
+ dev_err(dev, "%s failed to set clock frequency\n",
+ HVA_PREFIX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+ struct hva_buffer *task)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = hva_to_dev(hva);
+ u8 client_id = ctx->id;
+ int ret;
+ u32 reg = 0;
+
+ mutex_lock(&hva->protect_mutex);
+
+ /* enable irqs */
+ enable_irq(hva->irq_its);
+ enable_irq(hva->irq_err);
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
+ ctx->sys_errors++;
+ ret = -EFAULT;
+ goto out;
+ }
+
+ reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
+ switch (cmd) {
+ case H264_ENC:
+ reg |= CLK_GATING_HVC;
+ break;
+ default:
+ dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
+ ctx->encode_errors++;
+ ret = -EFAULT;
+ goto out;
+ }
+ writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+
+ dev_dbg(dev, "%s %s: write configuration registers\n", ctx->name,
+ __func__);
+
+ /* byte swap config */
+ writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);
+
+ /* define Max Opcode Size and Max Message Size for LMI and EMI */
+ writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
+ writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);
+
+ /*
+ * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
+ * the context identifier is provided as client identifier to the
+ * hardware, and is retrieved in the interrupt functions from the
+ * status register
+ */
+ dev_dbg(dev, "%s %s: send task (cmd: %d, task_desc: %pad)\n",
+ ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
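+ /* push the command word, then the task descriptor address, into the FIFO */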
+ writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
+ writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);
+
+ if (!wait_for_completion_timeout(&hva->interrupt,
+ msecs_to_jiffies(2000))) {
+ dev_err(dev, "%s %s: time out on completion\n", ctx->name,
+ __func__);
+ ctx->encode_errors++;
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* get encoding status */
+ ret = ctx->hw_err ? -EFAULT : 0;
+
+ ctx->encode_errors += ctx->hw_err ? 1 : 0;
+
+out:
+ disable_irq(hva->irq_its);
+ disable_irq(hva->irq_err);
+
+ switch (cmd) {
+ case H264_ENC:
+ reg &= ~CLK_GATING_HVC;
+ writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+ break;
+ default:
+ dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
+ }
+
+ pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&hva->protect_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
+ #reg, readl_relaxed(hva->regs + reg))
+
+void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
+{
+ struct device *dev = hva_to_dev(hva);
+
+ mutex_lock(&hva->protect_mutex);
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ seq_puts(s, "Cannot wake up IP\n");
+ pm_runtime_put_noidle(dev);
+ mutex_unlock(&hva->protect_mutex);
+ return;
+ }
+
+ seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);
+
+ DUMP(HVA_HIF_REG_RST);
+ DUMP(HVA_HIF_REG_RST_ACK);
+ DUMP(HVA_HIF_REG_MIF_CFG);
+ DUMP(HVA_HIF_REG_HEC_MIF_CFG);
+ DUMP(HVA_HIF_REG_CFL);
+ DUMP(HVA_HIF_REG_SFL);
+ DUMP(HVA_HIF_REG_LMI_ERR);
+ DUMP(HVA_HIF_REG_EMI_ERR);
+ DUMP(HVA_HIF_REG_HEC_MIF_ERR);
+ DUMP(HVA_HIF_REG_HEC_STS);
+ DUMP(HVA_HIF_REG_HVC_STS);
+ DUMP(HVA_HIF_REG_HJE_STS);
+ DUMP(HVA_HIF_REG_CNT);
+ DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
+ DUMP(HVA_HIF_REG_CLK_GATING);
+ DUMP(HVA_HIF_REG_VERSION);
+
+ pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&hva->protect_mutex);
+}
+#endif
diff --git a/drivers/media/platform/sti/hva/hva-hw.h b/drivers/media/platform/sti/hva/hva-hw.h
new file mode 100644
index 000000000..b29899026
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-hw.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#ifndef HVA_HW_H
+#define HVA_HW_H
+
+#include "hva-mem.h"
+
+/* HVA Versions */
+#define HVA_VERSION_UNKNOWN 0x000
+#define HVA_VERSION_V400 0x400
+
+/* HVA command types */
+enum hva_hw_cmd_type {
+ /* RESERVED = 0x00 */
+ /* RESERVED = 0x01 */
+ H264_ENC = 0x02,
+ /* RESERVED = 0x03 */
+ /* RESERVED = 0x04 */
+ /* RESERVED = 0x05 */
+ /* RESERVED = 0x06 */
+ /* RESERVED = 0x07 */
+ REMOVE_CLIENT = 0x08,
+ FREEZE_CLIENT = 0x09,
+ START_CLIENT = 0x0A,
+ FREEZE_ALL = 0x0B,
+ START_ALL = 0x0C,
+ REMOVE_ALL = 0x0D
+};
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva);
+void hva_hw_remove(struct hva_dev *hva);
+int hva_hw_runtime_suspend(struct device *dev);
+int hva_hw_runtime_resume(struct device *dev);
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+ struct hva_buffer *task);
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s);
+#endif
+
+#endif /* HVA_HW_H */
diff --git a/drivers/media/platform/sti/hva/hva-mem.c b/drivers/media/platform/sti/hva/hva-mem.c
new file mode 100644
index 000000000..68047b60b
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-mem.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include "hva.h"
+#include "hva-mem.h"
+
+int hva_mem_alloc(struct hva_ctx *ctx, u32 size, const char *name,
+ struct hva_buffer **buf)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_buffer *b;
+ dma_addr_t paddr;
+ void *base;
+
+ b = devm_kzalloc(dev, sizeof(*b), GFP_KERNEL);
+ if (!b) {
+ ctx->sys_errors++;
+ return -ENOMEM;
+ }
+
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+ if (!base) {
+ dev_err(dev, "%s %s : dma_alloc_attrs failed for %s (size=%d)\n",
+ ctx->name, __func__, name, size);
+ ctx->sys_errors++;
+ devm_kfree(dev, b);
+ return -ENOMEM;
+ }
+
+ b->size = size;
+ b->paddr = paddr;
+ b->vaddr = base;
+ b->name = name;
+
+ dev_dbg(dev,
+ "%s allocate %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+ ctx->name, size, b->vaddr, &b->paddr, b->name);
+
+ /* return hva buffer to user */
+ *buf = b;
+
+ return 0;
+}
+
+void hva_mem_free(struct hva_ctx *ctx, struct hva_buffer *buf)
+{
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev,
+ "%s free %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+ ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
+
+ dma_free_attrs(dev, buf->size, buf->vaddr, buf->paddr,
+ DMA_ATTR_WRITE_COMBINE);
+
+ devm_kfree(dev, buf);
+}
diff --git a/drivers/media/platform/sti/hva/hva-mem.h b/drivers/media/platform/sti/hva/hva-mem.h
new file mode 100644
index 000000000..fec549dff
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-mem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#ifndef HVA_MEM_H
+#define HVA_MEM_H
+
+/**
+ * struct hva_buffer - hva buffer
+ *
+ * @name: name of requester
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @size: size of buffer
+ */
+struct hva_buffer {
+ const char *name;
+ dma_addr_t paddr;
+ void *vaddr;
+ u32 size;
+};
+
+int hva_mem_alloc(struct hva_ctx *ctx,
+ __u32 size,
+ const char *name,
+ struct hva_buffer **buf);
+
+void hva_mem_free(struct hva_ctx *ctx,
+ struct hva_buffer *buf);
+
+#endif /* HVA_MEM_H */
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
new file mode 100644
index 000000000..5a807c7c5
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -0,0 +1,1474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define MIN_FRAMES 1
+#define MIN_STREAMS 1
+
+#define HVA_MIN_WIDTH 32
+#define HVA_MAX_WIDTH 1920
+#define HVA_MIN_HEIGHT 32
+#define HVA_MAX_HEIGHT 1920
+
+/* HVA requires a 16x16 pixels alignment for frames */
+#define HVA_WIDTH_ALIGNMENT 16
+#define HVA_HEIGHT_ALIGNMENT 16
+
+#define HVA_DEFAULT_WIDTH HVA_MIN_WIDTH
+#define HVA_DEFAULT_HEIGHT HVA_MIN_HEIGHT
+#define HVA_DEFAULT_FRAME_NUM 1
+#define HVA_DEFAULT_FRAME_DEN 30
+
+#define to_type_str(type) (type == V4L2_BUF_TYPE_VIDEO_OUTPUT ? \
+ "frame" : "stream")
+
+#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
+
+/* registry of available encoders */
+static const struct hva_enc *hva_encoders[] = {
+ &nv12h264enc,
+ &nv21h264enc,
+};
+
+static inline int frame_size(u32 w, u32 h, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
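+ /* 4:2:0 semi-planar: full-size luma plane + half-size interleaved chroma */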
+ return (w * h * 3) / 2;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_stride(u32 w, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return w;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_alignment(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ /* multiple of 2 */
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static inline int estimated_stream_size(u32 w, u32 h)
+{
+ /*
+ * HVA only encodes in YUV420 format, whatever the frame format.
+ * A compression ratio of 2 is assumed: thus, the maximum size
+ * of a stream is estimated at ((width x height x 3 / 2) / 2)
+ */
+ return (w * h * 3) / 4;
+}
+
+static void set_default_params(struct hva_ctx *ctx)
+{
+ struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+ struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = HVA_DEFAULT_WIDTH;
+ frameinfo->height = HVA_DEFAULT_HEIGHT;
+ frameinfo->aligned_width = ALIGN(frameinfo->width,
+ HVA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(frameinfo->height,
+ HVA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+
+ streaminfo->streamformat = V4L2_PIX_FMT_H264;
+ streaminfo->width = HVA_DEFAULT_WIDTH;
+ streaminfo->height = HVA_DEFAULT_HEIGHT;
+
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ ctx->max_stream_size = estimated_stream_size(streaminfo->width,
+ streaminfo->height);
+}
+
+static const struct hva_enc *hva_find_encoder(struct hva_ctx *ctx,
+ u32 pixelformat,
+ u32 streamformat)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ const struct hva_enc *enc;
+ unsigned int i;
+
+ for (i = 0; i < hva->nb_of_encoders; i++) {
+ enc = hva->encoders[i];
+ if ((enc->pixelformat == pixelformat) &&
+ (enc->streamformat == streamformat))
+ return enc;
+ }
+
+ return NULL;
+}
+
+static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
+{
+ u32 i;
+ bool found = false;
+
+ for (i = 0; i < *nb_of_formats; i++) {
+ if (format == formats[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ formats[(*nb_of_formats)++] = format;
+}
+
+static void register_formats(struct hva_dev *hva)
+{
+ unsigned int i;
+
+ for (i = 0; i < hva->nb_of_encoders; i++) {
+ register_format(hva->encoders[i]->pixelformat,
+ hva->pixelformats,
+ &hva->nb_of_pixelformats);
+
+ register_format(hva->encoders[i]->streamformat,
+ hva->streamformats,
+ &hva->nb_of_streamformats);
+ }
+}
+
+static void register_encoders(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hva_encoders); i++) {
+ if (hva->nb_of_encoders >= HVA_MAX_ENCODERS) {
+ dev_dbg(dev,
+ "%s failed to register %s encoder (%d maximum reached)\n",
+ HVA_PREFIX, hva_encoders[i]->name,
+ HVA_MAX_ENCODERS);
+ return;
+ }
+
+ hva->encoders[hva->nb_of_encoders++] = hva_encoders[i];
+ dev_info(dev, "%s %s encoder registered\n", HVA_PREFIX,
+ hva_encoders[i]->name);
+ }
+}
+
+static int hva_open_encoder(struct hva_ctx *ctx, u32 streamformat,
+ u32 pixelformat, struct hva_enc **penc)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_enc *enc;
+ int ret;
+
+ /* find an encoder which can deal with these formats */
+ enc = (struct hva_enc *)hva_find_encoder(ctx, pixelformat,
+ streamformat);
+ if (!enc) {
+ dev_err(dev, "%s no encoder found matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&pixelformat, (char *)&streamformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s one encoder matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&pixelformat, (char *)&streamformat);
+
+ /* update instance name */
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
+ hva->instance_id, (char *)&streamformat);
+
+ /* open encoder instance */
+ ret = enc->open(ctx);
+ if (ret) {
+ dev_err(dev, "%s failed to open encoder instance (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s %s encoder opened\n", ctx->name, enc->name);
+
+ *penc = enc;
+
+ return ret;
+}
+
+static void hva_dbg_summary(struct hva_ctx *ctx)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_streaminfo *stream = &ctx->streaminfo;
+ struct hva_frameinfo *frame = &ctx->frameinfo;
+
+ if (!(ctx->flags & HVA_FLAG_STREAMINFO))
+ return;
+
+ dev_dbg(dev, "%s %4.4s %dx%d > %4.4s %dx%d %s %s: %d frames encoded, %d system errors, %d encoding errors, %d frame errors\n",
+ ctx->name,
+ (char *)&frame->pixelformat,
+ frame->aligned_width, frame->aligned_height,
+ (char *)&stream->streamformat,
+ stream->width, stream->height,
+ stream->profile, stream->level,
+ ctx->encoded_frames,
+ ctx->sys_errors,
+ ctx->encode_errors,
+ ctx->frame_errors);
+}
+
+/*
+ * V4L2 ioctl operations
+ */
+
+static int hva_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ strlcpy(cap->driver, HVA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, hva->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ hva->pdev->name);
+
+ return 0;
+}
+
+static int hva_enum_fmt_stream(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ if (unlikely(f->index >= hva->nb_of_streamformats))
+ return -EINVAL;
+
+ f->pixelformat = hva->streamformats[f->index];
+
+ return 0;
+}
+
+static int hva_enum_fmt_frame(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ if (unlikely(f->index >= hva->nb_of_pixelformats))
+ return -EINVAL;
+
+ f->pixelformat = hva->pixelformats[f->index];
+
+ return 0;
+}
+
+static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+ f->fmt.pix.width = streaminfo->width;
+ f->fmt.pix.height = streaminfo->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+ f->fmt.pix.pixelformat = streaminfo->streamformat;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = ctx->max_stream_size;
+
+ return 0;
+}
+
+static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+
+ f->fmt.pix.width = frameinfo->width;
+ f->fmt.pix.height = frameinfo->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+ f->fmt.pix.pixelformat = frameinfo->pixelformat;
+ f->fmt.pix.bytesperline = frame_stride(frameinfo->aligned_width,
+ frameinfo->pixelformat);
+ f->fmt.pix.sizeimage = frameinfo->size;
+
+ return 0;
+}
+
+static int hva_try_fmt_stream(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 streamformat = pix->pixelformat;
+ const struct hva_enc *enc;
+ u32 width, height;
+ u32 stream_size;
+
+ enc = hva_find_encoder(ctx, ctx->frameinfo.pixelformat, streamformat);
+ if (!enc) {
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): unsupported format %.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ width = pix->width;
+ height = pix->height;
+ if (ctx->flags & HVA_FLAG_FRAMEINFO) {
+ /*
+ * if the frame resolution is already fixed, only allow the
+ * same stream resolution
+ */
+ pix->width = ctx->frameinfo.width;
+ pix->height = ctx->frameinfo.height;
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit frame resolution\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+ } else {
+ /* adjust width & height */
+ v4l_bound_align_image(&pix->width,
+ HVA_MIN_WIDTH, enc->max_width,
+ 0,
+ &pix->height,
+ HVA_MIN_HEIGHT, enc->max_height,
+ 0,
+ 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+ }
+
+ stream_size = estimated_stream_size(pix->width, pix->height);
+ if (pix->sizeimage < stream_size)
+ pix->sizeimage = stream_size;
+
+ pix->bytesperline = 0;
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int hva_try_fmt_frame(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 pixelformat = pix->pixelformat;
+ const struct hva_enc *enc;
+ u32 width, height;
+
+ enc = hva_find_encoder(ctx, pixelformat, ctx->streaminfo.streamformat);
+ if (!enc) {
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (OUTPUT): unsupported format %.4s\n",
+ ctx->name, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image(&pix->width,
+ HVA_MIN_WIDTH, HVA_MAX_WIDTH,
+ frame_alignment(pixelformat) - 1,
+ &pix->height,
+ HVA_MIN_HEIGHT, HVA_MAX_HEIGHT,
+ frame_alignment(pixelformat) - 1,
+ 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
+ width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+ height = ALIGN(pix->height, HVA_HEIGHT_ALIGNMENT);
+
+ if (!pix->colorspace) {
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
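+ /* stride and image size are based on the 16x16 hardware-aligned dimensions */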
+ pix->bytesperline = frame_stride(width, pixelformat);
+ pix->sizeimage = frame_size(width, height, pixelformat);
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = hva_try_fmt_stream(file, fh, f);
+ if (ret) {
+ dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): unsupported format %.4s\n",
+ ctx->name, (char *)&f->fmt.pix.pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->max_stream_size = f->fmt.pix.sizeimage;
+ ctx->streaminfo.width = f->fmt.pix.width;
+ ctx->streaminfo.height = f->fmt.pix.height;
+ ctx->streaminfo.streamformat = f->fmt.pix.pixelformat;
+ ctx->flags |= HVA_FLAG_STREAMINFO;
+
+ return 0;
+}
+
+static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = hva_try_fmt_frame(file, fh, f);
+ if (ret) {
+ dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): unsupported format %.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n", ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->colorspace = pix->colorspace;
+ ctx->xfer_func = pix->xfer_func;
+ ctx->ycbcr_enc = pix->ycbcr_enc;
+ ctx->quantization = pix->quantization;
+
+ ctx->frameinfo.aligned_width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+ ctx->frameinfo.aligned_height = ALIGN(pix->height,
+ HVA_HEIGHT_ALIGNMENT);
+ ctx->frameinfo.size = pix->sizeimage;
+ ctx->frameinfo.pixelformat = pix->pixelformat;
+ ctx->frameinfo.width = pix->width;
+ ctx->frameinfo.height = pix->height;
+ ctx->flags |= HVA_FLAG_FRAMEINFO;
+
+ return 0;
+}
+
+static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ sp->parm.output.timeperframe.numerator = time_per_frame->numerator;
+ sp->parm.output.timeperframe.denominator =
+ time_per_frame->denominator;
+
+ return 0;
+}
+
+static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (!sp->parm.output.timeperframe.numerator ||
+ !sp->parm.output.timeperframe.denominator)
+ return hva_g_parm(file, fh, sp);
+
+ sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ time_per_frame->numerator = sp->parm.output.timeperframe.numerator;
+ time_per_frame->denominator =
+ sp->parm.output.timeperframe.denominator;
+
+ return 0;
+}
+
+static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ /*
+ * depending on the targeted compressed video format, the
+ * capture buffer might contain headers (e.g. H.264 SPS/PPS)
+ * filled in by the driver client; the size of this data is
+ * copied from the bytesused field of the V4L2 buffer into the
+ * bytesused field of the hva stream buffer
+ */
+ struct vb2_queue *vq;
+ struct hva_stream *stream;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, buf->type);
+
+ if (buf->index >= vq->num_buffers) {
+ dev_dbg(dev, "%s buffer index %d out of range (%d)\n",
+ ctx->name, buf->index, vq->num_buffers);
+ return -EINVAL;
+ }
+
+ stream = (struct hva_stream *)vq->bufs[buf->index];
+ stream->bytesused = buf->bytesused;
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
+}
+
+/* V4L2 ioctl ops */
+static const struct v4l2_ioctl_ops hva_ioctl_ops = {
+ .vidioc_querycap = hva_querycap,
+ .vidioc_enum_fmt_vid_cap = hva_enum_fmt_stream,
+ .vidioc_enum_fmt_vid_out = hva_enum_fmt_frame,
+ .vidioc_g_fmt_vid_cap = hva_g_fmt_stream,
+ .vidioc_g_fmt_vid_out = hva_g_fmt_frame,
+ .vidioc_try_fmt_vid_cap = hva_try_fmt_stream,
+ .vidioc_try_fmt_vid_out = hva_try_fmt_frame,
+ .vidioc_s_fmt_vid_cap = hva_s_fmt_stream,
+ .vidioc_s_fmt_vid_out = hva_s_fmt_frame,
+ .vidioc_g_parm = hva_g_parm,
+ .vidioc_s_parm = hva_s_parm,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = hva_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * V4L2 control operations
+ */
+
+static int hva_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hva_ctx *ctx = container_of(ctrl->handler, struct hva_ctx,
+ ctrl_handler);
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev, "%s S_CTRL: id = %d, val = %d\n", ctx->name,
+ ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ ctx->ctrls.bitrate_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctx->ctrls.gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctx->ctrls.bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ ctx->ctrls.aspect = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctx->ctrls.profile = ctrl->val;
+ snprintf(ctx->streaminfo.profile,
+ sizeof(ctx->streaminfo.profile),
+ "%s profile",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctx->ctrls.level = ctrl->val;
+ snprintf(ctx->streaminfo.level,
+ sizeof(ctx->streaminfo.level),
+ "level %s",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ ctx->ctrls.entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ ctx->ctrls.cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ ctx->ctrls.dct8x8 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ ctx->ctrls.qpmin = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ ctx->ctrls.qpmax = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ ctx->ctrls.vui_sar = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ ctx->ctrls.vui_sar_idc = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING:
+ ctx->ctrls.sei_fp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ ctx->ctrls.sei_fp_type = ctrl->val;
+ break;
+ default:
+ dev_dbg(dev, "%s S_CTRL: invalid control (id = %d)\n",
+ ctx->name, ctrl->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* V4L2 control ops */
+static const struct v4l2_ctrl_ops hva_ctrl_ops = {
+ .s_ctrl = hva_s_ctrl,
+};
+
+static int hva_ctrls_setup(struct hva_ctx *ctx)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ u64 mask;
+ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type =
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 15);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, 60, 1, 16);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 1000, 60000000, 1000, 20000000);
+
+ mask = ~(1 << V4L2_MPEG_VIDEO_ASPECT_1x1);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_ASPECT,
+ V4L2_MPEG_VIDEO_ASPECT_1x1,
+ mask,
+ V4L2_MPEG_VIDEO_ASPECT_1x1);
+
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH));
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH,
+ mask,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ 0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ 0,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ 1, 10000, 1, 3000);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ 0, 51, 1, 5);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+ 0, 1, 1, 1);
+
+ mask = ~(1 << V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1,
+ mask,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING,
+ 0, 1, 1, 0);
+
+ mask = ~(1 << sei_fp_type);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE,
+ sei_fp_type,
+ mask,
+ sei_fp_type);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ dev_dbg(dev, "%s controls setup failed (%d)\n",
+ ctx->name, err);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ /* set default time per frame */
+ ctx->ctrls.time_per_frame.numerator = HVA_DEFAULT_FRAME_NUM;
+ ctx->ctrls.time_per_frame.denominator = HVA_DEFAULT_FRAME_DEN;
+
+ return 0;
+}
+
+/*
+ * mem-to-mem operations
+ */
+
+static void hva_run_work(struct work_struct *work)
+{
+ struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct hva_enc *enc = ctx->enc;
+ struct hva_frame *frame;
+ struct hva_stream *stream;
+ int ret;
+
+ /* protect instance against reentrancy */
+ mutex_lock(&ctx->lock);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_perf_begin(ctx);
+#endif
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ frame = to_hva_frame(src_buf);
+ stream = to_hva_stream(dst_buf);
+ frame->vbuf.sequence = ctx->frame_num++;
+
+ ret = enc->encode(ctx, frame, stream);
+
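+	/* report the number of encoded bytes as the capture buffer payload */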
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, stream->bytesused);
+ if (ret) {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ /* propagate frame timestamp */
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->field = V4L2_FIELD_NONE;
+ dst_buf->sequence = ctx->stream_num - 1;
+
+ ctx->encoded_frames++;
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_perf_end(ctx, stream);
+#endif
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ v4l2_m2m_job_finish(ctx->hva_dev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void hva_device_run(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ queue_work(hva->work_queue, &ctx->run_work);
+}
+
+static void hva_job_abort(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev, "%s aborting job\n", ctx->name);
+
+ ctx->aborting = true;
+}
+
+static int hva_job_ready(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct device *dev = ctx_to_dev(ctx);
+
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(dev, "%s job not ready: no frame buffers\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(dev, "%s job not ready: no stream buffers\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ dev_dbg(dev, "%s job not ready: aborting\n", ctx->name);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* mem-to-mem ops */
+static const struct v4l2_m2m_ops hva_m2m_ops = {
+ .device_run = hva_device_run,
+ .job_abort = hva_job_abort,
+ .job_ready = hva_job_ready,
+};
+
+/*
+ * VB2 queue operations
+ */
+
+static int hva_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx_to_dev(ctx);
+ unsigned int size;
+
+ dev_dbg(dev, "%s %s queue setup: num_buffers %d\n", ctx->name,
+ to_type_str(vq->type), *num_buffers);
+
+ size = vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ?
+ ctx->frameinfo.size : ctx->max_stream_size;
+
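+	/* when called from VIDIOC_CREATE_BUFS, *num_planes is set: just check the requested size */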
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ /* only one plane supported */
+ *num_planes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int hva_buf_prepare(struct vb2_buffer *vb)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ struct hva_frame *frame = to_hva_frame(vbuf);
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(dev,
+ "%s frame[%d] prepare: %d field not supported\n",
+ ctx->name, vb->index, vbuf->field);
+ return -EINVAL;
+ }
+
+ if (!frame->prepared) {
+ /* get memory addresses */
+ frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ frame->paddr = vb2_dma_contig_plane_dma_addr(
+ &vbuf->vb2_buf, 0);
+ frame->info = ctx->frameinfo;
+ frame->prepared = true;
+
+ dev_dbg(dev,
+ "%s frame[%d] prepared; virt=%p, phy=%pad\n",
+ ctx->name, vb->index,
+ frame->vaddr, &frame->paddr);
+ }
+ } else {
+ struct hva_stream *stream = to_hva_stream(vbuf);
+
+ if (!stream->prepared) {
+ /* get memory addresses */
+ stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ stream->paddr = vb2_dma_contig_plane_dma_addr(
+ &vbuf->vb2_buf, 0);
+ stream->size = vb2_plane_size(&vbuf->vb2_buf, 0);
+ stream->prepared = true;
+
+ dev_dbg(dev,
+ "%s stream[%d] prepared; virt=%p, phy=%pad\n",
+ ctx->name, vb->index,
+ stream->vaddr, &stream->paddr);
+ }
+ }
+
+ return 0;
+}
+
+static void hva_buf_queue(struct vb2_buffer *vb)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int hva_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_v4l2_buffer *vbuf;
+ int ret;
+ unsigned int i;
+ bool found = false;
+
+ dev_dbg(dev, "%s %s start streaming\n", ctx->name,
+ to_type_str(vq->type));
+
+	/* open the encoder once start_streaming has been called on both queues */
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
+ return 0;
+ } else {
+ if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ return 0;
+ }
+
+ /* store the instance context in the instances array */
+ for (i = 0; i < HVA_MAX_INSTANCES; i++) {
+ if (!hva->instances[i]) {
+ hva->instances[i] = ctx;
+ /* save the context identifier in the context */
+ ctx->id = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(dev, "%s maximum instances reached\n", ctx->name);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hva->nb_of_instances++;
+
+ if (!ctx->enc) {
+ ret = hva_open_encoder(ctx,
+ ctx->streaminfo.streamformat,
+ ctx->frameinfo.pixelformat,
+ &ctx->enc);
+ if (ret < 0)
+ goto err_ctx;
+ }
+
+ return 0;
+
+err_ctx:
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+err:
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		/* return all pending buffers to vb2 (in queued state) */
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ } else {
+		/* return all pending buffers to vb2 (in queued state) */
+ while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ }
+
+ ctx->sys_errors++;
+
+ return ret;
+}
+
+static void hva_stop_streaming(struct vb2_queue *vq)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ const struct hva_enc *enc = ctx->enc;
+ struct vb2_v4l2_buffer *vbuf;
+
+ dev_dbg(dev, "%s %s stop streaming\n", ctx->name,
+ to_type_str(vq->type));
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		/* return all pending buffers to vb2 (in error state) */
+ ctx->frame_num = 0;
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ } else {
+		/* return all pending buffers to vb2 (in error state) */
+ ctx->stream_num = 0;
+ while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
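+	/* if the other queue is still streaming, defer closing the encoder */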
+ if ((V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
+ (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
+ dev_dbg(dev, "%s %s out=%d cap=%d\n",
+ ctx->name, to_type_str(vq->type),
+ vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
+ vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
+ return;
+ }
+
+	/* close the encoder once stop_streaming has been called on both queues */
+ if (enc) {
+ dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+ enc->close(ctx);
+ ctx->enc = NULL;
+
+ /* clear instance context in instances array */
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+ }
+
+ ctx->aborting = false;
+}
+
+/* VB2 queue ops */
+static const struct vb2_ops hva_qops = {
+ .queue_setup = hva_queue_setup,
+ .buf_prepare = hva_buf_prepare,
+ .buf_queue = hva_buf_queue,
+ .start_streaming = hva_start_streaming,
+ .stop_streaming = hva_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * V4L2 file operations
+ */
+
+static int queue_init(struct hva_ctx *ctx, struct vb2_queue *vq)
+{
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = ctx;
+ vq->ops = &hva_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ vq->lock = &ctx->hva_dev->lock;
+
+ return vb2_queue_init(vq);
+}
+
+static int hva_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct hva_ctx *ctx = priv;
+ int ret;
+
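+	/* the OUTPUT queue carries raw frames to encode, the CAPTURE queue the encoded stream */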
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->buf_struct_size = sizeof(struct hva_frame);
+ src_vq->min_buffers_needed = MIN_FRAMES;
+ src_vq->dev = ctx->hva_dev->dev;
+
+ ret = queue_init(ctx, src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->buf_struct_size = sizeof(struct hva_stream);
+ dst_vq->min_buffers_needed = MIN_STREAMS;
+ dst_vq->dev = ctx->hva_dev->dev;
+
+ return queue_init(ctx, dst_vq);
+}
+
+static int hva_open(struct file *file)
+{
+ struct hva_dev *hva = video_drvdata(file);
+ struct device *dev = hva_to_dev(hva);
+ struct hva_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ctx->hva_dev = hva;
+
+ INIT_WORK(&ctx->run_work, hva_run_work);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ret = hva_ctrls_setup(ctx);
+ if (ret) {
+ dev_err(dev, "%s [x:x] failed to setup controls\n",
+ HVA_PREFIX);
+ ctx->sys_errors++;
+ goto err_fh;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ mutex_init(&ctx->lock);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(hva->m2m_dev, ctx,
+ &hva_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ dev_err(dev, "%s failed to initialize m2m context (%d)\n",
+ HVA_PREFIX, ret);
+ ctx->sys_errors++;
+ goto err_ctrls;
+ }
+
+ /* set the instance name */
+ mutex_lock(&hva->lock);
+ hva->instance_id++;
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
+ hva->instance_id);
+ mutex_unlock(&hva->lock);
+
+ /* default parameters for frame and stream */
+ set_default_params(ctx);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_ctx_create(ctx);
+#endif
+
+ dev_info(dev, "%s encoder instance created\n", ctx->name);
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+err_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+out:
+ return ret;
+}
+
+static int hva_release(struct file *file)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ const struct hva_enc *enc = ctx->enc;
+
+ if (enc) {
+ dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+ enc->close(ctx);
+ ctx->enc = NULL;
+
+ /* clear instance context in instances array */
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+ }
+
+ /* trace a summary of instance before closing (debug purpose) */
+ hva_dbg_summary(ctx);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_ctx_remove(ctx);
+#endif
+
+ dev_info(dev, "%s encoder instance released\n", ctx->name);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+/* V4L2 file ops */
+static const struct v4l2_file_operations hva_fops = {
+ .owner = THIS_MODULE,
+ .open = hva_open,
+ .release = hva_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .poll = v4l2_m2m_fop_poll,
+};
+
+/*
+ * Platform device operations
+ */
+
+static int hva_register_device(struct hva_dev *hva)
+{
+ int ret;
+ struct video_device *vdev;
+ struct device *dev;
+
+ if (!hva)
+ return -ENODEV;
+ dev = hva_to_dev(hva);
+
+ hva->m2m_dev = v4l2_m2m_init(&hva_m2m_ops);
+ if (IS_ERR(hva->m2m_dev)) {
+ dev_err(dev, "%s failed to initialize v4l2-m2m device\n",
+ HVA_PREFIX);
+ ret = PTR_ERR(hva->m2m_dev);
+ goto err;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ dev_err(dev, "%s failed to allocate video device\n",
+ HVA_PREFIX);
+ ret = -ENOMEM;
+ goto err_m2m_release;
+ }
+
+ vdev->fops = &hva_fops;
+ vdev->ioctl_ops = &hva_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->lock = &hva->lock;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ vdev->v4l2_dev = &hva->v4l2_dev;
+ snprintf(vdev->name, sizeof(vdev->name), "%s%lx", HVA_NAME,
+ hva->ip_version);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(dev, "%s failed to register video device\n",
+ HVA_PREFIX);
+ goto err_vdev_release;
+ }
+
+ hva->vdev = vdev;
+ video_set_drvdata(vdev, hva);
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+err_m2m_release:
+ v4l2_m2m_release(hva->m2m_dev);
+err:
+ return ret;
+}
+
+static void hva_unregister_device(struct hva_dev *hva)
+{
+ if (!hva)
+ return;
+
+ if (hva->m2m_dev)
+ v4l2_m2m_release(hva->m2m_dev);
+
+ video_unregister_device(hva->vdev);
+}
+
+static int hva_probe(struct platform_device *pdev)
+{
+ struct hva_dev *hva;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hva = devm_kzalloc(dev, sizeof(*hva), GFP_KERNEL);
+ if (!hva) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ hva->dev = dev;
+ hva->pdev = pdev;
+ platform_set_drvdata(pdev, hva);
+
+ mutex_init(&hva->lock);
+
+ /* probe hardware */
+ ret = hva_hw_probe(pdev, hva);
+ if (ret)
+ goto err;
+
+ /* register all available encoders */
+ register_encoders(hva);
+
+ /* register all supported formats */
+ register_formats(hva);
+
+ /* register on V4L2 */
+ ret = v4l2_device_register(dev, &hva->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "%s %s failed to register V4L2 device\n",
+ HVA_PREFIX, HVA_NAME);
+ goto err_hw;
+ }
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_create(hva);
+#endif
+
+ hva->work_queue = create_workqueue(HVA_NAME);
+ if (!hva->work_queue) {
+ dev_err(dev, "%s %s failed to allocate work queue\n",
+ HVA_PREFIX, HVA_NAME);
+ ret = -ENOMEM;
+ goto err_v4l2;
+ }
+
+ /* register device */
+ ret = hva_register_device(hva);
+ if (ret)
+ goto err_work_queue;
+
+ dev_info(dev, "%s %s registered as /dev/video%d\n", HVA_PREFIX,
+ HVA_NAME, hva->vdev->num);
+
+ return 0;
+
+err_work_queue:
+ destroy_workqueue(hva->work_queue);
+err_v4l2:
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_remove(hva);
+#endif
+ v4l2_device_unregister(&hva->v4l2_dev);
+err_hw:
+ hva_hw_remove(hva);
+err:
+ return ret;
+}
+
+static int hva_remove(struct platform_device *pdev)
+{
+ struct hva_dev *hva = platform_get_drvdata(pdev);
+ struct device *dev = hva_to_dev(hva);
+
+ hva_unregister_device(hva);
+
+ destroy_workqueue(hva->work_queue);
+
+ hva_hw_remove(hva);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_remove(hva);
+#endif
+
+ v4l2_device_unregister(&hva->v4l2_dev);
+
+ dev_info(dev, "%s %s removed\n", HVA_PREFIX, pdev->name);
+
+ return 0;
+}
+
+/* PM ops */
+static const struct dev_pm_ops hva_pm_ops = {
+ .runtime_suspend = hva_hw_runtime_suspend,
+ .runtime_resume = hva_hw_runtime_resume,
+};
+
+static const struct of_device_id hva_match_types[] = {
+ {
+ .compatible = "st,st-hva",
+ },
+ { /* end node */ }
+};
+
+MODULE_DEVICE_TABLE(of, hva_match_types);
+
+static struct platform_driver hva_driver = {
+ .probe = hva_probe,
+ .remove = hva_remove,
+ .driver = {
+ .name = HVA_NAME,
+ .of_match_table = hva_match_types,
+ .pm = &hva_pm_ops,
+ },
+};
+
+module_platform_driver(hva_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics HVA video encoder V4L2 driver");
diff --git a/drivers/media/platform/sti/hva/hva.h b/drivers/media/platform/sti/hva/hva.h
new file mode 100644
index 000000000..1226d60cc
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#ifndef HVA_H
+#define HVA_H
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+
+#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
+
+#define hva_to_dev(h) (h->dev)
+
+#define ctx_to_dev(c) (c->hva_dev->dev)
+
+#define ctx_to_hdev(c) (c->hva_dev)
+
+#define HVA_NAME "st-hva"
+#define HVA_PREFIX "[---:----]"
+
+extern const struct hva_enc nv12h264enc;
+extern const struct hva_enc nv21h264enc;
+
+/**
+ * struct hva_frameinfo - information about hva frame
+ *
+ * @pixelformat: fourcc code for uncompressed video format
+ * @width: width of frame
+ * @height: height of frame
+ * @aligned_width: width of frame (with encoder alignment constraint)
+ * @aligned_height: height of frame (with encoder alignment constraint)
+ * @size: maximum size in bytes required for data
+ */
+struct hva_frameinfo {
+ u32 pixelformat;
+ u32 width;
+ u32 height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 size;
+};
+
+/**
+ * struct hva_streaminfo - information about hva stream
+ *
+ * @streamformat: fourcc code of compressed video format (H.264...)
+ * @width: width of stream
+ * @height: height of stream
+ * @profile: profile string
+ * @level: level string
+ */
+struct hva_streaminfo {
+ u32 streamformat;
+ u32 width;
+ u32 height;
+ u8 profile[32];
+ u8 level[32];
+};
+
+/**
+ * struct hva_controls - hva controls set
+ *
+ * @time_per_frame: time per frame in seconds
+ * @bitrate_mode: bitrate mode (constant bitrate or variable bitrate)
+ * @gop_size: group of pictures size
+ * @bitrate: bitrate (in bps)
+ * @aspect: video aspect
+ * @profile: H.264 profile
+ * @level: H.264 level
+ * @entropy_mode: H.264 entropy mode (CABAC or CAVLC)
+ * @cpb_size: coded picture buffer size (in kB)
+ * @dct8x8: transform mode 8x8 enable
+ * @qpmin: minimum quantizer
+ * @qpmax: maximum quantizer
+ * @vui_sar: pixel aspect ratio enable
+ * @vui_sar_idc: pixel aspect ratio identifier
+ * @sei_fp: SEI frame packing arrangement enable
+ * @sei_fp_type: SEI frame packing arrangement type
+ */
+struct hva_controls {
+ struct v4l2_fract time_per_frame;
+ enum v4l2_mpeg_video_bitrate_mode bitrate_mode;
+ u32 gop_size;
+ u32 bitrate;
+ enum v4l2_mpeg_video_aspect aspect;
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_level level;
+ enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
+ u32 cpb_size;
+ bool dct8x8;
+ u32 qpmin;
+ u32 qpmax;
+ bool vui_sar;
+ enum v4l2_mpeg_video_h264_vui_sar_idc vui_sar_idc;
+ bool sei_fp;
+ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type;
+};
+
+/**
+ * struct hva_frame - hva frame buffer (output)
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @info: frame information (width, height, format, alignment...)
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @prepared: true if vaddr/paddr are resolved
+ */
+struct hva_frame {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+ struct hva_frameinfo info;
+ dma_addr_t paddr;
+ void *vaddr;
+ bool prepared;
+};
+
+/*
+ * to_hva_frame() - cast struct vb2_v4l2_buffer * to struct hva_frame *
+ */
+#define to_hva_frame(vb) \
+ container_of(vb, struct hva_frame, vbuf)
+
+/**
+ * struct hva_stream - hva stream buffer (capture)
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @prepared: true if vaddr/paddr are resolved
+ * @size: size of the buffer in bytes
+ * @bytesused: number of bytes occupied by data in the buffer
+ */
+struct hva_stream {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+ dma_addr_t paddr;
+ void *vaddr;
+ bool prepared;
+ unsigned int size;
+ unsigned int bytesused;
+};
+
+/*
+ * to_hva_stream() - cast struct vb2_v4l2_buffer * to struct hva_stream *
+ */
+#define to_hva_stream(vb) \
+ container_of(vb, struct hva_stream, vbuf)
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+/**
+ * struct hva_ctx_dbg - instance context debug info
+ *
+ * @debugfs_entry: debugfs entry
+ * @is_valid_period: true if the sequence is valid for performance
+ * @begin: start time of last HW task
+ * @total_duration: total HW processing durations in 0.1ms
+ * @cnt_duration: number of HW processings
+ * @min_duration: minimum HW processing duration in 0.1ms
+ * @max_duration: maximum HW processing duration in 0.1ms
+ * @avg_duration: average HW processing duration in 0.1ms
+ * @max_fps: maximum frames encoded per second (in 0.1Hz)
+ * @total_period: total encoding periods in 0.1ms
+ * @cnt_period: number of periods
+ * @min_period: minimum encoding period in 0.1ms
+ * @max_period: maximum encoding period in 0.1ms
+ * @avg_period: average encoding period in 0.1ms
+ * @total_stream_size: total number of encoded bytes
+ * @avg_fps: average frames encoded per second (in 0.1Hz)
+ * @window_duration: duration of the sampling window in 0.1ms
+ * @cnt_window: number of samples in the window
+ * @window_stream_size: number of encoded bytes over the sampling window
+ * @last_bitrate: bitrate over the last sampling window
+ * @min_bitrate: minimum bitrate in kbps
+ * @max_bitrate: maximum bitrate in kbps
+ * @avg_bitrate: average bitrate in kbps
+ */
+struct hva_ctx_dbg {
+ struct dentry *debugfs_entry;
+ bool is_valid_period;
+ ktime_t begin;
+ u32 total_duration;
+ u32 cnt_duration;
+ u32 min_duration;
+ u32 max_duration;
+ u32 avg_duration;
+ u32 max_fps;
+ u32 total_period;
+ u32 cnt_period;
+ u32 min_period;
+ u32 max_period;
+ u32 avg_period;
+ u32 total_stream_size;
+ u32 avg_fps;
+ u32 window_duration;
+ u32 cnt_window;
+ u32 window_stream_size;
+ u32 last_bitrate;
+ u32 min_bitrate;
+ u32 max_bitrate;
+ u32 avg_bitrate;
+};
+#endif
+
+struct hva_dev;
+struct hva_enc;
+
+/**
+ * struct hva_ctx - context of hva instance
+ *
+ * @hva_dev: the device that this instance is associated with
+ * @fh: V4L2 file handle
+ * @ctrl_handler: V4L2 controls handler
+ * @ctrls: hva controls set
+ * @id: instance identifier
+ * @aborting: true if current job aborted
+ * @name: instance name (debug purpose)
+ * @run_work: encode work
+ * @lock: mutex used to lock access of this context
+ * @flags: validity of streaminfo and frameinfo fields
+ * @frame_num: frame number
+ * @stream_num: stream number
+ * @max_stream_size: maximum size in bytes required for stream data
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ * @streaminfo: stream properties
+ * @frameinfo: frame properties
+ * @enc: current encoder
+ * @priv: private codec data for this instance, allocated
+ * by encoder @open time
+ * @hw_err: true if hardware error detected
+ * @encoded_frames: number of encoded frames
+ * @sys_errors: number of system errors (memory, resource, pm...)
+ * @encode_errors: number of encoding errors (hw/driver errors)
+ * @frame_errors: number of frame errors (format, size, header...)
+ * @dbg: context debug info
+ */
+struct hva_ctx {
+ struct hva_dev *hva_dev;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct hva_controls ctrls;
+ u8 id;
+ bool aborting;
+ char name[100];
+ struct work_struct run_work;
+ /* mutex protecting this data structure */
+ struct mutex lock;
+ u32 flags;
+ u32 frame_num;
+ u32 stream_num;
+ u32 max_stream_size;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ struct hva_streaminfo streaminfo;
+ struct hva_frameinfo frameinfo;
+ struct hva_enc *enc;
+ void *priv;
+ bool hw_err;
+ u32 encoded_frames;
+ u32 sys_errors;
+ u32 encode_errors;
+ u32 frame_errors;
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ struct hva_ctx_dbg dbg;
+#endif
+};
+
+#define HVA_FLAG_STREAMINFO 0x0001
+#define HVA_FLAG_FRAMEINFO 0x0002
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+/**
+ * struct hva_dev_dbg - device debug info
+ *
+ * @debugfs_entry: debugfs entry
+ * @last_ctx: debug information about last running instance context
+ */
+struct hva_dev_dbg {
+ struct dentry *debugfs_entry;
+ struct hva_ctx last_ctx;
+};
+#endif
+
+#define HVA_MAX_INSTANCES 16
+#define HVA_MAX_ENCODERS 10
+#define HVA_MAX_FORMATS HVA_MAX_ENCODERS
+
+/**
+ * struct hva_dev - abstraction for hva entity
+ *
+ * @v4l2_dev: V4L2 device
+ * @vdev: video device
+ * @pdev: platform device
+ * @dev: device
+ * @lock: mutex used for critical sections & V4L2 ops
+ * serialization
+ * @m2m_dev: memory-to-memory V4L2 device information
+ * @instances: opened instances
+ * @nb_of_instances: number of opened instances
+ * @instance_id: rolling counter identifying an instance (debug purpose)
+ * @regs: register io memory access
+ * @esram_addr: esram address
+ * @esram_size: esram size
+ * @clk: hva clock
+ * @irq_its: status interrupt
+ * @irq_err: error interrupt
+ * @work_queue: work queue to handle the encode jobs
+ * @protect_mutex: mutex used to lock access of hardware
+ * @interrupt: completion interrupt
+ * @ip_version: IP hardware version
+ * @encoders: registered encoders
+ * @nb_of_encoders: number of registered encoders
+ * @pixelformats: supported uncompressed video formats
+ * @nb_of_pixelformats: number of supported uncompressed video formats
+ * @streamformats: supported compressed video formats
+ * @nb_of_streamformats: number of supported compressed video formats
+ * @sfl_reg: status fifo level register value
+ * @sts_reg: status register value
+ * @lmi_err_reg: local memory interface error register value
+ * @emi_err_reg: external memory interface error register value
+ * @hec_mif_err_reg: HEC memory interface error register value
+ * @dbg: device debug info
+ */
+struct hva_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ /* mutex protecting vb2_queue structure */
+ struct mutex lock;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct hva_ctx *instances[HVA_MAX_INSTANCES];
+ unsigned int nb_of_instances;
+ unsigned int instance_id;
+ void __iomem *regs;
+ u32 esram_addr;
+ u32 esram_size;
+ struct clk *clk;
+ int irq_its;
+ int irq_err;
+ struct workqueue_struct *work_queue;
+ /* mutex protecting hardware access */
+ struct mutex protect_mutex;
+ struct completion interrupt;
+ unsigned long int ip_version;
+ const struct hva_enc *encoders[HVA_MAX_ENCODERS];
+ u32 nb_of_encoders;
+ u32 pixelformats[HVA_MAX_FORMATS];
+ u32 nb_of_pixelformats;
+ u32 streamformats[HVA_MAX_FORMATS];
+ u32 nb_of_streamformats;
+ u32 sfl_reg;
+ u32 sts_reg;
+ u32 lmi_err_reg;
+ u32 emi_err_reg;
+ u32 hec_mif_err_reg;
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ struct hva_dev_dbg dbg;
+#endif
+};
+
+/**
+ * struct hva_enc - hva encoder
+ *
+ * @name: encoder name
+ * @streamformat: fourcc code for compressed video format (H.264...)
+ * @pixelformat: fourcc code for uncompressed video format
+ * @max_width: maximum width of frame for this encoder
+ * @max_height: maximum height of frame for this encoder
+ * @open: open encoder
+ * @close: close encoder
+ * @encode: encode a frame (struct hva_frame) in a stream
+ * (struct hva_stream)
+ */
+
+struct hva_enc {
+ const char *name;
+ u32 streamformat;
+ u32 pixelformat;
+ u32 max_width;
+ u32 max_height;
+ int (*open)(struct hva_ctx *ctx);
+ int (*close)(struct hva_ctx *ctx);
+ int (*encode)(struct hva_ctx *ctx, struct hva_frame *frame,
+ struct hva_stream *stream);
+};
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+void hva_debugfs_create(struct hva_dev *hva);
+void hva_debugfs_remove(struct hva_dev *hva);
+void hva_dbg_ctx_create(struct hva_ctx *ctx);
+void hva_dbg_ctx_remove(struct hva_ctx *ctx);
+void hva_dbg_perf_begin(struct hva_ctx *ctx);
+void hva_dbg_perf_end(struct hva_ctx *ctx, struct hva_stream *stream);
+#endif
+
+#endif /* HVA_H */
diff --git a/drivers/media/platform/stm32/Makefile b/drivers/media/platform/stm32/Makefile
new file mode 100644
index 000000000..073550913
--- /dev/null
+++ b/drivers/media/platform/stm32/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
+obj-$(CONFIG_VIDEO_STM32_HDMI_CEC) += stm32-cec.o
diff --git a/drivers/media/platform/stm32/stm32-cec.c b/drivers/media/platform/stm32/stm32-cec.c
new file mode 100644
index 000000000..7c496bc1c
--- /dev/null
+++ b/drivers/media/platform/stm32/stm32-cec.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 CEC driver
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <media/cec.h>
+
+#define CEC_NAME "stm32-cec"
+
+/* CEC registers */
+#define CEC_CR 0x0000 /* Control Register */
+#define CEC_CFGR 0x0004 /* ConFiGuration Register */
+#define CEC_TXDR		0x0008  /* Tx data Register */
+#define CEC_RXDR 0x000C /* Rx data Register */
+#define CEC_ISR 0x0010 /* Interrupt and status Register */
+#define CEC_IER 0x0014 /* Interrupt enable Register */
+
+#define TXEOM BIT(2)
+#define TXSOM BIT(1)
+#define CECEN BIT(0)
+
+#define LSTN BIT(31)
+#define OAR GENMASK(30, 16)
+#define SFTOP BIT(8)
+#define BRDNOGEN BIT(7)
+#define LBPEGEN BIT(6)
+#define BREGEN BIT(5)
+#define BRESTP BIT(4)
+#define RXTOL BIT(3)
+#define SFT GENMASK(2, 0)
+#define FULL_CFG (LSTN | SFTOP | BRDNOGEN | LBPEGEN | BREGEN | BRESTP \
+ | RXTOL)
+
+#define TXACKE BIT(12)
+#define TXERR BIT(11)
+#define TXUDR BIT(10)
+#define TXEND BIT(9)
+#define TXBR BIT(8)
+#define ARBLST BIT(7)
+#define RXACKE BIT(6)
+#define RXOVR BIT(2)
+#define RXEND BIT(1)
+#define RXBR BIT(0)
+
+#define ALL_TX_IT (TXEND | TXBR | TXACKE | TXERR | TXUDR | ARBLST)
+#define ALL_RX_IT (RXEND | RXBR | RXACKE | RXOVR)
+
+struct stm32_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk_cec;
+ struct clk *clk_hdmi_cec;
+ struct reset_control *rstc;
+ struct regmap *regmap;
+ int irq;
+ u32 irq_status;
+ struct cec_msg rx_msg;
+ struct cec_msg tx_msg;
+ int tx_cnt;
+};
+
+static void cec_hw_init(struct stm32_cec *cec)
+{
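+	/* disable the block and clear the Tx start/end commands before configuring */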
+ regmap_update_bits(cec->regmap, CEC_CR, TXEOM | TXSOM | CECEN, 0);
+
+ regmap_update_bits(cec->regmap, CEC_IER, ALL_TX_IT | ALL_RX_IT,
+ ALL_TX_IT | ALL_RX_IT);
+
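+	/* apply the full listen-mode configuration */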
+ regmap_update_bits(cec->regmap, CEC_CFGR, FULL_CFG, FULL_CFG);
+}
+
+static void stm32_tx_done(struct stm32_cec *cec, u32 status)
+{
+ if (status & (TXERR | TXUDR)) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR,
+ 0, 0, 0, 1);
+ return;
+ }
+
+ if (status & ARBLST) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_ARB_LOST,
+ 1, 0, 0, 0);
+ return;
+ }
+
+ if (status & TXACKE) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK,
+ 0, 1, 0, 0);
+ return;
+ }
+
+ if (cec->irq_status & TXBR) {
+ /* send next byte */
+ if (cec->tx_cnt < cec->tx_msg.len)
+ regmap_write(cec->regmap, CEC_TXDR,
+ cec->tx_msg.msg[cec->tx_cnt++]);
+
+ /* TXEOM is set to command transmission of the last byte */
+ if (cec->tx_cnt == cec->tx_msg.len)
+ regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM);
+ }
+
+ if (cec->irq_status & TXEND)
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+}
+
+static void stm32_rx_done(struct stm32_cec *cec, u32 status)
+{
+ if (cec->irq_status & (RXACKE | RXOVR)) {
+ cec->rx_msg.len = 0;
+ return;
+ }
+
+ if (cec->irq_status & RXBR) {
+ u32 val;
+
+ regmap_read(cec->regmap, CEC_RXDR, &val);
+ cec->rx_msg.msg[cec->rx_msg.len++] = val & 0xFF;
+ }
+
+ if (cec->irq_status & RXEND) {
+ cec_received_msg(cec->adap, &cec->rx_msg);
+ cec->rx_msg.len = 0;
+ }
+}
+
+static irqreturn_t stm32_cec_irq_thread(int irq, void *arg)
+{
+ struct stm32_cec *cec = arg;
+
+ if (cec->irq_status & ALL_TX_IT)
+ stm32_tx_done(cec, cec->irq_status);
+
+ if (cec->irq_status & ALL_RX_IT)
+ stm32_rx_done(cec, cec->irq_status);
+
+ cec->irq_status = 0;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cec_irq_handler(int irq, void *arg)
+{
+ struct stm32_cec *cec = arg;
+
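+	/* latch and acknowledge the interrupt status; processing is deferred to the threaded handler */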
+ regmap_read(cec->regmap, CEC_ISR, &cec->irq_status);
+
+ regmap_update_bits(cec->regmap, CEC_ISR,
+ ALL_TX_IT | ALL_RX_IT,
+ ALL_TX_IT | ALL_RX_IT);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int stm32_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct stm32_cec *cec = adap->priv;
+ int ret = 0;
+
+ if (enable) {
+ ret = clk_enable(cec->clk_cec);
+ if (ret)
+ dev_err(cec->dev, "fail to enable cec clock\n");
+
+ clk_enable(cec->clk_hdmi_cec);
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN);
+ } else {
+ clk_disable(cec->clk_cec);
+ clk_disable(cec->clk_hdmi_cec);
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0);
+ }
+
+ return ret;
+}
+
+static int stm32_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct stm32_cec *cec = adap->priv;
+ u32 oar = (1 << logical_addr) << 16;
+
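+	/* the own-address field (one bit per logical address, bits 30:16) is updated with the block disabled */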
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ regmap_update_bits(cec->regmap, CEC_CFGR, OAR, 0);
+ else
+ regmap_update_bits(cec->regmap, CEC_CFGR, oar, oar);
+
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN);
+
+ return 0;
+}
+
+static int stm32_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct stm32_cec *cec = adap->priv;
+
+ /* Copy message */
+ cec->tx_msg = *msg;
+ cec->tx_cnt = 0;
+
+ /*
+ * If the CEC message consists of only one byte,
+	 * TXEOM must be set before TXSOM.
+ */
+ if (cec->tx_msg.len == 1)
+ regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM);
+
+ /* TXSOM is set to command transmission of the first byte */
+ regmap_update_bits(cec->regmap, CEC_CR, TXSOM, TXSOM);
+
+ /* Write the header (first byte of message) */
+ regmap_write(cec->regmap, CEC_TXDR, cec->tx_msg.msg[0]);
+ cec->tx_cnt++;
+
+ return 0;
+}
+
+static const struct cec_adap_ops stm32_cec_adap_ops = {
+ .adap_enable = stm32_cec_adap_enable,
+ .adap_log_addr = stm32_cec_adap_log_addr,
+ .adap_transmit = stm32_cec_adap_transmit,
+};
+
+static const struct regmap_config stm32_cec_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x14,
+ .fast_io = true,
+};
+
+static int stm32_cec_probe(struct platform_device *pdev)
+{
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_MODE_MONITOR_ALL;
+ struct resource *res;
+ struct stm32_cec *cec;
+ void __iomem *mmio;
+ int ret;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ cec->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "cec", mmio,
+ &stm32_cec_regmap_cfg);
+
+ if (IS_ERR(cec->regmap))
+ return PTR_ERR(cec->regmap);
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
+ stm32_cec_irq_handler,
+ stm32_cec_irq_thread,
+ 0,
+ pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk_cec = devm_clk_get(&pdev->dev, "cec");
+ if (IS_ERR(cec->clk_cec)) {
+ dev_err(&pdev->dev, "Cannot get cec clock\n");
+ return PTR_ERR(cec->clk_cec);
+ }
+
+ ret = clk_prepare(cec->clk_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare cec clock\n");
+ return ret;
+ }
+
+ cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec");
+ if (!IS_ERR(cec->clk_hdmi_cec)) {
+ ret = clk_prepare(cec->clk_hdmi_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare hdmi-cec clock\n");
+ return ret;
+ }
+ }
+
+ /*
+	 * The CEC_CAP_PHYS_ADDR capability should be removed once a CEC
+	 * notifier is available, for example when a DRM driver can provide
+	 * the EDID.
+ */
+ cec->adap = cec_allocate_adapter(&stm32_cec_adap_ops, cec,
+ CEC_NAME, caps, CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ cec_hw_init(cec);
+
+ platform_set_drvdata(pdev, cec);
+
+ return 0;
+}
+
+static int stm32_cec_remove(struct platform_device *pdev)
+{
+ struct stm32_cec *cec = platform_get_drvdata(pdev);
+
+ clk_unprepare(cec->clk_cec);
+ clk_unprepare(cec->clk_hdmi_cec);
+
+ cec_unregister_adapter(cec->adap);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_cec_of_match[] = {
+ { .compatible = "st,stm32-cec" },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, stm32_cec_of_match);
+
+static struct platform_driver stm32_cec_driver = {
+ .probe = stm32_cec_probe,
+ .remove = stm32_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = stm32_cec_of_match,
+ },
+};
+
+module_platform_driver(stm32_cec_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Consumer Electronics Control");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
new file mode 100644
index 000000000..ee1a21179
--- /dev/null
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -0,0 +1,1904 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * for STMicroelectronics.
+ *
+ * This driver is based on atmel_isi.c
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-rect.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define DRV_NAME "stm32-dcmi"
+
+/* Registers offset for DCMI */
+#define DCMI_CR 0x00 /* Control Register */
+#define DCMI_SR 0x04 /* Status Register */
+#define DCMI_RIS 0x08 /* Raw Interrupt Status register */
+#define DCMI_IER 0x0C /* Interrupt Enable Register */
+#define DCMI_MIS 0x10 /* Masked Interrupt Status register */
+#define DCMI_ICR 0x14 /* Interrupt Clear Register */
+#define DCMI_ESCR 0x18 /* Embedded Synchronization Code Register */
+#define DCMI_ESUR 0x1C /* Embedded Synchronization Unmask Register */
+#define DCMI_CWSTRT 0x20 /* Crop Window STaRT */
+#define DCMI_CWSIZE 0x24 /* Crop Window SIZE */
+#define DCMI_DR 0x28 /* Data Register */
+#define DCMI_IDR 0x2C /* IDentifier Register */
+
+/* Bits definition for control register (DCMI_CR) */
+#define CR_CAPTURE BIT(0)
+#define CR_CM BIT(1)
+#define CR_CROP BIT(2)
+#define CR_JPEG BIT(3)
+#define CR_ESS BIT(4)
+#define CR_PCKPOL BIT(5)
+#define CR_HSPOL BIT(6)
+#define CR_VSPOL BIT(7)
+#define CR_FCRC_0 BIT(8)
+#define CR_FCRC_1 BIT(9)
+#define CR_EDM_0 BIT(10)
+#define CR_EDM_1 BIT(11)
+#define CR_ENABLE BIT(14)
+
+/* Bits definition for status register (DCMI_SR) */
+#define SR_HSYNC BIT(0)
+#define SR_VSYNC BIT(1)
+#define SR_FNE BIT(2)
+
+/*
+ * Bits definition for interrupt registers
+ * (DCMI_RIS, DCMI_IER, DCMI_MIS, DCMI_ICR)
+ */
+#define IT_FRAME BIT(0)
+#define IT_OVR BIT(1)
+#define IT_ERR BIT(2)
+#define IT_VSYNC BIT(3)
+#define IT_LINE BIT(4)
+
+enum state {
+ STOPPED = 0,
+ WAIT_FOR_BUFFER,
+ RUNNING,
+};
+
+#define MIN_WIDTH 16U
+#define MAX_WIDTH 2592U
+#define MIN_HEIGHT 16U
+#define MAX_HEIGHT 2592U
+
+#define TIMEOUT_MS 1000
+
+struct dcmi_graph_entity {
+ struct device_node *node;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+};
+
+struct dcmi_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u8 bpp;
+};
+
+struct dcmi_framesize {
+ u32 width;
+ u32 height;
+};
+
+struct dcmi_buf {
+ struct vb2_v4l2_buffer vb;
+ bool prepared;
+ dma_addr_t paddr;
+ size_t size;
+ struct list_head list;
+};
+
+struct stm32_dcmi {
+	/* Protects access to variables shared with the interrupt handler */
+ spinlock_t irqlock;
+ struct device *dev;
+ void __iomem *regs;
+ struct resource *res;
+ struct reset_control *rstc;
+ int sequence;
+ struct list_head buffers;
+ struct dcmi_buf *active;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct v4l2_async_notifier notifier;
+ struct dcmi_graph_entity entity;
+ struct v4l2_format fmt;
+ struct v4l2_rect crop;
+ bool do_crop;
+
+ const struct dcmi_format **sd_formats;
+ unsigned int num_of_sd_formats;
+ const struct dcmi_format *sd_format;
+ struct dcmi_framesize *sd_framesizes;
+ unsigned int num_of_sd_framesizes;
+ struct dcmi_framesize sd_framesize;
+ struct v4l2_rect sd_bounds;
+
+ /* Protect this data structure */
+ struct mutex lock;
+ struct vb2_queue queue;
+
+ struct v4l2_fwnode_bus_parallel bus;
+ struct completion complete;
+ struct clk *mclk;
+ enum state state;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ u32 misr;
+ int errors_count;
+ int overrun_count;
+ int buffers_count;
+
+ /* Ensure DMA operations atomicity */
+ struct mutex dma_lock;
+};
+
+static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct stm32_dcmi, notifier);
+}
+
+static inline u32 reg_read(void __iomem *base, u32 reg)
+{
+ return readl_relaxed(base + reg);
+}
+
+static inline void reg_write(void __iomem *base, u32 reg, u32 val)
+{
+ writel_relaxed(val, base + reg);
+}
+
+static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
+{
+ reg_write(base, reg, reg_read(base, reg) | mask);
+}
+
+static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
+{
+ reg_write(base, reg, reg_read(base, reg) & ~mask);
+}
+
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);
+
+static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
+ struct dcmi_buf *buf,
+ size_t bytesused,
+ int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!buf)
+ return;
+
+ list_del_init(&buf->list);
+
+ vbuf = &buf->vb;
+
+ vbuf->sequence = dcmi->sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
+ vb2_buffer_done(&vbuf->vb2_buf,
+ err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dev_dbg(dcmi->dev, "buffer[%d] done seq=%d, bytesused=%zu\n",
+ vbuf->vb2_buf.index, vbuf->sequence, bytesused);
+
+ dcmi->buffers_count++;
+ dcmi->active = NULL;
+}
+
+static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
+{
+ struct dcmi_buf *buf;
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ if (dcmi->state != RUNNING) {
+ spin_unlock_irq(&dcmi->irqlock);
+ return -EINVAL;
+ }
+
+	/* Start a new DMA transfer with the next buffer */
+ if (list_empty(&dcmi->buffers)) {
+ dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
+ spin_unlock_irq(&dcmi->irqlock);
+ return 0;
+ }
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ return dcmi_start_capture(dcmi, buf);
+}
+
+static void dcmi_dma_callback(void *param)
+{
+ struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
+ struct dma_tx_state state;
+ enum dma_status status;
+ struct dcmi_buf *buf = dcmi->active;
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Check DMA status */
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
+
+ switch (status) {
+ case DMA_IN_PROGRESS:
+ dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
+ break;
+ case DMA_PAUSED:
+ dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
+ break;
+ case DMA_ERROR:
+ dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
+
+ /* Return buffer to V4L2 in error state */
+ dcmi_buffer_done(dcmi, buf, 0, -EIO);
+ break;
+ case DMA_COMPLETE:
+ dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
+
+ /* Return buffer to V4L2 */
+ dcmi_buffer_done(dcmi, buf, buf->size, 0);
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ /* Restart capture */
+ if (dcmi_restart_capture(dcmi))
+ dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
+ __func__);
+ return;
+ default:
+ dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
+ break;
+ }
+
+ spin_unlock_irq(&dcmi->irqlock);
+}
+
+static int dcmi_start_dma(struct stm32_dcmi *dcmi,
+ struct dcmi_buf *buf)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_slave_config config;
+ int ret;
+
+ memset(&config, 0, sizeof(config));
+
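+	/* read 32-bit words from the DCMI data register, 4-word bursts on the memory side */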
+ config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR;
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_maxburst = 4;
+
+ /* Configure DMA channel */
+ ret = dmaengine_slave_config(dcmi->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: DMA channel config failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Avoid call of dmaengine_terminate_all() between
+ * dmaengine_prep_slave_single() and dmaengine_submit()
+ * by locking the whole DMA submission sequence
+ */
+ mutex_lock(&dcmi->dma_lock);
+
+ /* Prepare a DMA transaction */
+ desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
+ buf->size,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
+ __func__, &buf->paddr, buf->size);
+ mutex_unlock(&dcmi->dma_lock);
+ return -EINVAL;
+ }
+
+ /* Set completion callback routine for notification */
+ desc->callback = dcmi_dma_callback;
+ desc->callback_param = dcmi;
+
+ /* Push current DMA transaction in the pending queue */
+ dcmi->dma_cookie = dmaengine_submit(desc);
+ if (dma_submit_error(dcmi->dma_cookie)) {
+ dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
+ mutex_unlock(&dcmi->dma_lock);
+ return -ENXIO;
+ }
+
+ mutex_unlock(&dcmi->dma_lock);
+
+ dma_async_issue_pending(dcmi->dma_chan);
+
+ return 0;
+}
+
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
+{
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = dcmi_start_dma(dcmi, buf);
+ if (ret) {
+ dcmi->errors_count++;
+ return ret;
+ }
+
+ /* Enable capture */
+ reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
+
+ return 0;
+}
+
+static void dcmi_set_crop(struct stm32_dcmi *dcmi)
+{
+ u32 size, start;
+
+ /* Crop resolution */
+ size = ((dcmi->crop.height - 1) << 16) |
+ ((dcmi->crop.width << 1) - 1);
+ reg_write(dcmi->regs, DCMI_CWSIZE, size);
+
+ /* Crop start point */
+ start = ((dcmi->crop.top) << 16) |
+ ((dcmi->crop.left << 1));
+ reg_write(dcmi->regs, DCMI_CWSTRT, start);
+
+ dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n",
+ dcmi->crop.width, dcmi->crop.height,
+ dcmi->crop.left, dcmi->crop.top);
+
+ /* Enable crop */
+ reg_set(dcmi->regs, DCMI_CR, CR_CROP);
+}
+
+static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+ struct dcmi_buf *buf = dcmi->active;
+
+ if (!buf)
+ return;
+
+ /*
+	 * Because the sensor sends JPEG buffers of variable size, the DMA
+	 * transfer never completes: the programmed transfer size is never
+	 * reached. To make sure all JPEG data have been transferred into the
+	 * active buffer, the DMA is drained. The DMA tx status then gives the
+	 * amount of data actually written to memory, which is returned to
+	 * V4L2 as the active buffer payload.
+ */
+
+ /* Drain DMA */
+ dmaengine_synchronize(dcmi->dma_chan);
+
+ /* Get DMA residue to get JPEG size */
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
+ if (status != DMA_ERROR && state.residue < buf->size) {
+ /* Return JPEG buffer to V4L2 with received JPEG buffer size */
+ dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
+ } else {
+ dcmi->errors_count++;
+ dev_err(dcmi->dev, "%s: Cannot get JPEG size from DMA\n",
+ __func__);
+ /* Return JPEG buffer to V4L2 in ERROR state */
+ dcmi_buffer_done(dcmi, buf, 0, -EIO);
+ }
+
+ /* Abort DMA operation */
+ dmaengine_terminate_all(dcmi->dma_chan);
+
+ /* Restart capture */
+ if (dcmi_restart_capture(dcmi))
+ dev_err(dcmi->dev, "%s: Cannot restart capture on JPEG received\n",
+ __func__);
+}
+
+static irqreturn_t dcmi_irq_thread(int irq, void *arg)
+{
+ struct stm32_dcmi *dcmi = arg;
+
+ spin_lock_irq(&dcmi->irqlock);
+
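+	/* account overrun and error interrupts in the statistics */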
+ if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
+ dcmi->errors_count++;
+ if (dcmi->misr & IT_OVR)
+ dcmi->overrun_count++;
+ }
+
+ if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
+ dcmi->misr & IT_FRAME) {
+ /* JPEG received */
+ spin_unlock_irq(&dcmi->irqlock);
+ dcmi_process_jpeg(dcmi);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irq(&dcmi->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dcmi_irq_callback(int irq, void *arg)
+{
+ struct stm32_dcmi *dcmi = arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcmi->irqlock, flags);
+
+ dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);
+
+ /* Clear interrupt */
+ reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);
+
+ spin_unlock_irqrestore(&dcmi->irqlock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int dcmi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ unsigned int size;
+
+ size = dcmi->fmt.fmt.pix.sizeimage;
+
+ /* Make sure the image size is large enough */
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int dcmi_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int dcmi_buf_prepare(struct vb2_buffer *vb)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+ unsigned long size;
+
+ size = dcmi->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(dcmi->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ if (!buf->prepared) {
+ /* Get memory addresses */
+ buf->paddr =
+ vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ buf->prepared = true;
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
+
+ dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
+ vb->index, &buf->paddr, buf->size);
+ }
+
+ return 0;
+}
+
+static void dcmi_buf_queue(struct vb2_buffer *vb)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Enqueue to video buffers list */
+ list_add_tail(&buf->list, &dcmi->buffers);
+
+ if (dcmi->state == WAIT_FOR_BUFFER) {
+ dcmi->state = RUNNING;
+ dcmi->active = buf;
+
+ dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
+ buf->vb.vb2_buf.index);
+
+ spin_unlock_irq(&dcmi->irqlock);
+ if (dcmi_start_capture(dcmi, buf))
+ dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
+ __func__);
+ return;
+ }
+
+ spin_unlock_irq(&dcmi->irqlock);
+}
+
+static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ struct dcmi_buf *buf, *node;
+ u32 val = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(dcmi->dev);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
+ __func__, ret);
+ goto err_pm_put;
+ }
+
+ /* Enable stream on the sub device */
+ ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+		dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error\n",
+ __func__);
+ goto err_pm_put;
+ }
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Set bus width */
+ switch (dcmi->bus.bus_width) {
+ case 14:
+ val |= CR_EDM_0 | CR_EDM_1;
+ break;
+ case 12:
+ val |= CR_EDM_1;
+ break;
+ case 10:
+ val |= CR_EDM_0;
+ break;
+ default:
+ /* Set bus width to 8 bits by default */
+ break;
+ }
+
+ /* Set vertical synchronization polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ val |= CR_VSPOL;
+
+ /* Set horizontal synchronization polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ val |= CR_HSPOL;
+
+ /* Set pixel clock polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ val |= CR_PCKPOL;
+
+ reg_write(dcmi->regs, DCMI_CR, val);
+
+ /* Set crop */
+ if (dcmi->do_crop)
+ dcmi_set_crop(dcmi);
+
+	/* Enable JPEG capture */
+	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
+		reg_set(dcmi->regs, DCMI_CR, CR_CM); /* Snapshot mode */
+
+ /* Enable dcmi */
+ reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
+
+ dcmi->sequence = 0;
+ dcmi->errors_count = 0;
+ dcmi->overrun_count = 0;
+ dcmi->buffers_count = 0;
+
+ /*
+ * Start transfer if at least one buffer has been queued,
+	 * otherwise the transfer is deferred until a buffer is queued
+ */
+ if (list_empty(&dcmi->buffers)) {
+ dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
+ spin_unlock_irq(&dcmi->irqlock);
+ return 0;
+ }
+
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
+
+ dcmi->state = RUNNING;
+
+ dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
+
+ spin_unlock_irq(&dcmi->irqlock);
+ ret = dcmi_start_capture(dcmi, buf);
+ if (ret) {
+ dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
+ __func__);
+ goto err_subdev_streamoff;
+ }
+
+	/* Enable interrupts */
+ reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
+
+ return 0;
+
+err_subdev_streamoff:
+ v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
+
+err_pm_put:
+ pm_runtime_put(dcmi->dev);
+ spin_lock_irq(&dcmi->irqlock);
+ /*
+ * Return all buffers to vb2 in QUEUED state.
+ * This will give ownership back to userspace
+ */
+ list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ dcmi->active = NULL;
+ spin_unlock_irq(&dcmi->irqlock);
+
+ return ret;
+}
+
+static void dcmi_stop_streaming(struct vb2_queue *vq)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ struct dcmi_buf *buf, *node;
+ int ret;
+
+ /* Disable stream on the sub device */
+ ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ dev_err(dcmi->dev, "%s: Failed to stop streaming, subdev streamoff error (%d)\n",
+ __func__, ret);
+
+ spin_lock_irq(&dcmi->irqlock);
+
+	/* Disable interrupts */
+ reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
+
+ /* Disable DCMI */
+ reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
+
+ /* Return all queued buffers to vb2 in ERROR state */
+ list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ dcmi->active = NULL;
+ dcmi->state = STOPPED;
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ /* Stop all pending DMA operations */
+ mutex_lock(&dcmi->dma_lock);
+ dmaengine_terminate_all(dcmi->dma_chan);
+ mutex_unlock(&dcmi->dma_lock);
+
+ pm_runtime_put(dcmi->dev);
+
+ if (dcmi->errors_count)
+ dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
+ dcmi->errors_count, dcmi->overrun_count,
+ dcmi->buffers_count);
+ dev_dbg(dcmi->dev, "Stop streaming, errors=%d (overrun=%d), buffers=%d\n",
+ dcmi->errors_count, dcmi->overrun_count,
+ dcmi->buffers_count);
+}
+
+static const struct vb2_ops dcmi_video_qops = {
+ .queue_setup = dcmi_queue_setup,
+ .buf_init = dcmi_buf_init,
+ .buf_prepare = dcmi_buf_prepare,
+ .buf_queue = dcmi_buf_queue,
+ .start_streaming = dcmi_start_streaming,
+ .stop_streaming = dcmi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int dcmi_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ *fmt = dcmi->fmt;
+
+ return 0;
+}
+
+static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = dcmi->num_of_sd_formats;
+ const struct dcmi_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = dcmi->sd_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static void __find_outer_frame_size(struct stm32_dcmi *dcmi,
+ struct v4l2_pix_format *pix,
+ struct dcmi_framesize *framesize)
+{
+ struct dcmi_framesize *match = NULL;
+ unsigned int i;
+ unsigned int min_err = UINT_MAX;
+
+ for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
+ struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
+ int w_err = (fsize->width - pix->width);
+ int h_err = (fsize->height - pix->height);
+ int err = w_err + h_err;
+
+ if (w_err >= 0 && h_err >= 0 && err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ }
+ if (!match)
+ match = &dcmi->sd_framesizes[0];
+
+ *framesize = *match;
+}
+
+static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
+ const struct dcmi_format **sd_format,
+ struct dcmi_framesize *sd_framesize)
+{
+ const struct dcmi_format *sd_fmt;
+ struct dcmi_framesize sd_fsize;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ bool do_crop;
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
+ if (!sd_fmt) {
+ if (!dcmi->num_of_sd_formats)
+ return -ENODATA;
+
+ sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
+ pix->pixelformat = sd_fmt->fourcc;
+ }
+
+ /* Limit to hardware capabilities */
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
+
+ /* No crop if JPEG is requested */
+ do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);
+
+ if (do_crop && dcmi->num_of_sd_framesizes) {
+ struct dcmi_framesize outer_sd_fsize;
+ /*
+ * If crop is requested and the sensor has discrete frame sizes,
+ * select the frame size that is just larger than the request
+ */
+ __find_outer_frame_size(dcmi, pix, &outer_sd_fsize);
+ pix->width = outer_sd_fsize.width;
+ pix->height = outer_sd_fsize.height;
+ }
+
+ v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
+ &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ /* Update pix according to what the sensor can do */
+ v4l2_fill_pix_format(pix, &format.format);
+
+ /* Save the resolution the sensor can actually achieve */
+ sd_fsize.width = pix->width;
+ sd_fsize.height = pix->height;
+
+ if (do_crop) {
+ struct v4l2_rect c = dcmi->crop;
+ struct v4l2_rect max_rect;
+
+ /*
+ * Adjust the crop rectangle to the intersection of the
+ * requested format resolution and the requested crop
+ */
+ max_rect.top = 0;
+ max_rect.left = 0;
+ max_rect.width = pix->width;
+ max_rect.height = pix->height;
+ v4l2_rect_map_inside(&c, &max_rect);
+ c.top = clamp_t(s32, c.top, 0, pix->height - c.height);
+ c.left = clamp_t(s32, c.left, 0, pix->width - c.width);
+ dcmi->crop = c;
+
+ /* Adjust the requested format resolution to the crop rectangle */
+ pix->width = dcmi->crop.width;
+ pix->height = dcmi->crop.height;
+ }
+
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = pix->width * sd_fmt->bpp;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ if (sd_format)
+ *sd_format = sd_fmt;
+ if (sd_framesize)
+ *sd_framesize = sd_fsize;
+
+ return 0;
+}
+
+static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct dcmi_format *sd_format;
+ struct dcmi_framesize sd_framesize;
+ struct v4l2_mbus_framefmt *mf = &format.format;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ /*
+ * Try the format; fmt.width/height may have been changed
+ * to match the sensor capabilities or the crop request.
+ * sd_format & sd_framesize will contain what the subdev
+ * can do for this request.
+ */
+ ret = dcmi_try_fmt(dcmi, f, &sd_format, &sd_framesize);
+ if (ret)
+ return ret;
+
+ /* Disable crop if JPEG is requested */
+ if (pix->pixelformat == V4L2_PIX_FMT_JPEG)
+ dcmi->do_crop = false;
+
+ /* pix to mbus format */
+ v4l2_fill_mbus_format(mf, pix,
+ sd_format->mbus_code);
+ mf->width = sd_framesize.width;
+ mf->height = sd_framesize.height;
+
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dcmi->dev, "Sensor format set to 0x%x %ux%u\n",
+ mf->code, mf->width, mf->height);
+ dev_dbg(dcmi->dev, "Buffer format set to %4.4s %ux%u\n",
+ (char *)&pix->pixelformat,
+ pix->width, pix->height);
+
+ dcmi->fmt = *f;
+ dcmi->sd_format = sd_format;
+ dcmi->sd_framesize = sd_framesize;
+
+ return 0;
+}
+
+static int dcmi_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (vb2_is_streaming(&dcmi->queue))
+ return -EBUSY;
+
+ return dcmi_set_fmt(dcmi, f);
+}
+
+static int dcmi_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ return dcmi_try_fmt(dcmi, f, NULL, NULL);
+}
+
+static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (f->index >= dcmi->num_of_sd_formats)
+ return -EINVAL;
+
+ f->pixelformat = dcmi->sd_formats[f->index]->fourcc;
+ return 0;
+}
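From userspace, these handlers are reached through the standard VIDIOC_ENUM_FMT and
VIDIOC_S_FMT ioctls. As a minimal sketch (not part of the patch; the 640x480 request and
the helper name are illustrative assumptions), a client could negotiate a format like this:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Illustrative: pick the first pixel format the driver exposes and
	 * ask for 640x480; the driver adjusts the request internally. */
	static int set_first_format(int fd)
	{
		struct v4l2_fmtdesc desc = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
		struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

		if (ioctl(fd, VIDIOC_ENUM_FMT, &desc) < 0)
			return -1;

		fmt.fmt.pix.pixelformat = desc.pixelformat;
		fmt.fmt.pix.width = 640;
		fmt.fmt.pix.height = 480;
		fmt.fmt.pix.field = V4L2_FIELD_NONE;

		if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
			return -1;
		/* fmt.fmt.pix now reflects what the driver actually granted */
		return 0;
	}

Since dcmi_set_fmt() runs the request through dcmi_try_fmt(), the returned width/height
must be re-read rather than assumed.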
+
+static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
+ struct v4l2_pix_format *pix)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+
+ return 0;
+}
+
+static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
+ struct v4l2_pix_format *pix)
+{
+ const struct dcmi_format *sd_fmt;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_subdev_pad_config pad_cfg;
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
+ if (!sd_fmt) {
+ if (!dcmi->num_of_sd_formats)
+ return -ENODATA;
+
+ sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
+ pix->pixelformat = sd_fmt->fourcc;
+ }
+
+ v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
+ &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
+ struct v4l2_rect *r)
+{
+ struct v4l2_subdev_selection bounds = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ unsigned int max_width, max_height, max_pixsize;
+ struct v4l2_pix_format pix;
+ unsigned int i;
+ int ret;
+
+ /*
+ * Get sensor bounds first
+ */
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_selection,
+ NULL, &bounds);
+ if (!ret)
+ *r = bounds.r;
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ /*
+ * If selection is not implemented,
+ * fall back to enumerating the sensor frame sizes
+ * and taking the largest one
+ */
+ max_width = 0;
+ max_height = 0;
+ max_pixsize = 0;
+ for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
+ struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
+ unsigned int pixsize = fsize->width * fsize->height;
+
+ if (pixsize > max_pixsize) {
+ max_pixsize = pixsize;
+ max_width = fsize->width;
+ max_height = fsize->height;
+ }
+ }
+ if (max_pixsize > 0) {
+ r->top = 0;
+ r->left = 0;
+ r->width = max_width;
+ r->height = max_height;
+ return 0;
+ }
+
+ /*
+ * If frame size enumeration is not implemented,
+ * fall back to the current sensor frame size
+ */
+ ret = dcmi_get_sensor_format(dcmi, &pix);
+ if (ret)
+ return ret;
+
+ r->top = 0;
+ r->left = 0;
+ r->width = pix.width;
+ r->height = pix.height;
+
+ return 0;
+}
+
+static int dcmi_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r = dcmi->sd_bounds;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ if (dcmi->do_crop) {
+ s->r = dcmi->crop;
+ } else {
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = dcmi->fmt.fmt.pix.width;
+ s->r.height = dcmi->fmt.fmt.pix.height;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dcmi_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_rect r = s->r;
+ struct v4l2_rect max_rect;
+ struct v4l2_pix_format pix;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /* Reset sensor resolution to max resolution */
+ pix.pixelformat = dcmi->fmt.fmt.pix.pixelformat;
+ pix.width = dcmi->sd_bounds.width;
+ pix.height = dcmi->sd_bounds.height;
+ dcmi_set_sensor_format(dcmi, &pix);
+
+ /*
+ * Intersect the crop request
+ * with the sensor resolution
+ */
+ max_rect.top = 0;
+ max_rect.left = 0;
+ max_rect.width = pix.width;
+ max_rect.height = pix.height;
+ v4l2_rect_map_inside(&r, &max_rect);
+ r.top = clamp_t(s32, r.top, 0, pix.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, pix.width - r.width);
+
+ if (!(r.top == dcmi->sd_bounds.top &&
+ r.left == dcmi->sd_bounds.left &&
+ r.width == dcmi->sd_bounds.width &&
+ r.height == dcmi->sd_bounds.height)) {
+ /* Crop if the request differs from the sensor resolution */
+ dcmi->do_crop = true;
+ dcmi->crop = r;
+ dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n",
+ r.width, r.height, r.left, r.top,
+ pix.width, pix.height);
+ } else {
+ /* Disable crop */
+ dcmi->do_crop = false;
+ dev_dbg(dcmi->dev, "s_selection: crop is disabled\n");
+ }
+
+ s->r = r;
+ return 0;
+}
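As a rough userspace counterpart to the selection handlers above (illustrative only; the
device path, rectangle and helper name are assumptions, not part of the patch), a crop
request would be issued like this and clamped by the driver into the sensor bounds:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Illustrative only: ask the DCMI video node for a 320x240 crop at (8, 8). */
	static int request_crop(const char *devnode)
	{
		struct v4l2_selection sel = {
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.target = V4L2_SEL_TGT_CROP,
			.r = { .left = 8, .top = 8, .width = 320, .height = 240 },
		};
		int fd = open(devnode, O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, VIDIOC_S_SELECTION, &sel) < 0)
			return -1;
		/* sel.r now holds the rectangle the driver actually applied */
		return fd;
	}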
+
+static int dcmi_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "STM32 Camera Memory Interface",
+ sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:dcmi", sizeof(cap->bus_info));
+ return 0;
+}
+
+static int dcmi_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, "Camera", sizeof(i->name));
+ return 0;
+}
+
+static int dcmi_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int dcmi_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int dcmi_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ const struct dcmi_format *sd_fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format);
+ if (!sd_fmt)
+ return -EINVAL;
+
+ fse.code = sd_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
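A short, hypothetical userspace loop that exercises this handler simply walks the discrete
frame sizes reported for a given pixel format (the pixel format choice and the helper name
are assumptions, not part of the patch):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Illustrative: list the discrete frame sizes available for YUYV. */
	static void list_framesizes(int fd)
	{
		struct v4l2_frmsizeenum fsize = {
			.index = 0,
			.pixel_format = V4L2_PIX_FMT_YUYV,
		};

		while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0) {
			printf("%ux%u\n", fsize.discrete.width,
			       fsize.discrete.height);
			fsize.index++;
		}
	}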
+
+static int dcmi_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *p)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
+}
+
+static int dcmi_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *p)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
+}
+
+static int dcmi_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ const struct dcmi_format *sd_fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, fival->pixel_format);
+ if (!sd_fmt)
+ return -EINVAL;
+
+ fie.code = sd_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
+ enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dcmi_of_match[] = {
+ { .compatible = "st,stm32-dcmi"},
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);
+
+static int dcmi_open(struct file *file)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_subdev *sd = dcmi->entity.subdev;
+ int ret;
+
+ if (mutex_lock_interruptible(&dcmi->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto fh_rel;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto fh_rel;
+
+ ret = dcmi_set_fmt(dcmi, &dcmi->fmt);
+ if (ret)
+ v4l2_subdev_call(sd, core, s_power, 0);
+fh_rel:
+ if (ret)
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&dcmi->lock);
+ return ret;
+}
+
+static int dcmi_release(struct file *file)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_subdev *sd = dcmi->entity.subdev;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&dcmi->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&dcmi->lock);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
+ .vidioc_querycap = dcmi_querycap,
+
+ .vidioc_try_fmt_vid_cap = dcmi_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = dcmi_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = dcmi_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = dcmi_enum_fmt_vid_cap,
+ .vidioc_g_selection = dcmi_g_selection,
+ .vidioc_s_selection = dcmi_s_selection,
+
+ .vidioc_enum_input = dcmi_enum_input,
+ .vidioc_g_input = dcmi_g_input,
+ .vidioc_s_input = dcmi_s_input,
+
+ .vidioc_g_parm = dcmi_g_parm,
+ .vidioc_s_parm = dcmi_s_parm,
+
+ .vidioc_enum_framesizes = dcmi_enum_framesizes,
+ .vidioc_enum_frameintervals = dcmi_enum_frameintervals,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
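Most of the buffer-related entries above are the stock vb2 ioctl helpers, so the usual
V4L2 MMAP streaming sequence applies. A compressed sketch of that sequence follows,
assuming an already opened and format-configured file descriptor (illustrative only, not
part of the patch):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Illustrative sketch: queue two MMAP buffers, start streaming and
	 * dequeue one captured frame. */
	static int capture_one_frame(int fd)
	{
		struct v4l2_requestbuffers req = {
			.count = 2,	/* the queue sets min_buffers_needed = 2 */
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};
		enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		struct v4l2_buffer buf;
		unsigned int i;

		if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
			return -1;

		for (i = 0; i < req.count; i++) {
			memset(&buf, 0, sizeof(buf));
			buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory = V4L2_MEMORY_MMAP;
			buf.index = i;
			if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0 ||
			    ioctl(fd, VIDIOC_QBUF, &buf) < 0)
				return -1;
		}

		if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
			return -1;

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)	/* blocks until a frame is done */
			return -1;

		ioctl(fd, VIDIOC_STREAMOFF, &type);
		return 0;
	}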
+
+static const struct v4l2_file_operations dcmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = dcmi_open,
+ .release = dcmi_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = vb2_fop_get_unmapped_area,
+#endif
+ .read = vb2_fop_read,
+};
+
+static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = CIF_WIDTH,
+ .height = CIF_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = dcmi->sd_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = dcmi_try_fmt(dcmi, &f, NULL, NULL);
+ if (ret)
+ return ret;
+ dcmi->sd_format = dcmi->sd_formats[0];
+ dcmi->fmt = f;
+ return 0;
+}
+
+static const struct dcmi_format dcmi_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
+ .bpp = 1,
+ },
+};
+
+static int dcmi_formats_init(struct stm32_dcmi *dcmi)
+{
+ const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
+ unsigned int num_fmts = 0, i, j;
+ struct v4l2_subdev *subdev = dcmi->entity.subdev;
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ for (i = 0; i < ARRAY_SIZE(dcmi_formats); i++) {
+ if (dcmi_formats[i].mbus_code != mbus_code.code)
+ continue;
+
+ /* Code supported, have we got this fourcc yet? */
+ for (j = 0; j < num_fmts; j++)
+ if (sd_fmts[j]->fourcc ==
+ dcmi_formats[i].fourcc)
+ /* Already available */
+ break;
+ if (j == num_fmts)
+ /* New */
+ sd_fmts[num_fmts++] = dcmi_formats + i;
+ }
+ mbus_code.index++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ dcmi->num_of_sd_formats = num_fmts;
+ dcmi->sd_formats = devm_kcalloc(dcmi->dev,
+ num_fmts, sizeof(struct dcmi_format *),
+ GFP_KERNEL);
+ if (!dcmi->sd_formats) {
+ dev_err(dcmi->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ memcpy(dcmi->sd_formats, sd_fmts,
+ num_fmts * sizeof(struct dcmi_format *));
+ dcmi->sd_format = dcmi->sd_formats[0];
+
+ return 0;
+}
+
+static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
+{
+ unsigned int num_fsize = 0;
+ struct v4l2_subdev *subdev = dcmi->entity.subdev;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .code = dcmi->sd_format->mbus_code,
+ };
+ int ret;
+ unsigned int i;
+
+ /* Count the discrete frame sizes supported by the sensor */
+ while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
+ NULL, &fse))
+ fse.index++;
+
+ num_fsize = fse.index;
+ if (!num_fsize)
+ return 0;
+
+ dcmi->num_of_sd_framesizes = num_fsize;
+ dcmi->sd_framesizes = devm_kcalloc(dcmi->dev, num_fsize,
+ sizeof(struct dcmi_framesize),
+ GFP_KERNEL);
+ if (!dcmi->sd_framesizes) {
+ dev_err(dcmi->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Fill array with sensor supported framesizes */
+ dev_dbg(dcmi->dev, "Sensor supports %u frame sizes:\n", num_fsize);
+ for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
+ fse.index = i;
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+ dcmi->sd_framesizes[fse.index].width = fse.max_width;
+ dcmi->sd_framesizes[fse.index].height = fse.max_height;
+ dev_dbg(dcmi->dev, "%ux%u\n", fse.max_width, fse.max_height);
+ }
+
+ return 0;
+}
+
+static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+ int ret;
+
+ dcmi->vdev->ctrl_handler = dcmi->entity.subdev->ctrl_handler;
+ ret = dcmi_formats_init(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "No supported mediabus format found\n");
+ return ret;
+ }
+
+ ret = dcmi_framesizes_init(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Could not initialize framesizes\n");
+ return ret;
+ }
+
+ ret = dcmi_get_sensor_bounds(dcmi, &dcmi->sd_bounds);
+ if (ret) {
+ dev_err(dcmi->dev, "Could not get sensor bounds\n");
+ return ret;
+ }
+
+ ret = dcmi_set_default_fmt(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Could not set default format\n");
+ return ret;
+ }
+
+ ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(dcmi->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_dbg(dcmi->dev, "Device registered as %s\n",
+ video_device_node_name(dcmi->vdev));
+ return 0;
+}
+
+static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+
+ dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));
+
+ /* Checks internally whether vdev has been initialized */
+ video_unregister_device(dcmi->vdev);
+}
+
+static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+
+ dev_dbg(dcmi->dev, "Subdev %s bound\n", subdev->name);
+
+ dcmi->entity.subdev = subdev;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
+ .bound = dcmi_graph_notify_bound,
+ .unbind = dcmi_graph_notify_unbind,
+ .complete = dcmi_graph_notify_complete,
+};
+
+static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
+{
+ struct device_node *ep = NULL;
+ struct device_node *remote;
+
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
+
+ /* Remote node to connect */
+ dcmi->entity.node = remote;
+ dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
+ return 0;
+}
+
+static int dcmi_graph_init(struct stm32_dcmi *dcmi)
+{
+ struct v4l2_async_subdev **subdevs = NULL;
+ int ret;
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "Graph parsing failed\n");
+ return ret;
+ }
+
+ /* Register the subdevices notifier. */
+ subdevs = devm_kzalloc(dcmi->dev, sizeof(*subdevs), GFP_KERNEL);
+ if (!subdevs) {
+ of_node_put(dcmi->entity.node);
+ return -ENOMEM;
+ }
+
+ subdevs[0] = &dcmi->entity.asd;
+
+ dcmi->notifier.subdevs = subdevs;
+ dcmi->notifier.num_subdevs = 1;
+ dcmi->notifier.ops = &dcmi_graph_notify_ops;
+
+ ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "Notifier registration failed\n");
+ of_node_put(dcmi->entity.node);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dcmi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = NULL;
+ struct v4l2_fwnode_endpoint ep;
+ struct stm32_dcmi *dcmi;
+ struct vb2_queue *q;
+ struct dma_chan *chan;
+ struct clk *mclk;
+ int irq;
+ int ret = 0;
+
+ match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Could not find a match in devicetree\n");
+ return -ENODEV;
+ }
+
+ dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
+ if (!dcmi)
+ return -ENOMEM;
+
+ dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(dcmi->rstc)) {
+ dev_err(&pdev->dev, "Could not get reset control\n");
+ return PTR_ERR(dcmi->rstc);
+ }
+
+ /* Get bus characteristics from devicetree */
+ np = of_graph_get_next_endpoint(np, NULL);
+ if (!np) {
+ dev_err(&pdev->dev, "Could not find the endpoint\n");
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+ of_node_put(np);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ return ret;
+ }
+
+ if (ep.bus_type == V4L2_MBUS_CSI2) {
+ dev_err(&pdev->dev, "CSI bus not supported\n");
+ return -ENODEV;
+ }
+ dcmi->bus.flags = ep.bus.parallel.flags;
+ dcmi->bus.bus_width = ep.bus.parallel.bus_width;
+ dcmi->bus.data_shift = ep.bus.parallel.data_shift;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get irq\n");
+ return irq ? irq : -ENXIO;
+ }
+
+ dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dcmi->res) {
+ dev_err(&pdev->dev, "Could not get resource\n");
+ return -ENODEV;
+ }
+
+ dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
+ if (IS_ERR(dcmi->regs)) {
+ dev_err(&pdev->dev, "Could not map registers\n");
+ return PTR_ERR(dcmi->regs);
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
+ dcmi_irq_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), dcmi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ return ret;
+ }
+
+ mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mclk)) {
+ if (PTR_ERR(mclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get mclk\n");
+ return PTR_ERR(mclk);
+ }
+
+ chan = dma_request_slave_channel(&pdev->dev, "tx");
+ if (!chan) {
+ dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n");
+ return -EPROBE_DEFER;
+ }
+
+ spin_lock_init(&dcmi->irqlock);
+ mutex_init(&dcmi->lock);
+ mutex_init(&dcmi->dma_lock);
+ init_completion(&dcmi->complete);
+ INIT_LIST_HEAD(&dcmi->buffers);
+
+ dcmi->dev = &pdev->dev;
+ dcmi->mclk = mclk;
+ dcmi->state = STOPPED;
+ dcmi->dma_chan = chan;
+
+ q = &dcmi->queue;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
+ if (ret)
+ goto err_dma_release;
+
+ dcmi->vdev = video_device_alloc();
+ if (!dcmi->vdev) {
+ ret = -ENOMEM;
+ goto err_device_unregister;
+ }
+
+ /* Video node */
+ dcmi->vdev->fops = &dcmi_fops;
+ dcmi->vdev->v4l2_dev = &dcmi->v4l2_dev;
+ dcmi->vdev->queue = &dcmi->queue;
+ strlcpy(dcmi->vdev->name, KBUILD_MODNAME, sizeof(dcmi->vdev->name));
+ dcmi->vdev->release = video_device_release;
+ dcmi->vdev->ioctl_ops = &dcmi_ioctl_ops;
+ dcmi->vdev->lock = &dcmi->lock;
+ dcmi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(dcmi->vdev, dcmi);
+
+ /* Buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &dcmi->lock;
+ q->drv_priv = dcmi;
+ q->buf_struct_size = sizeof(struct dcmi_buf);
+ q->ops = &dcmi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->dev = &pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
+ goto err_device_release;
+ }
+
+ ret = dcmi_graph_init(dcmi);
+ if (ret < 0)
+ goto err_device_release;
+
+ /* Reset device */
+ ret = reset_control_assert(dcmi->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to assert the reset line\n");
+ goto err_device_release;
+ }
+
+ usleep_range(3000, 5000);
+
+ ret = reset_control_deassert(dcmi->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to deassert the reset line\n");
+ goto err_device_release;
+ }
+
+ dev_info(&pdev->dev, "Probe done\n");
+
+ platform_set_drvdata(pdev, dcmi);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+err_device_release:
+ video_device_release(dcmi->vdev);
+err_device_unregister:
+ v4l2_device_unregister(&dcmi->v4l2_dev);
+err_dma_release:
+ dma_release_channel(dcmi->dma_chan);
+
+ return ret;
+}
+
+static int dcmi_remove(struct platform_device *pdev)
+{
+ struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_notifier_unregister(&dcmi->notifier);
+ v4l2_device_unregister(&dcmi->v4l2_dev);
+
+ dma_release_channel(dcmi->dma_chan);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_runtime_suspend(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dcmi->mclk);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_runtime_resume(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dcmi->mclk);
+ if (ret)
+ dev_err(dev, "%s: Failed to prepare_enable clock\n", __func__);
+
+ return ret;
+}
+
+static __maybe_unused int dcmi_suspend(struct device *dev)
+{
+ /* disable clock */
+ pm_runtime_force_suspend(dev);
+
+ /* change pinctrl state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_resume(struct device *dev)
+{
+ /* restore pinctrl default state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* enable clock */
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dcmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dcmi_suspend, dcmi_resume)
+ SET_RUNTIME_PM_OPS(dcmi_runtime_suspend,
+ dcmi_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_dcmi_driver = {
+ .probe = dcmi_probe,
+ .remove = dcmi_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(stm32_dcmi_of_match),
+ .pm = &dcmi_pm_ops,
+ },
+};
+
+module_platform_driver(stm32_dcmi_driver);
+
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/tegra-cec/Makefile b/drivers/media/platform/tegra-cec/Makefile
new file mode 100644
index 000000000..f3d811275
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra_cec.o
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
new file mode 100644
index 000000000..a2c20ca79
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -0,0 +1,499 @@
+/*
+ * Tegra CEC implementation
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/clk/tegra.h>
+
+#include <media/cec-notifier.h>
+
+#include "tegra_cec.h"
+
+#define TEGRA_CEC_NAME "tegra-cec"
+
+struct tegra_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *cec_base;
+ struct cec_notifier *notifier;
+ int tegra_cec_irq;
+ bool rx_done;
+ bool tx_done;
+ int tx_status;
+ u8 rx_buf[CEC_MAX_MSG_SIZE];
+ u8 rx_buf_cnt;
+ u32 tx_buf[CEC_MAX_MSG_SIZE];
+ u8 tx_buf_cur;
+ u8 tx_buf_cnt;
+};
+
+static inline u32 cec_read(struct tegra_cec *cec, u32 reg)
+{
+ return readl(cec->cec_base + reg);
+}
+
+static inline void cec_write(struct tegra_cec *cec, u32 reg, u32 val)
+{
+ writel(val, cec->cec_base + reg);
+}
+
+static void tegra_cec_error_recovery(struct tegra_cec *cec)
+{
+ u32 hw_ctrl;
+
+ hw_ctrl = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, hw_ctrl);
+}
+
+static irqreturn_t tegra_cec_irq_thread_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+
+ if (cec->tx_done) {
+ cec_transmit_attempt_done(cec->adap, cec->tx_status);
+ cec->tx_done = false;
+ }
+ if (cec->rx_done) {
+ struct cec_msg msg = {};
+
+ msg.len = cec->rx_buf_cnt;
+ memcpy(msg.msg, cec->rx_buf, msg.len);
+ cec_received_msg(cec->adap, &msg);
+ cec->rx_done = false;
+ cec->rx_buf_cnt = 0;
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+ u32 status, mask;
+
+ status = cec_read(cec, TEGRA_CEC_INT_STAT);
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+
+ status &= mask;
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN) {
+ dev_err(dev, "TX underrun, interrupt timing issue!\n");
+
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if ((status & TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED) ||
+ (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)) {
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ if (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)
+ cec->tx_status = CEC_TX_STATUS_LOW_DRIVE;
+ else
+ cec->tx_status = CEC_TX_STATUS_ARB_LOST;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED);
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) {
+ tegra_cec_error_recovery(cec);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ } else {
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_OK;
+ }
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD)
+ dev_warn(dev, "TX NAKed on the fly!\n");
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY) {
+ if (cec->tx_buf_cur == cec->tx_buf_cnt) {
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+ } else {
+ cec_write(cec, TEGRA_CEC_TX_REGISTER,
+ cec->tx_buf[cec->tx_buf_cur++]);
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY);
+ }
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED);
+ cec->rx_done = false;
+ cec->rx_buf_cnt = 0;
+ }
+ if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
+ u32 v;
+
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_RX_REGISTER_FULL);
+ v = cec_read(cec, TEGRA_CEC_RX_REGISTER);
+ if (cec->rx_buf_cnt < CEC_MAX_MSG_SIZE)
+ cec->rx_buf[cec->rx_buf_cnt++] = v & 0xff;
+ if (v & TEGRA_CEC_RX_REGISTER_EOM) {
+ cec->rx_done = true;
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+
+ cec->rx_buf_cnt = 0;
+ cec->tx_buf_cnt = 0;
+ cec->tx_buf_cur = 0;
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_MASK, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_SW_CONTROL, 0);
+
+ if (!enable)
+ return 0;
+
+ cec_write(cec, TEGRA_CEC_INPUT_FILTER, (1U << 31) | 0x20);
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_0,
+ (0x7a << TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT) |
+ (0x6d << TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT) |
+ (0x93 << TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT) |
+ (0x86 << TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_1,
+ (0x35 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT) |
+ (0x21 << TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT) |
+ (0x56 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT) |
+ (0x40 << TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_2,
+ (0x50 << TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_0,
+ (0x74 << TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT) |
+ (0x8d << TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT) |
+ (0x08 << TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT) |
+ (0x71 << TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_1,
+ (0x2f << TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT) |
+ (0x13 << TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT) |
+ (0x4b << TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT) |
+ (0x21 << TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_2,
+ (0x07 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT) |
+ (0x05 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT) |
+ (0x03 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN |
+ TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD |
+ TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED |
+ TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
+ TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
+ TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED);
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
+ return 0;
+}
+
+static int tegra_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 state = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ state &= ~TEGRA_CEC_HWCTRL_RX_LADDR_MASK;
+ else
+ state |= TEGRA_CEC_HWCTRL_RX_LADDR((1 << logical_addr));
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, state);
+ return 0;
+}
+
+static int tegra_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 reg = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
+ if (enable)
+ reg |= TEGRA_CEC_HWCTRL_RX_SNOOP;
+ else
+ reg &= ~TEGRA_CEC_HWCTRL_RX_SNOOP;
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, reg);
+ return 0;
+}
+
+static int tegra_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time_ms, struct cec_msg *msg)
+{
+ bool retry_xfer = signal_free_time_ms == CEC_SIGNAL_FREE_TIME_RETRY;
+ struct tegra_cec *cec = adap->priv;
+ unsigned int i;
+ u32 mode = 0;
+ u32 mask;
+
+ if (cec_msg_is_broadcast(msg))
+ mode = TEGRA_CEC_TX_REG_BCAST;
+
+ cec->tx_buf_cur = 0;
+ cec->tx_buf_cnt = msg->len;
+
+ for (i = 0; i < msg->len; i++) {
+ cec->tx_buf[i] = mode | msg->msg[i];
+ if (i == 0)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_START_BIT;
+ if (i == msg->len - 1)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_EOM;
+ if (i == 0 && retry_xfer)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_RETRY;
+ }
+
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask | TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ return 0;
+}
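adap_transmit does not touch the TX register directly: it fills tx_buf[] with the
start/EOM/broadcast bits and unmasks TX_REGISTER_EMPTY, and the interrupt handler then
feeds TEGRA_CEC_TX_REGISTER one byte at a time. From userspace this path is driven through
the standard CEC character device; a hypothetical sketch (device path, logical addresses
and opcode literal are assumptions, and the adapter is assumed to already be configured
with a logical address):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/cec.h>

	/* Illustrative: send "Image View On" from logical address 4 (playback)
	 * to the TV at logical address 0. */
	static int send_image_view_on(const char *devnode)
	{
		struct cec_msg msg;
		int fd = open(devnode, O_RDWR);

		if (fd < 0)
			return -1;

		memset(&msg, 0, sizeof(msg));
		msg.msg[0] = (4 << 4) | 0;	/* initiator 4, destination 0 */
		msg.msg[1] = 0x04;		/* Image View On opcode */
		msg.len = 2;

		if (ioctl(fd, CEC_TRANSMIT, &msg) < 0)
			return -1;
		return msg.tx_status;		/* e.g. CEC_TX_STATUS_OK */
	}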
+
+static const struct cec_adap_ops tegra_cec_ops = {
+ .adap_enable = tegra_cec_adap_enable,
+ .adap_log_addr = tegra_cec_adap_log_addr,
+ .adap_transmit = tegra_cec_adap_transmit,
+ .adap_monitor_all_enable = tegra_cec_adap_monitor_all_enable,
+};
+
+static int tegra_cec_probe(struct platform_device *pdev)
+{
+ struct platform_device *hdmi_dev;
+ struct device_node *np;
+ struct tegra_cec *cec;
+ struct resource *res;
+ int ret = 0;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
+
+ if (!cec)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Unable to allocate resources for device\n");
+ return -EBUSY;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+ dev_err(&pdev->dev,
+ "Unable to request mem region for device\n");
+ return -EBUSY;
+ }
+
+ cec->tegra_cec_irq = platform_get_irq(pdev, 0);
+
+ if (cec->tegra_cec_irq <= 0)
+ return -EBUSY;
+
+ cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+
+ if (!cec->cec_base) {
+ dev_err(&pdev->dev, "Unable to grab IOs for device\n");
+ return -EBUSY;
+ }
+
+ cec->clk = devm_clk_get(&pdev->dev, "cec");
+
+ if (IS_ERR_OR_NULL(cec->clk)) {
+ dev_err(&pdev->dev, "Can't get clock for CEC\n");
+ return -ENOENT;
+ }
+
+ ret = clk_prepare_enable(cec->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
+ return ret;
+ }
+
+ /* set context info. */
+ cec->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, cec);
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->tegra_cec_irq,
+ tegra_cec_irq_handler, tegra_cec_irq_thread_handler,
+ 0, "cec_irq", &pdev->dev);
+
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to request interrupt for device\n");
+ goto clk_error;
+ }
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto clk_error;
+ }
+
+ cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME,
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap)) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Couldn't create cec adapter\n");
+ goto cec_error;
+ }
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register device\n");
+ goto cec_error;
+ }
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ return 0;
+
+cec_error:
+ if (cec->notifier)
+ cec_notifier_put(cec->notifier);
+ cec_delete_adapter(cec->adap);
+clk_error:
+ clk_disable_unprepare(cec->clk);
+ return ret;
+}
+
+static int tegra_cec_remove(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_cec_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ dev_notice(&pdev->dev, "suspended\n");
+ return 0;
+}
+
+static int tegra_cec_resume(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ dev_notice(&pdev->dev, "Resuming\n");
+
+ return clk_prepare_enable(cec->clk);
+}
+#endif
+
+static const struct of_device_id tegra_cec_of_match[] = {
+ { .compatible = "nvidia,tegra114-cec", },
+ { .compatible = "nvidia,tegra124-cec", },
+ { .compatible = "nvidia,tegra210-cec", },
+ {},
+};
+
+static struct platform_driver tegra_cec_driver = {
+ .driver = {
+ .name = TEGRA_CEC_NAME,
+ .of_match_table = of_match_ptr(tegra_cec_of_match),
+ },
+ .probe = tegra_cec_probe,
+ .remove = tegra_cec_remove,
+
+#ifdef CONFIG_PM
+ .suspend = tegra_cec_suspend,
+ .resume = tegra_cec_resume,
+#endif
+};
+
+module_platform_driver(tegra_cec_driver);
+
+MODULE_DESCRIPTION("Tegra HDMI CEC driver");
+MODULE_AUTHOR("NVIDIA CORPORATION");
+MODULE_AUTHOR("Cisco Systems, Inc. and/or its affiliates");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.h b/drivers/media/platform/tegra-cec/tegra_cec.h
new file mode 100644
index 000000000..e301513da
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.h
@@ -0,0 +1,127 @@
+/*
+ * Tegra CEC register definitions
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TEGRA_CEC_H
+#define TEGRA_CEC_H
+
+/* CEC registers */
+#define TEGRA_CEC_SW_CONTROL 0x000
+#define TEGRA_CEC_HW_CONTROL 0x004
+#define TEGRA_CEC_INPUT_FILTER 0x008
+#define TEGRA_CEC_TX_REGISTER 0x010
+#define TEGRA_CEC_RX_REGISTER 0x014
+#define TEGRA_CEC_RX_TIMING_0 0x018
+#define TEGRA_CEC_RX_TIMING_1 0x01c
+#define TEGRA_CEC_RX_TIMING_2 0x020
+#define TEGRA_CEC_TX_TIMING_0 0x024
+#define TEGRA_CEC_TX_TIMING_1 0x028
+#define TEGRA_CEC_TX_TIMING_2 0x02c
+#define TEGRA_CEC_INT_STAT 0x030
+#define TEGRA_CEC_INT_MASK 0x034
+#define TEGRA_CEC_HW_DEBUG_RX 0x038
+#define TEGRA_CEC_HW_DEBUG_TX 0x03c
+
+#define TEGRA_CEC_HWCTRL_RX_LADDR_MASK 0x7fff
+#define TEGRA_CEC_HWCTRL_RX_LADDR(x) \
+ ((x) & TEGRA_CEC_HWCTRL_RX_LADDR_MASK)
+#define TEGRA_CEC_HWCTRL_RX_SNOOP (1 << 15)
+#define TEGRA_CEC_HWCTRL_RX_NAK_MODE (1 << 16)
+#define TEGRA_CEC_HWCTRL_TX_NAK_MODE (1 << 24)
+#define TEGRA_CEC_HWCTRL_FAST_SIM_MODE (1 << 30)
+#define TEGRA_CEC_HWCTRL_TX_RX_MODE (1 << 31)
+
+#define TEGRA_CEC_INPUT_FILTER_MODE (1 << 31)
+#define TEGRA_CEC_INPUT_FILTER_FIFO_LENGTH_SHIFT 0
+
+#define TEGRA_CEC_TX_REG_DATA_SHIFT 0
+#define TEGRA_CEC_TX_REG_EOM (1 << 8)
+#define TEGRA_CEC_TX_REG_BCAST (1 << 12)
+#define TEGRA_CEC_TX_REG_START_BIT (1 << 16)
+#define TEGRA_CEC_TX_REG_RETRY (1 << 17)
+
+#define TEGRA_CEC_RX_REGISTER_SHIFT 0
+#define TEGRA_CEC_RX_REGISTER_EOM (1 << 8)
+#define TEGRA_CEC_RX_REGISTER_ACK (1 << 9)
+
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT 0
+
+#define TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT 8
+#define TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT 16
+#define TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT 8
+#define TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT 16
+#define TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT 0
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT 4
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT 8
+
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_HW_DEBUG_TX_DURATION_COUNT_SHIFT 0
+#define TEGRA_CEC_HW_DEBUG_TX_TXBIT_COUNT_SHIFT 17
+#define TEGRA_CEC_HW_DEBUG_TX_STATE_SHIFT 21
+#define TEGRA_CEC_HW_DEBUG_TX_FORCELOOUT (1 << 25)
+#define TEGRA_CEC_HW_DEBUG_TX_TXDATABIT_SAMPLE_TIMER (1 << 26)
+
+#endif /* TEGRA_CEC_H */
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
new file mode 100644
index 000000000..886ac5ec0
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
+obj-$(CONFIG_VIDEO_TI_VPDMA) += ti-vpdma.o
+obj-$(CONFIG_VIDEO_TI_SC) += ti-sc.o
+obj-$(CONFIG_VIDEO_TI_CSC) += ti-csc.o
+
+ti-vpe-y := vpe.o
+ti-vpdma-y := vpdma.o
+ti-sc-y := sc.o
+ti-csc-y := csc.o
+
+ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
+
+ti-cal-y := cal.o
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
new file mode 100644
index 000000000..d945323fc
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -0,0 +1,1934 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include "cal_regs.h"
+
+#define CAL_MODULE_NAME "cal"
+
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
+#define CAL_VERSION "0.1.0"
+
+MODULE_DESCRIPTION("TI CAL driver");
+MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(CAL_VERSION);
+
+static unsigned video_nr = -1;
+module_param(video_nr, uint, 0644);
+MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+/* timeperframe: min/max and default */
+static const struct v4l2_fract
+ tpf_default = {.numerator = 1001, .denominator = 30000};
+
+#define cal_dbg(level, caldev, fmt, arg...) \
+ v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
+#define cal_info(caldev, fmt, arg...) \
+ v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
+#define cal_err(caldev, fmt, arg...) \
+ v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
+
+#define ctx_dbg(level, ctx, fmt, arg...) \
+ v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
+#define ctx_info(ctx, fmt, arg...) \
+ v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
+#define ctx_err(ctx, fmt, arg...) \
+ v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
+
+#define CAL_NUM_INPUT 1
+#define CAL_NUM_CONTEXT 2
+
+#define bytes_per_line(pixel, bpp) (ALIGN((pixel) * (bpp), 16))
+
+#define reg_read(dev, offset) ioread32(dev->base + offset)
+#define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
+
+#define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
+ mask)
+#define reg_write_field(dev, offset, field, mask) { \
+ u32 val = reg_read(dev, offset); \
+ set_field(&val, field, mask); \
+ reg_write(dev, offset, val); }
+
+/* ------------------------------------------------------------------
+ * Basic structures
+ * ------------------------------------------------------------------
+ */
+
+struct cal_fmt {
+ u32 fourcc;
+ u32 code;
+ u8 depth;
+};
+
+static struct cal_fmt cal_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .depth = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .depth = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .depth = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .depth = 16,
+ },
+};
+
+/* Convert a four-character code (FOURCC) to a printable string */
+static char *fourcc_to_str(u32 fmt)
+{
+ static char code[5];
+
+ code[0] = (unsigned char)(fmt & 0xff);
+ code[1] = (unsigned char)((fmt >> 8) & 0xff);
+ code[2] = (unsigned char)((fmt >> 16) & 0xff);
+ code[3] = (unsigned char)((fmt >> 24) & 0xff);
+ code[4] = '\0';
+
+ return code;
+}
+
+/* buffer for one video frame */
+struct cal_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ const struct cal_fmt *fmt;
+};
+
+struct cal_dmaqueue {
+ struct list_head active;
+
+ /* Counters to control fps rate */
+ int frame;
+ int ini_jiffies;
+};
+
+struct cm_data {
+ void __iomem *base;
+ struct resource *res;
+
+ unsigned int camerrx_control;
+
+ struct platform_device *pdev;
+};
+
+struct cc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ struct platform_device *pdev;
+};
+
+/*
+ * There is one cal_dev structure in the driver; it is shared by
+ * all instances.
+ */
+struct cal_dev {
+ int irq;
+ void __iomem *base;
+ struct resource *res;
+ struct platform_device *pdev;
+ struct v4l2_device v4l2_dev;
+
+ /* Control Module handle */
+ struct cm_data *cm;
+ /* Camera Core Module handle */
+ struct cc_data *cc[CAL_NUM_CSI2_PORTS];
+
+ struct cal_ctx *ctx[CAL_NUM_CONTEXT];
+};
+
+/*
+ * There is one cal_ctx structure for each camera core context.
+ */
+struct cal_ctx {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct video_device vdev;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *sensor;
+ struct v4l2_fwnode_endpoint endpoint;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_subdev *asd_list[1];
+
+ struct v4l2_fh fh;
+ struct cal_dev *dev;
+ struct cc_data *cc;
+
+ /* v4l2_ioctl mutex */
+ struct mutex mutex;
+ /* v4l2 buffers lock */
+ spinlock_t slock;
+
+ /* Several counters */
+ unsigned long jiffies;
+
+ struct cal_dmaqueue vidq;
+
+ /* Input Number */
+ int input;
+
+ /* video capture */
+ const struct cal_fmt *fmt;
+ /* Used to store current pixel format */
+ struct v4l2_format v_fmt;
+ /* Used to store current mbus frame format */
+ struct v4l2_mbus_framefmt m_fmt;
+
+ /* Current subdev enumerated format */
+ struct cal_fmt *active_fmt[ARRAY_SIZE(cal_formats)];
+ int num_active_fmt;
+
+ struct v4l2_fract timeperframe;
+ unsigned int sequence;
+ unsigned int external_rate;
+ struct vb2_queue vb_vidq;
+ unsigned int seq_count;
+ unsigned int csi2_port;
+ unsigned int virtual_channel;
+
+	/* Pointer to the current v4l2_buffer */
+ struct cal_buffer *cur_frm;
+	/* Pointer to the next v4l2_buffer */
+ struct cal_buffer *next_frm;
+};
+
+static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
+ u32 pixelformat)
+{
+ const struct cal_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ctx->num_active_fmt; k++) {
+ fmt = ctx->active_fmt[k];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
+ u32 code)
+{
+ const struct cal_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ctx->num_active_fmt; k++) {
+ fmt = ctx->active_fmt[k];
+ if (fmt->code == code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct cal_ctx, notifier);
+}
+
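+/*
+ * Bit field helpers for unshifted masks: get_field() extracts the field
+ * value and set_field() updates *valp with the field placed at the mask
+ * position.
+ */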
+static inline int get_field(u32 value, u32 mask)
+{
+ return (value & mask) >> __ffs(mask);
+}
+
+static inline void set_field(u32 *valp, u32 field, u32 mask)
+{
+ u32 val = *valp;
+
+ val &= ~mask;
+ val |= (field << __ffs(mask)) & mask;
+ *valp = val;
+}
+
+/*
+ * Control Module block access
+ */
+static struct cm_data *cm_create(struct cal_dev *dev)
+{
+ struct platform_device *pdev = dev->pdev;
+ struct cm_data *cm;
+
+ cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
+ if (!cm)
+ return ERR_PTR(-ENOMEM);
+
+ cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "camerrx_control");
+ cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
+ if (IS_ERR(cm->base)) {
+ cal_err(dev, "failed to ioremap\n");
+ return ERR_CAST(cm->base);
+ }
+
+ cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+ cm->res->name, &cm->res->start, &cm->res->end);
+
+ return cm;
+}
+
+static void camerarx_phy_enable(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ if (!ctx->dev->cm->base) {
+ ctx_err(ctx, "cm not mapped\n");
+ return;
+ }
+
+ val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+ if (ctx->csi2_port == 1) {
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+ set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
+ /* enable all lanes by default */
+ set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
+ } else if (ctx->csi2_port == 2) {
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+ set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
+ /* enable all lanes by default */
+ set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
+ }
+ reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+static void camerarx_phy_disable(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ if (!ctx->dev->cm->base) {
+ ctx_err(ctx, "cm not mapped\n");
+ return;
+ }
+
+ val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+ if (ctx->csi2_port == 1)
+ set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+ else if (ctx->csi2_port == 2)
+ set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+ reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+/*
+ * Camera Instance access block
+ */
+static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
+{
+ struct platform_device *pdev = dev->pdev;
+ struct cc_data *cc;
+
+ cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return ERR_PTR(-ENOMEM);
+
+ cc->res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ (core == 0) ?
+ "cal_rx_core0" :
+ "cal_rx_core1");
+ cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
+ if (IS_ERR(cc->base)) {
+ cal_err(dev, "failed to ioremap\n");
+ return ERR_CAST(cc->base);
+ }
+
+ cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+ cc->res->name, &cc->res->start, &cc->res->end);
+
+ return cc;
+}
+
+/*
+ * Get Revision and HW info
+ */
+static void cal_get_hwinfo(struct cal_dev *dev)
+{
+ u32 revision = 0;
+ u32 hwinfo = 0;
+
+ revision = reg_read(dev, CAL_HL_REVISION);
+ cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
+ revision);
+
+ hwinfo = reg_read(dev, CAL_HL_HWINFO);
+ cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
+ hwinfo);
+}
+
+static inline int cal_runtime_get(struct cal_dev *dev)
+{
+ return pm_runtime_get_sync(&dev->pdev->dev);
+}
+
+static inline void cal_runtime_put(struct cal_dev *dev)
+{
+ pm_runtime_put_sync(&dev->pdev->dev);
+}
+
+static void cal_quickdump_regs(struct cal_dev *dev)
+{
+ cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->base,
+ resource_size(dev->res), false);
+
+ if (dev->ctx[0]) {
+ cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
+ &dev->ctx[0]->cc->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->ctx[0]->cc->base,
+ resource_size(dev->ctx[0]->cc->res),
+ false);
+ }
+
+ if (dev->ctx[1]) {
+ cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
+ &dev->ctx[1]->cc->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->ctx[1]->cc->base,
+ resource_size(dev->ctx[1]->cc->res),
+ false);
+ }
+
+ cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
+ &dev->cm->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->cm->base,
+ resource_size(dev->cm->res), false);
+}
+
+/*
+ * Enable the expected IRQ sources
+ */
+static void enable_irqs(struct cal_ctx *ctx)
+{
+ /* Enable IRQ_WDMA_END 0/1 */
+ reg_write_field(ctx->dev,
+ CAL_HL_IRQENABLE_SET(2),
+ CAL_HL_IRQ_ENABLE,
+ CAL_HL_IRQ_MASK(ctx->csi2_port));
+ /* Enable IRQ_WDMA_START 0/1 */
+ reg_write_field(ctx->dev,
+ CAL_HL_IRQENABLE_SET(3),
+ CAL_HL_IRQ_ENABLE,
+ CAL_HL_IRQ_MASK(ctx->csi2_port));
+ /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+ reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
+}
+
+static void disable_irqs(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ /* Disable IRQ_WDMA_END 0/1 */
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
+ /* Disable IRQ_WDMA_START 0/1 */
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
+ /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+ reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
+}
+
+static void csi2_init(struct cal_ctx *ctx)
+{
+ int i;
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
+ set_field(&val, CAL_GEN_ENABLE,
+ CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
+ set_field(&val, CAL_GEN_ENABLE,
+ CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
+ set_field(&val, CAL_GEN_DISABLE,
+ CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
+ set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
+ reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
+
+ val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+ set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
+ CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
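+	/* Wait up to ~10ms for the complex IO to report the ON power state */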
+ for (i = 0; i < 10; i++) {
+ if (reg_read_field(ctx->dev,
+ CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
+ CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
+ break;
+ usleep_range(1000, 1100);
+ }
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+
+ val = reg_read(ctx->dev, CAL_CTRL);
+ set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
+ set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
+ set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
+ CAL_CTRL_POSTED_WRITES_MASK);
+ set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
+ set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
+ reg_write(ctx->dev, CAL_CTRL, val);
+ ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
+}
+
+static void csi2_lane_config(struct cal_ctx *ctx)
+{
+ u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
+ u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
+ struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
+ &ctx->endpoint.bus.mipi_csi2;
+ int lane;
+
+ set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
+ set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
+ for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
+ /*
+		 * Each lane is one nibble apart, starting with the clock
+		 * lane and followed by the data lanes, so shift the masks by 4.
+ */
+ lane_mask <<= 4;
+ polarity_mask <<= 4;
+ set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
+ set_field(&val, mipi_csi2->lane_polarities[lane + 1],
+ polarity_mask);
+ }
+
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
+ ctx->csi2_port, val);
+}
+
+static void csi2_ppi_enable(struct cal_ctx *ctx)
+{
+ reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+ CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ppi_disable(struct cal_ctx *ctx)
+{
+ reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+ CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ctx_config(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
+ set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
+ /*
+ * DT type: MIPI CSI-2 Specs
+ * 0x1: All - DT filter is disabled
+ * 0x24: RGB888 1 pixel = 3 bytes
+ * 0x2B: RAW10 4 pixels = 5 bytes
+ * 0x2A: RAW8 1 pixel = 1 byte
+ * 0x1E: YUV422 2 pixels = 4 bytes
+ */
+ set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
+	/* Virtual Channel from the CSI2 sensor, usually 0 */
+ set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
+ /* NUM_LINES_PER_FRAME => 0 means auto detect */
+ set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
+ set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
+ set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
+ CAL_CSI2_CTX_PACK_MODE_MASK);
+ reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
+}
+
+static void pix_proc_config(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
+ set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
+ set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
+ set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
+ set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
+ set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
+ set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
+ reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
+}
+
+static void cal_wr_dma_config(struct cal_ctx *ctx,
+ unsigned int width, unsigned int height)
+{
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
+ set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
+ CAL_WR_DMA_CTRL_DTAG_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
+ CAL_WR_DMA_CTRL_MODE_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
+ CAL_WR_DMA_CTRL_PATTERN_MASK);
+ set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
+ reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
+
+ /*
+	 * width / 16: not sure this is right, but giving it a whirl;
+	 * zero does not work right.
+ */
+ reg_write_field(ctx->dev,
+ CAL_WR_DMA_OFST(ctx->csi2_port),
+ (width / 16),
+ CAL_WR_DMA_OFST_MASK);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
+
+ val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
+ /* 64 bit word means no skipping */
+ set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
+ /*
+	 * (width * 8) / 64: this should be the size of an entire line
+	 * in 64-bit words, but 0 means all data until the end
+	 * is detected automagically.
+ */
+ set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
+ reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
+}
+
+static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
+{
+ reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
+#define THS_SETTLE 15
+
+static void csi2_phy_config(struct cal_ctx *ctx)
+{
+ unsigned int reg0, reg1;
+ unsigned int ths_term, ths_settle;
+ unsigned int ddrclkperiod_us;
+
+ /*
+ * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
+ */
+ ddrclkperiod_us = ctx->external_rate / 2000000;
+ ddrclkperiod_us = 1000000 / ddrclkperiod_us;
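+	/*
+	 * Note: despite the _us suffix, this value works out to the DDR
+	 * clock period in picoseconds (1000000 / DDR clock in MHz), which
+	 * is why 20 ns is written as 20000 below.
+	 */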
+ ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
+
+ ths_term = 20000 / ddrclkperiod_us;
+ ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
+ ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
+
+ /*
+ * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
+	 * Since CtrlClk is fixed at 96 MHz, we get
+ * ths_settle = floor(176.3 / 10.416) - 1 = 15
+ * If we ever switch to a dynamic clock then this code might be useful
+ *
+ * unsigned int ctrlclkperiod_us;
+ * ctrlclkperiod_us = 96000000 / 1000000;
+ * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
+ * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
+
+ * ths_settle = 176300 / ctrlclkperiod_us;
+ * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
+ */
+
+ ths_settle = THS_SETTLE;
+ ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
+
+ reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
+ set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
+ CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
+ set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
+ set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
+
+ ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
+ reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
+
+ reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
+ set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
+ set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
+ set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
+ set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
+
+ ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
+ reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
+}
+
+static int cal_get_external_info(struct cal_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ctx->sensor)
+ return -ENODEV;
+
+ ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl) {
+ ctx_err(ctx, "no pixel rate control in subdev: %s\n",
+ ctx->sensor->name);
+ return -EPIPE;
+ }
+
+ ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+ ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
+
+ return 0;
+}
+
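+/*
+ * Pick the next queued buffer and program its DMA address.
+ * Called from the WDMA_START interrupt path with ctx->slock held.
+ */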
+static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
+{
+ struct cal_dmaqueue *dma_q = &ctx->vidq;
+ struct cal_buffer *buf;
+ unsigned long addr;
+
+ buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+ ctx->next_frm = buf;
+ list_del(&buf->list);
+
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ cal_wr_dma_addr(ctx, addr);
+}
+
+static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
+{
+ ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+ ctx->cur_frm->vb.field = ctx->m_fmt.field;
+ ctx->cur_frm->vb.sequence = ctx->sequence++;
+
+ vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ ctx->cur_frm = ctx->next_frm;
+}
+
+#define isvcirqset(irq, vc, ff) (irq & \
+ (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
+
+#define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
+
+static irqreturn_t cal_irq(int irq_cal, void *data)
+{
+ struct cal_dev *dev = (struct cal_dev *)data;
+ struct cal_ctx *ctx;
+ struct cal_dmaqueue *dma_q;
+ u32 irqst2, irqst3;
+
+ /* Check which DMA just finished */
+ irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
+ if (irqst2) {
+ /* Clear Interrupt status */
+ reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
+
+		/* Need to check both ports */
+ if (isportirqset(irqst2, 1)) {
+ ctx = dev->ctx[0];
+
+ if (ctx->cur_frm != ctx->next_frm)
+ cal_process_buffer_complete(ctx);
+ }
+
+ if (isportirqset(irqst2, 2)) {
+ ctx = dev->ctx[1];
+
+ if (ctx->cur_frm != ctx->next_frm)
+ cal_process_buffer_complete(ctx);
+ }
+ }
+
+ /* Check which DMA just started */
+ irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
+ if (irqst3) {
+ /* Clear Interrupt status */
+ reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
+
+		/* Need to check both ports */
+ if (isportirqset(irqst3, 1)) {
+ ctx = dev->ctx[0];
+ dma_q = &ctx->vidq;
+
+ spin_lock(&ctx->slock);
+ if (!list_empty(&dma_q->active) &&
+ ctx->cur_frm == ctx->next_frm)
+ cal_schedule_next_buffer(ctx);
+ spin_unlock(&ctx->slock);
+ }
+
+ if (isportirqset(irqst3, 2)) {
+ ctx = dev->ctx[1];
+ dma_q = &ctx->vidq;
+
+ spin_lock(&ctx->slock);
+ if (!list_empty(&dma_q->active) &&
+ ctx->cur_frm == ctx->next_frm)
+ cal_schedule_next_buffer(ctx);
+ spin_unlock(&ctx->slock);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int cal_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ strlcpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
+
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", ctx->v4l2_dev.name);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt = NULL;
+
+ if (f->index >= ctx->num_active_fmt)
+ return -EINVAL;
+
+ fmt = ctx->active_fmt[f->index];
+
+ f->pixelformat = fmt->fourcc;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ return 0;
+}
+
+static int __subdev_get_format(struct cal_ctx *ctx,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ int ret;
+
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+
+ ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ *fmt = *mbus_fmt;
+
+ ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
+
+ return 0;
+}
+
+static int __subdev_set_format(struct cal_ctx *ctx,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ int ret;
+
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ *mbus_fmt = *fmt;
+
+ ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
+
+ return 0;
+}
+
+static int cal_calc_format_size(struct cal_ctx *ctx,
+ const struct cal_fmt *fmt,
+ struct v4l2_format *f)
+{
+ if (!fmt) {
+ ctx_dbg(3, ctx, "No cal_fmt provided!\n");
+ return -EINVAL;
+ }
+
+ v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+ &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
+ f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
+ fmt->depth >> 3);
+ f->fmt.pix.sizeimage = f->fmt.pix.height *
+ f->fmt.pix.bytesperline;
+
+ ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+ __func__, fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+ return 0;
+}
+
+static int cal_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ *f = ctx->v_fmt;
+
+ return 0;
+}
+
+static int cal_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret, found;
+
+ fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+ if (!fmt) {
+ ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
+ f->fmt.pix.pixelformat);
+
+ /* Just get the first one enumerated */
+ fmt = ctx->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
+
+ /* check for/find a valid width/height */
+ ret = 0;
+ found = false;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ break;
+
+ if ((f->fmt.pix.width == fse.max_width) &&
+ (f->fmt.pix.height == fse.max_height)) {
+ found = true;
+ break;
+ } else if ((f->fmt.pix.width >= fse.min_width) &&
+ (f->fmt.pix.width <= fse.max_width) &&
+ (f->fmt.pix.height >= fse.min_height) &&
+ (f->fmt.pix.height <= fse.max_height)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* use existing values as default */
+ f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
+ f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
+ }
+
+ /*
+	 * Use the current colorspace for now; it will get
+	 * updated properly during s_fmt
+ */
+ f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
+ return cal_calc_format_size(ctx, fmt, f);
+}
+
+static int cal_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ struct vb2_queue *q = &ctx->vb_vidq;
+ const struct cal_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ if (vb2_is_busy(q)) {
+ ctx_dbg(3, ctx, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = cal_try_fmt_vid_cap(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+
+ v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
+
+ ret = __subdev_set_format(ctx, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ /* Just double check nothing has gone wrong */
+ if (mbus_fmt.code != fmt->code) {
+ ctx_dbg(3, ctx,
+ "%s subdev changed format on us, this should not happen\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+ ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
+ cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+ ctx->fmt = fmt;
+ ctx->m_fmt = mbus_fmt;
+ *f = ctx->v_fmt;
+
+ return 0;
+}
+
+static int cal_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret;
+
+ /* check for valid format */
+ fmt = find_format_by_pix(ctx, fsize->pixel_format);
+ if (!fmt) {
+ ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ fse.index = fsize->index;
+ fse.pad = 0;
+ fse.code = fmt->code;
+
+ ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
+ ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int cal_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index >= CAL_NUM_INPUT)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ sprintf(inp->name, "Camera %u", inp->index);
+ return 0;
+}
+
+static int cal_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ *i = ctx->input;
+ return 0;
+}
+
+static int cal_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ if (i >= CAL_NUM_INPUT)
+ return -EINVAL;
+
+ ctx->input = i;
+ return 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+static int cal_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ fmt = find_format_by_pix(ctx, fival->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fie.code = fmt->code;
+ ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
+ NULL, &fie);
+ if (ret)
+ return ret;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+/*
+ * Videobuf operations
+ */
+static int cal_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
+
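+	/* Make sure at least 3 buffers are allocated in total */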
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
+
+ return 0;
+}
+
+static int cal_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+ vb.vb2_buf);
+ unsigned long size;
+
+ if (WARN_ON(!ctx->fmt))
+ return -EINVAL;
+
+ size = ctx->v_fmt.fmt.pix.sizeimage;
+ if (vb2_plane_size(vb, 0) < size) {
+ ctx_err(ctx,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+ return 0;
+}
+
+static void cal_buffer_queue(struct vb2_buffer *vb)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+ vb.vb2_buf);
+ struct cal_dmaqueue *vidq = &ctx->vidq;
+ unsigned long flags = 0;
+
+ /* recheck locking */
+ spin_lock_irqsave(&ctx->slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cal_dmaqueue *dma_q = &ctx->vidq;
+ struct cal_buffer *buf, *tmp;
+ unsigned long addr = 0;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->slock, flags);
+ if (list_empty(&dma_q->active)) {
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ ctx_dbg(3, ctx, "buffer queue is empty\n");
+ return -EIO;
+ }
+
+ buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+ ctx->cur_frm = buf;
+ ctx->next_frm = buf;
+ list_del(&buf->list);
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
+ ctx->sequence = 0;
+
+ ret = cal_get_external_info(ctx);
+ if (ret < 0)
+ goto err;
+
+ cal_runtime_get(ctx->dev);
+
+ enable_irqs(ctx);
+ camerarx_phy_enable(ctx);
+ csi2_init(ctx);
+ csi2_phy_config(ctx);
+ csi2_lane_config(ctx);
+ csi2_ctx_config(ctx);
+ pix_proc_config(ctx);
+ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
+ ctx->v_fmt.fmt.pix.height);
+ cal_wr_dma_addr(ctx, addr);
+ csi2_ppi_enable(ctx);
+
+ ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
+ if (ret) {
+ ctx_err(ctx, "stream on failed in subdev\n");
+ cal_runtime_put(ctx->dev);
+ goto err;
+ }
+
+ if (debug >= 4)
+ cal_quickdump_regs(ctx->dev);
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ return ret;
+}
+
+static void cal_stop_streaming(struct vb2_queue *vq)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cal_dmaqueue *dma_q = &ctx->vidq;
+ struct cal_buffer *buf, *tmp;
+ unsigned long flags;
+
+ if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
+ ctx_err(ctx, "stream off failed in subdev\n");
+
+ csi2_ppi_disable(ctx);
+ disable_irqs(ctx);
+
+ /* Release all active buffers */
+ spin_lock_irqsave(&ctx->slock, flags);
+ list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
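+	/* Release the buffers still in flight, avoiding a double release when cur == next */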
+ if (ctx->cur_frm == ctx->next_frm) {
+ vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ ctx->cur_frm = NULL;
+ ctx->next_frm = NULL;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ cal_runtime_put(ctx->dev);
+}
+
+static const struct vb2_ops cal_video_qops = {
+ .queue_setup = cal_queue_setup,
+ .buf_prepare = cal_buffer_prepare,
+ .buf_queue = cal_buffer_queue,
+ .start_streaming = cal_start_streaming,
+ .stop_streaming = cal_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static const struct v4l2_file_operations cal_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops cal_ioctl_ops = {
+ .vidioc_querycap = cal_querycap,
+ .vidioc_enum_fmt_vid_cap = cal_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cal_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cal_s_fmt_vid_cap,
+ .vidioc_enum_framesizes = cal_enum_framesizes,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_enum_input = cal_enum_input,
+ .vidioc_g_input = cal_g_input,
+ .vidioc_s_input = cal_s_input,
+ .vidioc_enum_frameintervals = cal_enum_frameintervals,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct video_device cal_videodev = {
+ .name = CAL_MODULE_NAME,
+ .fops = &cal_fops,
+ .ioctl_ops = &cal_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+/* -----------------------------------------------------------------
+ * Initialization and module stuff
+ * ------------------------------------------------------------------
+ */
+static int cal_complete_ctx(struct cal_ctx *ctx);
+
+static int cal_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct cal_ctx *ctx = notifier_to_ctx(notifier);
+ struct v4l2_subdev_mbus_code_enum mbus_code;
+ int ret = 0;
+ int i, j, k;
+
+ if (ctx->sensor) {
+ ctx_info(ctx, "Rejecting subdev %s (Already set!!)",
+ subdev->name);
+ return 0;
+ }
+
+ ctx->sensor = subdev;
+ ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
+
+ /* Enumerate sub device formats and enable all matching local formats */
+ ctx->num_active_fmt = 0;
+ for (j = 0, i = 0; ret != -EINVAL; ++j) {
+ struct cal_fmt *fmt;
+
+ memset(&mbus_code, 0, sizeof(mbus_code));
+ mbus_code.index = j;
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code);
+ if (ret)
+ continue;
+
+ ctx_dbg(2, ctx,
+ "subdev %s: code: %04x idx: %d\n",
+ subdev->name, mbus_code.code, j);
+
+ for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
+ fmt = &cal_formats[k];
+
+ if (mbus_code.code == fmt->code) {
+ ctx->active_fmt[i] = fmt;
+ ctx_dbg(2, ctx,
+ "matched fourcc: %s: code: %04x idx: %d\n",
+ fourcc_to_str(fmt->fourcc),
+ fmt->code, i);
+ ctx->num_active_fmt = ++i;
+ }
+ }
+ }
+
+ if (i == 0) {
+ ctx_err(ctx, "No suitable format reported by subdev %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
+
+ cal_complete_ctx(ctx);
+
+ return 0;
+}
+
+static int cal_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct cal_ctx *ctx = notifier_to_ctx(notifier);
+ const struct cal_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ ret = __subdev_get_format(ctx, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ fmt = find_format_by_code(ctx, mbus_fmt.code);
+ if (!fmt) {
+ ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
+ mbus_fmt.code);
+ return -EINVAL;
+ }
+
+ /* Save current subdev format */
+ v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+ ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
+ cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+ ctx->fmt = fmt;
+ ctx->m_fmt = mbus_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations cal_async_ops = {
+ .bound = cal_async_bound,
+ .complete = cal_async_complete,
+};
+
+static int cal_complete_ctx(struct cal_ctx *ctx)
+{
+ struct video_device *vfd;
+ struct vb2_queue *q;
+ int ret;
+
+ ctx->timeperframe = tpf_default;
+ ctx->external_rate = 192000000;
+
+ /* initialize locks */
+ spin_lock_init(&ctx->slock);
+ mutex_init(&ctx->mutex);
+
+ /* initialize queue */
+ q = &ctx->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = ctx;
+ q->buf_struct_size = sizeof(struct cal_buffer);
+ q->ops = &cal_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ctx->mutex;
+ q->min_buffers_needed = 3;
+ q->dev = ctx->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&ctx->vidq.active);
+
+ vfd = &ctx->vdev;
+ *vfd = cal_videodev;
+ vfd->v4l2_dev = &ctx->v4l2_dev;
+ vfd->queue = q;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
+ vfd->lock = &ctx->mutex;
+ video_set_drvdata(vfd, ctx);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
+ if (ret < 0)
+ return ret;
+
+ v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(vfd));
+
+ return 0;
+}
+
+static struct device_node *
+of_get_next_port(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *port = NULL;
+
+ if (!parent)
+ return NULL;
+
+ if (!prev) {
+ struct device_node *ports;
+ /*
+		 * It's the first call; we have to find a port subnode
+ * within this node or within an optional 'ports' node.
+ */
+ ports = of_get_child_by_name(parent, "ports");
+ if (ports)
+ parent = ports;
+
+ port = of_get_child_by_name(parent, "port");
+
+ /* release the 'ports' node */
+ of_node_put(ports);
+ } else {
+ struct device_node *ports;
+
+ ports = of_get_parent(prev);
+ if (!ports)
+ return NULL;
+
+ do {
+ port = of_get_next_child(ports, prev);
+ if (!port) {
+ of_node_put(ports);
+ return NULL;
+ }
+ prev = port;
+ } while (of_node_cmp(port->name, "port") != 0);
+ }
+
+ return port;
+}
+
+static struct device_node *
+of_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *ep = NULL;
+
+ if (!parent)
+ return NULL;
+
+ do {
+ ep = of_get_next_child(parent, prev);
+ if (!ep)
+ return NULL;
+ prev = ep;
+ } while (of_node_cmp(ep->name, "endpoint") != 0);
+
+ return ep;
+}
+
+static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
+{
+ struct platform_device *pdev = ctx->dev->pdev;
+ struct device_node *ep_node, *port, *remote_ep,
+ *sensor_node, *parent;
+ struct v4l2_fwnode_endpoint *endpoint;
+ struct v4l2_async_subdev *asd;
+ u32 regval = 0;
+ int ret, index, found_port = 0, lane;
+
+ parent = pdev->dev.of_node;
+
+ asd = &ctx->asd;
+ endpoint = &ctx->endpoint;
+
+ ep_node = NULL;
+ port = NULL;
+ remote_ep = NULL;
+ sensor_node = NULL;
+ ret = -EINVAL;
+
+ ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
+ for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
+ port = of_get_next_port(parent, port);
+ if (!port) {
+ ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
+ index);
+ goto cleanup_exit;
+ }
+
+		/* Match the csi2 port number against the port's 'reg' property */
+ of_property_read_u32(port, "reg", &regval);
+ ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
+ index, inst, regval);
+ if ((regval == inst) && (index == inst)) {
+ found_port = 1;
+ break;
+ }
+ }
+
+ if (!found_port) {
+ ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
+ inst);
+ goto cleanup_exit;
+ }
+
+ ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
+ inst);
+
+ ep_node = of_get_next_endpoint(port, ep_node);
+ if (!ep_node) {
+ ctx_dbg(3, ctx, "can't get next endpoint\n");
+ goto cleanup_exit;
+ }
+
+ sensor_node = of_graph_get_remote_port_parent(ep_node);
+ if (!sensor_node) {
+ ctx_dbg(3, ctx, "can't get remote parent\n");
+ goto cleanup_exit;
+ }
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode = of_fwnode_handle(sensor_node);
+
+ remote_ep = of_graph_get_remote_endpoint(ep_node);
+ if (!remote_ep) {
+ ctx_dbg(3, ctx, "can't get remote-endpoint\n");
+ goto cleanup_exit;
+ }
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(remote_ep), endpoint);
+
+ if (endpoint->bus_type != V4L2_MBUS_CSI2) {
+ ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
+ inst, sensor_node->name);
+ goto cleanup_exit;
+ }
+
+ /* Store Virtual Channel number */
+ ctx->virtual_channel = endpoint->base.id;
+
+ ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
+ ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
+ ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
+ ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
+ ctx_dbg(3, ctx, "num_data_lanes=%d\n",
+ endpoint->bus.mipi_csi2.num_data_lanes);
+ ctx_dbg(3, ctx, "data_lanes= <\n");
+ for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
+ ctx_dbg(3, ctx, "\t%d\n",
+ endpoint->bus.mipi_csi2.data_lanes[lane]);
+ ctx_dbg(3, ctx, "\t>\n");
+
+ ctx_dbg(1, ctx, "Port: %d found sub-device %s\n",
+ inst, sensor_node->name);
+
+ ctx->asd_list[0] = asd;
+ ctx->notifier.subdevs = ctx->asd_list;
+ ctx->notifier.num_subdevs = 1;
+ ctx->notifier.ops = &cal_async_ops;
+ ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
+ &ctx->notifier);
+ if (ret) {
+ ctx_err(ctx, "Error registering async notifier\n");
+ ret = -EINVAL;
+ }
+
+cleanup_exit:
+ if (remote_ep)
+ of_node_put(remote_ep);
+ if (sensor_node)
+ of_node_put(sensor_node);
+ if (ep_node)
+ of_node_put(ep_node);
+ if (port)
+ of_node_put(port);
+
+ return ret;
+}
+
+static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
+{
+ struct cal_ctx *ctx;
+ struct v4l2_ctrl_handler *hdl;
+ int ret;
+
+ ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ /* save the cal_dev * for future ref */
+ ctx->dev = dev;
+
+ snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
+ "%s-%03d", CAL_MODULE_NAME, inst);
+ ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
+ if (ret)
+ goto err_exit;
+
+ hdl = &ctx->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(hdl, 11);
+ if (ret) {
+ ctx_err(ctx, "Failed to init ctrl handler\n");
+ goto unreg_dev;
+ }
+ ctx->v4l2_dev.ctrl_handler = hdl;
+
+ /* Make sure Camera Core H/W register area is available */
+ ctx->cc = dev->cc[inst];
+
+ /* Store the instance id */
+ ctx->csi2_port = inst + 1;
+
+ ret = of_cal_create_instance(ctx, inst);
+ if (ret) {
+ ret = -EINVAL;
+ goto free_hdl;
+ }
+ return ctx;
+
+free_hdl:
+ v4l2_ctrl_handler_free(hdl);
+unreg_dev:
+ v4l2_device_unregister(&ctx->v4l2_dev);
+err_exit:
+ return NULL;
+}
+
+static int cal_probe(struct platform_device *pdev)
+{
+ struct cal_dev *dev;
+ int ret;
+ int irq;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ /* set pseudo v4l2 device name so we can use v4l2_printk */
+ strlcpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
+ sizeof(dev->v4l2_dev.name));
+
+ /* save pdev pointer */
+ dev->pdev = pdev;
+
+ dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cal_top");
+ dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+ dev->res->name, &dev->res->start, &dev->res->end);
+
+ irq = platform_get_irq(pdev, 0);
+ cal_dbg(1, dev, "got irq# %d\n", irq);
+ ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
+ dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->cm = cm_create(dev);
+ if (IS_ERR(dev->cm))
+ return PTR_ERR(dev->cm);
+
+ dev->cc[0] = cc_create(dev, 0);
+ if (IS_ERR(dev->cc[0]))
+ return PTR_ERR(dev->cc[0]);
+
+ dev->cc[1] = cc_create(dev, 1);
+ if (IS_ERR(dev->cc[1]))
+ return PTR_ERR(dev->cc[1]);
+
+ dev->ctx[0] = NULL;
+ dev->ctx[1] = NULL;
+
+ dev->ctx[0] = cal_create_instance(dev, 0);
+ dev->ctx[1] = cal_create_instance(dev, 1);
+ if (!dev->ctx[0] && !dev->ctx[1]) {
+ cal_err(dev, "Neither port is configured, no point in staying up\n");
+ return -ENODEV;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = cal_runtime_get(dev);
+ if (ret)
+ goto runtime_disable;
+
+ /* Just check we can actually access the module */
+ cal_get_hwinfo(dev);
+
+ cal_runtime_put(dev);
+
+ return 0;
+
+runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int cal_remove(struct platform_device *pdev)
+{
+ struct cal_dev *dev =
+ (struct cal_dev *)platform_get_drvdata(pdev);
+ struct cal_ctx *ctx;
+ int i;
+
+ cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
+
+ cal_runtime_get(dev);
+
+ for (i = 0; i < CAL_NUM_CONTEXT; i++) {
+ ctx = dev->ctx[i];
+ if (ctx) {
+ ctx_dbg(1, ctx, "unregistering %s\n",
+ video_device_node_name(&ctx->vdev));
+ camerarx_phy_disable(ctx);
+ v4l2_async_notifier_unregister(&ctx->notifier);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_device_unregister(&ctx->v4l2_dev);
+ video_unregister_device(&ctx->vdev);
+ }
+ }
+
+ cal_runtime_put(dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id cal_of_match[] = {
+ { .compatible = "ti,dra72-cal", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cal_of_match);
+#endif
+
+static struct platform_driver cal_pdrv = {
+ .probe = cal_probe,
+ .remove = cal_remove,
+ .driver = {
+ .name = CAL_MODULE_NAME,
+ .of_match_table = of_match_ptr(cal_of_match),
+ },
+};
+
+module_platform_driver(cal_pdrv);
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h
new file mode 100644
index 000000000..82b3dcf87
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/cal_regs.h
@@ -0,0 +1,479 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ *
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_CAL_REGS_H
+#define __TI_CAL_REGS_H
+
+#define CAL_NUM_CSI2_PORTS 2
+
+/* CAL register offsets */
+
+#define CAL_HL_REVISION 0x0000
+#define CAL_HL_HWINFO 0x0004
+#define CAL_HL_SYSCONFIG 0x0010
+#define CAL_HL_IRQ_EOI 0x001c
+#define CAL_HL_IRQSTATUS_RAW(m) (0x20U + ((m-1) * 0x10U))
+#define CAL_HL_IRQSTATUS(m) (0x24U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_SET(m) (0x28U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_CLR(m) (0x2cU + ((m-1) * 0x10U))
+#define CAL_PIX_PROC(m) (0xc0U + ((m-1) * 0x4U))
+#define CAL_CTRL 0x100
+#define CAL_CTRL1 0x104
+#define CAL_LINE_NUMBER_EVT 0x108
+#define CAL_VPORT_CTRL1 0x120
+#define CAL_VPORT_CTRL2 0x124
+#define CAL_BYS_CTRL1 0x130
+#define CAL_BYS_CTRL2 0x134
+#define CAL_RD_DMA_CTRL 0x140
+#define CAL_RD_DMA_PIX_ADDR 0x144
+#define CAL_RD_DMA_PIX_OFST 0x148
+#define CAL_RD_DMA_XSIZE 0x14c
+#define CAL_RD_DMA_YSIZE 0x150
+#define CAL_RD_DMA_INIT_ADDR 0x154
+#define CAL_RD_DMA_INIT_OFST 0x168
+#define CAL_RD_DMA_CTRL2 0x16c
+#define CAL_WR_DMA_CTRL(m) (0x200U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_ADDR(m) (0x204U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_OFST(m) (0x208U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_XSIZE(m) (0x20cU + ((m-1) * 0x10U))
+#define CAL_CSI2_PPI_CTRL(m) (0x300U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_CFG(m) (0x304U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQSTATUS(m) (0x308U + ((m-1) * 0x80U))
+#define CAL_CSI2_SHORT_PACKET(m) (0x30cU + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQENABLE(m) (0x310U + ((m-1) * 0x80U))
+#define CAL_CSI2_TIMING(m) (0x314U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQENABLE(m) (0x318U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQSTATUS(m) (0x328U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX0(m) (0x330U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX1(m) (0x334U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX2(m) (0x338U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX3(m) (0x33cU + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX4(m) (0x340U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX5(m) (0x344U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX6(m) (0x348U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX7(m) (0x34cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS0(m) (0x350U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS1(m) (0x354U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS2(m) (0x358U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS3(m) (0x35cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS4(m) (0x360U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS5(m) (0x364U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS6(m) (0x368U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS7(m) (0x36cU + ((m-1) * 0x80U))
+
+/* CAL CSI2 PHY register offsets */
+#define CAL_CSI2_PHY_REG0 0x000
+#define CAL_CSI2_PHY_REG1 0x004
+#define CAL_CSI2_PHY_REG2 0x008
+
+/* CAL Control Module Core Camerrx Control register offsets */
+#define CM_CTRL_CORE_CAMERRX_CONTROL 0x000
+
+/*********************************************************************
+* Generic values used in various fields below
+*********************************************************************/
+
+#define CAL_GEN_DISABLE 0
+#define CAL_GEN_ENABLE 1
+#define CAL_GEN_FALSE 0
+#define CAL_GEN_TRUE 1
+
+/*********************************************************************
+* Field Definition Macros
+*********************************************************************/
+
+#define CAL_HL_REVISION_MINOR_MASK GENMASK(5, 0)
+#define CAL_HL_REVISION_CUSTOM_MASK GENMASK(7, 6)
+#define CAL_HL_REVISION_MAJOR_MASK GENMASK(10, 8)
+#define CAL_HL_REVISION_RTL_MASK GENMASK(15, 11)
+#define CAL_HL_REVISION_FUNC_MASK GENMASK(27, 16)
+#define CAL_HL_REVISION_SCHEME_MASK GENMASK(31, 30)
+#define CAL_HL_REVISION_SCHEME_H08 1
+#define CAL_HL_REVISION_SCHEME_LEGACY 0
+
+#define CAL_HL_HWINFO_WFIFO_MASK GENMASK(3, 0)
+#define CAL_HL_HWINFO_RFIFO_MASK GENMASK(7, 4)
+#define CAL_HL_HWINFO_PCTX_MASK GENMASK(12, 8)
+#define CAL_HL_HWINFO_WCTX_MASK GENMASK(18, 13)
+#define CAL_HL_HWINFO_VFIFO_MASK GENMASK(22, 19)
+#define CAL_HL_HWINFO_NCPORT_MASK GENMASK(27, 23)
+#define CAL_HL_HWINFO_NPPI_CTXS0_MASK GENMASK(29, 28)
+#define CAL_HL_HWINFO_NPPI_CTXS1_MASK GENMASK(31, 30)
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_ZERO 0
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_FOUR 1
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT 2
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED 3
+
+#define CAL_HL_SYSCONFIG_SOFTRESET_MASK BIT_MASK(0)
+#define CAL_HL_SYSCONFIG_SOFTRESET_DONE 0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING 0x1
+#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION 0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_RESET 0x1
+#define CAL_HL_SYSCONFIG_IDLE_MASK GENMASK(3, 2)
+#define CAL_HL_SYSCONFIG_IDLEMODE_FORCE 0
+#define CAL_HL_SYSCONFIG_IDLEMODE_NO 1
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1 2
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2 3
+
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK BIT_MASK(0)
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0 0
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0
+
+#define CAL_HL_IRQ_MASK(m) BIT_MASK(m-1)
+#define CAL_HL_IRQ_NOACTION 0x0
+#define CAL_HL_IRQ_ENABLE 0x1
+#define CAL_HL_IRQ_CLEAR 0x1
+#define CAL_HL_IRQ_DISABLED 0x0
+#define CAL_HL_IRQ_ENABLED 0x1
+#define CAL_HL_IRQ_PENDING 0x1
+
+#define CAL_PIX_PROC_EN_MASK BIT_MASK(0)
+#define CAL_PIX_PROC_EXTRACT_MASK GENMASK(4, 1)
+#define CAL_PIX_PROC_EXTRACT_B6 0x0
+#define CAL_PIX_PROC_EXTRACT_B7 0x1
+#define CAL_PIX_PROC_EXTRACT_B8 0x2
+#define CAL_PIX_PROC_EXTRACT_B10 0x3
+#define CAL_PIX_PROC_EXTRACT_B10_MIPI 0x4
+#define CAL_PIX_PROC_EXTRACT_B12 0x5
+#define CAL_PIX_PROC_EXTRACT_B12_MIPI 0x6
+#define CAL_PIX_PROC_EXTRACT_B14 0x7
+#define CAL_PIX_PROC_EXTRACT_B14_MIPI 0x8
+#define CAL_PIX_PROC_EXTRACT_B16_BE 0x9
+#define CAL_PIX_PROC_EXTRACT_B16_LE 0xa
+#define CAL_PIX_PROC_DPCMD_MASK GENMASK(9, 5)
+#define CAL_PIX_PROC_DPCMD_BYPASS 0x0
+#define CAL_PIX_PROC_DPCMD_DPCM_10_8_1 0x2
+#define CAL_PIX_PROC_DPCMD_DPCM_12_8_1 0x8
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_1 0x4
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_2 0x5
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_1 0x6
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_2 0x7
+#define CAL_PIX_PROC_DPCMD_DPCM_12_7_1 0xa
+#define CAL_PIX_PROC_DPCMD_DPCM_12_6_1 0xc
+#define CAL_PIX_PROC_DPCMD_DPCM_14_10 0xe
+#define CAL_PIX_PROC_DPCMD_DPCM_14_8_1 0x10
+#define CAL_PIX_PROC_DPCMD_DPCM_16_12_1 0x12
+#define CAL_PIX_PROC_DPCMD_DPCM_16_10_1 0x14
+#define CAL_PIX_PROC_DPCMD_DPCM_16_8_1 0x16
+#define CAL_PIX_PROC_DPCME_MASK GENMASK(15, 11)
+#define CAL_PIX_PROC_DPCME_BYPASS 0x0
+#define CAL_PIX_PROC_DPCME_DPCM_10_8_1 0x2
+#define CAL_PIX_PROC_DPCME_DPCM_12_8_1 0x8
+#define CAL_PIX_PROC_DPCME_DPCM_14_10 0xe
+#define CAL_PIX_PROC_DPCME_DPCM_14_8_1 0x10
+#define CAL_PIX_PROC_DPCME_DPCM_16_12_1 0x12
+#define CAL_PIX_PROC_DPCME_DPCM_16_10_1 0x14
+#define CAL_PIX_PROC_DPCME_DPCM_16_8_1 0x16
+#define CAL_PIX_PROC_PACK_MASK GENMASK(18, 16)
+#define CAL_PIX_PROC_PACK_B8 0x0
+#define CAL_PIX_PROC_PACK_B10_MIPI 0x2
+#define CAL_PIX_PROC_PACK_B12 0x3
+#define CAL_PIX_PROC_PACK_B12_MIPI 0x4
+#define CAL_PIX_PROC_PACK_B16 0x5
+#define CAL_PIX_PROC_PACK_ARGB 0x6
+#define CAL_PIX_PROC_CPORT_MASK GENMASK(23, 19)
+
+#define CAL_CTRL_POSTED_WRITES_MASK BIT_MASK(0)
+#define CAL_CTRL_POSTED_WRITES_NONPOSTED 0
+#define CAL_CTRL_POSTED_WRITES 1
+#define CAL_CTRL_TAGCNT_MASK GENMASK(4, 1)
+#define CAL_CTRL_BURSTSIZE_MASK GENMASK(6, 5)
+#define CAL_CTRL_BURSTSIZE_BURST16 0x0
+#define CAL_CTRL_BURSTSIZE_BURST32 0x1
+#define CAL_CTRL_BURSTSIZE_BURST64 0x2
+#define CAL_CTRL_BURSTSIZE_BURST128 0x3
+#define CAL_CTRL_LL_FORCE_STATE_MASK GENMASK(12, 7)
+#define CAL_CTRL_MFLAGL_MASK GENMASK(20, 13)
+#define CAL_CTRL_PWRSCPCLK_MASK BIT_MASK(21)
+#define CAL_CTRL_PWRSCPCLK_AUTO 0
+#define CAL_CTRL_PWRSCPCLK_FORCE 1
+#define CAL_CTRL_RD_DMA_STALL_MASK BIT_MASK(22)
+#define CAL_CTRL_MFLAGH_MASK GENMASK(31, 24)
+
+#define CAL_CTRL1_PPI_GROUPING_MASK GENMASK(1, 0)
+#define CAL_CTRL1_PPI_GROUPING_DISABLED 0
+#define CAL_CTRL1_PPI_GROUPING_RESERVED 1
+#define CAL_CTRL1_PPI_GROUPING_0 2
+#define CAL_CTRL1_PPI_GROUPING_1 3
+#define CAL_CTRL1_INTERLEAVE01_MASK GENMASK(3, 2)
+#define CAL_CTRL1_INTERLEAVE01_DISABLED 0
+#define CAL_CTRL1_INTERLEAVE01_PIX1 1
+#define CAL_CTRL1_INTERLEAVE01_PIX4 2
+#define CAL_CTRL1_INTERLEAVE01_RESERVED 3
+#define CAL_CTRL1_INTERLEAVE23_MASK GENMASK(5, 4)
+#define CAL_CTRL1_INTERLEAVE23_DISABLED 0
+#define CAL_CTRL1_INTERLEAVE23_PIX1 1
+#define CAL_CTRL1_INTERLEAVE23_PIX4 2
+#define CAL_CTRL1_INTERLEAVE23_RESERVED 3
+
+#define CAL_LINE_NUMBER_EVT_CPORT_MASK GENMASK(4, 0)
+#define CAL_LINE_NUMBER_EVT_MASK GENMASK(29, 16)
+
+#define CAL_VPORT_CTRL1_PCLK_MASK GENMASK(16, 0)
+#define CAL_VPORT_CTRL1_XBLK_MASK GENMASK(24, 17)
+#define CAL_VPORT_CTRL1_YBLK_MASK GENMASK(30, 25)
+#define CAL_VPORT_CTRL1_WIDTH_MASK BIT_MASK(31)
+#define CAL_VPORT_CTRL1_WIDTH_ONE 0
+#define CAL_VPORT_CTRL1_WIDTH_TWO 1
+
+#define CAL_VPORT_CTRL2_CPORT_MASK GENMASK(4, 0)
+#define CAL_VPORT_CTRL2_FREERUNNING_MASK BIT_MASK(15)
+#define CAL_VPORT_CTRL2_FREERUNNING_GATED 0
+#define CAL_VPORT_CTRL2_FREERUNNING_FREE 1
+#define CAL_VPORT_CTRL2_FS_RESETS_MASK BIT_MASK(16)
+#define CAL_VPORT_CTRL2_FS_RESETS_NO 0
+#define CAL_VPORT_CTRL2_FS_RESETS_YES 1
+#define CAL_VPORT_CTRL2_FSM_RESET_MASK BIT_MASK(17)
+#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT 0
+#define CAL_VPORT_CTRL2_FSM_RESET 1
+#define CAL_VPORT_CTRL2_RDY_THR_MASK GENMASK(31, 18)
+
+#define CAL_BYS_CTRL1_PCLK_MASK GENMASK(16, 0)
+#define CAL_BYS_CTRL1_XBLK_MASK GENMASK(24, 17)
+#define CAL_BYS_CTRL1_YBLK_MASK GENMASK(30, 25)
+#define CAL_BYS_CTRL1_BYSINEN_MASK BIT_MASK(31)
+
+#define CAL_BYS_CTRL2_CPORTIN_MASK GENMASK(4, 0)
+#define CAL_BYS_CTRL2_CPORTOUT_MASK GENMASK(9, 5)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK BIT_MASK(10)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO 0
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES 1
+#define CAL_BYS_CTRL2_FREERUNNING_MASK BIT_MASK(11)
+#define CAL_BYS_CTRL2_FREERUNNING_NO 0
+#define CAL_BYS_CTRL2_FREERUNNING_YES 1
+
+#define CAL_RD_DMA_CTRL_GO_MASK BIT_MASK(0)
+#define CAL_RD_DMA_CTRL_GO_DIS 0
+#define CAL_RD_DMA_CTRL_GO_EN 1
+#define CAL_RD_DMA_CTRL_GO_IDLE 0
+#define CAL_RD_DMA_CTRL_GO_BUSY 1
+#define CAL_RD_DMA_CTRL_INIT_MASK BIT_MASK(1)
+#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK GENMASK(10, 2)
+#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK GENMASK(14, 11)
+#define CAL_RD_DMA_CTRL_PCLK_MASK GENMASK(31, 15)
+
+#define CAL_RD_DMA_PIX_ADDR_MASK GENMASK(31, 3)
+
+#define CAL_RD_DMA_PIX_OFST_MASK GENMASK(31, 4)
+
+#define CAL_RD_DMA_XSIZE_MASK GENMASK(31, 19)
+
+#define CAL_RD_DMA_YSIZE_MASK GENMASK(29, 16)
+
+#define CAL_RD_DMA_INIT_ADDR_MASK GENMASK(31, 3)
+
+#define CAL_RD_DMA_INIT_OFST_MASK GENMASK(31, 3)
+
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_MASK GENMASK(2, 0)
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_DIS 0
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_ONE 1
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_FOUR 2
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN 3
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR 4
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED 5
+#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK BIT_MASK(3)
+#define CAL_RD_DMA_CTRL2_PATTERN_MASK GENMASK(5, 4)
+#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR 0
+#define CAL_RD_DMA_CTRL2_PATTERN_YUV420 1
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2 2
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4 3
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK BIT_MASK(6)
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING 0
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT 1
+#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK GENMASK(29, 16)
+
+#define CAL_WR_DMA_CTRL_MODE_MASK GENMASK(2, 0)
+#define CAL_WR_DMA_CTRL_MODE_DIS 0
+#define CAL_WR_DMA_CTRL_MODE_SHD 1
+#define CAL_WR_DMA_CTRL_MODE_CNT 2
+#define CAL_WR_DMA_CTRL_MODE_CNT_INIT 3
+#define CAL_WR_DMA_CTRL_MODE_CONST 4
+#define CAL_WR_DMA_CTRL_MODE_RESERVED 5
+#define CAL_WR_DMA_CTRL_PATTERN_MASK GENMASK(4, 3)
+#define CAL_WR_DMA_CTRL_PATTERN_LINEAR 0
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2 2
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4 3
+#define CAL_WR_DMA_CTRL_PATTERN_RESERVED 1
+#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK BIT_MASK(5)
+#define CAL_WR_DMA_CTRL_DTAG_MASK GENMASK(8, 6)
+#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR 0
+#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT 1
+#define CAL_WR_DMA_CTRL_DTAG 2
+#define CAL_WR_DMA_CTRL_DTAG_PIX_HDR 3
+#define CAL_WR_DMA_CTRL_DTAG_PIX_DAT 4
+#define CAL_WR_DMA_CTRL_DTAG_D5 5
+#define CAL_WR_DMA_CTRL_DTAG_D6 6
+#define CAL_WR_DMA_CTRL_DTAG_D7 7
+#define CAL_WR_DMA_CTRL_CPORT_MASK GENMASK(13, 9)
+#define CAL_WR_DMA_CTRL_STALL_RD_MASK BIT_MASK(14)
+#define CAL_WR_DMA_CTRL_YSIZE_MASK GENMASK(31, 18)
+
+#define CAL_WR_DMA_ADDR_MASK GENMASK(31, 4)
+
+#define CAL_WR_DMA_OFST_MASK GENMASK(18, 4)
+#define CAL_WR_DMA_OFST_CIRC_MODE_MASK GENMASK(23, 22)
+#define CAL_WR_DMA_OFST_CIRC_MODE_ONE 1
+#define CAL_WR_DMA_OFST_CIRC_MODE_FOUR 2
+#define CAL_WR_DMA_OFST_CIRC_MODE_SIXTYFOUR 3
+#define CAL_WR_DMA_OFST_CIRC_MODE_DISABLED 0
+#define CAL_WR_DMA_OFST_CIRC_SIZE_MASK GENMASK(31, 24)
+
+#define CAL_WR_DMA_XSIZE_XSKIP_MASK GENMASK(15, 3)
+#define CAL_WR_DMA_XSIZE_MASK GENMASK(31, 19)
+
+#define CAL_CSI2_PPI_CTRL_IF_EN_MASK BIT_MASK(0)
+#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK BIT_MASK(2)
+#define CAL_CSI2_PPI_CTRL_FRAME_MASK BIT_MASK(3)
+#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE 0
+#define CAL_CSI2_PPI_CTRL_FRAME 1
+
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK GENMASK(2, 0)
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_5 5
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_4 4
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_3 3
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2 2
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1 1
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED 0
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS 0
+#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS 1
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK GENMASK(6, 4)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK GENMASK(10, 8)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK GENMASK(14, 12)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK GENMASK(18, 16)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK GENMASK(26, 25)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF 0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON 1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ULP 2
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK GENMASK(28, 27)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF 0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON 1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP 2
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK BIT_MASK(29)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED 1
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING 0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL 0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL 1
+
+#define CAL_CSI2_SHORT_PACKET_MASK GENMASK(23, 0)
+
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK BIT_MASK(0)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK BIT_MASK(1)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK BIT_MASK(2)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK BIT_MASK(4)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK BIT_MASK(5)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK BIT_MASK(6)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK BIT_MASK(8)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK BIT_MASK(9)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK BIT_MASK(10)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK BIT_MASK(12)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK BIT_MASK(13)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK BIT_MASK(14)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK BIT_MASK(16)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT_MASK(17)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT_MASK(18)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT_MASK(20)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT_MASK(21)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT_MASK(22)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK BIT_MASK(23)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK BIT_MASK(25)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK BIT_MASK(26)
+#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK BIT_MASK(27)
+#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK BIT_MASK(28)
+#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK BIT_MASK(30)
+
+#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK GENMASK(12, 0)
+#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK BIT_MASK(13)
+#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT_MASK(14)
+#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT_MASK(15)
+
+#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK BIT_MASK(0)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK BIT_MASK(1)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK BIT_MASK(2)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK BIT_MASK(3)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK BIT_MASK(4)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK BIT_MASK(5)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK BIT_MASK(8)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK BIT_MASK(9)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK BIT_MASK(10)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK BIT_MASK(11)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK BIT_MASK(12)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK BIT_MASK(13)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK BIT_MASK(16)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK BIT_MASK(17)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK BIT_MASK(18)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK BIT_MASK(19)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK BIT_MASK(20)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK BIT_MASK(21)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK BIT_MASK(24)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK BIT_MASK(25)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK BIT_MASK(26)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK BIT_MASK(27)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK BIT_MASK(28)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK BIT_MASK(29)
+
+#define CAL_CSI2_CTX_DT_MASK GENMASK(5, 0)
+#define CAL_CSI2_CTX_VC_MASK GENMASK(7, 6)
+#define CAL_CSI2_CTX_CPORT_MASK GENMASK(12, 8)
+#define CAL_CSI2_CTX_ATT_MASK BIT_MASK(13)
+#define CAL_CSI2_CTX_ATT_PIX 0
+#define CAL_CSI2_CTX_ATT 1
+#define CAL_CSI2_CTX_PACK_MODE_MASK BIT_MASK(14)
+#define CAL_CSI2_CTX_PACK_MODE_LINE 0
+#define CAL_CSI2_CTX_PACK_MODE_FRAME 1
+#define CAL_CSI2_CTX_LINES_MASK GENMASK(29, 16)
+
+#define CAL_CSI2_STATUS_FRAME_MASK GENMASK(15, 0)
+
+#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG0_THS_TERM_MASK GENMASK(15, 8)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK BIT_MASK(24)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE 1
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE 0
+
+#define CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK GENMASK(9, 8)
+#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK GENMASK(17, 10)
+#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK GENMASK(24, 18)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK BIT_MASK(25)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR 1
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS 0
+#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK GENMASK(29, 28)
+
+#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK GENMASK(23, 0)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK GENMASK(25, 24)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK GENMASK(27, 26)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK GENMASK(29, 28)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK GENMASK(31, 30)
+
+#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK BIT_MASK(0)
+#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK GENMASK(2, 1)
+#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK GENMASK(4, 3)
+#define CM_CAMERRX_CTRL_CSI1_MODE_MASK BIT_MASK(5)
+#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK BIT_MASK(10)
+#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK GENMASK(12, 11)
+#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK GENMASK(16, 13)
+#define CM_CAMERRX_CTRL_CSI0_MODE_MASK BIT_MASK(17)
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
new file mode 100644
index 000000000..44b8465cf
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -0,0 +1,204 @@
+/*
+ * Color space converter library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include "csc.h"
+
+/*
+ * 12 coefficients in the order:
+ * a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
+ * (we may need to pass non-default values from user space later on; we might
+ * need to make the coefficient struct easier to populate)
+ */
+struct colorspace_coeffs {
+ u16 sd[12];
+ u16 hd[12];
+};
+
+/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
+#define CSC_COEFFS_VIDEO_RANGE_Y2R 0
+#define CSC_COEFFS_GRAPHICS_RANGE_Y2R 1
+#define CSC_COEFFS_VIDEO_RANGE_R2Y 2
+#define CSC_COEFFS_GRAPHICS_RANGE_R2Y 3
+
+/* default colorspace coefficients */
+static struct colorspace_coeffs colorspace_coeffs[4] = {
+ [CSC_COEFFS_VIDEO_RANGE_Y2R] = {
+ {
+ /* SDTV */
+ 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+ 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+ },
+ {
+ /* HDTV */
+ 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+ 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ },
+ },
+ [CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
+ {
+ /* SDTV */
+ 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+ 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ },
+ {
+ /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ },
+ },
+ [CSC_COEFFS_VIDEO_RANGE_R2Y] = {
+ {
+ /* SDTV */
+ 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+ 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+ },
+ {
+ /* HDTV */
+ 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+ 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+ },
+ },
+ [CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
+ {
+ /* SDTV */
+ 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+ 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+ },
+ {
+ /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ },
+ },
+};
+
+void csc_dump_regs(struct csc_data *csc)
+{
+ struct device *dev = &csc->pdev->dev;
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
+ ioread32(csc->base + CSC_##r))
+
+ dev_dbg(dev, "CSC Registers @ %pa:\n", &csc->res->start);
+
+ DUMPREG(CSC00);
+ DUMPREG(CSC01);
+ DUMPREG(CSC02);
+ DUMPREG(CSC03);
+ DUMPREG(CSC04);
+ DUMPREG(CSC05);
+
+#undef DUMPREG
+}
+EXPORT_SYMBOL(csc_dump_regs);
+
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
+{
+ *csc_reg5 |= CSC_BYPASS;
+}
+EXPORT_SYMBOL(csc_set_coeff_bypass);
+
+/*
+ * set the color space converter coefficient shadow register values
+ */
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+ enum v4l2_colorspace src_colorspace,
+ enum v4l2_colorspace dst_colorspace)
+{
+ u32 *csc_reg5 = csc_reg0 + 5;
+ u32 *shadow_csc = csc_reg0;
+ struct colorspace_coeffs *sd_hd_coeffs;
+ u16 *coeff, *end_coeff;
+ enum v4l2_colorspace yuv_colorspace;
+ int sel = 0;
+
+ /*
+	 * support only the graphics data range (full range) for now; a control
+	 * ioctl would be nice here
+ */
+ /* Y2R */
+ if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
+ (src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+ src_colorspace == V4L2_COLORSPACE_REC709)) {
+ /* Y2R */
+ sel = 1;
+ yuv_colorspace = src_colorspace;
+ } else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+ dst_colorspace == V4L2_COLORSPACE_REC709) &&
+ src_colorspace == V4L2_COLORSPACE_SRGB) {
+ /* R2Y */
+ sel = 3;
+ yuv_colorspace = dst_colorspace;
+ } else {
+ *csc_reg5 |= CSC_BYPASS;
+ return;
+ }
+
+ sd_hd_coeffs = &colorspace_coeffs[sel];
+
+ /* select between SD or HD coefficients */
+ if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
+ coeff = sd_hd_coeffs->sd;
+ else
+ coeff = sd_hd_coeffs->hd;
+
+ end_coeff = coeff + 12;
+
+ for (; coeff < end_coeff; coeff += 2)
+ *shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
+}
+EXPORT_SYMBOL(csc_set_coeff);
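The final loop above packs two 16-bit table entries into each 32-bit shadow word: the even-indexed coefficient lands in bits 15:0 and the odd-indexed one in bits 31:16, matching the A0/B0, C0/A1, ... pairs laid out in csc.h. A minimal userspace sketch of that packing (illustration only; the coefficient row is the full-range Y2R SDTV entry from the table above):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2 */
		const uint16_t coeff[12] = {
			0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
			0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
		};
		uint32_t shadow[6];

		for (int i = 0; i < 6; i++)
			shadow[i] = ((uint32_t)coeff[2 * i + 1] << 16) |
				    coeff[2 * i];

		/* shadow[0] is the CSC_CSC00 image (b0 << 16 | a0), etc. */
		for (int i = 0; i < 6; i++)
			printf("CSC_CSC0%d = 0x%08x\n", i, shadow[i]);

		return 0;
	}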
+
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name)
+{
+ struct csc_data *csc;
+
+ dev_dbg(&pdev->dev, "csc_create\n");
+
+ csc = devm_kzalloc(&pdev->dev, sizeof(*csc), GFP_KERNEL);
+ if (!csc) {
+ dev_err(&pdev->dev, "couldn't alloc csc_data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ csc->pdev = pdev;
+
+ csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name);
+ if (csc->res == NULL) {
+ dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+ res_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ csc->base = devm_ioremap_resource(&pdev->dev, csc->res);
+ if (IS_ERR(csc->base)) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_CAST(csc->base);
+ }
+
+ return csc;
+}
+EXPORT_SYMBOL(csc_create);
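Taken together, csc_create() and csc_set_coeff() give a mem-to-mem driver everything it needs to fill a CSC shadow block. A probe-time sketch follows (illustration only: the "csc" resource name, the example_probe() wrapper and the six-word shadow array are assumptions, and error paths are trimmed):

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/videodev2.h>

	#include "csc.h"

	static int example_probe(struct platform_device *pdev)
	{
		struct csc_data *csc;
		u32 csc_shadow[6] = { 0 };	/* images of CSC_CSC00..CSC_CSC05 */

		csc = csc_create(pdev, "csc");
		if (IS_ERR(csc))
			return PTR_ERR(csc);

		/* full-range RGB -> BT.601 YUV: picks the graphics-range R2Y table */
		csc_set_coeff(csc, csc_shadow,
			      V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_SMPTE170M);

		/*
		 * csc_shadow[] would then be copied into whatever descriptor
		 * the driver uses to load the CSC MMRs
		 */
		return 0;
	}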
+
+MODULE_DESCRIPTION("TI VIP/VPE Color Space Converter");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
new file mode 100644
index 000000000..024700b15
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_CSC_H
+#define TI_CSC_H
+
+/* VPE color space converter regs */
+#define CSC_CSC00 0x00
+#define CSC_A0_MASK 0x1fff
+#define CSC_A0_SHIFT 0
+#define CSC_B0_MASK 0x1fff
+#define CSC_B0_SHIFT 16
+
+#define CSC_CSC01 0x04
+#define CSC_C0_MASK 0x1fff
+#define CSC_C0_SHIFT 0
+#define CSC_A1_MASK 0x1fff
+#define CSC_A1_SHIFT 16
+
+#define CSC_CSC02 0x08
+#define CSC_B1_MASK 0x1fff
+#define CSC_B1_SHIFT 0
+#define CSC_C1_MASK 0x1fff
+#define CSC_C1_SHIFT 16
+
+#define CSC_CSC03 0x0c
+#define CSC_A2_MASK 0x1fff
+#define CSC_A2_SHIFT 0
+#define CSC_B2_MASK 0x1fff
+#define CSC_B2_SHIFT 16
+
+#define CSC_CSC04 0x10
+#define CSC_C2_MASK 0x1fff
+#define CSC_C2_SHIFT 0
+#define CSC_D0_MASK 0x0fff
+#define CSC_D0_SHIFT 16
+
+#define CSC_CSC05 0x14
+#define CSC_D1_MASK 0x0fff
+#define CSC_D1_SHIFT 0
+#define CSC_D2_MASK 0x0fff
+#define CSC_D2_SHIFT 16
+
+#define CSC_BYPASS (1 << 28)
+
+struct csc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ struct platform_device *pdev;
+};
+
+void csc_dump_regs(struct csc_data *csc);
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+ enum v4l2_colorspace src_colorspace,
+ enum v4l2_colorspace dst_colorspace);
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
new file mode 100644
index 000000000..e9273b713
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -0,0 +1,311 @@
+/*
+ * Scaler library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sc.h"
+#include "sc_coeff.h"
+
+void sc_dump_regs(struct sc_data *sc)
+{
+ struct device *dev = &sc->pdev->dev;
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
+ ioread32(sc->base + CFG_##r))
+
+ dev_dbg(dev, "SC Registers @ %pa:\n", &sc->res->start);
+
+ DUMPREG(SC0);
+ DUMPREG(SC1);
+ DUMPREG(SC2);
+ DUMPREG(SC3);
+ DUMPREG(SC4);
+ DUMPREG(SC5);
+ DUMPREG(SC6);
+ DUMPREG(SC8);
+ DUMPREG(SC9);
+ DUMPREG(SC10);
+ DUMPREG(SC11);
+ DUMPREG(SC12);
+ DUMPREG(SC13);
+ DUMPREG(SC17);
+ DUMPREG(SC18);
+ DUMPREG(SC19);
+ DUMPREG(SC20);
+ DUMPREG(SC21);
+ DUMPREG(SC22);
+ DUMPREG(SC23);
+ DUMPREG(SC24);
+ DUMPREG(SC25);
+
+#undef DUMPREG
+}
+EXPORT_SYMBOL(sc_dump_regs);
+
+/*
+ * set the horizontal scaler coefficients according to the ratio of output to
+ * input widths, after accounting for up to two levels of decimation
+ */
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+ unsigned int dst_w)
+{
+ int sixteenths;
+ int idx;
+ int i, j;
+ u16 *coeff_h = addr;
+ const u16 *cp;
+
+ if (dst_w > src_w) {
+ idx = HS_UP_SCALE;
+ } else {
+ if ((dst_w << 1) < src_w)
+ dst_w <<= 1; /* first level decimation */
+ if ((dst_w << 1) < src_w)
+ dst_w <<= 1; /* second level decimation */
+
+ if (dst_w == src_w) {
+ idx = HS_LE_16_16_SCALE;
+ } else {
+ sixteenths = (dst_w << 4) / src_w;
+ if (sixteenths < 8)
+ sixteenths = 8;
+ idx = HS_LT_9_16_SCALE + sixteenths - 8;
+ }
+ }
+
+ cp = scaler_hs_coeffs[idx];
+
+ for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+ for (j = 0; j < SC_H_NUM_TAPS; j++)
+ *coeff_h++ = *cp++;
+ /*
+ * for each phase, the scaler expects space for 8 coefficients
+		 * in its memory. For the horizontal scaler, we copy the first
+ * 7 coefficients and skip the last slot to move to the next
+ * row to hold coefficients for the next phase
+ */
+ coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
+ }
+
+ sc->load_coeff_h = true;
+}
+EXPORT_SYMBOL(sc_set_hs_coeffs);
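The index picked above follows directly from the width ratio once decimation is factored out: the ratio is expressed in sixteenths and clamped to the [8, 16] span covered by HS_LT_9_16_SCALE..HS_LE_16_16_SCALE. A standalone sketch of that selection (illustration only; the hs_coeff_index() helper is invented for the demo, the enum values are the ones from sc_coeff.h):

	#include <stdio.h>

	/* same ordering as the table indices in sc_coeff.h */
	enum {
		HS_UP_SCALE = 0,
		HS_LT_9_16_SCALE,
		HS_LT_10_16_SCALE,
		HS_LT_11_16_SCALE,
		HS_LT_12_16_SCALE,
		HS_LT_13_16_SCALE,
		HS_LT_14_16_SCALE,
		HS_LT_15_16_SCALE,
		HS_LE_16_16_SCALE,
	};

	static int hs_coeff_index(unsigned int src_w, unsigned int dst_w)
	{
		int sixteenths;

		if (dst_w > src_w)
			return HS_UP_SCALE;

		if ((dst_w << 1) < src_w)
			dst_w <<= 1;		/* first level decimation */
		if ((dst_w << 1) < src_w)
			dst_w <<= 1;		/* second level decimation */

		if (dst_w == src_w)
			return HS_LE_16_16_SCALE;

		sixteenths = (dst_w << 4) / src_w;
		if (sixteenths < 8)
			sixteenths = 8;
		return HS_LT_9_16_SCALE + sixteenths - 8;
	}

	int main(void)
	{
		/* 1920 -> 720: decimated once to 1440, 1440/1920 = 12/16, index 5 */
		printf("index = %d\n", hs_coeff_index(1920, 720));
		return 0;
	}

sc_set_vs_coeffs() below uses the same sixteenths scheme for the vertical tables, just without the decimation step.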
+
+/*
+ * set the vertical scaler coefficients according to the ratio of output to
+ * input heights
+ */
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+ unsigned int dst_h)
+{
+ int sixteenths;
+ int idx;
+ int i, j;
+ u16 *coeff_v = addr;
+ const u16 *cp;
+
+ if (dst_h > src_h) {
+ idx = VS_UP_SCALE;
+ } else if (dst_h == src_h) {
+ idx = VS_1_TO_1_SCALE;
+ } else {
+ sixteenths = (dst_h << 4) / src_h;
+ if (sixteenths < 8)
+ sixteenths = 8;
+ idx = VS_LT_9_16_SCALE + sixteenths - 8;
+ }
+
+ cp = scaler_vs_coeffs[idx];
+
+ for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+ for (j = 0; j < SC_V_NUM_TAPS; j++)
+ *coeff_v++ = *cp++;
+ /*
+ * for the vertical scaler, we copy the first 5 coefficients and
+ * skip the last 3 slots to move to the next row to hold
+ * coefficients for the next phase
+ */
+ coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
+ }
+
+ sc->load_coeff_v = true;
+}
+EXPORT_SYMBOL(sc_set_vs_coeffs);
+
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+ u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+ unsigned int dst_w, unsigned int dst_h)
+{
+ struct device *dev = &sc->pdev->dev;
+ u32 val;
+ int dcm_x, dcm_shift;
+ bool use_rav;
+ unsigned long lltmp;
+ u32 lin_acc_inc, lin_acc_inc_u;
+ u32 col_acc_offset;
+ u16 factor = 0;
+ int row_acc_init_rav = 0, row_acc_init_rav_b = 0;
+ u32 row_acc_inc = 0, row_acc_offset = 0, row_acc_offset_b = 0;
+	/*
+	 * locations of the SC registers in payload memory, relative to the
+	 * first register in the mmr address data block
+	 */
+ u32 *sc_reg9 = sc_reg8 + 1;
+ u32 *sc_reg12 = sc_reg8 + 4;
+ u32 *sc_reg13 = sc_reg8 + 5;
+ u32 *sc_reg24 = sc_reg17 + 7;
+
+ val = sc_reg0[0];
+
+	/* clear all the features (they may get enabled elsewhere later) */
+ val &= ~(CFG_SELFGEN_FID | CFG_TRIM | CFG_ENABLE_SIN2_VER_INTP |
+ CFG_INTERLACE_I | CFG_DCM_4X | CFG_DCM_2X | CFG_AUTO_HS |
+ CFG_ENABLE_EV | CFG_USE_RAV | CFG_INVT_FID | CFG_SC_BYPASS |
+ CFG_INTERLACE_O | CFG_Y_PK_EN | CFG_HP_BYPASS | CFG_LINEAR);
+
+ if (src_w == dst_w && src_h == dst_h) {
+ val |= CFG_SC_BYPASS;
+ sc_reg0[0] = val;
+ return;
+ }
+
+ /* we only support linear scaling for now */
+ val |= CFG_LINEAR;
+
+ /* configure horizontal scaler */
+
+ /* enable 2X or 4X decimation */
+ dcm_x = src_w / dst_w;
+ if (dcm_x > 4) {
+ val |= CFG_DCM_4X;
+ dcm_shift = 2;
+ } else if (dcm_x > 2) {
+ val |= CFG_DCM_2X;
+ dcm_shift = 1;
+ } else {
+ dcm_shift = 0;
+ }
+
+ lltmp = dst_w - 1;
+ lin_acc_inc = div64_u64(((u64)(src_w >> dcm_shift) - 1) << 24, lltmp);
+ lin_acc_inc_u = 0;
+ col_acc_offset = 0;
+
+ dev_dbg(dev, "hs config: src_w = %d, dst_w = %d, decimation = %s, lin_acc_inc = %08x\n",
+ src_w, dst_w, dcm_shift == 2 ? "4x" :
+ (dcm_shift == 1 ? "2x" : "none"), lin_acc_inc);
+
+ /* configure vertical scaler */
+
+ /* use RAV for vertical scaler if vertical downscaling is > 4x */
+ if (dst_h < (src_h >> 2)) {
+ use_rav = true;
+ val |= CFG_USE_RAV;
+ } else {
+ use_rav = false;
+ }
+
+ if (use_rav) {
+ /* use RAV */
+ factor = (u16) ((dst_h << 10) / src_h);
+
+ row_acc_init_rav = factor + ((1 + factor) >> 1);
+ if (row_acc_init_rav >= 1024)
+ row_acc_init_rav -= 1024;
+
+ row_acc_init_rav_b = row_acc_init_rav +
+ (1 + (row_acc_init_rav >> 1)) -
+ (1024 >> 1);
+
+ if (row_acc_init_rav_b < 0) {
+ row_acc_init_rav_b += row_acc_init_rav;
+ row_acc_init_rav *= 2;
+ }
+
+ dev_dbg(dev, "vs config(RAV): src_h = %d, dst_h = %d, factor = %d, acc_init = %08x, acc_init_b = %08x\n",
+ src_h, dst_h, factor, row_acc_init_rav,
+ row_acc_init_rav_b);
+ } else {
+ /* use polyphase */
+ row_acc_inc = ((src_h - 1) << 16) / (dst_h - 1);
+ row_acc_offset = 0;
+ row_acc_offset_b = 0;
+
+		dev_dbg(dev, "vs config(POLY): src_h = %d, dst_h = %d, row_acc_inc = %08x\n",
+ src_h, dst_h, row_acc_inc);
+ }
+
+
+ sc_reg0[0] = val;
+ sc_reg0[1] = row_acc_inc;
+ sc_reg0[2] = row_acc_offset;
+ sc_reg0[3] = row_acc_offset_b;
+
+ sc_reg0[4] = ((lin_acc_inc_u & CFG_LIN_ACC_INC_U_MASK) <<
+ CFG_LIN_ACC_INC_U_SHIFT) | (dst_w << CFG_TAR_W_SHIFT) |
+ (dst_h << CFG_TAR_H_SHIFT);
+
+ sc_reg0[5] = (src_w << CFG_SRC_W_SHIFT) | (src_h << CFG_SRC_H_SHIFT);
+
+ sc_reg0[6] = (row_acc_init_rav_b << CFG_ROW_ACC_INIT_RAV_B_SHIFT) |
+ (row_acc_init_rav << CFG_ROW_ACC_INIT_RAV_SHIFT);
+
+ *sc_reg9 = lin_acc_inc;
+
+ *sc_reg12 = col_acc_offset << CFG_COL_ACC_OFFSET_SHIFT;
+
+ *sc_reg13 = factor;
+
+ *sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
+}
+EXPORT_SYMBOL(sc_config_scaler);
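For the polyphase (non-RAV) paths configured above, the accumulator increments are plain fixed-point ratios: lin_acc_inc carries 24 fractional bits of (src_w - 1)/(dst_w - 1) after decimation, and row_acc_inc carries 16 fractional bits of (src_h - 1)/(dst_h - 1). A standalone sketch for a 1920x1080 -> 1280x720 scale, where no decimation applies (illustration only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int src_w = 1920, dst_w = 1280;
		unsigned int src_h = 1080, dst_h = 720;
		unsigned int dcm_shift = 0;	/* 1920 / 1280 == 1, no 2x/4x decimation */
		uint32_t lin_acc_inc, row_acc_inc;

		lin_acc_inc = (uint32_t)(((uint64_t)((src_w >> dcm_shift) - 1) << 24) /
					 (dst_w - 1));
		row_acc_inc = ((src_h - 1) << 16) / (dst_h - 1);

		/* both come out close to 1.5 in their respective fixed-point formats */
		printf("lin_acc_inc = 0x%08x (%.4f)\n", lin_acc_inc,
		       lin_acc_inc / (double)(1 << 24));
		printf("row_acc_inc = 0x%08x (%.4f)\n", row_acc_inc,
		       row_acc_inc / (double)(1 << 16));
		return 0;
	}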
+
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name)
+{
+ struct sc_data *sc;
+
+ dev_dbg(&pdev->dev, "sc_create\n");
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (!sc) {
+ dev_err(&pdev->dev, "couldn't alloc sc_data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sc->pdev = pdev;
+
+ sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+ if (!sc->res) {
+ dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+ res_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ sc->base = devm_ioremap_resource(&pdev->dev, sc->res);
+ if (IS_ERR(sc->base)) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_CAST(sc->base);
+ }
+
+ return sc;
+}
+EXPORT_SYMBOL(sc_create);
+
+MODULE_DESCRIPTION("TI VIP/VPE Scaler");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti-vpe/sc.h
new file mode 100644
index 000000000..f1fe80b38
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_SC_H
+#define TI_SC_H
+
+/* Scaler regs */
+#define CFG_SC0 0x0
+#define CFG_INTERLACE_O (1 << 0)
+#define CFG_LINEAR (1 << 1)
+#define CFG_SC_BYPASS (1 << 2)
+#define CFG_INVT_FID (1 << 3)
+#define CFG_USE_RAV (1 << 4)
+#define CFG_ENABLE_EV (1 << 5)
+#define CFG_AUTO_HS (1 << 6)
+#define CFG_DCM_2X (1 << 7)
+#define CFG_DCM_4X (1 << 8)
+#define CFG_HP_BYPASS (1 << 9)
+#define CFG_INTERLACE_I (1 << 10)
+#define CFG_ENABLE_SIN2_VER_INTP (1 << 11)
+#define CFG_Y_PK_EN (1 << 14)
+#define CFG_TRIM (1 << 15)
+#define CFG_SELFGEN_FID (1 << 16)
+
+#define CFG_SC1 0x4
+#define CFG_ROW_ACC_INC_MASK 0x07ffffff
+#define CFG_ROW_ACC_INC_SHIFT 0
+
+#define CFG_SC2 0x08
+#define CFG_ROW_ACC_OFFSET_MASK 0x0fffffff
+#define CFG_ROW_ACC_OFFSET_SHIFT 0
+
+#define CFG_SC3 0x0c
+#define CFG_ROW_ACC_OFFSET_B_MASK 0x0fffffff
+#define CFG_ROW_ACC_OFFSET_B_SHIFT 0
+
+#define CFG_SC4 0x10
+#define CFG_TAR_H_MASK 0x07ff
+#define CFG_TAR_H_SHIFT 0
+#define CFG_TAR_W_MASK 0x07ff
+#define CFG_TAR_W_SHIFT 12
+#define CFG_LIN_ACC_INC_U_MASK 0x07
+#define CFG_LIN_ACC_INC_U_SHIFT 24
+#define CFG_NLIN_ACC_INIT_U_MASK 0x07
+#define CFG_NLIN_ACC_INIT_U_SHIFT 28
+
+#define CFG_SC5 0x14
+#define CFG_SRC_H_MASK 0x07ff
+#define CFG_SRC_H_SHIFT 0
+#define CFG_SRC_W_MASK 0x07ff
+#define CFG_SRC_W_SHIFT 12
+#define CFG_NLIN_ACC_INC_U_MASK 0x07
+#define CFG_NLIN_ACC_INC_U_SHIFT 24
+
+#define CFG_SC6 0x18
+#define CFG_ROW_ACC_INIT_RAV_MASK 0x03ff
+#define CFG_ROW_ACC_INIT_RAV_SHIFT 0
+#define CFG_ROW_ACC_INIT_RAV_B_MASK 0x03ff
+#define CFG_ROW_ACC_INIT_RAV_B_SHIFT 10
+
+#define CFG_SC8 0x20
+#define CFG_NLIN_LEFT_MASK 0x07ff
+#define CFG_NLIN_LEFT_SHIFT 0
+#define CFG_NLIN_RIGHT_MASK 0x07ff
+#define CFG_NLIN_RIGHT_SHIFT 12
+
+#define CFG_SC9 0x24
+#define CFG_LIN_ACC_INC CFG_SC9
+
+#define CFG_SC10 0x28
+#define CFG_NLIN_ACC_INIT CFG_SC10
+
+#define CFG_SC11 0x2c
+#define CFG_NLIN_ACC_INC CFG_SC11
+
+#define CFG_SC12 0x30
+#define CFG_COL_ACC_OFFSET_MASK 0x01ffffff
+#define CFG_COL_ACC_OFFSET_SHIFT 0
+
+#define CFG_SC13 0x34
+#define CFG_SC_FACTOR_RAV_MASK 0xff
+#define CFG_SC_FACTOR_RAV_SHIFT 0
+#define CFG_CHROMA_INTP_THR_MASK 0x03ff
+#define CFG_CHROMA_INTP_THR_SHIFT 12
+#define CFG_DELTA_CHROMA_THR_MASK 0x0f
+#define CFG_DELTA_CHROMA_THR_SHIFT 24
+
+#define CFG_SC17 0x44
+#define CFG_EV_THR_MASK 0x03ff
+#define CFG_EV_THR_SHIFT 12
+#define CFG_DELTA_LUMA_THR_MASK 0x0f
+#define CFG_DELTA_LUMA_THR_SHIFT 24
+#define CFG_DELTA_EV_THR_MASK 0x0f
+#define CFG_DELTA_EV_THR_SHIFT 28
+
+#define CFG_SC18 0x48
+#define CFG_HS_FACTOR_MASK 0x03ff
+#define CFG_HS_FACTOR_SHIFT 0
+#define CFG_CONF_DEFAULT_MASK 0x01ff
+#define CFG_CONF_DEFAULT_SHIFT 16
+
+#define CFG_SC19 0x4c
+#define CFG_HPF_COEFF0_MASK 0xff
+#define CFG_HPF_COEFF0_SHIFT 0
+#define CFG_HPF_COEFF1_MASK 0xff
+#define CFG_HPF_COEFF1_SHIFT 8
+#define CFG_HPF_COEFF2_MASK 0xff
+#define CFG_HPF_COEFF2_SHIFT 16
+#define CFG_HPF_COEFF3_MASK 0xff
+#define CFG_HPF_COEFF3_SHIFT 23
+
+#define CFG_SC20 0x50
+#define CFG_HPF_COEFF4_MASK 0xff
+#define CFG_HPF_COEFF4_SHIFT 0
+#define CFG_HPF_COEFF5_MASK 0xff
+#define CFG_HPF_COEFF5_SHIFT 8
+#define CFG_HPF_NORM_SHIFT_MASK 0x07
+#define CFG_HPF_NORM_SHIFT_SHIFT 16
+#define CFG_NL_LIMIT_MASK 0x1ff
+#define CFG_NL_LIMIT_SHIFT 20
+
+#define CFG_SC21 0x54
+#define CFG_NL_LO_THR_MASK 0x01ff
+#define CFG_NL_LO_THR_SHIFT 0
+#define CFG_NL_LO_SLOPE_MASK 0xff
+#define CFG_NL_LO_SLOPE_SHIFT 16
+
+#define CFG_SC22 0x58
+#define CFG_NL_HI_THR_MASK 0x01ff
+#define CFG_NL_HI_THR_SHIFT 0
+#define CFG_NL_HI_SLOPE_SH_MASK 0x07
+#define CFG_NL_HI_SLOPE_SH_SHIFT 16
+
+#define CFG_SC23 0x5c
+#define CFG_GRADIENT_THR_MASK 0x07ff
+#define CFG_GRADIENT_THR_SHIFT 0
+#define CFG_GRADIENT_THR_RANGE_MASK 0x0f
+#define CFG_GRADIENT_THR_RANGE_SHIFT 12
+#define CFG_MIN_GY_THR_MASK 0xff
+#define CFG_MIN_GY_THR_SHIFT 16
+#define CFG_MIN_GY_THR_RANGE_MASK 0x0f
+#define CFG_MIN_GY_THR_RANGE_SHIFT 28
+
+#define CFG_SC24 0x60
+#define CFG_ORG_H_MASK 0x07ff
+#define CFG_ORG_H_SHIFT 0
+#define CFG_ORG_W_MASK 0x07ff
+#define CFG_ORG_W_SHIFT 16
+
+#define CFG_SC25 0x64
+#define CFG_OFF_H_MASK 0x07ff
+#define CFG_OFF_H_SHIFT 0
+#define CFG_OFF_W_MASK 0x07ff
+#define CFG_OFF_W_SHIFT 16
+
+/* number of phases supported by the polyphase scalers */
+#define SC_NUM_PHASES 32
+
+/* number of taps used by horizontal polyphase scaler */
+#define SC_H_NUM_TAPS 7
+
+/* number of taps used by vertical polyphase scaler */
+#define SC_V_NUM_TAPS 5
+
+/* number of taps expected by the scaler in its coefficient memory */
+#define SC_NUM_TAPS_MEM_ALIGN 8
+
+/* Maximum frame width the scaler can handle (in pixels) */
+#define SC_MAX_PIXEL_WIDTH 2047
+
+/* Maximum frame height the scaler can handle (in lines) */
+#define SC_MAX_PIXEL_HEIGHT 2047
+
+/*
+ * coefficient memory size in bytes:
+ * num phases x num sets (luma and chroma) x num taps (aligned) x coeff size
+ */
+#define SC_COEF_SRAM_SIZE (SC_NUM_PHASES * 2 * SC_NUM_TAPS_MEM_ALIGN * 2)
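For reference, with the values above that works out to 32 phases x 2 sets x 8 aligned taps x 2 bytes = 1024 bytes per coefficient block.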
+
+struct sc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ dma_addr_t loaded_coeff_h; /* loaded h coeffs in SC */
+ dma_addr_t loaded_coeff_v; /* loaded v coeffs in SC */
+
+ bool load_coeff_h; /* have new h SC coeffs */
+ bool load_coeff_v; /* have new v SC coeffs */
+
+ struct platform_device *pdev;
+};
+
+void sc_dump_regs(struct sc_data *sc);
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+ unsigned int dst_w);
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+ unsigned int dst_h);
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+ u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+ unsigned int dst_w, unsigned int dst_h);
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name);
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/sc_coeff.h b/drivers/media/platform/ti-vpe/sc_coeff.h
new file mode 100644
index 000000000..5bfa5c03a
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc_coeff.h
@@ -0,0 +1,1342 @@
+/*
+ * VPE SC coefs
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_SC_COEFF_H
+#define __TI_SC_COEFF_H
+
+/* horizontal scaler coefficients */
+enum {
+ HS_UP_SCALE = 0,
+ HS_LT_9_16_SCALE,
+ HS_LT_10_16_SCALE,
+ HS_LT_11_16_SCALE,
+ HS_LT_12_16_SCALE,
+ HS_LT_13_16_SCALE,
+ HS_LT_14_16_SCALE,
+ HS_LT_15_16_SCALE,
+ HS_LE_16_16_SCALE,
+};
+
+static const u16 scaler_hs_coeffs[13][SC_NUM_PHASES * 2 * SC_H_NUM_TAPS] = {
+ [HS_UP_SCALE] = {
+ /* Luma */
+ 0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+ 0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+ 0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+ 0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+ 0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+ 0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+ 0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+ 0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+ 0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+ 0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+ 0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+ 0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+ 0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+ 0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+ 0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+ 0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+ 0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+ 0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+ 0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+ 0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+ 0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+ 0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+ 0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+ 0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+ 0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+ 0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+ 0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+ 0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+ 0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+ 0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+ 0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+ 0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+ /* Chroma */
+ 0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+ 0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+ 0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+ 0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+ 0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+ 0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+ 0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+ 0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+ 0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+ 0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+ 0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+ 0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+ 0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+ 0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+ 0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+ 0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+ 0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+ 0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+ 0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+ 0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+ 0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+ 0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+ 0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+ 0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+ 0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+ 0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+ 0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+ 0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+ 0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+ 0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+ 0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+ 0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+ },
+ [HS_LT_9_16_SCALE] = {
+ /* Luma */
+ 0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+ 0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+ 0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+ 0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+ 0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+ 0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+ 0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+ 0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+ 0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+ 0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+ 0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+ 0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+ 0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+ 0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+ 0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+ 0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+ 0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+ 0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+ 0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+ 0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+ 0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+ 0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+ 0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+ 0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+ 0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+ 0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+ 0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+ 0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+ 0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+ 0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+ 0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+ 0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+ /* Chroma */
+ 0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+ 0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+ 0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+ 0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+ 0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+ 0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+ 0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+ 0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+ 0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+ 0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+ 0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+ 0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+ 0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+ 0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+ 0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+ 0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+ 0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+ 0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+ 0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+ 0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+ 0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+ 0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+ 0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+ 0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+ 0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+ 0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+ 0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+ 0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+ 0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+ 0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+ 0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+ 0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+ },
+ [HS_LT_10_16_SCALE] = {
+ /* Luma */
+ 0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+ 0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+ 0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+ 0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+ 0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+ 0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+ 0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+ 0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+ 0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+ 0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+ 0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+ 0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+ 0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+ 0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+ 0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+ 0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+ 0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+ 0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+ 0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+ 0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+ 0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+ 0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+ 0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+ 0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+ 0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+ 0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+ 0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+ 0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+ 0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+ 0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+ 0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+ 0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+ /* Chroma */
+ 0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+ 0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+ 0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+ 0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+ 0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+ 0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+ 0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+ 0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+ 0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+ 0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+ 0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+ 0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+ 0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+ 0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+ 0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+ 0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+ 0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+ 0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+ 0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+ 0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+ 0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+ 0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+ 0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+ 0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+ 0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+ 0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+ 0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+ 0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+ 0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+ 0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+ 0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+ 0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+ },
+ [HS_LT_11_16_SCALE] = {
+ /* Luma */
+ 0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+ 0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+ 0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+ 0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+ 0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+ 0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+ 0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+ 0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+ 0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+ 0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+ 0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+ 0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+ 0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+ 0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+ 0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+ 0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+ 0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+ 0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+ 0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+ 0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+ 0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+ 0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+ 0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+ 0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+ 0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+ 0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+ 0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+ 0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+ 0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+ 0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+ 0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+ 0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+ /* Chroma */
+ 0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+ 0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+ 0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+ 0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+ 0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+ 0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+ 0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+ 0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+ 0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+ 0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+ 0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+ 0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+ 0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+ 0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+ 0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+ 0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+ 0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+ 0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+ 0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+ 0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+ 0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+ 0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+ 0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+ 0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+ 0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+ 0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+ 0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+ 0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+ 0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+ 0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+ 0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+ 0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+ },
+ [HS_LT_12_16_SCALE] = {
+ /* Luma */
+ 0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+ 0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+ 0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+ 0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+ 0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+ 0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+ 0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+ 0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+ 0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+ 0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+ 0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+ 0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+ 0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+ 0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+ 0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+ 0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+ 0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+ 0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+ 0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+ 0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+ 0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+ 0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+ 0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+ 0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+ 0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+ 0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+ 0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+ 0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+ 0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+ 0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+ 0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+ 0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+ /* Chroma */
+ 0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+ 0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+ 0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+ 0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+ 0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+ 0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+ 0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+ 0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+ 0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+ 0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+ 0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+ 0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+ 0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+ 0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+ 0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+ 0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+ 0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+ 0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+ 0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+ 0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+ 0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+ 0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+ 0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+ 0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+ 0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+ 0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+ 0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+ 0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+ 0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+ 0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+ 0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+ 0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+ },
+ [HS_LT_13_16_SCALE] = {
+ /* Luma */
+ 0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+ 0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+ 0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+ 0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+ 0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+ 0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+ 0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+ 0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+ 0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+ 0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+ 0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+ 0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+ 0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+ 0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+ 0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+ 0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+ 0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+ 0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+ 0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+ 0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+ 0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+ 0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+ 0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+ 0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+ 0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+ 0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+ 0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+ 0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+ 0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+ 0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+ 0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+ 0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+ /* Chroma */
+ 0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+ 0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+ 0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+ 0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+ 0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+ 0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+ 0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+ 0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+ 0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+ 0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+ 0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+ 0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+ 0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+ 0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+ 0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+ 0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+ 0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+ 0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+ 0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+ 0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+ 0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+ 0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+ 0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+ 0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+ 0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+ 0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+ 0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+ 0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+ 0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+ 0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+ 0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+ 0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+ },
+ [HS_LT_14_16_SCALE] = {
+ /* Luma */
+ 0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+ 0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+ 0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+ 0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+ 0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+ 0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+ 0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+ 0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+ 0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+ 0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+ 0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+ 0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+ 0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+ 0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+ 0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+ 0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+ 0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+ 0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+ 0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+ 0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+ 0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+ 0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+ 0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+ 0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+ 0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+ 0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+ 0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+ 0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+ 0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+ 0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+ 0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+ 0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+ /* Chroma */
+ 0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+ 0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+ 0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+ 0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+ 0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+ 0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+ 0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+ 0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+ 0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+ 0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+ 0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+ 0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+ 0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+ 0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+ 0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+ 0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+ 0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+ 0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+ 0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+ 0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+ 0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+ 0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+ 0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+ 0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+ 0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+ 0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+ 0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+ 0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+ 0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+ 0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+ 0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+ 0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+ },
+ [HS_LT_15_16_SCALE] = {
+ /* Luma */
+ 0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+ 0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+ 0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+ 0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+ 0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+ 0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+ 0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+ 0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+ 0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+ 0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+ 0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+ 0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+ 0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+ 0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+ 0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+ 0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+ 0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+ 0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+ 0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+ 0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+ 0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+ 0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+ 0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+ 0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+ 0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+ 0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+ 0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+ 0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+ 0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+ 0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+ 0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+ 0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+ /* Chroma */
+ 0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+ 0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+ 0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+ 0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+ 0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+ 0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+ 0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+ 0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+ 0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+ 0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+ 0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+ 0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+ 0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+ 0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+ 0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+ 0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+ 0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+ 0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+ 0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+ 0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+ 0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+ 0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+ 0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+ 0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+ 0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+ 0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+ 0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+ 0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+ 0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+ 0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+ 0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+ 0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+ },
+ [HS_LE_16_16_SCALE] = {
+ /* Luma */
+ 0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+ 0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+ 0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+ 0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+ 0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+ 0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+ 0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+ 0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+ 0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+ 0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+ 0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+ 0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+ 0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+ 0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+ 0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+ 0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+ 0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+ 0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+ 0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+ 0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+ 0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+ 0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+ 0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+ 0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+ 0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+ 0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+ 0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+ 0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+ 0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+ 0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+ 0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+ 0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+ /* Chroma */
+ 0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+ 0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+ 0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+ 0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+ 0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+ 0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+ 0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+ 0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+ 0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+ 0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+ 0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+ 0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+ 0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+ 0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+ 0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+ 0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+ 0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+ 0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+ 0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+ 0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+ 0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+ 0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+ 0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+ 0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+ 0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+ 0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+ 0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+ 0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+ 0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+ 0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+ 0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+ 0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+ },
+};
+
+/* vertical scaler coefficients */
+enum {
+ VS_UP_SCALE = 0,
+ VS_LT_9_16_SCALE,
+ VS_LT_10_16_SCALE,
+ VS_LT_11_16_SCALE,
+ VS_LT_12_16_SCALE,
+ VS_LT_13_16_SCALE,
+ VS_LT_14_16_SCALE,
+ VS_LT_15_16_SCALE,
+ VS_LT_16_16_SCALE,
+ VS_1_TO_1_SCALE,
+};
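+
+/*
+ * A minimal sketch (not part of the driver; the thresholds are assumptions
+ * made only to illustrate the naming above) of mapping a vertical scaling
+ * ratio dst_h/src_h to one of the coefficient sets: ratios above 1:1 use
+ * the up-scale set, exact 1:1 uses the pass-through set, and down-scale
+ * ratios pick the first "less than n/16" bucket they fall in.
+ *
+ *	static int pick_vs_coeff_set(unsigned int src_h, unsigned int dst_h)
+ *	{
+ *		unsigned int set, n;
+ *
+ *		if (dst_h == src_h)
+ *			return VS_1_TO_1_SCALE;
+ *		if (dst_h > src_h)
+ *			return VS_UP_SCALE;
+ *		for (set = VS_LT_9_16_SCALE, n = 9; n < 16; set++, n++)
+ *			if (dst_h * 16 < src_h * n)
+ *				return set;
+ *		return VS_LT_16_16_SCALE;
+ *	}
+ */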
+
+static const u16 scaler_vs_coeffs[15][SC_NUM_PHASES * 2 * SC_V_NUM_TAPS] = {
+ [VS_UP_SCALE] = {
+ /* Luma */
+ 0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ /* Chroma */
+ 0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ },
+ [VS_LT_9_16_SCALE] = {
+ /* Luma */
+ 0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+ 0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+ 0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+ 0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+ 0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+ 0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+ 0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+ 0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+ 0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+ 0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+ 0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+ 0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+ 0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+ 0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+ 0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+ 0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+ 0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+ 0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+ 0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+ 0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+ 0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+ 0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+ 0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+ 0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+ 0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+ 0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+ 0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+ 0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+ 0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+ 0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+ 0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+ 0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+ /* Chroma */
+ 0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+ 0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+ 0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+ 0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+ 0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+ 0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+ 0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+ 0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+ 0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+ 0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+ 0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+ 0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+ 0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+ 0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+ 0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+ 0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+ 0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+ 0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+ 0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+ 0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+ 0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+ 0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+ 0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+ 0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+ 0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+ 0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+ 0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+ 0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+ 0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+ 0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+ 0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+ 0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+ },
+ [VS_LT_10_16_SCALE] = {
+ /* Luma */
+ 0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+ 0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+ 0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+ 0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+ 0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+ 0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+ 0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+ 0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+ 0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+ 0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+ 0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+ 0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+ 0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+ 0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+ 0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+ 0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+ 0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+ 0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+ 0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+ 0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+ 0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+ 0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+ 0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+ 0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+ 0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+ 0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+ 0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+ 0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+ 0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+ 0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+ 0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+ 0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+ /* Chroma */
+ 0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+ 0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+ 0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+ 0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+ 0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+ 0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+ 0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+ 0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+ 0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+ 0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+ 0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+ 0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+ 0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+ 0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+ 0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+ 0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+ 0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+ 0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+ 0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+ 0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+ 0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+ 0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+ 0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+ 0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+ 0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+ 0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+ 0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+ 0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+ 0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+ 0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+ 0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+ 0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+ },
+ [VS_LT_11_16_SCALE] = {
+ /* Luma */
+ 0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+ 0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+ 0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+ 0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+ 0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+ 0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+ 0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+ 0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+ 0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+ 0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+ 0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+ 0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+ 0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+ 0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+ 0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+ 0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+ 0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+ 0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+ 0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+ 0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+ 0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+ 0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+ 0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+ 0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+ 0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+ 0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+ 0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+ 0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+ 0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+ 0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+ 0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+ 0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+ /* Chroma */
+ 0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+ 0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+ 0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+ 0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+ 0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+ 0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+ 0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+ 0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+ 0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+ 0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+ 0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+ 0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+ 0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+ 0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+ 0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+ 0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+ 0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+ 0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+ 0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+ 0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+ 0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+ 0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+ 0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+ 0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+ 0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+ 0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+ 0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+ 0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+ 0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+ 0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+ 0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+ 0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+ },
+ [VS_LT_12_16_SCALE] = {
+ /* Luma */
+ 0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+ 0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+ 0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+ 0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+ 0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+ 0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+ 0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+ 0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+ 0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+ 0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+ 0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+ 0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+ 0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+ 0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+ 0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+ 0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+ 0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+ 0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+ 0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+ 0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+ 0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+ 0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+ 0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+ 0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+ 0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+ 0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+ 0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+ 0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+ 0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+ 0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+ 0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+ 0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+ /* Chroma */
+ 0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+ 0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+ 0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+ 0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+ 0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+ 0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+ 0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+ 0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+ 0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+ 0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+ 0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+ 0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+ 0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+ 0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+ 0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+ 0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+ 0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+ 0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+ 0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+ 0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+ 0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+ 0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+ 0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+ 0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+ 0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+ 0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+ 0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+ 0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+ 0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+ 0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+ 0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+ 0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+ },
+ [VS_LT_13_16_SCALE] = {
+ /* Luma */
+ 0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+ 0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+ 0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+ 0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+ 0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+ 0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+ 0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+ 0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+ 0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+ 0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+ 0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+ 0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+ 0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+ 0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+ 0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+ 0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+ 0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+ 0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+ 0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+ 0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+ 0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+ 0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+ 0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+ 0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+ 0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+ 0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+ 0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+ 0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+ 0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+ 0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+ 0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+ 0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+ /* Chroma */
+ 0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+ 0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+ 0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+ 0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+ 0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+ 0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+ 0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+ 0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+ 0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+ 0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+ 0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+ 0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+ 0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+ 0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+ 0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+ 0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+ 0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+ 0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+ 0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+ 0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+ 0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+ 0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+ 0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+ 0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+ 0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+ 0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+ 0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+ 0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+ 0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+ 0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+ 0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+ 0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+ },
+ [VS_LT_14_16_SCALE] = {
+ /* Luma */
+ 0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+ 0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+ 0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+ 0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+ 0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+ 0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+ 0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+ 0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+ 0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+ 0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+ 0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+ 0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+ 0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+ 0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+ 0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+ 0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+ 0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+ 0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+ 0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+ 0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+ 0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+ 0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+ 0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+ 0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+ 0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+ 0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+ 0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+ 0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+ 0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+ 0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+ 0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+ 0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+ /* Chroma */
+ 0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+ 0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+ 0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+ 0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+ 0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+ 0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+ 0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+ 0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+ 0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+ 0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+ 0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+ 0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+ 0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+ 0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+ 0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+ 0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+ 0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+ 0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+ 0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+ 0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+ 0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+ 0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+ 0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+ 0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+ 0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+ 0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+ 0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+ 0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+ 0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+ 0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+ 0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+ 0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+ },
+ [VS_LT_15_16_SCALE] = {
+ /* Luma */
+ 0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+ 0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+ 0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+ 0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+ 0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+ 0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+ 0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+ 0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+ 0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+ 0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+ 0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+ 0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+ 0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+ 0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+ 0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+ 0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+ 0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+ 0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+ 0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+ 0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+ 0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+ 0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+ 0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+ 0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+ 0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+ 0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+ 0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+ 0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+ 0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+ 0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+ 0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+ 0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+ /* Chroma */
+ 0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+ 0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+ 0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+ 0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+ 0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+ 0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+ 0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+ 0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+ 0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+ 0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+ 0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+ 0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+ 0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+ 0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+ 0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+ 0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+ 0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+ 0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+ 0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+ 0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+ 0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+ 0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+ 0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+ 0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+ 0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+ 0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+ 0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+ 0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+ 0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+ 0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+ 0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+ 0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+ },
+ [VS_LT_16_16_SCALE] = {
+ /* Luma */
+ 0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+ 0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+ 0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+ 0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+ 0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+ 0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+ 0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+ 0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+ 0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+ 0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+ 0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+ 0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+ 0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+ 0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+ 0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+ 0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+ 0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+ 0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+ 0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+ 0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+ 0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+ 0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+ 0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+ 0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+ 0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+ 0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+ 0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+ 0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+ 0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+ 0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+ 0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+ 0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+ /* Chroma */
+ 0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+ 0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+ 0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+ 0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+ 0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+ 0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+ 0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+ 0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+ 0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+ 0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+ 0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+ 0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+ 0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+ 0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+ 0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+ 0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+ 0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+ 0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+ 0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+ 0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+ 0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+ 0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+ 0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+ 0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+ 0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+ 0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+ 0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+ 0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+ 0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+ 0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+ 0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+ 0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+ },
+ [VS_1_TO_1_SCALE] = {
+ /* Luma */
+ 0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ /* Chroma */
+ 0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ },
+};
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
new file mode 100644
index 000000000..e2cf2b90e
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -0,0 +1,1171 @@
+/*
+ * VPDMA helper library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include "vpdma.h"
+#include "vpdma_priv.h"
+
+#define VPDMA_FIRMWARE "vpdma-1b8.bin"
+
+const struct vpdma_data_format vpdma_yuv_fmts[] = {
+ [VPDMA_DATA_FMT_Y444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_Y444,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_Y422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_Y422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_Y420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_Y420,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_C444,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_C422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_C420,
+ .depth = 4,
+ },
+ [VPDMA_DATA_FMT_YCR422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_YCR422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_YC444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_YC444,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_CRY422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CRY422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_CBY422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_YCB422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_YCB422,
+ .depth = 16,
+ },
+};
+EXPORT_SYMBOL(vpdma_yuv_fmts);
+
+const struct vpdma_data_format vpdma_rgb_fmts[] = {
+ [VPDMA_DATA_FMT_RGB565] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_RGB16_565,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB16_1555] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ARGB_1555,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ARGB_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_RGBA16_5551] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_RGBA_5551,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_RGBA16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_RGBA_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ARGB24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_RGB24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_RGB24_888,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_ARGB32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ARGB32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_RGBA24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_RGBA24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_RGBA32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_RGBA32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_BGR565] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_BGR16_565,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR16_1555] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ABGR_1555,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ABGR_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_BGRA16_5551] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_BGRA_5551,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_BGRA16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_BGRA_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ABGR24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_BGR24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_BGR24_888,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_ABGR32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_ABGR32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_BGRA24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_BGRA24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_BGRA32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
+ .data_type = DATA_TYPE_BGRA32_8888,
+ .depth = 32,
+ },
+};
+EXPORT_SYMBOL(vpdma_rgb_fmts);
+
+/*
+ * To handle RAW formats we re-use the CBY422 vpdma data type, so that
+ * the VPDMA re-orders the incoming bytes; the parser assumes that the
+ * first byte presented on the bus is the MSB of a 2-byte value.
+ * RAW8 handles 1 to 8 bits per sample,
+ * RAW16 handles 9 to 16 bits per sample.
+ */
+const struct vpdma_data_format vpdma_raw_fmts[] = {
+ [VPDMA_DATA_FMT_RAW8] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_RAW16] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 16,
+ },
+};
+EXPORT_SYMBOL(vpdma_raw_fmts);
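+
+/*
+ * A minimal sketch (illustration only) of picking one of the RAW pseudo
+ * formats above from the sample width of the connected source, per the
+ * rule stated above; 'bpp' is an assumed variable holding the bits per
+ * sample:
+ *
+ *	const struct vpdma_data_format *fmt;
+ *
+ *	fmt = (bpp <= 8) ? &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8] :
+ *			   &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW16];
+ */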
+
+const struct vpdma_data_format vpdma_misc_fmts[] = {
+ [VPDMA_DATA_FMT_MV] = {
+ .type = VPDMA_DATA_FMT_TYPE_MISC,
+ .data_type = DATA_TYPE_MV,
+ .depth = 4,
+ },
+};
+EXPORT_SYMBOL(vpdma_misc_fmts);
+
+struct vpdma_channel_info {
+ int num; /* VPDMA channel number */
+ int cstat_offset; /* client CSTAT register offset */
+};
+
+static const struct vpdma_channel_info chan_info[] = {
+ [VPE_CHAN_LUMA1_IN] = {
+ .num = VPE_CHAN_NUM_LUMA1_IN,
+ .cstat_offset = VPDMA_DEI_LUMA1_CSTAT,
+ },
+ [VPE_CHAN_CHROMA1_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA1_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA1_CSTAT,
+ },
+ [VPE_CHAN_LUMA2_IN] = {
+ .num = VPE_CHAN_NUM_LUMA2_IN,
+ .cstat_offset = VPDMA_DEI_LUMA2_CSTAT,
+ },
+ [VPE_CHAN_CHROMA2_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA2_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA2_CSTAT,
+ },
+ [VPE_CHAN_LUMA3_IN] = {
+ .num = VPE_CHAN_NUM_LUMA3_IN,
+ .cstat_offset = VPDMA_DEI_LUMA3_CSTAT,
+ },
+ [VPE_CHAN_CHROMA3_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA3_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA3_CSTAT,
+ },
+ [VPE_CHAN_MV_IN] = {
+ .num = VPE_CHAN_NUM_MV_IN,
+ .cstat_offset = VPDMA_DEI_MV_IN_CSTAT,
+ },
+ [VPE_CHAN_MV_OUT] = {
+ .num = VPE_CHAN_NUM_MV_OUT,
+ .cstat_offset = VPDMA_DEI_MV_OUT_CSTAT,
+ },
+ [VPE_CHAN_LUMA_OUT] = {
+ .num = VPE_CHAN_NUM_LUMA_OUT,
+ .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
+ },
+ [VPE_CHAN_CHROMA_OUT] = {
+ .num = VPE_CHAN_NUM_CHROMA_OUT,
+ .cstat_offset = VPDMA_VIP_UP_UV_CSTAT,
+ },
+ [VPE_CHAN_RGB_OUT] = {
+ .num = VPE_CHAN_NUM_RGB_OUT,
+ .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
+ },
+};
+
+static u32 read_reg(struct vpdma_data *vpdma, int offset)
+{
+ return ioread32(vpdma->base + offset);
+}
+
+static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
+{
+ iowrite32(value, vpdma->base + offset);
+}
+
+static int read_field_reg(struct vpdma_data *vpdma, int offset,
+ u32 mask, int shift)
+{
+ return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
+}
+
+static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
+ u32 mask, int shift)
+{
+ u32 val = read_reg(vpdma, offset);
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+
+ write_reg(vpdma, offset, val);
+}
+
+void vpdma_dump_regs(struct vpdma_data *vpdma)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))
+
+ dev_dbg(dev, "VPDMA Registers:\n");
+
+ DUMPREG(PID);
+ DUMPREG(LIST_ADDR);
+ DUMPREG(LIST_ATTR);
+ DUMPREG(LIST_STAT_SYNC);
+ DUMPREG(BG_RGB);
+ DUMPREG(BG_YUV);
+ DUMPREG(SETUP);
+ DUMPREG(MAX_SIZE1);
+ DUMPREG(MAX_SIZE2);
+ DUMPREG(MAX_SIZE3);
+
+ /*
+ * dumping registers of only group0 and group3, because VPE channels
+ * lie within group0 and group3 registers
+ */
+ DUMPREG(INT_CHAN_STAT(0));
+ DUMPREG(INT_CHAN_MASK(0));
+ DUMPREG(INT_CHAN_STAT(3));
+ DUMPREG(INT_CHAN_MASK(3));
+ DUMPREG(INT_CLIENT0_STAT);
+ DUMPREG(INT_CLIENT0_MASK);
+ DUMPREG(INT_CLIENT1_STAT);
+ DUMPREG(INT_CLIENT1_MASK);
+ DUMPREG(INT_LIST0_STAT);
+ DUMPREG(INT_LIST0_MASK);
+
+ /*
+ * these are registers specific to VPE clients; we can make this
+ * function dump client registers specific to VPE or VIP based on
+ * who is using it
+ */
+ DUMPREG(DEI_CHROMA1_CSTAT);
+ DUMPREG(DEI_LUMA1_CSTAT);
+ DUMPREG(DEI_CHROMA2_CSTAT);
+ DUMPREG(DEI_LUMA2_CSTAT);
+ DUMPREG(DEI_CHROMA3_CSTAT);
+ DUMPREG(DEI_LUMA3_CSTAT);
+ DUMPREG(DEI_MV_IN_CSTAT);
+ DUMPREG(DEI_MV_OUT_CSTAT);
+ DUMPREG(VIP_UP_Y_CSTAT);
+ DUMPREG(VIP_UP_UV_CSTAT);
+ DUMPREG(VPI_CTL_CSTAT);
+}
+EXPORT_SYMBOL(vpdma_dump_regs);
+
+/*
+ * Allocate a DMA buffer
+ */
+int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
+{
+ buf->size = size;
+ buf->mapped = false;
+ buf->addr = kzalloc(size, GFP_KERNEL);
+ if (!buf->addr)
+ return -ENOMEM;
+
+ WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_alloc_desc_buf);
+
+void vpdma_free_desc_buf(struct vpdma_buf *buf)
+{
+ WARN_ON(buf->mapped);
+ kfree(buf->addr);
+ buf->addr = NULL;
+ buf->size = 0;
+}
+EXPORT_SYMBOL(vpdma_free_desc_buf);
+
+/*
+ * map descriptor/payload DMA buffer, enabling DMA access
+ */
+int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+ WARN_ON(buf->mapped);
+ buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, buf->dma_addr)) {
+ dev_err(dev, "failed to map buffer\n");
+ return -EINVAL;
+ }
+
+ buf->mapped = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_map_desc_buf);
+
+/*
+ * unmap descriptor/payload DMA buffer, disabling DMA access and
+ * allowing the main processor to access the data
+ */
+void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+ if (buf->mapped)
+ dma_unmap_single(dev, buf->dma_addr, buf->size,
+ DMA_BIDIRECTIONAL);
+
+ buf->mapped = false;
+}
+EXPORT_SYMBOL(vpdma_unmap_desc_buf);
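+
+/*
+ * Typical buffer lifecycle (a sketch; assumes the caller holds a valid
+ * struct vpdma_data *vpdma and that payload/payload_size stand for
+ * whatever data the caller wants VPDMA to see): allocate, fill from the
+ * CPU, map before handing it to the hardware, unmap before the CPU
+ * touches or frees it again.
+ *
+ *	struct vpdma_buf buf;
+ *
+ *	if (vpdma_alloc_desc_buf(&buf, payload_size))
+ *		return -ENOMEM;
+ *	memcpy(buf.addr, payload, payload_size);
+ *	if (vpdma_map_desc_buf(vpdma, &buf)) {
+ *		vpdma_free_desc_buf(&buf);
+ *		return -EINVAL;
+ *	}
+ *	// hardware consumes buf.dma_addr here
+ *	vpdma_unmap_desc_buf(vpdma, &buf);
+ *	vpdma_free_desc_buf(&buf);
+ */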
+
+/*
+ * Clean up all pending descriptors of a list.
+ * First, stop the list currently being processed; if the VPDMA was busy,
+ * this step makes it accept newly posted lists again. Then, to clean up
+ * the internal FSM, post an abort control descriptor for each of the
+ * @size channels in the @channels array.
+ */
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+ int *channels, int size)
+{
+ struct vpdma_desc_list abort_list;
+ int i, ret, timeout = 500;
+
+ write_reg(vpdma, VPDMA_LIST_ATTR,
+ (list_num << VPDMA_LIST_NUM_SHFT) |
+ (1 << VPDMA_LIST_STOP_SHFT));
+
+ if (size <= 0 || !channels)
+ return 0;
+
+ ret = vpdma_create_desc_list(&abort_list,
+ size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ vpdma_add_abort_channel_ctd(&abort_list, channels[i]);
+
+ ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
+ if (ret)
+ return ret;
+ ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
+ if (ret)
+ return ret;
+
+ while (vpdma_list_busy(vpdma, list_num) && --timeout)
+ ;
+
+ if (timeout == 0) {
+ dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
+ return -EBUSY;
+ }
+
+ vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
+ vpdma_free_desc_buf(&abort_list.buf);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_list_cleanup);
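+
+/*
+ * Usage sketch (the channel set shown is only an example; pass whatever
+ * subset of channels is in use): stop list 0 and abort any transfer in
+ * flight on the luma and chroma input channels.
+ *
+ *	static int channels[] = {
+ *		VPE_CHAN_NUM_LUMA1_IN, VPE_CHAN_NUM_CHROMA1_IN,
+ *	};
+ *
+ *	ret = vpdma_list_cleanup(vpdma, 0, channels, ARRAY_SIZE(channels));
+ */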
+
+/*
+ * create a descriptor list; the user of this list will append configuration,
+ * control and data descriptors to it, and this list will then be submitted to
+ * VPDMA. VPDMA's list parser will go through each descriptor and perform the
+ * required DMA operations
+ */
+int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
+{
+ int r;
+
+ r = vpdma_alloc_desc_buf(&list->buf, size);
+ if (r)
+ return r;
+
+ list->next = list->buf.addr;
+
+ list->type = type;
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_create_desc_list);
+
+/*
+ * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
+ * to allow new descriptors to be added to the list.
+ */
+void vpdma_reset_desc_list(struct vpdma_desc_list *list)
+{
+ list->next = list->buf.addr;
+}
+EXPORT_SYMBOL(vpdma_reset_desc_list);
+
+/*
+ * free the buffer allocated for the VPDMA descriptor list; this should be
+ * called when the user doesn't want to use VPDMA any more.
+ */
+void vpdma_free_desc_list(struct vpdma_desc_list *list)
+{
+ vpdma_free_desc_buf(&list->buf);
+
+ list->next = NULL;
+}
+EXPORT_SYMBOL(vpdma_free_desc_list);
+
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
+{
+ return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
+}
+EXPORT_SYMBOL(vpdma_list_busy);
+
+/*
+ * submit a list of DMA descriptors to the VPE VPDMA; this does not wait
+ * for completion
+ */
+int vpdma_submit_descs(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, int list_num)
+{
+ int list_size;
+ unsigned long flags;
+
+ if (vpdma_list_busy(vpdma, list_num))
+ return -EBUSY;
+
+ /* 16-byte granularity */
+ list_size = (list->next - list->buf.addr) >> 4;
+
+ spin_lock_irqsave(&vpdma->lock, flags);
+ write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
+
+ write_reg(vpdma, VPDMA_LIST_ATTR,
+ (list_num << VPDMA_LIST_NUM_SHFT) |
+ (list->type << VPDMA_LIST_TYPE_SHFT) |
+ list_size);
+ spin_unlock_irqrestore(&vpdma->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_submit_descs);
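+
+/*
+ * A sketch of the usual flow (error handling trimmed; the geometry,
+ * format, dma address and list size used below are placeholders): build
+ * a list, map its buffer, submit it, and wait for the hardware to finish
+ * with it before reusing or freeing it. A real driver would typically
+ * wait for the list-complete interrupt rather than polling.
+ *
+ *	struct vpdma_desc_list list;
+ *
+ *	vpdma_create_desc_list(&list, SZ_4K, VPDMA_LIST_TYPE_NORMAL);
+ *	vpdma_add_in_dtd(&list, width, stride, &crop, fmt, dma_addr,
+ *			 VPE_CHAN_LUMA1_IN, 0, 0, width, height, 0, 0);
+ *	vpdma_map_desc_buf(vpdma, &list.buf);
+ *	vpdma_submit_descs(vpdma, &list, 0);
+ *	while (vpdma_list_busy(vpdma, 0))
+ *		cpu_relax();
+ *	vpdma_unmap_desc_buf(vpdma, &list.buf);
+ *	vpdma_free_desc_list(&list);
+ */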
+
+static void dump_dtd(struct vpdma_dtd *dtd);
+
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx)
+{
+ struct vpdma_dtd *dtd = list->buf.addr;
+ dma_addr_t write_desc_addr;
+ int offset;
+
+ dtd += idx;
+ vpdma_unmap_desc_buf(vpdma, &list->buf);
+
+ dtd->start_addr = dma_addr;
+
+ /* Calculate write address from the offset of write_dtd from start
+ * of the list->buf
+ */
+ offset = (void *)write_dtd - list->buf.addr;
+ write_desc_addr = list->buf.dma_addr + offset;
+
+ if (drop)
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 1, 0);
+ else
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 0, 0);
+
+ vpdma_map_desc_buf(vpdma, &list->buf);
+
+ dump_dtd(dtd);
+}
+EXPORT_SYMBOL(vpdma_update_dma_addr);
+
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+ u32 width, u32 height)
+{
+ if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
+ reg_addr != VPDMA_MAX_SIZE3)
+ reg_addr = VPDMA_MAX_SIZE1;
+
+ write_field_reg(vpdma, reg_addr, width - 1,
+ VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);
+
+ write_field_reg(vpdma, reg_addr, height - 1,
+ VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
+
+}
+EXPORT_SYMBOL(vpdma_set_max_size);
+
+static void dump_cfd(struct vpdma_cfd *cfd)
+{
+ int class;
+
+ class = cfd_get_class(cfd);
+
+ pr_debug("config descriptor of payload class: %s\n",
+ class == CFD_CLS_BLOCK ? "simple block" :
+ "address data block");
+
+ if (class == CFD_CLS_BLOCK)
+ pr_debug("word0: dst_addr_offset = 0x%08x\n",
+ cfd->dest_addr_offset);
+
+ if (class == CFD_CLS_BLOCK)
+ pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
+
+ pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
+
+ pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
+ cfd_get_pkt_type(cfd),
+ cfd_get_direct(cfd), class, cfd_get_dest(cfd),
+ cfd_get_payload_len(cfd));
+}
+
+/*
+ * append a configuration descriptor to the given descriptor list, where the
+ * payload is in the form of a simple data block specified in the descriptor
+ * header; this is used to upload scaler coefficients to the scaler module
+ */
+void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *blk, u32 dest_offset)
+{
+ struct vpdma_cfd *cfd;
+ int len = blk->size;
+
+ WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);
+
+ cfd = list->next;
+ WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
+
+ cfd->dest_addr_offset = dest_offset;
+ cfd->block_len = len;
+ cfd->payload_addr = (u32) blk->dma_addr;
+ cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
+ client, len >> 4);
+
+ list->next = cfd + 1;
+
+ dump_cfd(cfd);
+}
+EXPORT_SYMBOL(vpdma_add_cfd_block);
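+
+/*
+ * A sketch of uploading a data block, e.g. scaler coefficients, via a
+ * configuration descriptor (the client id, destination offset and the
+ * source of the coefficients are placeholders for whatever the calling
+ * module defines):
+ *
+ *	struct vpdma_buf coeff_buf;
+ *
+ *	vpdma_alloc_desc_buf(&coeff_buf, coeff_size);
+ *	memcpy(coeff_buf.addr, coeffs, coeff_size);
+ *	vpdma_map_desc_buf(vpdma, &coeff_buf);
+ *	vpdma_add_cfd_block(&list, sc_client, &coeff_buf, dest_offset);
+ */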
+
+/*
+ * append a configuration descriptor to the given descriptor list, where the
+ * payload is in the address data block format; this is used to configure a
+ * discontiguous set of MMRs
+ */
+void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *adb)
+{
+ struct vpdma_cfd *cfd;
+ unsigned int len = adb->size;
+
+ WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
+ WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);
+
+ cfd = list->next;
+ BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
+
+ cfd->w0 = 0;
+ cfd->w1 = 0;
+ cfd->payload_addr = (u32) adb->dma_addr;
+ cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
+ client, len >> 4);
+
+ list->next = cfd + 1;
+
+ dump_cfd(cfd);
+};
+EXPORT_SYMBOL(vpdma_add_cfd_adb);
+
+/*
+ * the control descriptor format changes based on the type of control
+ * descriptor; we only use 'sync on channel' control descriptors for now,
+ * so assume that type
+ */
+static void dump_ctd(struct vpdma_ctd *ctd)
+{
+ pr_debug("control descriptor\n");
+
+ pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
+ ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
+}
+
+/*
+ * append a 'sync on channel' type control descriptor to the given descriptor
+ * list; this descriptor stalls the VPDMA list until DMA completes on the
+ * specified channel
+ */
+void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
+ enum vpdma_channel chan)
+{
+ struct vpdma_ctd *ctd;
+
+ ctd = list->next;
+ WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
+
+ ctd->w0 = 0;
+ ctd->w1 = 0;
+ ctd->w2 = 0;
+ ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
+ CTD_TYPE_SYNC_ON_CHANNEL);
+
+ list->next = ctd + 1;
+
+ dump_ctd(ctd);
+}
+EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
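+
+/*
+ * Usage sketch (the channel is illustrative): descriptors appended to
+ * the list after this call are not executed until the luma input
+ * transfer has completed.
+ *
+ *	vpdma_add_sync_on_channel_ctd(&list, VPE_CHAN_LUMA1_IN);
+ */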
+
+/*
+ * append an 'abort_channel' type control descriptor to the given descriptor
+ * list; this descriptor aborts any DMA transaction in progress on the
+ * specified channel
+ */
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+ int chan_num)
+{
+ struct vpdma_ctd *ctd;
+
+ ctd = list->next;
+ WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
+
+ ctd->w0 = 0;
+ ctd->w1 = 0;
+ ctd->w2 = 0;
+ ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
+ CTD_TYPE_ABORT_CHANNEL);
+
+ list->next = ctd + 1;
+
+ dump_ctd(ctd);
+}
+EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
+
+static void dump_dtd(struct vpdma_dtd *dtd)
+{
+ int dir, chan;
+
+ dir = dtd_get_dir(dtd);
+ chan = dtd_get_chan(dtd);
+
+ pr_debug("%s data transfer descriptor for channel %d\n",
+ dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);
+
+ pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
+ dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
+ dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
+ dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word1: line_length = %d, xfer_height = %d\n",
+ dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
+
+ pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
+
+ pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
+ dtd_get_pkt_type(dtd),
+ dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
+ dtd_get_next_chan(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word4: frame_width = %d, frame_height = %d\n",
+ dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
+ else
+ pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
+ dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
+ dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word5: hor_start = %d, ver_start = %d\n",
+ dtd_get_h_start(dtd), dtd_get_v_start(dtd));
+ else
+ pr_debug("word5: max_width %d, max_height %d\n",
+ dtd_get_max_width(dtd), dtd_get_max_height(dtd));
+
+ pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
+ pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
+}
+
+/*
+ * append an outbound data transfer descriptor to the given descriptor list;
+ * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
+ *
+ * @list: vpdma desc list to which we add this descriptor
+ * @width: width of the image in pixels in memory
+ * @stride: line stride of the output image buffer in memory, in bytes
+ * @c_rect: compose params of output image
+ * @fmt: vpdma data format of the buffer
+ * @dma_addr: dma address as seen by VPDMA
+ * @max_w: enum for maximum width of data transfer
+ * @max_h: enum for maximum height of data transfer
+ * @chan: VPDMA channel
+ * @flags: VPDMA flags to configure some descriptor fields
+ */
+void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
+ int stride, const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, enum vpdma_channel chan, u32 flags)
+{
+ vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
+ max_w, max_h, chan_info[chan].num, flags);
+}
+EXPORT_SYMBOL(vpdma_add_out_dtd);
+
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ int stride, const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, int raw_vpdma_chan, u32 flags)
+{
+ int priority = 0;
+ int field = 0;
+ int notify = 1;
+ int channel, next_chan;
+ struct v4l2_rect rect = *c_rect;
+ int depth = fmt->depth;
+ struct vpdma_dtd *dtd;
+
+ channel = next_chan = raw_vpdma_chan;
+
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+ fmt->data_type == DATA_TYPE_C420) {
+ rect.height >>= 1;
+ rect.top >>= 1;
+ depth = 8;
+ }
+
+ dma_addr += rect.top * stride + (rect.left * depth >> 3);
+
+ dtd = list->next;
+ WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
+
+ dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
+ notify,
+ field,
+ !!(flags & VPDMA_DATA_FRAME_1D),
+ !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
+ !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
+ stride);
+ dtd->w1 = 0;
+ dtd->start_addr = (u32) dma_addr;
+ dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
+ DTD_DIR_OUT, channel, priority, next_chan);
+ dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
+ dtd->max_width_height = dtd_max_width_height(max_w, max_h);
+ dtd->client_attr0 = 0;
+ dtd->client_attr1 = 0;
+
+ list->next = dtd + 1;
+
+ dump_dtd(dtd);
+}
+EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
+
+/*
+ * append an inbound data transfer descriptor to the given descriptor list.
+ * This sets up a 'memory to client' VPDMA transfer for the given VPDMA
+ * channel.
+ *
+ * @list: vpdma desc list to which we add this descriptor
+ * @width: width of the image in pixels in memory (not the cropped width)
+ * @stride: line stride of the buffer in memory, in bytes
+ * @c_rect: crop params of input image
+ * @fmt: vpdma data format of the buffer
+ * @dma_addr: dma address as seen by VPDMA
+ * @chan: VPDMA channel
+ * @field: top or bottom field info of the input image
+ * @flags: VPDMA flags to configure some descriptor fields
+ * @frame_width/height: the complete width/height of the image presented to the
+ * client (this makes sense when multiple channels are
+ * connected to the same client, forming a larger frame)
+ * @start_h, @start_v: position where the given channel starts providing pixel
+ * data to the client (makes sense when multiple channels
+ * contribute to the client)
+ */
+void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
+ int stride, const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, int field, u32 flags, int frame_width,
+ int frame_height, int start_h, int start_v)
+{
+ int priority = 0;
+ int notify = 1;
+ int depth = fmt->depth;
+ int channel, next_chan;
+ struct v4l2_rect rect = *c_rect;
+ struct vpdma_dtd *dtd;
+
+ channel = next_chan = chan_info[chan].num;
+
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+ fmt->data_type == DATA_TYPE_C420) {
+ rect.height >>= 1;
+ rect.top >>= 1;
+ depth = 8;
+ }
+
+ dma_addr += rect.top * stride + (rect.left * depth >> 3);
+
+ dtd = list->next;
+ WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
+
+ dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
+ notify,
+ field,
+ !!(flags & VPDMA_DATA_FRAME_1D),
+ !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
+ !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
+ stride);
+
+ dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
+ rect.height);
+ dtd->start_addr = (u32) dma_addr;
+ dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
+ DTD_DIR_IN, channel, priority, next_chan);
+ dtd->frame_width_height = dtd_frame_width_height(frame_width,
+ frame_height);
+ dtd->start_h_v = dtd_start_h_v(start_h, start_v);
+ dtd->client_attr0 = 0;
+ dtd->client_attr1 = 0;
+
+ list->next = dtd + 1;
+
+ dump_dtd(dtd);
+}
+EXPORT_SYMBOL(vpdma_add_in_dtd);
+
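+/*
+ * reserve a free hardware list: store the caller's private pointer for it and
+ * return the list number, or -1 if all lists are already in use
+ */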
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
+{
+ int i, list_num = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpdma->lock, flags);
+ for (i = 0; i < VPDMA_MAX_NUM_LIST &&
+ vpdma->hwlist_used[i] == true; i++)
+ ;
+
+ if (i < VPDMA_MAX_NUM_LIST) {
+ list_num = i;
+ vpdma->hwlist_used[i] = true;
+ vpdma->hwlist_priv[i] = priv;
+ }
+ spin_unlock_irqrestore(&vpdma->lock, flags);
+
+ return list_num;
+}
+EXPORT_SYMBOL(vpdma_hwlist_alloc);
+
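+/* return the private data stored for the given hardware list */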
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
+{
+ if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
+ return NULL;
+
+ return vpdma->hwlist_priv[list_num];
+}
+EXPORT_SYMBOL(vpdma_hwlist_get_priv);
+
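+/* release a hardware list and return the private data that was stored for it */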
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
+{
+ void *priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpdma->lock, flags);
+ vpdma->hwlist_used[list_num] = false;
+ priv = vpdma->hwlist_priv[list_num];
+ spin_unlock_irqrestore(&vpdma->lock, flags);
+
+ return priv;
+}
+EXPORT_SYMBOL(vpdma_hwlist_release);
+
+/* set or clear the mask for list complete interrupt */
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
+ u32 val;
+
+ val = read_reg(vpdma, reg_addr);
+ if (enable)
+ val |= (1 << (list_num * 2));
+ else
+ val &= ~(1 << (list_num * 2));
+ write_reg(vpdma, reg_addr, val);
+}
+EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
+
+/* get the LIST_STAT register */
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_stat);
+
+/* get the LIST_MASK register */
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_mask);
+
+/* clear previously occurred list interrupts in the LIST_STAT register */
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+ int list_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ write_reg(vpdma, reg_addr, 3 << (list_num * 2));
+}
+EXPORT_SYMBOL(vpdma_clear_list_stat);
+
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+ struct vpdma_data_format *fmt, u32 color)
+{
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
+ write_reg(vpdma, VPDMA_BG_RGB, color);
+ else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
+ write_reg(vpdma, VPDMA_BG_YUV, color);
+}
+EXPORT_SYMBOL(vpdma_set_bg_color);
+
+/*
+ * configures the output mode of the line buffer for the given client. The
+ * line buffer content can either be mirrored (each line repeated twice) or
+ * passed to the client as is
+ */
+void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
+ enum vpdma_channel chan)
+{
+ int client_cstat = chan_info[chan].cstat_offset;
+
+ write_field_reg(vpdma, client_cstat, line_mode,
+ VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
+}
+EXPORT_SYMBOL(vpdma_set_line_mode);
+
+/*
+ * configures the event which should trigger VPDMA transfer for the given
+ * client
+ */
+void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
+ enum vpdma_frame_start_event fs_event,
+ enum vpdma_channel chan)
+{
+ int client_cstat = chan_info[chan].cstat_offset;
+
+ write_field_reg(vpdma, client_cstat, fs_event,
+ VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
+}
+EXPORT_SYMBOL(vpdma_set_frame_start_event);
+
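+/*
+ * firmware request callback: upload the VPDMA list manager firmware (unless
+ * it has already been loaded) and notify the client driver through the
+ * registered callback
+ */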
+static void vpdma_firmware_cb(const struct firmware *f, void *context)
+{
+ struct vpdma_data *vpdma = context;
+ struct vpdma_buf fw_dma_buf;
+ int i, r;
+
+ dev_dbg(&vpdma->pdev->dev, "firmware callback\n");
+
+ if (!f || !f->data) {
+ dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
+ return;
+ }
+
+ /* already initialized */
+ if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
+ VPDMA_LIST_RDY_SHFT)) {
+ vpdma->cb(vpdma->pdev);
+ return;
+ }
+
+ r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
+ if (r) {
+ dev_err(&vpdma->pdev->dev,
+ "failed to allocate dma buffer for firmware\n");
+ goto rel_fw;
+ }
+
+ memcpy(fw_dma_buf.addr, f->data, f->size);
+
+ vpdma_map_desc_buf(vpdma, &fw_dma_buf);
+
+ write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);
+
+ for (i = 0; i < 100; i++) { /* max 1 second */
+ msleep_interruptible(10);
+
+ if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
+ VPDMA_LIST_RDY_SHFT))
+ break;
+ }
+
+ if (i == 100) {
+ dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
+ goto free_buf;
+ }
+
+ vpdma->cb(vpdma->pdev);
+
+free_buf:
+ vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);
+
+ vpdma_free_desc_buf(&fw_dma_buf);
+rel_fw:
+ release_firmware(f);
+}
+
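+/* kick off an asynchronous request for the VPDMA list manager firmware */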
+static int vpdma_load_firmware(struct vpdma_data *vpdma)
+{
+ int r;
+ struct device *dev = &vpdma->pdev->dev;
+
+ r = request_firmware_nowait(THIS_MODULE, 1,
+ (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
+ vpdma_firmware_cb);
+ if (r) {
+ dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
+ return r;
+ } else {
+ dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
+ }
+
+ return 0;
+}
+
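+/*
+ * create a vpdma instance: map the VPDMA register space and start the
+ * asynchronous firmware load; the client callback is invoked once the
+ * firmware upload completes
+ */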
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
+ void (*cb)(struct platform_device *pdev))
+{
+ struct resource *res;
+ int r;
+
+ dev_dbg(&pdev->dev, "vpdma_create\n");
+
+ vpdma->pdev = pdev;
+ vpdma->cb = cb;
+ spin_lock_init(&vpdma->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
+ if (res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources data\n");
+ return -ENODEV;
+ }
+
+ vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!vpdma->base) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return -ENOMEM;
+ }
+
+ r = vpdma_load_firmware(vpdma);
+ if (r) {
+ pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_create);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_FIRMWARE(VPDMA_FIRMWARE);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
new file mode 100644
index 000000000..f29074c84
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_VPDMA_H_
+#define __TI_VPDMA_H_
+
+#define VPDMA_MAX_NUM_LIST 8
+/*
+ * A vpdma_buf tracks the size, DMA address and mapping status of each
+ * driver DMA area.
+ */
+struct vpdma_buf {
+ void *addr;
+ dma_addr_t dma_addr;
+ size_t size;
+ bool mapped;
+};
+
+struct vpdma_desc_list {
+ struct vpdma_buf buf;
+ void *next;
+ int type;
+};
+
+struct vpdma_data {
+ void __iomem *base;
+
+ struct platform_device *pdev;
+
+ spinlock_t lock;
+ bool hwlist_used[VPDMA_MAX_NUM_LIST];
+ void *hwlist_priv[VPDMA_MAX_NUM_LIST];
+ /* callback to VPE driver when the firmware is loaded */
+ void (*cb)(struct platform_device *pdev);
+};
+
+enum vpdma_data_format_type {
+ VPDMA_DATA_FMT_TYPE_YUV,
+ VPDMA_DATA_FMT_TYPE_RGB,
+ VPDMA_DATA_FMT_TYPE_MISC,
+};
+
+struct vpdma_data_format {
+ enum vpdma_data_format_type type;
+ int data_type;
+ u8 depth;
+};
+
+#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */
+#define VPDMA_STRIDE_ALIGN 16 /*
+ * line stride of source and dest
+ * buffers should be 16 byte aligned
+ */
+#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
+#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
+#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
+
+#define VPDMA_LIST_TYPE_NORMAL 0
+#define VPDMA_LIST_TYPE_SELF_MODIFYING 1
+#define VPDMA_LIST_TYPE_DOORBELL 2
+
+enum vpdma_yuv_formats {
+ VPDMA_DATA_FMT_Y444 = 0,
+ VPDMA_DATA_FMT_Y422,
+ VPDMA_DATA_FMT_Y420,
+ VPDMA_DATA_FMT_C444,
+ VPDMA_DATA_FMT_C422,
+ VPDMA_DATA_FMT_C420,
+ VPDMA_DATA_FMT_YCR422,
+ VPDMA_DATA_FMT_YC444,
+ VPDMA_DATA_FMT_CRY422,
+ VPDMA_DATA_FMT_CBY422,
+ VPDMA_DATA_FMT_YCB422,
+};
+
+enum vpdma_rgb_formats {
+ VPDMA_DATA_FMT_RGB565 = 0,
+ VPDMA_DATA_FMT_ARGB16_1555,
+ VPDMA_DATA_FMT_ARGB16,
+ VPDMA_DATA_FMT_RGBA16_5551,
+ VPDMA_DATA_FMT_RGBA16,
+ VPDMA_DATA_FMT_ARGB24,
+ VPDMA_DATA_FMT_RGB24,
+ VPDMA_DATA_FMT_ARGB32,
+ VPDMA_DATA_FMT_RGBA24,
+ VPDMA_DATA_FMT_RGBA32,
+ VPDMA_DATA_FMT_BGR565,
+ VPDMA_DATA_FMT_ABGR16_1555,
+ VPDMA_DATA_FMT_ABGR16,
+ VPDMA_DATA_FMT_BGRA16_5551,
+ VPDMA_DATA_FMT_BGRA16,
+ VPDMA_DATA_FMT_ABGR24,
+ VPDMA_DATA_FMT_BGR24,
+ VPDMA_DATA_FMT_ABGR32,
+ VPDMA_DATA_FMT_BGRA24,
+ VPDMA_DATA_FMT_BGRA32,
+};
+
+enum vpdma_raw_formats {
+ VPDMA_DATA_FMT_RAW8 = 0,
+ VPDMA_DATA_FMT_RAW16,
+};
+
+enum vpdma_misc_formats {
+ VPDMA_DATA_FMT_MV = 0,
+};
+
+extern const struct vpdma_data_format vpdma_yuv_fmts[];
+extern const struct vpdma_data_format vpdma_rgb_fmts[];
+extern const struct vpdma_data_format vpdma_raw_fmts[];
+extern const struct vpdma_data_format vpdma_misc_fmts[];
+
+enum vpdma_frame_start_event {
+ VPDMA_FSEVENT_HDMI_FID = 0,
+ VPDMA_FSEVENT_DVO2_FID,
+ VPDMA_FSEVENT_HDCOMP_FID,
+ VPDMA_FSEVENT_SD_FID,
+ VPDMA_FSEVENT_LM_FID0,
+ VPDMA_FSEVENT_LM_FID1,
+ VPDMA_FSEVENT_LM_FID2,
+ VPDMA_FSEVENT_CHANNEL_ACTIVE,
+};
+
+/* max width configurations */
+enum vpdma_max_width {
+ MAX_OUT_WIDTH_UNLIMITED = 0,
+ MAX_OUT_WIDTH_REG1,
+ MAX_OUT_WIDTH_REG2,
+ MAX_OUT_WIDTH_REG3,
+ MAX_OUT_WIDTH_352,
+ MAX_OUT_WIDTH_768,
+ MAX_OUT_WIDTH_1280,
+ MAX_OUT_WIDTH_1920,
+};
+
+/* max height configurations */
+enum vpdma_max_height {
+ MAX_OUT_HEIGHT_UNLIMITED = 0,
+ MAX_OUT_HEIGHT_REG1,
+ MAX_OUT_HEIGHT_REG2,
+ MAX_OUT_HEIGHT_REG3,
+ MAX_OUT_HEIGHT_288,
+ MAX_OUT_HEIGHT_576,
+ MAX_OUT_HEIGHT_720,
+ MAX_OUT_HEIGHT_1080,
+};
+
+/*
+ * VPDMA channel numbers
+ */
+enum vpdma_channel {
+ VPE_CHAN_LUMA1_IN,
+ VPE_CHAN_CHROMA1_IN,
+ VPE_CHAN_LUMA2_IN,
+ VPE_CHAN_CHROMA2_IN,
+ VPE_CHAN_LUMA3_IN,
+ VPE_CHAN_CHROMA3_IN,
+ VPE_CHAN_MV_IN,
+ VPE_CHAN_MV_OUT,
+ VPE_CHAN_LUMA_OUT,
+ VPE_CHAN_CHROMA_OUT,
+ VPE_CHAN_RGB_OUT,
+};
+
+#define VIP_CHAN_VIP2_OFFSET 70
+#define VIP_CHAN_MULT_PORTB_OFFSET 16
+#define VIP_CHAN_YUV_PORTB_OFFSET 2
+#define VIP_CHAN_RGB_PORTB_OFFSET 1
+
+#define VPDMA_MAX_CHANNELS 256
+
+/* flags for VPDMA data descriptors */
+#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0)
+#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1)
+#define VPDMA_DATA_FRAME_1D (1 << 2)
+#define VPDMA_DATA_MODE_TILED (1 << 3)
+
+/*
+ * client identifiers used for configuration descriptors
+ */
+#define CFD_MMR_CLIENT 0
+#define CFD_SC_CLIENT 4
+
+/* Address data block header format */
+struct vpdma_adb_hdr {
+ u32 offset;
+ u32 nwords;
+ u32 reserved0;
+ u32 reserved1;
+};
+
+/* helpers for creating ADB headers for config descriptors with MMRs as client */
+#define ADB_ADDR(dma_buf, str, fld) ((dma_buf)->addr + offsetof(str, fld))
+#define MMR_ADB_ADDR(buf, str, fld) ADB_ADDR(&(buf), struct str, fld)
+
+#define VPDMA_SET_MMR_ADB_HDR(buf, str, hdr, regs, offset_a) \
+ do { \
+ struct vpdma_adb_hdr *h; \
+ struct str *adb = NULL; \
+ h = MMR_ADB_ADDR(buf, str, hdr); \
+ h->offset = (offset_a); \
+ h->nwords = sizeof(adb->regs) >> 2; \
+ } while (0)
+
+/* vpdma descriptor buffer allocation and management */
+int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size);
+void vpdma_free_desc_buf(struct vpdma_buf *buf);
+int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
+void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
+
+/* vpdma descriptor list funcs */
+int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
+void vpdma_reset_desc_list(struct vpdma_desc_list *list);
+void vpdma_free_desc_list(struct vpdma_desc_list *list);
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list,
+ int list_num);
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num);
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx);
+
+/* VPDMA hardware list funcs */
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv);
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num);
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num);
+
+/* helpers for creating vpdma descriptors */
+void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *blk, u32 dest_offset);
+void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *adb);
+void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
+ enum vpdma_channel chan);
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+ int chan_num);
+void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
+ int stride, const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, enum vpdma_channel chan, u32 flags);
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ int stride, const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, int raw_vpdma_chan, u32 flags);
+
+void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
+ int stride, const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, int field, u32 flags, int frame_width,
+ int frame_height, int start_h, int start_v);
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+ int *channels, int size);
+
+/* vpdma list interrupt management */
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable);
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+ int list_num);
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num);
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num);
+
+/* vpdma client configuration */
+void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
+ enum vpdma_channel chan);
+void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
+ enum vpdma_frame_start_event fs_event, enum vpdma_channel chan);
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+ u32 width, u32 height);
+
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+ struct vpdma_data_format *fmt, u32 color);
+void vpdma_dump_regs(struct vpdma_data *vpdma);
+
+/* initialize vpdma, passed with VPE's platform device pointer */
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
+ void (*cb)(struct platform_device *pdev));
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
new file mode 100644
index 000000000..72c7f13b4
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _TI_VPDMA_PRIV_H_
+#define _TI_VPDMA_PRIV_H_
+
+/*
+ * VPDMA Register offsets
+ */
+
+/* Top level */
+#define VPDMA_PID 0x00
+#define VPDMA_LIST_ADDR 0x04
+#define VPDMA_LIST_ATTR 0x08
+#define VPDMA_LIST_STAT_SYNC 0x0c
+#define VPDMA_BG_RGB 0x18
+#define VPDMA_BG_YUV 0x1c
+#define VPDMA_SETUP 0x30
+#define VPDMA_MAX_SIZE1 0x34
+#define VPDMA_MAX_SIZE2 0x38
+#define VPDMA_MAX_SIZE3 0x3c
+#define VPDMA_MAX_SIZE_WIDTH_MASK 0xffff
+#define VPDMA_MAX_SIZE_WIDTH_SHFT 16
+#define VPDMA_MAX_SIZE_HEIGHT_MASK 0xffff
+#define VPDMA_MAX_SIZE_HEIGHT_SHFT 0
+
+/* Interrupts */
+#define VPDMA_INT_CHAN_STAT(grp) (0x40 + grp * 8)
+#define VPDMA_INT_CHAN_MASK(grp) (VPDMA_INT_CHAN_STAT(grp) + 4)
+#define VPDMA_INT_CLIENT0_STAT 0x78
+#define VPDMA_INT_CLIENT0_MASK 0x7c
+#define VPDMA_INT_CLIENT1_STAT 0x80
+#define VPDMA_INT_CLIENT1_MASK 0x84
+#define VPDMA_INT_LIST0_STAT 0x88
+#define VPDMA_INT_LIST0_MASK 0x8c
+
+#define VPDMA_INTX_OFFSET 0x50
+
+#define VPDMA_PERFMON(i) (0x200 + i * 4)
+
+/* VIP/VPE client registers */
+#define VPDMA_DEI_CHROMA1_CSTAT 0x0300
+#define VPDMA_DEI_LUMA1_CSTAT 0x0304
+#define VPDMA_DEI_LUMA2_CSTAT 0x0308
+#define VPDMA_DEI_CHROMA2_CSTAT 0x030c
+#define VPDMA_DEI_LUMA3_CSTAT 0x0310
+#define VPDMA_DEI_CHROMA3_CSTAT 0x0314
+#define VPDMA_DEI_MV_IN_CSTAT 0x0330
+#define VPDMA_DEI_MV_OUT_CSTAT 0x033c
+#define VPDMA_VIP_LO_Y_CSTAT 0x0388
+#define VPDMA_VIP_LO_UV_CSTAT 0x038c
+#define VPDMA_VIP_UP_Y_CSTAT 0x0390
+#define VPDMA_VIP_UP_UV_CSTAT 0x0394
+#define VPDMA_VPI_CTL_CSTAT 0x03d0
+
+/* Reg field info for VPDMA_CLIENT_CSTAT registers */
+#define VPDMA_CSTAT_LINE_MODE_MASK 0x03
+#define VPDMA_CSTAT_LINE_MODE_SHIFT 8
+#define VPDMA_CSTAT_FRAME_START_MASK 0xf
+#define VPDMA_CSTAT_FRAME_START_SHIFT 10
+
+#define VPDMA_LIST_NUM_MASK 0x07
+#define VPDMA_LIST_NUM_SHFT 24
+#define VPDMA_LIST_STOP_SHFT 20
+#define VPDMA_LIST_RDY_MASK 0x01
+#define VPDMA_LIST_RDY_SHFT 19
+#define VPDMA_LIST_TYPE_MASK 0x03
+#define VPDMA_LIST_TYPE_SHFT 16
+#define VPDMA_LIST_SIZE_MASK 0xffff
+
+/*
+ * The YUV data type definitions below are taken from both the TRM and the
+ * i839 Errata information. Use the correct data type considering byte
+ * reordering of components.
+ *
+ * Also, since a single "C" in the 422 case would mean "Cr" (i.e. the V
+ * component), it was decided to explicitly label them CR to remove any
+ * confusion. Bear in mind that the type labels refer to the memory packed
+ * order (LSB - MSB).
+ */
+#define DATA_TYPE_Y444 0x0
+#define DATA_TYPE_Y422 0x1
+#define DATA_TYPE_Y420 0x2
+#define DATA_TYPE_C444 0x4
+#define DATA_TYPE_C422 0x5
+#define DATA_TYPE_C420 0x6
+#define DATA_TYPE_YC444 0x8
+#define DATA_TYPE_YCB422 0x7
+#define DATA_TYPE_YCR422 0x17
+#define DATA_TYPE_CBY422 0x27
+#define DATA_TYPE_CRY422 0x37
+
+/*
+ * The RGB data type definitions below follow Errata i819.
+ * The initial values were taken from:
+ * VPDMA_data_type_mapping_v0.2vayu_c.pdf
+ * But some of the ARGB definitions in that document also appeared to be
+ * wrong, as they would yield RGBA instead. They have been corrected based
+ * on experimentation.
+ */
+#define DATA_TYPE_RGB16_565 0x10
+#define DATA_TYPE_ARGB_1555 0x13
+#define DATA_TYPE_ARGB_4444 0x14
+#define DATA_TYPE_RGBA_5551 0x11
+#define DATA_TYPE_RGBA_4444 0x12
+#define DATA_TYPE_ARGB24_6666 0x18
+#define DATA_TYPE_RGB24_888 0x16
+#define DATA_TYPE_ARGB32_8888 0x17
+#define DATA_TYPE_RGBA24_6666 0x15
+#define DATA_TYPE_RGBA32_8888 0x19
+#define DATA_TYPE_BGR16_565 0x0
+#define DATA_TYPE_ABGR_1555 0x3
+#define DATA_TYPE_ABGR_4444 0x4
+#define DATA_TYPE_BGRA_5551 0x1
+#define DATA_TYPE_BGRA_4444 0x2
+#define DATA_TYPE_ABGR24_6666 0x8
+#define DATA_TYPE_BGR24_888 0x6
+#define DATA_TYPE_ABGR32_8888 0x7
+#define DATA_TYPE_BGRA24_6666 0x5
+#define DATA_TYPE_BGRA32_8888 0x9
+
+#define DATA_TYPE_MV 0x3
+
+/* VPDMA channel numbers, some are common between VIP/VPE and appear twice */
+#define VPE_CHAN_NUM_LUMA1_IN 0
+#define VPE_CHAN_NUM_CHROMA1_IN 1
+#define VPE_CHAN_NUM_LUMA2_IN 2
+#define VPE_CHAN_NUM_CHROMA2_IN 3
+#define VPE_CHAN_NUM_LUMA3_IN 4
+#define VPE_CHAN_NUM_CHROMA3_IN 5
+#define VPE_CHAN_NUM_MV_IN 12
+#define VPE_CHAN_NUM_MV_OUT 15
+#define VIP1_CHAN_NUM_MULT_PORT_A_SRC0 38
+#define VIP1_CHAN_NUM_MULT_ANC_A_SRC0 70
+#define VPE_CHAN_NUM_LUMA_OUT 102
+#define VPE_CHAN_NUM_CHROMA_OUT 103
+#define VIP1_CHAN_NUM_PORT_A_LUMA 102
+#define VIP1_CHAN_NUM_PORT_A_CHROMA 103
+#define VPE_CHAN_NUM_RGB_OUT 106
+#define VIP1_CHAN_NUM_PORT_A_RGB 106
+#define VIP1_CHAN_NUM_PORT_B_RGB 107
+/*
+ * a VPDMA address data block payload for a configuration descriptor needs to
+ * have each sub block length as a multiple of 16 bytes. Therefore, the overall
+ * size of the payload also needs to be a multiple of 16 bytes. It is up to
+ * the VPDMA user to ensure that the sub block lengths are aligned accordingly.
+ */
+#define VPDMA_ADB_SIZE_ALIGN 0x0f
+
+/*
+ * data transfer descriptor
+ */
+struct vpdma_dtd {
+ u32 type_ctl_stride;
+ union {
+ u32 xfer_length_height;
+ u32 w1;
+ };
+ dma_addr_t start_addr;
+ u32 pkt_ctl;
+ union {
+ u32 frame_width_height; /* inbound */
+ dma_addr_t desc_write_addr; /* outbound */
+ };
+ union {
+ u32 start_h_v; /* inbound */
+ u32 max_width_height; /* outbound */
+ };
+ u32 client_attr0;
+ u32 client_attr1;
+};
+
+/* Data Transfer Descriptor specifics */
+#define DTD_NO_NOTIFY 0
+#define DTD_NOTIFY 1
+
+#define DTD_PKT_TYPE 0xa
+#define DTD_DIR_IN 0
+#define DTD_DIR_OUT 1
+
+/* type_ctl_stride */
+#define DTD_DATA_TYPE_MASK 0x3f
+#define DTD_DATA_TYPE_SHFT 26
+#define DTD_NOTIFY_MASK 0x01
+#define DTD_NOTIFY_SHFT 25
+#define DTD_FIELD_MASK 0x01
+#define DTD_FIELD_SHFT 24
+#define DTD_1D_MASK 0x01
+#define DTD_1D_SHFT 23
+#define DTD_EVEN_LINE_SKIP_MASK 0x01
+#define DTD_EVEN_LINE_SKIP_SHFT 20
+#define DTD_ODD_LINE_SKIP_MASK 0x01
+#define DTD_ODD_LINE_SKIP_SHFT 16
+#define DTD_LINE_STRIDE_MASK 0xffff
+#define DTD_LINE_STRIDE_SHFT 0
+
+/* xfer_length_height */
+#define DTD_LINE_LENGTH_MASK 0xffff
+#define DTD_LINE_LENGTH_SHFT 16
+#define DTD_XFER_HEIGHT_MASK 0xffff
+#define DTD_XFER_HEIGHT_SHFT 0
+
+/* pkt_ctl */
+#define DTD_PKT_TYPE_MASK 0x1f
+#define DTD_PKT_TYPE_SHFT 27
+#define DTD_MODE_MASK 0x01
+#define DTD_MODE_SHFT 26
+#define DTD_DIR_MASK 0x01
+#define DTD_DIR_SHFT 25
+#define DTD_CHAN_MASK 0x01ff
+#define DTD_CHAN_SHFT 16
+#define DTD_PRI_MASK 0x0f
+#define DTD_PRI_SHFT 9
+#define DTD_NEXT_CHAN_MASK 0x01ff
+#define DTD_NEXT_CHAN_SHFT 0
+
+/* frame_width_height */
+#define DTD_FRAME_WIDTH_MASK 0xffff
+#define DTD_FRAME_WIDTH_SHFT 16
+#define DTD_FRAME_HEIGHT_MASK 0xffff
+#define DTD_FRAME_HEIGHT_SHFT 0
+
+/* start_h_v */
+#define DTD_H_START_MASK 0xffff
+#define DTD_H_START_SHFT 16
+#define DTD_V_START_MASK 0xffff
+#define DTD_V_START_SHFT 0
+
+#define DTD_DESC_START_MASK 0xffffffe0
+#define DTD_DESC_START_SHIFT 5
+#define DTD_WRITE_DESC_MASK 0x01
+#define DTD_WRITE_DESC_SHIFT 2
+#define DTD_DROP_DATA_MASK 0x01
+#define DTD_DROP_DATA_SHIFT 1
+#define DTD_USE_DESC_MASK 0x01
+#define DTD_USE_DESC_SHIFT 0
+
+/* max_width_height */
+#define DTD_MAX_WIDTH_MASK 0x07
+#define DTD_MAX_WIDTH_SHFT 4
+#define DTD_MAX_HEIGHT_MASK 0x07
+#define DTD_MAX_HEIGHT_SHFT 0
+
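+/*
+ * the helpers below pack and unpack the bit fields of the data transfer
+ * descriptor words defined above
+ */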
+static inline u32 dtd_type_ctl_stride(int type, bool notify, int field,
+ bool one_d, bool even_line_skip, bool odd_line_skip,
+ int line_stride)
+{
+ return (type << DTD_DATA_TYPE_SHFT) | (notify << DTD_NOTIFY_SHFT) |
+ (field << DTD_FIELD_SHFT) | (one_d << DTD_1D_SHFT) |
+ (even_line_skip << DTD_EVEN_LINE_SKIP_SHFT) |
+ (odd_line_skip << DTD_ODD_LINE_SKIP_SHFT) |
+ line_stride;
+}
+
+static inline u32 dtd_xfer_length_height(int line_length, int xfer_height)
+{
+ return (line_length << DTD_LINE_LENGTH_SHFT) | xfer_height;
+}
+
+static inline u32 dtd_pkt_ctl(bool mode, bool dir, int chan, int pri,
+ int next_chan)
+{
+ return (DTD_PKT_TYPE << DTD_PKT_TYPE_SHFT) | (mode << DTD_MODE_SHFT) |
+ (dir << DTD_DIR_SHFT) | (chan << DTD_CHAN_SHFT) |
+ (pri << DTD_PRI_SHFT) | next_chan;
+}
+
+static inline u32 dtd_frame_width_height(int width, int height)
+{
+ return (width << DTD_FRAME_WIDTH_SHFT) | height;
+}
+
+static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc,
+ bool drop_data, bool use_desc)
+{
+ return (addr & DTD_DESC_START_MASK) |
+ (write_desc << DTD_WRITE_DESC_SHIFT) |
+ (drop_data << DTD_DROP_DATA_SHIFT) |
+ use_desc;
+}
+
+static inline u32 dtd_start_h_v(int h_start, int v_start)
+{
+ return (h_start << DTD_H_START_SHFT) | v_start;
+}
+
+static inline u32 dtd_max_width_height(int max_width, int max_height)
+{
+ return (max_width << DTD_MAX_WIDTH_SHFT) | max_height;
+}
+
+static inline int dtd_get_data_type(struct vpdma_dtd *dtd)
+{
+ return dtd->type_ctl_stride >> DTD_DATA_TYPE_SHFT;
+}
+
+static inline bool dtd_get_notify(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_NOTIFY_SHFT) & DTD_NOTIFY_MASK;
+}
+
+static inline int dtd_get_field(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_FIELD_SHFT) & DTD_FIELD_MASK;
+}
+
+static inline bool dtd_get_1d(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_1D_SHFT) & DTD_1D_MASK;
+}
+
+static inline bool dtd_get_even_line_skip(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_EVEN_LINE_SKIP_SHFT)
+ & DTD_EVEN_LINE_SKIP_MASK;
+}
+
+static inline bool dtd_get_odd_line_skip(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_ODD_LINE_SKIP_SHFT)
+ & DTD_ODD_LINE_SKIP_MASK;
+}
+
+static inline int dtd_get_line_stride(struct vpdma_dtd *dtd)
+{
+ return dtd->type_ctl_stride & DTD_LINE_STRIDE_MASK;
+}
+
+static inline int dtd_get_line_length(struct vpdma_dtd *dtd)
+{
+ return dtd->xfer_length_height >> DTD_LINE_LENGTH_SHFT;
+}
+
+static inline int dtd_get_xfer_height(struct vpdma_dtd *dtd)
+{
+ return dtd->xfer_length_height & DTD_XFER_HEIGHT_MASK;
+}
+
+static inline int dtd_get_pkt_type(struct vpdma_dtd *dtd)
+{
+ return dtd->pkt_ctl >> DTD_PKT_TYPE_SHFT;
+}
+
+static inline bool dtd_get_mode(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_MODE_SHFT) & DTD_MODE_MASK;
+}
+
+static inline bool dtd_get_dir(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_DIR_SHFT) & DTD_DIR_MASK;
+}
+
+static inline int dtd_get_chan(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_CHAN_SHFT) & DTD_CHAN_MASK;
+}
+
+static inline int dtd_get_priority(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_PRI_SHFT) & DTD_PRI_MASK;
+}
+
+static inline int dtd_get_next_chan(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_NEXT_CHAN_SHFT) & DTD_NEXT_CHAN_MASK;
+}
+
+static inline int dtd_get_frame_width(struct vpdma_dtd *dtd)
+{
+ return dtd->frame_width_height >> DTD_FRAME_WIDTH_SHFT;
+}
+
+static inline int dtd_get_frame_height(struct vpdma_dtd *dtd)
+{
+ return dtd->frame_width_height & DTD_FRAME_HEIGHT_MASK;
+}
+
+static inline int dtd_get_desc_write_addr(struct vpdma_dtd *dtd)
+{
+ return dtd->desc_write_addr & DTD_DESC_START_MASK;
+}
+
+static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd)
+{
+ return (dtd->desc_write_addr >> DTD_WRITE_DESC_SHIFT) &
+ DTD_WRITE_DESC_MASK;
+}
+
+static inline bool dtd_get_drop_data(struct vpdma_dtd *dtd)
+{
+ return (dtd->desc_write_addr >> DTD_DROP_DATA_SHIFT) &
+ DTD_DROP_DATA_MASK;
+}
+
+static inline bool dtd_get_use_desc(struct vpdma_dtd *dtd)
+{
+ return dtd->desc_write_addr & DTD_USE_DESC_MASK;
+}
+
+static inline int dtd_get_h_start(struct vpdma_dtd *dtd)
+{
+ return dtd->start_h_v >> DTD_H_START_SHFT;
+}
+
+static inline int dtd_get_v_start(struct vpdma_dtd *dtd)
+{
+ return dtd->start_h_v & DTD_V_START_MASK;
+}
+
+static inline int dtd_get_max_width(struct vpdma_dtd *dtd)
+{
+ return (dtd->max_width_height >> DTD_MAX_WIDTH_SHFT) &
+ DTD_MAX_WIDTH_MASK;
+}
+
+static inline int dtd_get_max_height(struct vpdma_dtd *dtd)
+{
+ return (dtd->max_width_height >> DTD_MAX_HEIGHT_SHFT) &
+ DTD_MAX_HEIGHT_MASK;
+}
+
+/*
+ * configuration descriptor
+ */
+struct vpdma_cfd {
+ union {
+ u32 dest_addr_offset;
+ u32 w0;
+ };
+ union {
+ u32 block_len; /* in words */
+ u32 w1;
+ };
+ u32 payload_addr;
+ u32 ctl_payload_len; /* in words */
+};
+
+/* Configuration descriptor specifics */
+
+#define CFD_PKT_TYPE 0xb
+
+#define CFD_DIRECT 1
+#define CFD_INDIRECT 0
+#define CFD_CLS_ADB 0
+#define CFD_CLS_BLOCK 1
+
+/* block_len */
+#define CFD__BLOCK_LEN_MASK 0xffff
+#define CFD__BLOCK_LEN_SHFT 0
+
+/* ctl_payload_len */
+#define CFD_PKT_TYPE_MASK 0x1f
+#define CFD_PKT_TYPE_SHFT 27
+#define CFD_DIRECT_MASK 0x01
+#define CFD_DIRECT_SHFT 26
+#define CFD_CLASS_MASK 0x03
+#define CFD_CLASS_SHFT 24
+#define CFD_DEST_MASK 0xff
+#define CFD_DEST_SHFT 16
+#define CFD_PAYLOAD_LEN_MASK 0xffff
+#define CFD_PAYLOAD_LEN_SHFT 0
+
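+/*
+ * the helpers below pack and unpack the fields of a configuration
+ * descriptor's control word
+ */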
+static inline u32 cfd_pkt_payload_len(bool direct, int cls, int dest,
+ int payload_len)
+{
+ return (CFD_PKT_TYPE << CFD_PKT_TYPE_SHFT) |
+ (direct << CFD_DIRECT_SHFT) |
+ (cls << CFD_CLASS_SHFT) |
+ (dest << CFD_DEST_SHFT) |
+ payload_len;
+}
+
+static inline int cfd_get_pkt_type(struct vpdma_cfd *cfd)
+{
+ return cfd->ctl_payload_len >> CFD_PKT_TYPE_SHFT;
+}
+
+static inline bool cfd_get_direct(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_DIRECT_SHFT) & CFD_DIRECT_MASK;
+}
+
+static inline bool cfd_get_class(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_CLASS_SHFT) & CFD_CLASS_MASK;
+}
+
+static inline int cfd_get_dest(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_DEST_SHFT) & CFD_DEST_MASK;
+}
+
+static inline int cfd_get_payload_len(struct vpdma_cfd *cfd)
+{
+ return cfd->ctl_payload_len & CFD_PAYLOAD_LEN_MASK;
+}
+
+/*
+ * control descriptor
+ */
+struct vpdma_ctd {
+ union {
+ u32 timer_value;
+ u32 list_addr;
+ u32 w0;
+ };
+ union {
+ u32 pixel_line_count;
+ u32 list_size;
+ u32 w1;
+ };
+ union {
+ u32 event;
+ u32 fid_ctl;
+ u32 w2;
+ };
+ u32 type_source_ctl;
+};
+
+/* control descriptor types */
+#define CTD_TYPE_SYNC_ON_CLIENT 0
+#define CTD_TYPE_SYNC_ON_LIST 1
+#define CTD_TYPE_SYNC_ON_EXT 2
+#define CTD_TYPE_SYNC_ON_LM_TIMER 3
+#define CTD_TYPE_SYNC_ON_CHANNEL 4
+#define CTD_TYPE_CHNG_CLIENT_IRQ 5
+#define CTD_TYPE_SEND_IRQ 6
+#define CTD_TYPE_RELOAD_LIST 7
+#define CTD_TYPE_ABORT_CHANNEL 8
+
+#define CTD_PKT_TYPE 0xc
+
+/* timer_value */
+#define CTD_TIMER_VALUE_MASK 0xffff
+#define CTD_TIMER_VALUE_SHFT 0
+
+/* pixel_line_count */
+#define CTD_PIXEL_COUNT_MASK 0xffff
+#define CTD_PIXEL_COUNT_SHFT 16
+#define CTD_LINE_COUNT_MASK 0xffff
+#define CTD_LINE_COUNT_SHFT 0
+
+/* list_size */
+#define CTD_LIST_SIZE_MASK 0xffff
+#define CTD_LIST_SIZE_SHFT 0
+
+/* event */
+#define CTD_EVENT_MASK 0x0f
+#define CTD_EVENT_SHFT 0
+
+/* fid_ctl */
+#define CTD_FID2_MASK 0x03
+#define CTD_FID2_SHFT 4
+#define CTD_FID1_MASK 0x03
+#define CTD_FID1_SHFT 2
+#define CTD_FID0_MASK 0x03
+#define CTD_FID0_SHFT 0
+
+/* type_source_ctl */
+#define CTD_PKT_TYPE_MASK 0x1f
+#define CTD_PKT_TYPE_SHFT 27
+#define CTD_SOURCE_MASK 0xff
+#define CTD_SOURCE_SHFT 16
+#define CTD_CONTROL_MASK 0x0f
+#define CTD_CONTROL_SHFT 0
+
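+/* helpers for packing and unpacking control descriptor fields */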
+static inline u32 ctd_pixel_line_count(int pixel_count, int line_count)
+{
+ return (pixel_count << CTD_PIXEL_COUNT_SHFT) | line_count;
+}
+
+static inline u32 ctd_set_fid_ctl(int fid0, int fid1, int fid2)
+{
+ return (fid2 << CTD_FID2_SHFT) | (fid1 << CTD_FID1_SHFT) | fid0;
+}
+
+static inline u32 ctd_type_source_ctl(int source, int control)
+{
+ return (CTD_PKT_TYPE << CTD_PKT_TYPE_SHFT) |
+ (source << CTD_SOURCE_SHFT) | control;
+}
+
+static inline u32 ctd_get_pixel_count(struct vpdma_ctd *ctd)
+{
+ return ctd->pixel_line_count >> CTD_PIXEL_COUNT_SHFT;
+}
+
+static inline int ctd_get_line_count(struct vpdma_ctd *ctd)
+{
+ return ctd->pixel_line_count & CTD_LINE_COUNT_MASK;
+}
+
+static inline int ctd_get_event(struct vpdma_ctd *ctd)
+{
+ return ctd->event & CTD_EVENT_MASK;
+}
+
+static inline int ctd_get_fid2_ctl(struct vpdma_ctd *ctd)
+{
+ return (ctd->fid_ctl >> CTD_FID2_SHFT) & CTD_FID2_MASK;
+}
+
+static inline int ctd_get_fid1_ctl(struct vpdma_ctd *ctd)
+{
+ return (ctd->fid_ctl >> CTD_FID1_SHFT) & CTD_FID1_MASK;
+}
+
+static inline int ctd_get_fid0_ctl(struct vpdma_ctd *ctd)
+{
+ return ctd->fid_ctl & CTD_FID2_MASK;
+}
+
+static inline int ctd_get_pkt_type(struct vpdma_ctd *ctd)
+{
+ return ctd->type_source_ctl >> CTD_PKT_TYPE_SHFT;
+}
+
+static inline int ctd_get_source(struct vpdma_ctd *ctd)
+{
+ return (ctd->type_source_ctl >> CTD_SOURCE_SHFT) & CTD_SOURCE_MASK;
+}
+
+static inline int ctd_get_ctl(struct vpdma_ctd *ctd)
+{
+ return ctd->type_source_ctl & CTD_CONTROL_MASK;
+}
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
new file mode 100644
index 000000000..70a8371b7
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -0,0 +1,2633 @@
+/*
+ * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * Based on the virtual v4l2-mem2mem example device
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/log2.h>
+#include <linux/sizes.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vpdma.h"
+#include "vpdma_priv.h"
+#include "vpe_regs.h"
+#include "sc.h"
+#include "csc.h"
+
+#define VPE_MODULE_NAME "vpe"
+
+/* minimum and maximum frame sizes */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 2048
+#define MAX_H 1184
+
+/* required alignments */
+#define S_ALIGN 0 /* multiple of 1 */
+#define H_ALIGN 1 /* multiple of 2 */
+
+/* flags that indicate a format can be used for capture/output */
+#define VPE_FMT_TYPE_CAPTURE (1 << 0)
+#define VPE_FMT_TYPE_OUTPUT (1 << 1)
+
+/* used as plane indices */
+#define VPE_MAX_PLANES 2
+#define VPE_LUMA 0
+#define VPE_CHROMA 1
+
+/* per m2m context info */
+#define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */
+
+#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
+
+/*
+ * each VPE context can need up to 3 config descriptors, 7 input descriptors,
+ * 3 output descriptors, and 10 control descriptors
+ */
+#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
+ 13 * VPDMA_CFD_CTD_DESC_SIZE)
+
+#define vpe_dbg(vpedev, fmt, arg...) \
+ dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
+#define vpe_err(vpedev, fmt, arg...) \
+ dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
+
+struct vpe_us_coeffs {
+ unsigned short anchor_fid0_c0;
+ unsigned short anchor_fid0_c1;
+ unsigned short anchor_fid0_c2;
+ unsigned short anchor_fid0_c3;
+ unsigned short interp_fid0_c0;
+ unsigned short interp_fid0_c1;
+ unsigned short interp_fid0_c2;
+ unsigned short interp_fid0_c3;
+ unsigned short anchor_fid1_c0;
+ unsigned short anchor_fid1_c1;
+ unsigned short anchor_fid1_c2;
+ unsigned short anchor_fid1_c3;
+ unsigned short interp_fid1_c0;
+ unsigned short interp_fid1_c1;
+ unsigned short interp_fid1_c2;
+ unsigned short interp_fid1_c3;
+};
+
+/*
+ * Default upsampler coefficients
+ */
+static const struct vpe_us_coeffs us_coeffs[] = {
+ {
+ /* Coefficients for progressive input */
+ 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
+ 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
+ },
+ {
+ /* Coefficients for Top Field Interlaced input */
+ 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
+ /* Coefficients for Bottom Field Interlaced input */
+ 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
+ },
+};
+
+/*
+ * the following registers are for configuring some of the parameters of the
+ * motion and edge detection blocks inside DEI. These generally remain the
+ * same; they could be exposed via userspace later if someone needs to tweak
+ * them.
+ */
+struct vpe_dei_regs {
+ unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */
+ unsigned long edi_config_reg; /* VPE_DEI_REG3 */
+ unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */
+ unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */
+ unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */
+ unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */
+};
+
+/*
+ * default expert DEI register values, unlikely to be modified.
+ */
+static const struct vpe_dei_regs dei_regs = {
+ .mdt_spacial_freq_thr_reg = 0x020C0804u,
+ .edi_config_reg = 0x0118100Cu,
+ .edi_lut_reg0 = 0x08040200u,
+ .edi_lut_reg1 = 0x1010100Cu,
+ .edi_lut_reg2 = 0x10101010u,
+ .edi_lut_reg3 = 0x10101010u,
+};
+
+/*
+ * The port_data structure contains per-port data.
+ */
+struct vpe_port_data {
+ enum vpdma_channel channel; /* VPDMA channel */
+ u8 vb_index; /* input frame f, f-1, f-2 index */
+ u8 vb_part; /* plane index for co-planar formats */
+};
+
+/*
+ * Define indices into the port_data tables
+ */
+#define VPE_PORT_LUMA1_IN 0
+#define VPE_PORT_CHROMA1_IN 1
+#define VPE_PORT_LUMA2_IN 2
+#define VPE_PORT_CHROMA2_IN 3
+#define VPE_PORT_LUMA3_IN 4
+#define VPE_PORT_CHROMA3_IN 5
+#define VPE_PORT_MV_IN 6
+#define VPE_PORT_MV_OUT 7
+#define VPE_PORT_LUMA_OUT 8
+#define VPE_PORT_CHROMA_OUT 9
+#define VPE_PORT_RGB_OUT 10
+
+static const struct vpe_port_data port_data[11] = {
+ [VPE_PORT_LUMA1_IN] = {
+ .channel = VPE_CHAN_LUMA1_IN,
+ .vb_index = 0,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA1_IN] = {
+ .channel = VPE_CHAN_CHROMA1_IN,
+ .vb_index = 0,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_LUMA2_IN] = {
+ .channel = VPE_CHAN_LUMA2_IN,
+ .vb_index = 1,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA2_IN] = {
+ .channel = VPE_CHAN_CHROMA2_IN,
+ .vb_index = 1,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_LUMA3_IN] = {
+ .channel = VPE_CHAN_LUMA3_IN,
+ .vb_index = 2,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA3_IN] = {
+ .channel = VPE_CHAN_CHROMA3_IN,
+ .vb_index = 2,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_MV_IN] = {
+ .channel = VPE_CHAN_MV_IN,
+ },
+ [VPE_PORT_MV_OUT] = {
+ .channel = VPE_CHAN_MV_OUT,
+ },
+ [VPE_PORT_LUMA_OUT] = {
+ .channel = VPE_CHAN_LUMA_OUT,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA_OUT] = {
+ .channel = VPE_CHAN_CHROMA_OUT,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_RGB_OUT] = {
+ .channel = VPE_CHAN_RGB_OUT,
+ .vb_part = VPE_LUMA,
+ },
+};
+
+
+/* driver info for each of the supported video formats */
+struct vpe_fmt {
+ char *name; /* human-readable name */
+ u32 fourcc; /* standard format identifier */
+ u8 types; /* CAPTURE and/or OUTPUT */
+ u8 coplanar; /* set for unpacked Luma and Chroma */
+ /* vpdma format info for each plane */
+ struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
+};
+
+static struct vpe_fmt vpe_formats[] = {
+ {
+ .name = "NV16 YUV 422 co-planar",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
+ },
+ },
+ {
+ .name = "NV12 YUV 420 co-planar",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
+ },
+ },
+ {
+ .name = "YUYV 422 packed",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
+ },
+ },
+ {
+ .name = "UYVY 422 packed",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
+ },
+ },
+ {
+ .name = "RGB888 packed",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
+ },
+ },
+ {
+ .name = "ARGB32",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
+ },
+ },
+ {
+ .name = "BGR888 packed",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
+ },
+ },
+ {
+ .name = "ABGR32",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
+ },
+ },
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565],
+ },
+ },
+ {
+ .name = "RGB5551",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551],
+ },
+ },
+};
+
+/*
+ * per-queue, driver-specific private data.
+ * there is one source queue and one destination queue for each m2m context.
+ */
+struct vpe_q_data {
+ unsigned int width; /* frame width */
+ unsigned int height; /* frame height */
+ unsigned int nplanes; /* Current number of planes */
+ unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
+ enum v4l2_colorspace colorspace;
+ enum v4l2_field field; /* supported field value */
+ unsigned int flags;
+ unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
+ struct v4l2_rect c_rect; /* crop/compose rectangle */
+ struct vpe_fmt *fmt; /* format info */
+};
+
+/* vpe_q_data flag bits */
+#define Q_DATA_FRAME_1D BIT(0)
+#define Q_DATA_MODE_TILED BIT(1)
+#define Q_DATA_INTERLACED_ALTERNATE BIT(2)
+#define Q_DATA_INTERLACED_SEQ_TB BIT(3)
+
+#define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \
+ Q_DATA_INTERLACED_SEQ_TB)
+
+enum {
+ Q_DATA_SRC = 0,
+ Q_DATA_DST = 1,
+};
+
+/* find our format description corresponding to the passed v4l2_format */
+static struct vpe_fmt *__find_format(u32 fourcc)
+{
+ struct vpe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
+ fmt = &vpe_formats[k];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ return __find_format(f->fmt.pix.pixelformat);
+}
+
+/*
+ * there is one vpe_dev structure in the driver; it is shared by
+ * all instances.
+ */
+struct vpe_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ atomic_t num_instances; /* count of driver instances */
+ dma_addr_t loaded_mmrs; /* shadow mmrs in device */
+ struct mutex dev_mutex;
+ spinlock_t lock;
+
+ int irq;
+ void __iomem *base;
+ struct resource *res;
+
+ struct vpdma_data vpdma_data;
+ struct vpdma_data *vpdma; /* vpdma data handle */
+ struct sc_data *sc; /* scaler data handle */
+ struct csc_data *csc; /* csc data handle */
+};
+
+/*
+ * There is one vpe_ctx structure for each m2m context.
+ */
+struct vpe_ctx {
+ struct v4l2_fh fh;
+ struct vpe_dev *dev;
+ struct v4l2_ctrl_handler hdl;
+
+ unsigned int field; /* current field */
+ unsigned int sequence; /* current frame/field seq */
+ unsigned int aborting; /* abort after next irq */
+
+ unsigned int bufs_per_job; /* input buffers per batch */
+ unsigned int bufs_completed; /* bufs done in this batch */
+
+ struct vpe_q_data q_data[2]; /* src & dst queue data */
+ struct vb2_v4l2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
+ struct vb2_v4l2_buffer *dst_vb;
+
+ dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */
+ void *mv_buf[2]; /* virtual addrs of motion vector bufs */
+ size_t mv_buf_size; /* current motion vector buffer size */
+ struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
+ struct vpdma_buf sc_coeff_h; /* h coeff buffer */
+ struct vpdma_buf sc_coeff_v; /* v coeff buffer */
+ struct vpdma_desc_list desc_list; /* DMA descriptor list */
+
+ bool deinterlacing; /* using de-interlacer */
+ bool load_mmrs; /* have new shadow reg values */
+
+ unsigned int src_mv_buf_selector;
+};
+
+
+/*
+ * M2M devices get 2 queues.
+ * Return the queue given the type.
+ */
+static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->q_data[Q_DATA_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->q_data[Q_DATA_DST];
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
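+/* VPE top level module register read/write helpers */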
+static u32 read_reg(struct vpe_dev *dev, int offset)
+{
+ return ioread32(dev->base + offset);
+}
+
+static void write_reg(struct vpe_dev *dev, int offset, u32 value)
+{
+ iowrite32(value, dev->base + offset);
+}
+
+/* register field read/write helpers */
+static int get_field(u32 value, u32 mask, int shift)
+{
+ return (value & (mask << shift)) >> shift;
+}
+
+static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
+{
+ return get_field(read_reg(dev, offset), mask, shift);
+}
+
+static void write_field(u32 *valp, u32 field, u32 mask, int shift)
+{
+ u32 val = *valp;
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+ *valp = val;
+}
+
+static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
+ u32 mask, int shift)
+{
+ u32 val = read_reg(dev, offset);
+
+ write_field(&val, field, mask, shift);
+
+ write_reg(dev, offset, val);
+}
+
+/*
+ * DMA address/data block for the shadow registers
+ */
+struct vpe_mmr_adb {
+ struct vpdma_adb_hdr out_fmt_hdr;
+ u32 out_fmt_reg[1];
+ u32 out_fmt_pad[3];
+ struct vpdma_adb_hdr us1_hdr;
+ u32 us1_regs[8];
+ struct vpdma_adb_hdr us2_hdr;
+ u32 us2_regs[8];
+ struct vpdma_adb_hdr us3_hdr;
+ u32 us3_regs[8];
+ struct vpdma_adb_hdr dei_hdr;
+ u32 dei_regs[8];
+ struct vpdma_adb_hdr sc_hdr0;
+ u32 sc_regs0[7];
+ u32 sc_pad0[1];
+ struct vpdma_adb_hdr sc_hdr8;
+ u32 sc_regs8[6];
+ u32 sc_pad8[2];
+ struct vpdma_adb_hdr sc_hdr17;
+ u32 sc_regs17[9];
+ u32 sc_pad17[3];
+ struct vpdma_adb_hdr csc_hdr;
+ u32 csc_regs[6];
+ u32 csc_pad[2];
+};
+
+#define GET_OFFSET_TOP(ctx, obj, reg) \
+ ((obj)->res->start - ctx->dev->res->start + reg)
+
+#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
+ VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
+/*
+ * Set the headers for all of the address/data block structures.
+ */
+static void init_adb_hdrs(struct vpe_ctx *ctx)
+{
+ VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
+ VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
+ VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
+ GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
+};
+
+/*
+ * Allocate or re-allocate the motion vector DMA buffers
+ * There are two buffers, one for input and one for output.
+ * However, the roles are reversed after each field is processed.
+ * In other words, after each field is processed, the previous
+ * output (dst) MV buffer becomes the new input (src) MV buffer.
+ */
+static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
+{
+ struct device *dev = ctx->dev->v4l2_dev.dev;
+
+ if (ctx->mv_buf_size == size)
+ return 0;
+
+ if (ctx->mv_buf[0])
+ dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
+ ctx->mv_buf_dma[0]);
+
+ if (ctx->mv_buf[1])
+ dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
+ ctx->mv_buf_dma[1]);
+
+ if (size == 0)
+ return 0;
+
+ ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
+ GFP_KERNEL);
+ if (!ctx->mv_buf[0]) {
+ vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
+ return -ENOMEM;
+ }
+
+ ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
+ GFP_KERNEL);
+ if (!ctx->mv_buf[1]) {
+ vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
+ dma_free_coherent(dev, size, ctx->mv_buf[0],
+ ctx->mv_buf_dma[0]);
+
+ return -ENOMEM;
+ }
+
+ ctx->mv_buf_size = size;
+ ctx->src_mv_buf_selector = 0;
+
+ return 0;
+}
+
+static void free_mv_buffers(struct vpe_ctx *ctx)
+{
+ realloc_mv_buffers(ctx, 0);
+}
+
+/*
+ * While de-interlacing, we keep the two most recent input buffers
+ * around. This function frees those two buffers when we have
+ * finished processing the current stream.
+ */
+static void free_vbs(struct vpe_ctx *ctx)
+{
+ struct vpe_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ if (ctx->src_vbs[2] == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ctx->src_vbs[2]) {
+ v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
+ if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+ ctx->src_vbs[2] = NULL;
+ ctx->src_vbs[1] = NULL;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Enable or disable the VPE clocks
+ */
+static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
+ write_reg(dev, VPE_CLK_ENABLE, val);
+}
+
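+/* assert and release the reset for the VPE main data path */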
+static void vpe_top_reset(struct vpe_dev *dev)
+{
+
+ write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
+ VPE_DATA_PATH_CLK_RESET_SHIFT);
+
+ usleep_range(100, 150);
+
+ write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
+ VPE_DATA_PATH_CLK_RESET_SHIFT);
+}
+
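+/* assert and release the reset for the VPDMA block within VPE */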
+static void vpe_top_vpdma_reset(struct vpe_dev *dev)
+{
+ write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
+ VPE_VPDMA_CLK_RESET_SHIFT);
+
+ usleep_range(100, 150);
+
+ write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
+ VPE_VPDMA_CLK_RESET_SHIFT);
+}
+
+/*
+ * Load the correct set of upsampler coefficients into the shadow MMRs
+ */
+static void set_us_coefficients(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ u32 *us1_reg = &mmr_adb->us1_regs[0];
+ u32 *us2_reg = &mmr_adb->us2_regs[0];
+ u32 *us3_reg = &mmr_adb->us3_regs[0];
+ const unsigned short *cp, *end_cp;
+
+ cp = &us_coeffs[0].anchor_fid0_c0;
+
+ if (s_q_data->flags & Q_IS_INTERLACED) /* interlaced */
+ cp += sizeof(us_coeffs[0]) / sizeof(*cp);
+
+ end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
+
+ while (cp < end_cp) {
+ write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
+ write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
+ *us2_reg++ = *us1_reg;
+ *us3_reg++ = *us1_reg++;
+ }
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
+ */
+static void set_cfg_modes(struct vpe_ctx *ctx)
+{
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *us1_reg0 = &mmr_adb->us1_regs[0];
+ u32 *us2_reg0 = &mmr_adb->us2_regs[0];
+ u32 *us3_reg0 = &mmr_adb->us3_regs[0];
+ int cfg_mode = 1;
+
+ /*
+ * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
+ * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
+ */
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ cfg_mode = 0;
+
+ write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+
+ ctx->load_mmrs = true;
+}
+
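+/*
+ * Set the VPDMA line mode for the chroma input channels and configure the
+ * frame start event for all of the input channels.
+ */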
+static void set_line_modes(struct vpe_ctx *ctx)
+{
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
+ int line_mode = 1;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ line_mode = 0; /* double lines to line buffer */
+
+ /* set line mode for the chroma input channels */
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);
+
+ /* frame start for input luma */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA1_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA2_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA3_IN);
+
+ /* frame start for input chroma */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA1_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA2_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA3_IN);
+
+ /* frame start for MV in client */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_MV_IN);
+}
+
+/*
+ * Set the shadow registers that are modified when the source
+ * format changes.
+ */
+static void set_src_registers(struct vpe_ctx *ctx)
+{
+ set_us_coefficients(ctx);
+}
+
+/*
+ * Set the shadow registers that are modified when the destination
+ * format changes.
+ */
+static void set_dst_registers(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
+ u32 val = 0;
+
+ if (clrspc == V4L2_COLORSPACE_SRGB) {
+ val |= VPE_RGB_OUT_SELECT;
+ vpdma_set_bg_color(ctx->dev->vpdma,
+ (struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
+ } else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
+ val |= VPE_COLOR_SEPARATE_422;
+
+ /*
+ * the source of CHR_DS and CSC is always the scaler, irrespective of
+ * whether it's used or not
+ */
+ val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
+
+ if (fmt->fourcc != V4L2_PIX_FMT_NV12)
+ val |= VPE_DS_BYPASS;
+
+ mmr_adb->out_fmt_reg[0] = val;
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the de-interlacer shadow register values
+ */
+static void set_dei_regs(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int src_w = s_q_data->c_rect.width;
+ u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
+ bool deinterlace = true;
+ u32 val = 0;
+
+ /*
+ * According to the TRM, we should set the DEI in progressive bypass
+ * mode when the input content is progressive. However, the DEI is
+ * bypassed correctly for both progressive and interlaced content in
+ * interlace bypass mode, and it has been recommended not to use
+ * progressive bypass mode.
+ */
+ if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) {
+ deinterlace = false;
+ val = VPE_DEI_INTERLACE_BYPASS;
+ }
+
+ src_h = deinterlace ? src_h * 2 : src_h;
+
+ val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
+ (src_w << VPE_DEI_WIDTH_SHIFT) |
+ VPE_DEI_FIELD_FLUSH;
+
+ *dei_mmr0 = val;
+
+ ctx->load_mmrs = true;
+}
+
+static void set_dei_shadow_registers(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *dei_mmr = &mmr_adb->dei_regs[0];
+ const struct vpe_dei_regs *cur = &dei_regs;
+
+ dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
+ dei_mmr[3] = cur->edi_config_reg;
+ dei_mmr[4] = cur->edi_lut_reg0;
+ dei_mmr[5] = cur->edi_lut_reg1;
+ dei_mmr[6] = cur->edi_lut_reg2;
+ dei_mmr[7] = cur->edi_lut_reg3;
+
+ ctx->load_mmrs = true;
+}
+
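+/*
+ * Set the EDI input mode field in the shadow MMRs: mode 0x0 is used for
+ * the first frames (line average), mode 0x3 enables EDI for both luma
+ * and chroma
+ */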
+static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *edi_config_reg = &mmr_adb->dei_regs[3];
+
+ if (mode & 0x2)
+ write_field(edi_config_reg, 1, 1, 2); /* EDI_ENABLE_3D */
+
+ if (mode & 0x3)
+ write_field(edi_config_reg, 1, 1, 3); /* EDI_CHROMA_3D */
+
+ write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
+ VPE_EDI_INP_MODE_SHIFT);
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the shadow registers whose values are modified when either the
+ * source or destination format is changed.
+ */
+static int set_srcdst_params(struct vpe_ctx *ctx)
+{
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ unsigned int src_w = s_q_data->c_rect.width;
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int dst_w = d_q_data->c_rect.width;
+ unsigned int dst_h = d_q_data->c_rect.height;
+ size_t mv_buf_size;
+ int ret;
+
+ ctx->sequence = 0;
+ ctx->field = V4L2_FIELD_TOP;
+
+ if ((s_q_data->flags & Q_IS_INTERLACED) &&
+ !(d_q_data->flags & Q_IS_INTERLACED)) {
+ int bytes_per_line;
+ const struct vpdma_data_format *mv =
+ &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+
+ /*
+ * We make sure that the source image has a 16 byte aligned
+ * stride; we need to do the same for the motion vector buffer
+ * by aligning its stride to the next 16 byte boundary. This
+ * extra space will not be used by the de-interlacer, but it
+ * ensures that VPDMA operates correctly.
+ */
+ bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ mv_buf_size = bytes_per_line * s_q_data->height;
+
+ ctx->deinterlacing = true;
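+ /*
+ * the de-interlacer outputs frames from fields, so the height
+ * seen by the DEI and the scaler is twice the field height
+ */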
+ src_h <<= 1;
+ } else {
+ ctx->deinterlacing = false;
+ mv_buf_size = 0;
+ }
+
+ free_vbs(ctx);
+ ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;
+
+ ret = realloc_mv_buffers(ctx, mv_buf_size);
+ if (ret)
+ return ret;
+
+ set_cfg_modes(ctx);
+ set_dei_regs(ctx);
+
+ csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
+ s_q_data->colorspace, d_q_data->colorspace);
+
+ sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
+ sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
+
+ sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
+ &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
+ src_w, src_h, dst_w, dst_h);
+
+ return 0;
+}
+
+/*
+ * Return the vpe_ctx structure for a given struct file
+ */
+static struct vpe_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vpe_ctx, fh);
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/*
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+
+ /*
+ * This check is needed as this might be called directly from the
+ * driver. When called by the m2m framework, the check always passes,
+ * but when called from vpe_irq it might fail (e.g. a source stream
+ * with zero buffers ready).
+ */
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
+ return 0;
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+static void vpe_dump_regs(struct vpe_dev *dev)
+{
+#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
+
+ vpe_dbg(dev, "VPE Registers:\n");
+
+ DUMPREG(PID);
+ DUMPREG(SYSCONFIG);
+ DUMPREG(INT0_STATUS0_RAW);
+ DUMPREG(INT0_STATUS0);
+ DUMPREG(INT0_ENABLE0);
+ DUMPREG(INT0_STATUS1_RAW);
+ DUMPREG(INT0_STATUS1);
+ DUMPREG(INT0_ENABLE1);
+ DUMPREG(CLK_ENABLE);
+ DUMPREG(CLK_RESET);
+ DUMPREG(CLK_FORMAT_SELECT);
+ DUMPREG(CLK_RANGE_MAP);
+ DUMPREG(US1_R0);
+ DUMPREG(US1_R1);
+ DUMPREG(US1_R2);
+ DUMPREG(US1_R3);
+ DUMPREG(US1_R4);
+ DUMPREG(US1_R5);
+ DUMPREG(US1_R6);
+ DUMPREG(US1_R7);
+ DUMPREG(US2_R0);
+ DUMPREG(US2_R1);
+ DUMPREG(US2_R2);
+ DUMPREG(US2_R3);
+ DUMPREG(US2_R4);
+ DUMPREG(US2_R5);
+ DUMPREG(US2_R6);
+ DUMPREG(US2_R7);
+ DUMPREG(US3_R0);
+ DUMPREG(US3_R1);
+ DUMPREG(US3_R2);
+ DUMPREG(US3_R3);
+ DUMPREG(US3_R4);
+ DUMPREG(US3_R5);
+ DUMPREG(US3_R6);
+ DUMPREG(US3_R7);
+ DUMPREG(DEI_FRAME_SIZE);
+ DUMPREG(MDT_BYPASS);
+ DUMPREG(MDT_SF_THRESHOLD);
+ DUMPREG(EDI_CONFIG);
+ DUMPREG(DEI_EDI_LUT_R0);
+ DUMPREG(DEI_EDI_LUT_R1);
+ DUMPREG(DEI_EDI_LUT_R2);
+ DUMPREG(DEI_EDI_LUT_R3);
+ DUMPREG(DEI_FMD_WINDOW_R0);
+ DUMPREG(DEI_FMD_WINDOW_R1);
+ DUMPREG(DEI_FMD_CONTROL_R0);
+ DUMPREG(DEI_FMD_CONTROL_R1);
+ DUMPREG(DEI_FMD_STATUS_R0);
+ DUMPREG(DEI_FMD_STATUS_R1);
+ DUMPREG(DEI_FMD_STATUS_R2);
+#undef DUMPREG
+
+ sc_dump_regs(dev->sc);
+ csc_dump_regs(dev->csc);
+}
+
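+/*
+ * Add an outbound data transfer descriptor for the given output port to
+ * the context's descriptor list
+ */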
+static void add_out_dtd(struct vpe_ctx *ctx, int port)
+{
+ struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
+ const struct vpe_port_data *p_data = &port_data[port];
+ struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
+ struct vpe_fmt *fmt = q_data->fmt;
+ const struct vpdma_data_format *vpdma_fmt;
+ int mv_buf_selector = !ctx->src_mv_buf_selector;
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+ u32 offset = 0;
+ u32 stride;
+
+ if (port == VPE_PORT_MV_OUT) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ q_data = &ctx->q_data[Q_DATA_SRC];
+ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ } else {
+ /* interleaved (non-coplanar) formats only use plane 0 */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+
+ vpdma_fmt = fmt->vpdma_fmt[plane];
+ /*
+ * If we are using a single plane buffer and need to set up a
+ * separate vpdma chroma channel, compute the offset to the
+ * chroma data within the buffer.
+ */
+ if (q_data->nplanes == 1 && plane) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Compute required offset */
+ offset = q_data->bytesperline[0] * q_data->height;
+ } else {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /* Use address as is, no offset */
+ offset = 0;
+ }
+ if (!dma_addr) {
+ vpe_err(ctx->dev,
+ "acquiring output buffer(%d) dma_addr failed\n",
+ port);
+ return;
+ }
+ /* Apply the offset */
+ dma_addr += offset;
+ stride = q_data->bytesperline[VPE_LUMA];
+ }
+
+ if (q_data->flags & Q_DATA_FRAME_1D)
+ flags |= VPDMA_DATA_FRAME_1D;
+ if (q_data->flags & Q_DATA_MODE_TILED)
+ flags |= VPDMA_DATA_MODE_TILED;
+
+ vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
+ MAX_W, MAX_H);
+
+ vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
+ stride, &q_data->c_rect,
+ vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
+ MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
+}
+
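+/*
+ * Add an inbound data transfer descriptor for the given input port to
+ * the context's descriptor list
+ */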
+static void add_in_dtd(struct vpe_ctx *ctx, int port)
+{
+ struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
+ const struct vpe_port_data *p_data = &port_data[port];
+ struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpe_fmt *fmt = q_data->fmt;
+ const struct vpdma_data_format *vpdma_fmt;
+ int mv_buf_selector = ctx->src_mv_buf_selector;
+ int field = vbuf->field == V4L2_FIELD_BOTTOM;
+ int frame_width, frame_height;
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+ u32 offset = 0;
+ u32 stride;
+
+ if (port == VPE_PORT_MV_IN) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ } else {
+ /* interleaved (non-coplanar) formats only use plane 0 */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+
+ vpdma_fmt = fmt->vpdma_fmt[plane];
+ /*
+ * If we are using a single plane buffer and need to set up a
+ * separate vpdma chroma channel, compute the offset to the
+ * chroma data within the buffer.
+ */
+ if (q_data->nplanes == 1 && plane) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Compute required offset */
+ offset = q_data->bytesperline[0] * q_data->height;
+ } else {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /* Use address as is, no offset */
+ offset = 0;
+ }
+ if (!dma_addr) {
+ vpe_err(ctx->dev,
+ "acquiring output buffer(%d) dma_addr failed\n",
+ port);
+ return;
+ }
+ /* Apply the offset */
+ dma_addr += offset;
+ stride = q_data->bytesperline[VPE_LUMA];
+
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
+ /*
+ * Use top or bottom field from same vb alternately
+ * f,f-1,f-2 = TBT when seq is even
+ * f,f-1,f-2 = BTB when seq is odd
+ */
+ field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+
+ if (field) {
+ /*
+ * bottom field of a SEQ_TB buffer: skip the top
+ * field data by advancing the address by the size
+ * of one field
+ */
+ int height = q_data->height / 2;
+ int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
+ 1 : (vpdma_fmt->depth >> 3);
+ if (plane)
+ height /= 2;
+ dma_addr += q_data->width * height * bpp;
+ }
+ }
+ }
+
+ if (q_data->flags & Q_DATA_FRAME_1D)
+ flags |= VPDMA_DATA_FRAME_1D;
+ if (q_data->flags & Q_DATA_MODE_TILED)
+ flags |= VPDMA_DATA_MODE_TILED;
+
+ frame_width = q_data->c_rect.width;
+ frame_height = q_data->c_rect.height;
+
+ if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ frame_height /= 2;
+
+ vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride,
+ &q_data->c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, field, flags, frame_width,
+ frame_height, 0, 0);
+}
+
+/*
+ * Enable the expected IRQ sources
+ */
+static void enable_irqs(struct vpe_ctx *ctx)
+{
+ write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
+ write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
+ VPE_DS1_UV_ERROR_INT);
+
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
+}
+
+static void disable_irqs(struct vpe_ctx *ctx)
+{
+ write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
+ write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
+
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This function is only called when both the source and destination
+ * buffers are in place.
+ */
+static void device_run(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ struct sc_data *sc = ctx->dev->sc;
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+
+ if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
+ ctx->sequence % 2 == 0) {
+ /*
+ * When using SEQ_TB buffers and processing the first (top)
+ * field, there is no need to remove the buffer from the queue,
+ * as the next (bottom) field is present in the same buffer
+ * (this also keeps job_ready() from failing). The buffer is
+ * removed when the bottom field is processed.
+ */
+ ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
+ } else {
+ ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
+ }
+
+ ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ WARN_ON(ctx->dst_vb == NULL);
+
+ if (ctx->deinterlacing) {
+
+ if (ctx->src_vbs[2] == NULL) {
+ ctx->src_vbs[2] = ctx->src_vbs[0];
+ WARN_ON(ctx->src_vbs[2] == NULL);
+ ctx->src_vbs[1] = ctx->src_vbs[0];
+ WARN_ON(ctx->src_vbs[1] == NULL);
+ }
+
+ /*
+ * we have output the first 2 frames using line averaging; now
+ * switch to the EDI de-interlacer
+ */
+ if (ctx->sequence == 2)
+ config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */
+ }
+
+ /* config descriptors */
+ if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
+ vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
+
+ set_line_modes(ctx);
+
+ ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
+ ctx->load_mmrs = false;
+ }
+
+ if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
+ sc->load_coeff_h) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+ &ctx->sc_coeff_h, 0);
+
+ sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
+ sc->load_coeff_h = false;
+ }
+
+ if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
+ sc->load_coeff_v) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
+ vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+ &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
+
+ sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
+ sc->load_coeff_v = false;
+ }
+
+ /* output data descriptors */
+ if (ctx->deinterlacing)
+ add_out_dtd(ctx, VPE_PORT_MV_OUT);
+
+ if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ add_out_dtd(ctx, VPE_PORT_RGB_OUT);
+ } else {
+ add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+ }
+
+ /* input data descriptors */
+ if (ctx->deinterlacing) {
+ add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);
+
+ add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
+ }
+
+ add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
+
+ if (ctx->deinterlacing)
+ add_in_dtd(ctx, VPE_PORT_MV_IN);
+
+ /* sync on channel control descriptors for input ports */
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
+
+ if (ctx->deinterlacing) {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA2_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA2_IN);
+
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA3_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA3_IN);
+
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
+ }
+
+ /* sync on channel control descriptors for output ports */
+ if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_RGB_OUT);
+ } else {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA_OUT);
+ }
+
+ if (ctx->deinterlacing)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
+
+ enable_irqs(ctx);
+
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
+ vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
+}
+
+static void dei_error(struct vpe_ctx *ctx)
+{
+ dev_warn(ctx->dev->v4l2_dev.dev,
+ "received DEI error interrupt\n");
+}
+
+static void ds1_uv_error(struct vpe_ctx *ctx)
+{
+ dev_warn(ctx->dev->v4l2_dev.dev,
+ "received downsampler error interrupt\n");
+}
+
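+/*
+ * vpe_irq() - clear and handle the VPE interrupt status: report DEI and
+ * downsampler errors, and on list complete finish the current frame and
+ * either schedule the next one or finish the transaction
+ */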
+static irqreturn_t vpe_irq(int irq_vpe, void *data)
+{
+ struct vpe_dev *dev = (struct vpe_dev *)data;
+ struct vpe_ctx *ctx;
+ struct vpe_q_data *d_q_data;
+ struct vb2_v4l2_buffer *s_vb, *d_vb;
+ unsigned long flags;
+ u32 irqst0, irqst1;
+ bool list_complete = false;
+
+ irqst0 = read_reg(dev, VPE_INT0_STATUS0);
+ if (irqst0) {
+ write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
+ vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
+ }
+
+ irqst1 = read_reg(dev, VPE_INT0_STATUS1);
+ if (irqst1) {
+ write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
+ vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
+ }
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ vpe_err(dev, "instance released before end of transaction\n");
+ goto handled;
+ }
+
+ if (irqst1) {
+ if (irqst1 & VPE_DEI_ERROR_INT) {
+ irqst1 &= ~VPE_DEI_ERROR_INT;
+ dei_error(ctx);
+ }
+ if (irqst1 & VPE_DS1_UV_ERROR_INT) {
+ irqst1 &= ~VPE_DS1_UV_ERROR_INT;
+ ds1_uv_error(ctx);
+ }
+ }
+
+ if (irqst0) {
+ if (irqst0 & VPE_INT0_LIST0_COMPLETE)
+ vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0);
+
+ irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
+ list_complete = true;
+ }
+
+ if (irqst0 | irqst1) {
+ dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
+ irqst0, irqst1);
+ }
+
+ /*
+ * Set up the next operation only when the list complete IRQ
+ * occurs; otherwise, skip the following code
+ */
+ if (!list_complete)
+ goto handled;
+
+ disable_irqs(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
+
+ vpdma_reset_desc_list(&ctx->desc_list);
+
+ /* the previous dst mv buffer becomes the next src mv buffer */
+ ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
+
+ s_vb = ctx->src_vbs[0];
+ d_vb = ctx->dst_vb;
+
+ d_vb->flags = s_vb->flags;
+ d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp;
+
+ if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ d_vb->timecode = s_vb->timecode;
+
+ d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
+
+ d_q_data = &ctx->q_data[Q_DATA_DST];
+ if (d_q_data->flags & Q_IS_INTERLACED) {
+ d_vb->field = ctx->field;
+ if (ctx->field == V4L2_FIELD_BOTTOM) {
+ ctx->sequence++;
+ ctx->field = V4L2_FIELD_TOP;
+ } else {
+ WARN_ON(ctx->field != V4L2_FIELD_TOP);
+ ctx->field = V4L2_FIELD_BOTTOM;
+ }
+ } else {
+ d_vb->field = V4L2_FIELD_NONE;
+ ctx->sequence++;
+ }
+
+ if (ctx->deinterlacing) {
+ /*
+ * Allow the source buffer to be dequeued only if it won't be
+ * used in the next iteration. All vbs are initialized to the
+ * first buffer and we shift the buffers every iteration, so
+ * for the first two iterations no buffer is dequeued. This
+ * ensures that the driver keeps the (n-2)th, (n-1)th and nth
+ * fields when deinterlacing is enabled.
+ */
+ if (ctx->src_vbs[2] != ctx->src_vbs[1])
+ s_vb = ctx->src_vbs[2];
+ else
+ s_vb = NULL;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (s_vb)
+ v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (ctx->deinterlacing) {
+ ctx->src_vbs[2] = ctx->src_vbs[1];
+ ctx->src_vbs[1] = ctx->src_vbs[0];
+ }
+
+ /*
+ * Since vb2_buf_done() has already been called for these buffers,
+ * we can now NULL them out so that we won't try to clean out
+ * stray pointers later on.
+ */
+ ctx->src_vbs[0] = NULL;
+ ctx->dst_vb = NULL;
+
+ if (ctx->aborting)
+ goto finished;
+
+ ctx->bufs_completed++;
+ if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
+ device_run(ctx);
+ goto handled;
+ }
+
+finished:
+ vpe_dbg(ctx->dev, "finishing transaction\n");
+ ctx->bufs_completed = 0;
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
+handled:
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int vpe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ VPE_MODULE_NAME);
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, index;
+ struct vpe_fmt *fmt = NULL;
+
+ index = 0;
+ for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
+ if (vpe_formats[i].types & type) {
+ if (index == f->index) {
+ fmt = &vpe_formats[i];
+ break;
+ }
+ index++;
+ }
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vpe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
+
+ return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
+}
+
+static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vb2_queue *vq;
+ struct vpe_q_data *q_data;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->field = q_data->field;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ pix->colorspace = q_data->colorspace;
+ } else {
+ struct vpe_q_data *s_q_data;
+
+ /* get colorspace from the source queue */
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ pix->colorspace = s_q_data->colorspace;
+ }
+
+ pix->num_planes = q_data->nplanes;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
+ struct vpe_fmt *fmt, int type)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ unsigned int w_align;
+ int i, depth, depth_bytes, height;
+ unsigned int stride = 0;
+
+ if (!fmt || !(fmt->types & type)) {
+ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ pix->pixelformat);
+ fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ }
+
+ if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
+ && pix->field != V4L2_FIELD_SEQ_TB)
+ pix->field = V4L2_FIELD_NONE;
+
+ depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
+
+ /*
+ * the line stride should be 16 byte aligned for VPDMA to work; based
+ * on the bytes per pixel, figure out how the width should be aligned
+ * to make sure the line stride is 16 byte aligned
+ */
+ depth_bytes = depth >> 3;
+
+ if (depth_bytes == 3) {
+ /*
+ * if bpp is 3 (as in some RGB formats), the pixel width doesn't
+ * really help in ensuring the line stride is 16 byte aligned
+ */
+ w_align = 4;
+ } else {
+ /*
+ * for the remaining bpp values (4, 2 and 1), the pixel width
+ * alignment can ensure a line stride alignment of 16 bytes.
+ * For example, if bpp is 2, the line stride is 16 byte aligned
+ * if the width is aligned to 8 pixels
+ */
+
+ /*
+ * HACK: using order_base_2() here causes lots of asm output
+ * errors with smatch, on i386:
+ * ./arch/x86/include/asm/bitops.h:457:22:
+ * warning: asm output is not an lvalue
+ * Perhaps some gcc optimization is doing the wrong thing
+ * there.
+ * Let's get rid of them by doing the calculation in two steps
+ */
+ w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes);
+ w_align = ilog2(w_align);
+ }
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
+ &pix->height, MIN_H, MAX_H, H_ALIGN,
+ S_ALIGN);
+
+ if (!pix->num_planes || pix->num_planes > 2)
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ else if (pix->num_planes > 1 && !fmt->coplanar)
+ pix->num_planes = 1;
+
+ pix->pixelformat = fmt->fourcc;
+
+ /*
+ * For the actual image parameters, we need to consider the field
+ * height of the image for SEQ_TB buffers.
+ */
+ if (pix->field == V4L2_FIELD_SEQ_TB)
+ height = pix->height / 2;
+ else
+ height = pix->height;
+
+ if (!pix->colorspace) {
+ if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
+ fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ if (height > 1280) /* HD */
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ else /* SD */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ }
+ }
+
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+ for (i = 0; i < pix->num_planes; i++) {
+ plane_fmt = &pix->plane_fmt[i];
+ depth = fmt->vpdma_fmt[i]->depth;
+
+ stride = (pix->width * fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
+ if (stride > plane_fmt->bytesperline)
+ plane_fmt->bytesperline = stride;
+
+ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
+ stride,
+ VPDMA_MAX_STRIDE);
+
+ plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
+ VPDMA_STRIDE_ALIGN);
+
+ if (i == VPE_LUMA) {
+ plane_fmt->sizeimage = pix->height *
+ plane_fmt->bytesperline;
+
+ if (pix->num_planes == 1 && fmt->coplanar)
+ plane_fmt->sizeimage += pix->height *
+ plane_fmt->bytesperline *
+ fmt->vpdma_fmt[VPE_CHROMA]->depth >> 3;
+
+ } else { /* i == VIP_CHROMA */
+ plane_fmt->sizeimage = (pix->height *
+ plane_fmt->bytesperline *
+ depth) >> 3;
+ }
+ memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
+ }
+
+ return 0;
+}
+
+static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_fmt *fmt = find_format(f);
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
+ else
+ return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
+}
+
+static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ struct vpe_q_data *q_data;
+ struct vb2_queue *vq;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ vpe_err(ctx->dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ q_data->fmt = find_format(f);
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->colorspace = pix->colorspace;
+ q_data->field = pix->field;
+ q_data->nplanes = pix->num_planes;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ plane_fmt = &pix->plane_fmt[i];
+
+ q_data->bytesperline[i] = plane_fmt->bytesperline;
+ q_data->sizeimage[i] = plane_fmt->sizeimage;
+ }
+
+ q_data->c_rect.left = 0;
+ q_data->c_rect.top = 0;
+ q_data->c_rect.width = q_data->width;
+ q_data->c_rect.height = q_data->height;
+
+ if (q_data->field == V4L2_FIELD_ALTERNATE)
+ q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
+ else if (q_data->field == V4L2_FIELD_SEQ_TB)
+ q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
+ else
+ q_data->flags &= ~Q_IS_INTERLACED;
+
+ /* the crop height is halved for the case of SEQ_TB buffers */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ q_data->c_rect.height /= 2;
+
+ vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
+ q_data->bytesperline[VPE_LUMA]);
+ if (q_data->nplanes == 2)
+ vpe_dbg(ctx->dev, " bpl_uv %d\n",
+ q_data->bytesperline[VPE_CHROMA]);
+
+ return 0;
+}
+
+static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ int ret;
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ ret = vpe_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = __vpe_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ set_src_registers(ctx);
+ else
+ set_dst_registers(ctx);
+
+ return set_srcdst_params(ctx);
+}
+
+static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
+{
+ struct vpe_q_data *q_data;
+ int height;
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * COMPOSE target is only valid for capture buffer type, return
+ * error for output buffer type
+ */
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * CROP target is only valid for output buffer type, return
+ * error for capture buffer type
+ */
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ /*
+ * bound and default crop/compose targets are invalid targets to
+ * try/set
+ */
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * For SEQ_TB buffers, the crop height should be limited to the
+ * field height, not the buffer height
+ */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ height = q_data->height / 2;
+ else
+ height = q_data->height;
+
+ if (s->r.top < 0 || s->r.left < 0) {
+ vpe_err(ctx->dev, "negative values for top and left\n");
+ s->r.top = s->r.left = 0;
+ }
+
+ v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
+ &s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (s->r.left + s->r.width > q_data->width)
+ s->r.left = q_data->width - s->r.width;
+ if (s->r.top + s->r.height > q_data->height)
+ s->r.top = q_data->height - s->r.height;
+
+ return 0;
+}
+
+static int vpe_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_q_data *q_data;
+ bool use_c_rect = false;
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ use_c_rect = true;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ use_c_rect = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_c_rect) {
+ /*
+ * for CROP/COMPOSE target type, return c_rect params from the
+ * respective buffer type
+ */
+ s->r = q_data->c_rect;
+ } else {
+ /*
+ * for DEFAULT/BOUNDS target type, return width and height from
+ * S_FMT of the respective buffer type
+ */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->width;
+ s->r.height = q_data->height;
+ }
+
+ return 0;
+}
+
+
+static int vpe_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_q_data *q_data;
+ struct v4l2_selection sel = *s;
+ int ret;
+
+ ret = __vpe_try_selection(ctx, &sel);
+ if (ret)
+ return ret;
+
+ q_data = get_q_data(ctx, sel.type);
+ if (!q_data)
+ return -EINVAL;
+
+ if ((q_data->c_rect.left == sel.r.left) &&
+ (q_data->c_rect.top == sel.r.top) &&
+ (q_data->c_rect.width == sel.r.width) &&
+ (q_data->c_rect.height == sel.r.height)) {
+ vpe_dbg(ctx->dev,
+ "requested crop/compose values are already set\n");
+ return 0;
+ }
+
+ q_data->c_rect = sel.r;
+
+ return set_srcdst_params(ctx);
+}
+
+/*
+ * defines number of buffers/frames a context can process with VPE before
+ * switching to a different context. default value is 1 buffer per context
+ */
+#define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
+
+static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpe_ctx *ctx =
+ container_of(ctrl->handler, struct vpe_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VPE_BUFS_PER_JOB:
+ ctx->bufs_per_job = ctrl->val;
+ break;
+
+ default:
+ vpe_err(ctx->dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
+ .s_ctrl = vpe_s_ctrl,
+};
+
+static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
+ .vidioc_querycap = vpe_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
+
+ .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
+
+ .vidioc_g_selection = vpe_g_selection,
+ .vidioc_s_selection = vpe_s_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+static int vpe_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ int i;
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vpe_q_data *q_data;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ *nplanes = q_data->nplanes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+
+ vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
+ sizes[VPE_LUMA]);
+ if (q_data->nplanes == 2)
+ vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
+
+ return 0;
+}
+
+static int vpe_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpe_q_data *q_data;
+ int i, num_planes;
+
+ vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ num_planes = q_data->nplanes;
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!(q_data->flags & Q_IS_INTERLACED)) {
+ vbuf->field = V4L2_FIELD_NONE;
+ } else {
+ if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM &&
+ vbuf->field != V4L2_FIELD_SEQ_TB)
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ vpe_err(ctx->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long) q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num_planes; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+static void vpe_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
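+/*
+ * Check that the source and destination sizes are within the scaler limits;
+ * returns 0 if no scaling is needed or both sizes are within range
+ */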
+static int check_srcdst_sizes(struct vpe_ctx *ctx)
+{
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ unsigned int src_w = s_q_data->c_rect.width;
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int dst_w = d_q_data->c_rect.width;
+ unsigned int dst_h = d_q_data->c_rect.height;
+
+ if (src_w == dst_w && src_h == dst_h)
+ return 0;
+
+ if (src_h <= SC_MAX_PIXEL_HEIGHT &&
+ src_w <= SC_MAX_PIXEL_WIDTH &&
+ dst_h <= SC_MAX_PIXEL_HEIGHT &&
+ dst_w <= SC_MAX_PIXEL_WIDTH)
+ return 0;
+
+ return -1;
+}
+
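+/*
+ * Give back all queued buffers, and any in-transit buffers held by the
+ * driver, to vb2 with the given state
+ */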
+static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vb)
+ break;
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+ v4l2_m2m_buf_done(vb, state);
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ }
+
+ /*
+ * Clean up the in-transit vb2 buffers that have already been
+ * removed from their respective queues, but for which
+ * processing has not been completed yet.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+
+ if (ctx->src_vbs[2])
+ v4l2_m2m_buf_done(ctx->src_vbs[2], state);
+
+ if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[1], state);
+
+ if (ctx->src_vbs[0] &&
+ (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
+ (ctx->src_vbs[0] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[0], state);
+
+ ctx->src_vbs[2] = NULL;
+ ctx->src_vbs[1] = NULL;
+ ctx->src_vbs[0] = NULL;
+
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ } else {
+ if (ctx->dst_vb) {
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+
+ v4l2_m2m_buf_done(ctx->dst_vb, state);
+ ctx->dst_vb = NULL;
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ }
+ }
+}
+
+static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+
+ /* Check whether any of the sizes exceed the maximum scaling limits */
+ if (check_srcdst_sizes(ctx)) {
+ vpe_err(ctx->dev,
+ "Conversion setup failed, check source and destination parameters\n"
+ );
+ vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
+ return -EINVAL;
+ }
+
+ if (ctx->deinterlacing)
+ config_edi_input_mode(ctx, 0x0);
+
+ if (ctx->sequence != 0)
+ set_srcdst_params(ctx);
+
+ return 0;
+}
+
+static void vpe_stop_streaming(struct vb2_queue *q)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+
+ vpe_dump_regs(ctx->dev);
+ vpdma_dump_regs(ctx->dev->vpdma);
+
+ vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops vpe_qops = {
+ .queue_setup = vpe_queue_setup,
+ .buf_prepare = vpe_buf_prepare,
+ .buf_queue = vpe_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vpe_start_streaming,
+ .stop_streaming = vpe_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct vpe_ctx *ctx = priv;
+ struct vpe_dev *dev = ctx->dev;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &vpe_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &dev->dev_mutex;
+ src_vq->dev = dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &vpe_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &dev->dev_mutex;
+ dst_vq->dev = dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ctrl_config vpe_bufs_per_job = {
+ .ops = &vpe_ctrl_ops,
+ .id = V4L2_CID_VPE_BUFS_PER_JOB,
+ .name = "Buffers Per Transaction",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = VPE_DEF_BUFS_PER_JOB,
+ .min = 1,
+ .max = VIDEO_MAX_FRAME,
+ .step = 1,
+};
+
+/*
+ * File operations
+ */
+static int vpe_open(struct file *file)
+{
+ struct vpe_dev *dev = video_drvdata(file);
+ struct vpe_q_data *s_q_data;
+ struct v4l2_ctrl_handler *hdl;
+ struct vpe_ctx *ctx;
+ int ret;
+
+ vpe_dbg(dev, "vpe_open\n");
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex)) {
+ ret = -ERESTARTSYS;
+ goto free_ctx;
+ }
+
+ ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
+ VPDMA_LIST_TYPE_NORMAL);
+ if (ret != 0)
+ goto unlock;
+
+ ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
+ if (ret != 0)
+ goto free_desc_list;
+
+ ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ goto free_mmr_adb;
+
+ ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ goto free_sc_h;
+
+ init_adb_hdrs(ctx);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 1);
+ v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
+ if (hdl->error) {
+ ret = hdl->error;
+ goto exit_fh;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
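+ /* default to a single plane 1920x1080 YUYV progressive source */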
+ s_q_data = &ctx->q_data[Q_DATA_SRC];
+ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ s_q_data->width = 1920;
+ s_q_data->height = 1080;
+ s_q_data->nplanes = 1;
+ s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
+ s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
+ s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
+ s_q_data->height);
+ s_q_data->colorspace = V4L2_COLORSPACE_REC709;
+ s_q_data->field = V4L2_FIELD_NONE;
+ s_q_data->c_rect.left = 0;
+ s_q_data->c_rect.top = 0;
+ s_q_data->c_rect.width = s_q_data->width;
+ s_q_data->c_rect.height = s_q_data->height;
+ s_q_data->flags = 0;
+
+ ctx->q_data[Q_DATA_DST] = *s_q_data;
+
+ set_dei_shadow_registers(ctx);
+ set_src_registers(ctx);
+ set_dst_registers(ctx);
+ ret = set_srcdst_params(ctx);
+ if (ret)
+ goto exit_fh;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto exit_fh;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ /*
+ * for now, just report the creation of the first instance; we can
+ * later optimize the driver to enable or disable clocks when the
+ * first instance is created or the last instance released
+ */
+ if (atomic_inc_return(&dev->num_instances) == 1)
+ vpe_dbg(dev, "first instance created\n");
+
+ ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
+
+ ctx->load_mmrs = true;
+
+ vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+exit_fh:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ vpdma_free_desc_buf(&ctx->sc_coeff_v);
+free_sc_h:
+ vpdma_free_desc_buf(&ctx->sc_coeff_h);
+free_mmr_adb:
+ vpdma_free_desc_buf(&ctx->mmr_adb);
+free_desc_list:
+ vpdma_free_desc_list(&ctx->desc_list);
+unlock:
+ mutex_unlock(&dev->dev_mutex);
+free_ctx:
+ kfree(ctx);
+ return ret;
+}
+
+static int vpe_release(struct file *file)
+{
+ struct vpe_dev *dev = video_drvdata(file);
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ vpe_dbg(dev, "releasing instance %p\n", ctx);
+
+ mutex_lock(&dev->dev_mutex);
+ free_mv_buffers(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
+
+ vpdma_free_desc_list(&ctx->desc_list);
+ vpdma_free_desc_buf(&ctx->mmr_adb);
+
+ vpdma_free_desc_buf(&ctx->sc_coeff_v);
+ vpdma_free_desc_buf(&ctx->sc_coeff_h);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ kfree(ctx);
+
+ /*
+ * for now, just report the release of the last instance; we can
+ * later optimize the driver to enable or disable clocks when the
+ * first instance is created or the last instance released
+ */
+ if (atomic_dec_return(&dev->num_instances) == 0)
+ vpe_dbg(dev, "last instance released\n");
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vpe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpe_open,
+ .release = vpe_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device vpe_videodev = {
+ .name = VPE_MODULE_NAME,
+ .fops = &vpe_fops,
+ .ioctl_ops = &vpe_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static int vpe_runtime_get(struct platform_device *pdev)
+{
+ int r;
+
+ dev_dbg(&pdev->dev, "vpe_runtime_get\n");
+
+ r = pm_runtime_get_sync(&pdev->dev);
+ WARN_ON(r < 0);
+ if (r)
+ pm_runtime_put_noidle(&pdev->dev);
+ return r < 0 ? r : 0;
+}
+
+static void vpe_runtime_put(struct platform_device *pdev)
+{
+ int r;
+
+ dev_dbg(&pdev->dev, "vpe_runtime_put\n");
+
+ r = pm_runtime_put_sync(&pdev->dev);
+ WARN_ON(r < 0 && r != -ENOSYS);
+}
+
+static void vpe_fw_cb(struct platform_device *pdev)
+{
+ struct vpe_dev *dev = platform_get_drvdata(pdev);
+ struct video_device *vfd;
+ int ret;
+
+ vfd = &dev->vfd;
+ *vfd = vpe_videodev;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ vpe_err(dev, "Failed to register video device\n");
+
+ vpe_set_clock_enable(dev, 0);
+ vpe_runtime_put(pdev);
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(dev->m2m_dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return;
+ }
+
+ video_set_drvdata(vfd, dev);
+ dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
+ vfd->num);
+}
+
+static int vpe_probe(struct platform_device *pdev)
+{
+ struct vpe_dev *dev;
+ int ret, irq, func;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ atomic_set(&dev->num_instances, 0);
+ mutex_init(&dev->dev_mutex);
+
+ dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vpe_top");
+ /*
+ * HACK: we get resource info from the device tree in the form of a
+ * list of VPE sub blocks. The driver currently uses only the base of
+ * vpe_top for register access; it should be changed later to access
+ * registers based on the sub block base addresses
+ */
+ dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
+ if (!dev->base) {
+ ret = -ENOMEM;
+ goto v4l2_dev_unreg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
+ dev);
+ if (ret)
+ goto v4l2_dev_unreg;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ vpe_err(dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto v4l2_dev_unreg;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = vpe_runtime_get(pdev);
+ if (ret)
+ goto rel_m2m;
+
+ /* Perform clk enable followed by reset */
+ vpe_set_clock_enable(dev, 1);
+
+ vpe_top_reset(dev);
+
+ func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
+ VPE_PID_FUNC_SHIFT);
+ vpe_dbg(dev, "VPE PID function %x\n", func);
+
+ vpe_top_vpdma_reset(dev);
+
+ dev->sc = sc_create(pdev, "sc");
+ if (IS_ERR(dev->sc)) {
+ ret = PTR_ERR(dev->sc);
+ goto runtime_put;
+ }
+
+ dev->csc = csc_create(pdev, "csc");
+ if (IS_ERR(dev->csc)) {
+ ret = PTR_ERR(dev->csc);
+ goto runtime_put;
+ }
+
+ dev->vpdma = &dev->vpdma_data;
+ ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb);
+ if (ret)
+ goto runtime_put;
+
+ return 0;
+
+runtime_put:
+ vpe_runtime_put(pdev);
+rel_m2m:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(dev->m2m_dev);
+v4l2_dev_unreg:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int vpe_remove(struct platform_device *pdev)
+{
+ struct vpe_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ vpe_set_clock_enable(dev, 0);
+ vpe_runtime_put(pdev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id vpe_of_match[] = {
+ {
+ .compatible = "ti,vpe",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vpe_of_match);
+#endif
+
+static struct platform_driver vpe_pdrv = {
+ .probe = vpe_probe,
+ .remove = vpe_remove,
+ .driver = {
+ .name = VPE_MODULE_NAME,
+ .of_match_table = of_match_ptr(vpe_of_match),
+ },
+};
+
+module_platform_driver(vpe_pdrv);
+
+MODULE_DESCRIPTION("TI VPE driver");
+MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
new file mode 100644
index 000000000..74283d79e
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_VPE_REGS_H
+#define __TI_VPE_REGS_H
+
+/* VPE register offsets and field selectors */
+
+/* VPE top level regs */
+#define VPE_PID 0x0000
+#define VPE_PID_MINOR_MASK 0x3f
+#define VPE_PID_MINOR_SHIFT 0
+#define VPE_PID_CUSTOM_MASK 0x03
+#define VPE_PID_CUSTOM_SHIFT 6
+#define VPE_PID_MAJOR_MASK 0x07
+#define VPE_PID_MAJOR_SHIFT 8
+#define VPE_PID_RTL_MASK 0x1f
+#define VPE_PID_RTL_SHIFT 11
+#define VPE_PID_FUNC_MASK 0xfff
+#define VPE_PID_FUNC_SHIFT 16
+#define VPE_PID_SCHEME_MASK 0x03
+#define VPE_PID_SCHEME_SHIFT 30
+
+#define VPE_SYSCONFIG 0x0010
+#define VPE_SYSCONFIG_IDLE_MASK 0x03
+#define VPE_SYSCONFIG_IDLE_SHIFT 2
+#define VPE_SYSCONFIG_STANDBY_MASK 0x03
+#define VPE_SYSCONFIG_STANDBY_SHIFT 4
+#define VPE_FORCE_IDLE_MODE 0
+#define VPE_NO_IDLE_MODE 1
+#define VPE_SMART_IDLE_MODE 2
+#define VPE_SMART_IDLE_WAKEUP_MODE 3
+#define VPE_FORCE_STANDBY_MODE 0
+#define VPE_NO_STANDBY_MODE 1
+#define VPE_SMART_STANDBY_MODE 2
+#define VPE_SMART_STANDBY_WAKEUP_MODE 3
+
+#define VPE_INT0_STATUS0_RAW_SET 0x0020
+#define VPE_INT0_STATUS0_RAW VPE_INT0_STATUS0_RAW_SET
+#define VPE_INT0_STATUS0_CLR 0x0028
+#define VPE_INT0_STATUS0 VPE_INT0_STATUS0_CLR
+#define VPE_INT0_ENABLE0_SET 0x0030
+#define VPE_INT0_ENABLE0 VPE_INT0_ENABLE0_SET
+#define VPE_INT0_ENABLE0_CLR 0x0038
+#define VPE_INT0_LIST0_COMPLETE (1 << 0)
+#define VPE_INT0_LIST0_NOTIFY (1 << 1)
+#define VPE_INT0_LIST1_COMPLETE (1 << 2)
+#define VPE_INT0_LIST1_NOTIFY (1 << 3)
+#define VPE_INT0_LIST2_COMPLETE (1 << 4)
+#define VPE_INT0_LIST2_NOTIFY (1 << 5)
+#define VPE_INT0_LIST3_COMPLETE (1 << 6)
+#define VPE_INT0_LIST3_NOTIFY (1 << 7)
+#define VPE_INT0_LIST4_COMPLETE (1 << 8)
+#define VPE_INT0_LIST4_NOTIFY (1 << 9)
+#define VPE_INT0_LIST5_COMPLETE (1 << 10)
+#define VPE_INT0_LIST5_NOTIFY (1 << 11)
+#define VPE_INT0_LIST6_COMPLETE (1 << 12)
+#define VPE_INT0_LIST6_NOTIFY (1 << 13)
+#define VPE_INT0_LIST7_COMPLETE (1 << 14)
+#define VPE_INT0_LIST7_NOTIFY (1 << 15)
+#define VPE_INT0_DESCRIPTOR (1 << 16)
+#define VPE_DEI_FMD_INT (1 << 18)
+
+#define VPE_INT0_STATUS1_RAW_SET 0x0024
+#define VPE_INT0_STATUS1_RAW VPE_INT0_STATUS1_RAW_SET
+#define VPE_INT0_STATUS1_CLR 0x002c
+#define VPE_INT0_STATUS1 VPE_INT0_STATUS1_CLR
+#define VPE_INT0_ENABLE1_SET 0x0034
+#define VPE_INT0_ENABLE1 VPE_INT0_ENABLE1_SET
+#define VPE_INT0_ENABLE1_CLR 0x003c
+#define VPE_INT0_CHANNEL_GROUP0 (1 << 0)
+#define VPE_INT0_CHANNEL_GROUP1 (1 << 1)
+#define VPE_INT0_CHANNEL_GROUP2 (1 << 2)
+#define VPE_INT0_CHANNEL_GROUP3 (1 << 3)
+#define VPE_INT0_CHANNEL_GROUP4 (1 << 4)
+#define VPE_INT0_CHANNEL_GROUP5 (1 << 5)
+#define VPE_INT0_CLIENT (1 << 7)
+#define VPE_DEI_ERROR_INT (1 << 16)
+#define VPE_DS1_UV_ERROR_INT (1 << 22)
+
+#define VPE_INTC_EOI 0x00a0
+
+#define VPE_CLK_ENABLE 0x0100
+#define VPE_VPEDMA_CLK_ENABLE (1 << 0)
+#define VPE_DATA_PATH_CLK_ENABLE (1 << 1)
+
+#define VPE_CLK_RESET 0x0104
+#define VPE_VPDMA_CLK_RESET_MASK 0x1
+#define VPE_VPDMA_CLK_RESET_SHIFT 0
+#define VPE_DATA_PATH_CLK_RESET_MASK 0x1
+#define VPE_DATA_PATH_CLK_RESET_SHIFT 1
+#define VPE_MAIN_RESET_MASK 0x1
+#define VPE_MAIN_RESET_SHIFT 31
+
+#define VPE_CLK_FORMAT_SELECT 0x010c
+#define VPE_CSC_SRC_SELECT_MASK 0x03
+#define VPE_CSC_SRC_SELECT_SHIFT 0
+#define VPE_RGB_OUT_SELECT (1 << 8)
+#define VPE_DS_SRC_SELECT_MASK 0x07
+#define VPE_DS_SRC_SELECT_SHIFT 9
+#define VPE_DS_BYPASS (1 << 16)
+#define VPE_COLOR_SEPARATE_422 (1 << 18)
+
+#define VPE_DS_SRC_DEI_SCALER (5 << VPE_DS_SRC_SELECT_SHIFT)
+#define VPE_CSC_SRC_DEI_SCALER (3 << VPE_CSC_SRC_SELECT_SHIFT)
+
+#define VPE_CLK_RANGE_MAP 0x011c
+#define VPE_RANGE_RANGE_MAP_Y_MASK 0x07
+#define VPE_RANGE_RANGE_MAP_Y_SHIFT 0
+#define VPE_RANGE_RANGE_MAP_UV_MASK 0x07
+#define VPE_RANGE_RANGE_MAP_UV_SHIFT 3
+#define VPE_RANGE_MAP_ON (1 << 6)
+#define VPE_RANGE_REDUCTION_ON (1 << 28)
+
+/* VPE chrominance upsampler regs */
+#define VPE_US1_R0 0x0304
+#define VPE_US2_R0 0x0404
+#define VPE_US3_R0 0x0504
+#define VPE_US_C1_MASK 0x3fff
+#define VPE_US_C1_SHIFT 2
+#define VPE_US_C0_MASK 0x3fff
+#define VPE_US_C0_SHIFT 18
+#define VPE_US_MODE_MASK 0x03
+#define VPE_US_MODE_SHIFT 16
+#define VPE_ANCHOR_FID0_C1_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C1_SHIFT 2
+#define VPE_ANCHOR_FID0_C0_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C0_SHIFT 18
+
+#define VPE_US1_R1 0x0308
+#define VPE_US2_R1 0x0408
+#define VPE_US3_R1 0x0508
+#define VPE_ANCHOR_FID0_C3_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C3_SHIFT 2
+#define VPE_ANCHOR_FID0_C2_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C2_SHIFT 18
+
+#define VPE_US1_R2 0x030c
+#define VPE_US2_R2 0x040c
+#define VPE_US3_R2 0x050c
+#define VPE_INTERP_FID0_C1_MASK 0x3fff
+#define VPE_INTERP_FID0_C1_SHIFT 2
+#define VPE_INTERP_FID0_C0_MASK 0x3fff
+#define VPE_INTERP_FID0_C0_SHIFT 18
+
+#define VPE_US1_R3 0x0310
+#define VPE_US2_R3 0x0410
+#define VPE_US3_R3 0x0510
+#define VPE_INTERP_FID0_C3_MASK 0x3fff
+#define VPE_INTERP_FID0_C3_SHIFT 2
+#define VPE_INTERP_FID0_C2_MASK 0x3fff
+#define VPE_INTERP_FID0_C2_SHIFT 18
+
+#define VPE_US1_R4 0x0314
+#define VPE_US2_R4 0x0414
+#define VPE_US3_R4 0x0514
+#define VPE_ANCHOR_FID1_C1_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C1_SHIFT 2
+#define VPE_ANCHOR_FID1_C0_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C0_SHIFT 18
+
+#define VPE_US1_R5 0x0318
+#define VPE_US2_R5 0x0418
+#define VPE_US3_R5 0x0518
+#define VPE_ANCHOR_FID1_C3_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C3_SHIFT 2
+#define VPE_ANCHOR_FID1_C2_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C2_SHIFT 18
+
+#define VPE_US1_R6 0x031c
+#define VPE_US2_R6 0x041c
+#define VPE_US3_R6 0x051c
+#define VPE_INTERP_FID1_C1_MASK 0x3fff
+#define VPE_INTERP_FID1_C1_SHIFT 2
+#define VPE_INTERP_FID1_C0_MASK 0x3fff
+#define VPE_INTERP_FID1_C0_SHIFT 18
+
+#define VPE_US1_R7 0x0320
+#define VPE_US2_R7 0x0420
+#define VPE_US3_R7 0x0520
+#define VPE_INTERP_FID1_C3_MASK 0x3fff
+#define VPE_INTERP_FID1_C3_SHIFT 2
+#define VPE_INTERP_FID1_C2_MASK 0x3fff
+#define VPE_INTERP_FID1_C2_SHIFT 18
+
+/* VPE de-interlacer regs */
+#define VPE_DEI_FRAME_SIZE 0x0600
+#define VPE_DEI_WIDTH_MASK 0x07ff
+#define VPE_DEI_WIDTH_SHIFT 0
+#define VPE_DEI_HEIGHT_MASK 0x07ff
+#define VPE_DEI_HEIGHT_SHIFT 16
+#define VPE_DEI_INTERLACE_BYPASS (1 << 29)
+#define VPE_DEI_FIELD_FLUSH (1 << 30)
+#define VPE_DEI_PROGRESSIVE (1 << 31)
+
+#define VPE_MDT_BYPASS 0x0604
+#define VPE_MDT_TEMPMAX_BYPASS (1 << 0)
+#define VPE_MDT_SPATMAX_BYPASS (1 << 1)
+
+#define VPE_MDT_SF_THRESHOLD 0x0608
+#define VPE_MDT_SF_SC_THR1_MASK 0xff
+#define VPE_MDT_SF_SC_THR1_SHIFT 0
+#define VPE_MDT_SF_SC_THR2_MASK 0xff
+#define VPE_MDT_SF_SC_THR2_SHIFT 0
+#define VPE_MDT_SF_SC_THR3_MASK 0xff
+#define VPE_MDT_SF_SC_THR3_SHIFT 0
+
+#define VPE_EDI_CONFIG 0x060c
+#define VPE_EDI_INP_MODE_MASK 0x03
+#define VPE_EDI_INP_MODE_SHIFT 0
+#define VPE_EDI_ENABLE_3D (1 << 2)
+#define VPE_EDI_ENABLE_CHROMA_3D (1 << 3)
+#define VPE_EDI_CHROMA3D_COR_THR_MASK 0xff
+#define VPE_EDI_CHROMA3D_COR_THR_SHIFT 8
+#define VPE_EDI_DIR_COR_LOWER_THR_MASK 0xff
+#define VPE_EDI_DIR_COR_LOWER_THR_SHIFT 16
+#define VPE_EDI_COR_SCALE_FACTOR_MASK 0xff
+#define VPE_EDI_COR_SCALE_FACTOR_SHIFT 23
+
+#define VPE_DEI_EDI_LUT_R0 0x0610
+#define VPE_EDI_LUT0_MASK 0x1f
+#define VPE_EDI_LUT0_SHIFT 0
+#define VPE_EDI_LUT1_MASK 0x1f
+#define VPE_EDI_LUT1_SHIFT 8
+#define VPE_EDI_LUT2_MASK 0x1f
+#define VPE_EDI_LUT2_SHIFT 16
+#define VPE_EDI_LUT3_MASK 0x1f
+#define VPE_EDI_LUT3_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R1 0x0614
+#define VPE_EDI_LUT0_MASK 0x1f
+#define VPE_EDI_LUT0_SHIFT 0
+#define VPE_EDI_LUT1_MASK 0x1f
+#define VPE_EDI_LUT1_SHIFT 8
+#define VPE_EDI_LUT2_MASK 0x1f
+#define VPE_EDI_LUT2_SHIFT 16
+#define VPE_EDI_LUT3_MASK 0x1f
+#define VPE_EDI_LUT3_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R2 0x0618
+#define VPE_EDI_LUT4_MASK 0x1f
+#define VPE_EDI_LUT4_SHIFT 0
+#define VPE_EDI_LUT5_MASK 0x1f
+#define VPE_EDI_LUT5_SHIFT 8
+#define VPE_EDI_LUT6_MASK 0x1f
+#define VPE_EDI_LUT6_SHIFT 16
+#define VPE_EDI_LUT7_MASK 0x1f
+#define VPE_EDI_LUT7_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R3 0x061c
+#define VPE_EDI_LUT8_MASK 0x1f
+#define VPE_EDI_LUT8_SHIFT 0
+#define VPE_EDI_LUT9_MASK 0x1f
+#define VPE_EDI_LUT9_SHIFT 8
+#define VPE_EDI_LUT10_MASK 0x1f
+#define VPE_EDI_LUT10_SHIFT 16
+#define VPE_EDI_LUT11_MASK 0x1f
+#define VPE_EDI_LUT11_SHIFT 24
+
+#define VPE_DEI_FMD_WINDOW_R0 0x0620
+#define VPE_FMD_WINDOW_MINX_MASK 0x07ff
+#define VPE_FMD_WINDOW_MINX_SHIFT 0
+#define VPE_FMD_WINDOW_MAXX_MASK 0x07ff
+#define VPE_FMD_WINDOW_MAXX_SHIFT 16
+#define VPE_FMD_WINDOW_ENABLE (1 << 31)
+
+#define VPE_DEI_FMD_WINDOW_R1 0x0624
+#define VPE_FMD_WINDOW_MINY_MASK 0x07ff
+#define VPE_FMD_WINDOW_MINY_SHIFT 0
+#define VPE_FMD_WINDOW_MAXY_MASK 0x07ff
+#define VPE_FMD_WINDOW_MAXY_SHIFT 16
+
+#define VPE_DEI_FMD_CONTROL_R0 0x0628
+#define VPE_FMD_ENABLE (1 << 0)
+#define VPE_FMD_LOCK (1 << 1)
+#define VPE_FMD_JAM_DIR (1 << 2)
+#define VPE_FMD_BED_ENABLE (1 << 3)
+#define VPE_FMD_CAF_FIELD_THR_MASK 0xff
+#define VPE_FMD_CAF_FIELD_THR_SHIFT 16
+#define VPE_FMD_CAF_LINE_THR_MASK 0xff
+#define VPE_FMD_CAF_LINE_THR_SHIFT 24
+
+#define VPE_DEI_FMD_CONTROL_R1 0x062c
+#define VPE_FMD_CAF_THR_MASK 0x000fffff
+#define VPE_FMD_CAF_THR_SHIFT 0
+
+#define VPE_DEI_FMD_STATUS_R0 0x0630
+#define VPE_FMD_CAF_MASK 0x000fffff
+#define VPE_FMD_CAF_SHIFT 0
+#define VPE_FMD_RESET (1 << 24)
+
+#define VPE_DEI_FMD_STATUS_R1 0x0634
+#define VPE_FMD_FIELD_DIFF_MASK 0x0fffffff
+#define VPE_FMD_FIELD_DIFF_SHIFT 0
+
+#define VPE_DEI_FMD_STATUS_R2 0x0638
+#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff
+#define VPE_FMD_FRAME_DIFF_SHIFT 0
+
+#endif
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
new file mode 100644
index 000000000..c8bb82fe0
--- /dev/null
+++ b/drivers/media/platform/via-camera.c
@@ -0,0 +1,1483 @@
+/*
+ * Driver for the VIA Chrome integrated camera controller.
+ *
+ * Copyright 2009,2010 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under the terms of the GNU General Public License, version 2
+ *
+ * This work was supported by the One Laptop Per Child project
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/i2c/ov7670.h>
+#include <media/videobuf-dma-sg.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_qos.h>
+#include <linux/via-core.h>
+#include <linux/via-gpio.h>
+#include <linux/via_i2c.h>
+
+#ifdef CONFIG_X86
+#include <asm/olpc.h>
+#else
+#define machine_is_olpc(x) 0
+#endif
+
+#include "via-camera.h"
+
+MODULE_ALIAS("platform:viafb-camera");
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("VIA framebuffer-based camera controller driver");
+MODULE_LICENSE("GPL");
+
+static bool flip_image;
+module_param(flip_image, bool, 0444);
+MODULE_PARM_DESC(flip_image,
+ "If set, the sensor will be instructed to flip the image vertically.");
+
+static bool override_serial;
+module_param(override_serial, bool, 0444);
+MODULE_PARM_DESC(override_serial,
+ "The camera driver will normally refuse to load if the XO 1.5 serial port is enabled. Set this option to force-enable the camera.");
+
+/*
+ * The structure describing our camera.
+ */
+enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 };
+
+struct via_camera {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct video_device vdev;
+ struct v4l2_subdev *sensor;
+ struct platform_device *platdev;
+ struct viafb_dev *viadev;
+ struct mutex lock;
+ enum viacam_opstate opstate;
+ unsigned long flags;
+ struct pm_qos_request qos_request;
+ /*
+ * GPIO info for power/reset management
+ */
+ int power_gpio;
+ int reset_gpio;
+ /*
+ * I/O memory stuff.
+ */
+ void __iomem *mmio; /* Where the registers live */
+ void __iomem *fbmem; /* Frame buffer memory */
+ u32 fb_offset; /* Reserved memory offset (FB) */
+ /*
+ * Capture buffers and related. The controller supports
+ * up to three, so that's what we have here. These buffers
+ * live in frame buffer memory, so we don't call them "DMA".
+ */
+ unsigned int cb_offsets[3]; /* offsets into fb mem */
+ u8 __iomem *cb_addrs[3]; /* Kernel-space addresses */
+ int n_cap_bufs; /* How many are we using? */
+ int next_buf;
+ struct videobuf_queue vb_queue;
+ struct list_head buffer_queue; /* prot. by reg_lock */
+ /*
+ * User tracking.
+ */
+ int users;
+ struct file *owner;
+ /*
+ * Video format information. sensor_format is kept in a form
+ * that we can use to pass to the sensor. We always run the
+ * sensor in VGA resolution, though, and let the controller
+ * downscale things if need be. So we keep the "real"
+ * dimensions separately.
+ */
+ struct v4l2_pix_format sensor_format;
+ struct v4l2_pix_format user_format;
+ u32 mbus_code;
+};
+
+/*
+ * Yes, this is a hack, but there's only going to be one of these
+ * on any system we know of.
+ */
+static struct via_camera *via_cam_info;
+
+/*
+ * Flag values, manipulated with bitops
+ */
+#define CF_DMA_ACTIVE 0 /* A frame is incoming */
+#define CF_CONFIG_NEEDED 1 /* Must configure hardware */
+
+
+/*
+ * Nasty ugly v4l2 boilerplate.
+ */
+#define sensor_call(cam, optype, func, args...) \
+ v4l2_subdev_call(cam->sensor, optype, func, ##args)
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err(&(cam)->platdev->dev, fmt, ##arg)
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn(&(cam)->platdev->dev, fmt, ##arg)
+#define cam_dbg(cam, fmt, arg...) \
+ dev_dbg(&(cam)->platdev->dev, fmt, ##arg)
+
+/*
+ * Format handling. This is ripped almost directly from Hans's changes
+ * to cafe_ccic.c. It's a little unfortunate; until this change, we
+ * didn't need to know anything about the format except its byte depth;
+ * now this information must be managed at this level too.
+ */
+static struct via_format {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ u32 mbus_code;
+} via_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ },
+ /* RGB444 and Bayer should be doable, but have never been
+ tested with this driver. RGB565 seems to work at the default
+ resolution, but results in color corruption when being scaled by
+ viacam_set_scaled(), and is disabled as a result. */
+};
+#define N_VIA_FMTS ARRAY_SIZE(via_formats)
+
+static struct via_format *via_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_VIA_FMTS; i++)
+ if (via_formats[i].pixelformat == pixelformat)
+ return via_formats + i;
+ /* Not found? Then return the first format. */
+ return via_formats;
+}
+
+
+/*--------------------------------------------------------------------------*/
+/*
+ * Sensor power/reset management. This piece is OLPC-specific for
+ * sure; other configurations will have things connected differently.
+ */
+static int via_sensor_power_setup(struct via_camera *cam)
+{
+ int ret;
+
+ cam->power_gpio = viafb_gpio_lookup("VGPIO3");
+ cam->reset_gpio = viafb_gpio_lookup("VGPIO2");
+ if (!gpio_is_valid(cam->power_gpio) || !gpio_is_valid(cam->reset_gpio)) {
+ dev_err(&cam->platdev->dev, "Unable to find GPIO lines\n");
+ return -EINVAL;
+ }
+ ret = gpio_request(cam->power_gpio, "viafb-camera");
+ if (ret) {
+ dev_err(&cam->platdev->dev, "Unable to request power GPIO\n");
+ return ret;
+ }
+ ret = gpio_request(cam->reset_gpio, "viafb-camera");
+ if (ret) {
+ dev_err(&cam->platdev->dev, "Unable to request reset GPIO\n");
+ gpio_free(cam->power_gpio);
+ return ret;
+ }
+ gpio_direction_output(cam->power_gpio, 0);
+ gpio_direction_output(cam->reset_gpio, 0);
+ return 0;
+}
+
+/*
+ * Power up the sensor and perform the reset dance.
+ */
+static void via_sensor_power_up(struct via_camera *cam)
+{
+ gpio_set_value(cam->power_gpio, 1);
+ gpio_set_value(cam->reset_gpio, 0);
+ msleep(20); /* Probably excessive */
+ gpio_set_value(cam->reset_gpio, 1);
+ msleep(20);
+}
+
+static void via_sensor_power_down(struct via_camera *cam)
+{
+ gpio_set_value(cam->power_gpio, 0);
+ gpio_set_value(cam->reset_gpio, 0);
+}
+
+
+static void via_sensor_power_release(struct via_camera *cam)
+{
+ via_sensor_power_down(cam);
+ gpio_free(cam->power_gpio);
+ gpio_free(cam->reset_gpio);
+}
+
+/* --------------------------------------------------------------------------*/
+/* Sensor ops */
+
+/*
+ * Manage the ov7670 "flip" bit, which needs special help.
+ */
+static int viacam_set_flip(struct via_camera *cam)
+{
+ struct v4l2_control ctrl;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_VFLIP;
+ ctrl.value = flip_image;
+ return v4l2_s_ctrl(NULL, cam->sensor->ctrl_handler, &ctrl);
+}
+
+/*
+ * Configure the sensor. It's up to the caller to ensure
+ * that the camera is in the correct operating state.
+ */
+static int viacam_configure_sensor(struct via_camera *cam)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ v4l2_fill_mbus_format(&format.format, &cam->sensor_format, cam->mbus_code);
+ ret = sensor_call(cam, core, init, 0);
+ if (ret == 0)
+ ret = sensor_call(cam, pad, set_fmt, NULL, &format);
+ /*
+ * OV7670 does weird things if flip is set *before* format...
+ */
+ if (ret == 0)
+ ret = viacam_set_flip(cam);
+ return ret;
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/*
+ * Some simple register accessors; they assume that the lock is held.
+ *
+ * Should we want to support the second capture engine, we could
+ * hide the register difference by adding 0x1000 to registers in the
+ * 0x300-350 range.
+ */
+static inline void viacam_write_reg(struct via_camera *cam,
+ int reg, int value)
+{
+ iowrite32(value, cam->mmio + reg);
+}
+
+static inline int viacam_read_reg(struct via_camera *cam, int reg)
+{
+ return ioread32(cam->mmio + reg);
+}
+
+static inline void viacam_write_reg_mask(struct via_camera *cam,
+ int reg, int value, int mask)
+{
+ int tmp = viacam_read_reg(cam, reg);
+
+ tmp = (tmp & ~mask) | (value & mask);
+ viacam_write_reg(cam, reg, tmp);
+}
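+
+/*
+ * Illustrative sketch, not part of this driver: if the second capture
+ * engine were ever supported, the 0x1000 register offset could be
+ * hidden behind a hypothetical engine-aware accessor such as:
+ *
+ *	static inline void viacam_write_reg_n(struct via_camera *cam,
+ *			int engine, int reg, int value)
+ *	{
+ *		iowrite32(value, cam->mmio + reg + (engine ? 0x1000 : 0));
+ *	}
+ *
+ * Everything below only ever talks to the first engine.
+ */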
+
+
+/* --------------------------------------------------------------------------*/
+/* Interrupt management and handling */
+
+static irqreturn_t viacam_quick_irq(int irq, void *data)
+{
+ struct via_camera *cam = data;
+ irqreturn_t ret = IRQ_NONE;
+ int icv;
+
+ /*
+ * All we do here is to clear the interrupts and tell
+ * the handler thread to wake up.
+ */
+ spin_lock(&cam->viadev->reg_lock);
+ icv = viacam_read_reg(cam, VCR_INTCTRL);
+ if (icv & VCR_IC_EAV) {
+ icv |= VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL;
+ viacam_write_reg(cam, VCR_INTCTRL, icv);
+ ret = IRQ_WAKE_THREAD;
+ }
+ spin_unlock(&cam->viadev->reg_lock);
+ return ret;
+}
+
+/*
+ * Find the next videobuf buffer which has somebody waiting on it.
+ */
+static struct videobuf_buffer *viacam_next_buffer(struct via_camera *cam)
+{
+ unsigned long flags;
+ struct videobuf_buffer *buf = NULL;
+
+ spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+ if (cam->opstate != S_RUNNING)
+ goto out;
+ if (list_empty(&cam->buffer_queue))
+ goto out;
+ buf = list_entry(cam->buffer_queue.next, struct videobuf_buffer, queue);
+ if (!waitqueue_active(&buf->done)) {/* Nobody waiting */
+ buf = NULL;
+ goto out;
+ }
+ list_del(&buf->queue);
+ buf->state = VIDEOBUF_ACTIVE;
+out:
+ spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+ return buf;
+}
+
+/*
+ * The threaded IRQ handler.
+ */
+static irqreturn_t viacam_irq(int irq, void *data)
+{
+ int bufn;
+ struct videobuf_buffer *vb;
+ struct via_camera *cam = data;
+ struct videobuf_dmabuf *vdma;
+
+ /*
+ * If there is no place to put the data frame, don't bother
+ * with anything else.
+ */
+ vb = viacam_next_buffer(cam);
+ if (vb == NULL)
+ goto done;
+ /*
+ * Figure out which buffer we just completed.
+ */
+ bufn = (viacam_read_reg(cam, VCR_INTCTRL) & VCR_IC_ACTBUF) >> 3;
+ bufn -= 1;
+ if (bufn < 0)
+ bufn = cam->n_cap_bufs - 1;
+ /*
+ * Copy over the data and let any waiters know.
+ */
+ vdma = videobuf_to_dma(vb);
+ viafb_dma_copy_out_sg(cam->cb_offsets[bufn], vdma->sglist, vdma->sglen);
+ vb->state = VIDEOBUF_DONE;
+ vb->size = cam->user_format.sizeimage;
+ wake_up(&vb->done);
+done:
+ return IRQ_HANDLED;
+}
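+
+/*
+ * Worked example: the code above assumes VCR_IC_ACTBUF names the buffer
+ * the engine is currently filling, so the frame that just completed is
+ * the previous one.  With triple buffering, an ACTBUF value of 2 means
+ * buffer 1 was just finished, while a value of 0 wraps around to
+ * buffer n_cap_bufs - 1.
+ */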
+
+
+/*
+ * These functions must mess around with the general interrupt
+ * control register, which is relevant to much more than just the
+ * camera. Nothing else uses interrupts, though, as of this writing.
+ * Should that situation change, we'll have to improve support at
+ * the via-core level.
+ */
+static void viacam_int_enable(struct via_camera *cam)
+{
+ viacam_write_reg(cam, VCR_INTCTRL,
+ VCR_IC_INTEN|VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL);
+ viafb_irq_enable(VDE_I_C0AVEN);
+}
+
+static void viacam_int_disable(struct via_camera *cam)
+{
+ viafb_irq_disable(VDE_I_C0AVEN);
+ viacam_write_reg(cam, VCR_INTCTRL, 0);
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/* Controller operations */
+
+/*
+ * Set up our capture buffers in framebuffer memory.
+ */
+static int viacam_ctlr_cbufs(struct via_camera *cam)
+{
+ int nbuf = cam->viadev->camera_fbmem_size/cam->sensor_format.sizeimage;
+ int i;
+ unsigned int offset;
+
+ /*
+ * See how many buffers we can work with.
+ */
+ if (nbuf >= 3) {
+ cam->n_cap_bufs = 3;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_3BUFS,
+ VCR_CI_3BUFS);
+ } else if (nbuf == 2) {
+ cam->n_cap_bufs = 2;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_3BUFS);
+ } else {
+ cam_warn(cam, "Insufficient frame buffer memory\n");
+ return -ENOMEM;
+ }
+ /*
+ * Set them up.
+ */
+ offset = cam->fb_offset;
+ for (i = 0; i < cam->n_cap_bufs; i++) {
+ cam->cb_offsets[i] = offset;
+ cam->cb_addrs[i] = cam->fbmem + offset;
+ viacam_write_reg(cam, VCR_VBUF1 + i*4, offset & VCR_VBUF_MASK);
+ offset += cam->sensor_format.sizeimage;
+ }
+ return 0;
+}
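+
+/*
+ * Worked example: with the default VGA YUYV sensor format, sizeimage is
+ * 640 * 480 * 2 = 614400 bytes, so 2 MB of reserved framebuffer memory
+ * gives nbuf = 2097152 / 614400 = 3 and triple buffering is selected,
+ * while 1.5 MB would give nbuf = 2.
+ */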
+
+/*
+ * Set the scaling register for downscaling the image.
+ *
+ * This register works like this... Vertical scaling is enabled
+ * by bit 26; if that bit is set, downscaling is controlled by the
+ * value in bits 16:25. Those bits are divided by 1024 to get
+ * the scaling factor; setting just bit 25 thus cuts the height
+ * in half.
+ *
+ * Horizontal scaling works about the same, but it's enabled by
+ * bit 11, with bits 0:10 giving the numerator of a fraction
+ * (over 2048) for the scaling value.
+ *
+ * This function is naive in that, if the user departs from
+ * the 3x4 VGA scaling factor, the image will distort. We
+ * could work around that if it really seemed important.
+ */
+static void viacam_set_scale(struct via_camera *cam)
+{
+ unsigned int avscale;
+ int sf;
+
+ if (cam->user_format.width == VGA_WIDTH)
+ avscale = 0;
+ else {
+ sf = (cam->user_format.width*2048)/VGA_WIDTH;
+ avscale = VCR_AVS_HEN | sf;
+ }
+ if (cam->user_format.height < VGA_HEIGHT) {
+ sf = (1024*cam->user_format.height)/VGA_HEIGHT;
+ avscale |= VCR_AVS_VEN | (sf << 16);
+ }
+ viacam_write_reg(cam, VCR_AVSCALE, avscale);
+}
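+
+/*
+ * Worked example: for a hypothetical 320x240 user format the code above
+ * computes
+ *
+ *	horizontal: sf = (320 * 2048) / 640 = 1024, so avscale = VCR_AVS_HEN | 0x400
+ *	vertical:   sf = (1024 * 240) / 480 = 512, so avscale |= VCR_AVS_VEN | (0x200 << 16)
+ *
+ * i.e. both fractions equal one half and the VGA sensor image is scaled
+ * down to half size in each dimension.
+ */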
+
+
+/*
+ * Configure image-related information into the capture engine.
+ */
+static void viacam_ctlr_image(struct via_camera *cam)
+{
+ int cicreg;
+
+ /*
+ * Disable clock before messing with stuff - from the via
+ * sample driver.
+ */
+ viacam_write_reg(cam, VCR_CAPINTC, ~(VCR_CI_ENABLE|VCR_CI_CLKEN));
+ /*
+ * Set up the controller for VGA resolution, modulo magic
+ * offsets from the via sample driver.
+ */
+ viacam_write_reg(cam, VCR_HORRANGE, 0x06200120);
+ viacam_write_reg(cam, VCR_VERTRANGE, 0x01de0000);
+ viacam_set_scale(cam);
+ /*
+ * Image size info.
+ */
+ viacam_write_reg(cam, VCR_MAXDATA,
+ (cam->sensor_format.height << 16) |
+ (cam->sensor_format.bytesperline >> 3));
+ viacam_write_reg(cam, VCR_MAXVBI, 0);
+ viacam_write_reg(cam, VCR_VSTRIDE,
+ cam->user_format.bytesperline & VCR_VS_STRIDE);
+ /*
+ * Set up the capture interface control register,
+ * everything but the "go" bit.
+ *
+ * The FIFO threshold is a bit of a magic number; 8 is what
+ * VIA's sample code uses.
+ */
+ cicreg = VCR_CI_CLKEN |
+ 0x08000000 | /* FIFO threshold */
+ VCR_CI_FLDINV | /* OLPC-specific? */
+ VCR_CI_VREFINV | /* OLPC-specific? */
+ VCR_CI_DIBOTH | /* Capture both fields */
+ VCR_CI_CCIR601_8;
+ if (cam->n_cap_bufs == 3)
+ cicreg |= VCR_CI_3BUFS;
+ /*
+ * YUV formats need different byte swapping than RGB.
+ */
+ if (cam->user_format.pixelformat == V4L2_PIX_FMT_YUYV)
+ cicreg |= VCR_CI_YUYV;
+ else
+ cicreg |= VCR_CI_UYVY;
+ viacam_write_reg(cam, VCR_CAPINTC, cicreg);
+}
+
+
+static int viacam_config_controller(struct via_camera *cam)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+ ret = viacam_ctlr_cbufs(cam);
+ if (!ret)
+ viacam_ctlr_image(cam);
+ spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+ clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+ return ret;
+}
+
+/*
+ * Make it start grabbing data.
+ */
+static void viacam_start_engine(struct via_camera *cam)
+{
+ spin_lock_irq(&cam->viadev->reg_lock);
+ cam->next_buf = 0;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_ENABLE, VCR_CI_ENABLE);
+ viacam_int_enable(cam);
+ (void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+ cam->opstate = S_RUNNING;
+ spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+static void viacam_stop_engine(struct via_camera *cam)
+{
+ spin_lock_irq(&cam->viadev->reg_lock);
+ viacam_int_disable(cam);
+ viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_ENABLE);
+ (void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+ cam->opstate = S_IDLE;
+ spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+/* --------------------------------------------------------------------------*/
+/* Videobuf callback ops */
+
+/*
+ * buffer_setup. The purpose of this one would appear to be to tell
+ * videobuf how big a single image is. It's also evidently up to us
+ * to put some sort of limit on the maximum number of buffers allowed.
+ */
+static int viacam_vb_buf_setup(struct videobuf_queue *q,
+ unsigned int *count, unsigned int *size)
+{
+ struct via_camera *cam = q->priv_data;
+
+ *size = cam->user_format.sizeimage;
+ if (*count == 0 || *count > 6) /* Arbitrary number */
+ *count = 6;
+ return 0;
+}
+
+/*
+ * Prepare a buffer.
+ */
+static int viacam_vb_buf_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct via_camera *cam = q->priv_data;
+
+ vb->size = cam->user_format.sizeimage;
+ vb->width = cam->user_format.width; /* bytesperline???? */
+ vb->height = cam->user_format.height;
+ vb->field = field;
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ int ret = videobuf_iolock(q, vb, NULL);
+ if (ret)
+ return ret;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ return 0;
+}
+
+/*
+ * We've got a buffer to put data into.
+ *
+ * FIXME: check for a running engine and valid buffers?
+ */
+static void viacam_vb_buf_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct via_camera *cam = q->priv_data;
+
+ /*
+ * Note that videobuf holds the lock when it calls
+ * us, so we need not (indeed, cannot) take it here.
+ */
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &cam->buffer_queue);
+}
+
+/*
+ * Free a buffer.
+ */
+static void viacam_vb_buf_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct via_camera *cam = q->priv_data;
+
+ videobuf_dma_unmap(&cam->platdev->dev, videobuf_to_dma(vb));
+ videobuf_dma_free(videobuf_to_dma(vb));
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static const struct videobuf_queue_ops viacam_vb_ops = {
+ .buf_setup = viacam_vb_buf_setup,
+ .buf_prepare = viacam_vb_buf_prepare,
+ .buf_queue = viacam_vb_buf_queue,
+ .buf_release = viacam_vb_buf_release,
+};
+
+/* --------------------------------------------------------------------------*/
+/* File operations */
+
+static int viacam_open(struct file *filp)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ filp->private_data = cam;
+ /*
+ * Note the new user. If this is the first one, we'll also
+ * need to power up the sensor.
+ */
+ mutex_lock(&cam->lock);
+ if (cam->users == 0) {
+ int ret = viafb_request_dma();
+
+ if (ret) {
+ mutex_unlock(&cam->lock);
+ return ret;
+ }
+ via_sensor_power_up(cam);
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ /*
+ * Hook into videobuf. Evidently this cannot fail.
+ */
+ videobuf_queue_sg_init(&cam->vb_queue, &viacam_vb_ops,
+ &cam->platdev->dev, &cam->viadev->reg_lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), cam, NULL);
+ }
+ (cam->users)++;
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+static int viacam_release(struct file *filp)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ mutex_lock(&cam->lock);
+ (cam->users)--;
+ /*
+ * If the "owner" is closing, shut down any ongoing
+ * operations.
+ */
+ if (filp == cam->owner) {
+ videobuf_stop(&cam->vb_queue);
+ /*
+ * We don't hold the spinlock here, but, if release()
+ * is being called by the owner, nobody else will
+ * be changing the state. And an extra stop would
+ * not hurt anyway.
+ */
+ if (cam->opstate != S_IDLE)
+ viacam_stop_engine(cam);
+ cam->owner = NULL;
+ }
+ /*
+ * Last one out needs to turn out the lights.
+ */
+ if (cam->users == 0) {
+ videobuf_mmap_free(&cam->vb_queue);
+ via_sensor_power_down(cam);
+ viafb_release_dma();
+ }
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+/*
+ * Read a frame from the device.
+ */
+static ssize_t viacam_read(struct file *filp, char __user *buffer,
+ size_t len, loff_t *pos)
+{
+ struct via_camera *cam = video_drvdata(filp);
+ int ret;
+
+ mutex_lock(&cam->lock);
+ /*
+ * Enforce the V4L2 "only one owner gets to read data" rule.
+ */
+ if (cam->owner && cam->owner != filp) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ cam->owner = filp;
+ /*
+ * Do we need to configure the hardware?
+ */
+ if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+ ret = viacam_configure_sensor(cam);
+ if (!ret)
+ ret = viacam_config_controller(cam);
+ if (ret)
+ goto out_unlock;
+ }
+ /*
+ * Fire up the capture engine, then have videobuf do
+ * the heavy lifting. Someday it would be good to avoid
+ * stopping and restarting the engine each time.
+ */
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ viacam_start_engine(cam);
+ ret = videobuf_read_stream(&cam->vb_queue, buffer, len, pos, 0,
+ filp->f_flags & O_NONBLOCK);
+ viacam_stop_engine(cam);
+ /* videobuf_stop() ?? */
+
+out_unlock:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static __poll_t viacam_poll(struct file *filp, struct poll_table_struct *pt)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ return videobuf_poll_stream(filp, &cam->vb_queue, pt);
+}
+
+
+static int viacam_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ return videobuf_mmap_mapper(&cam->vb_queue, vma);
+}
+
+
+
+static const struct v4l2_file_operations viacam_fops = {
+ .owner = THIS_MODULE,
+ .open = viacam_open,
+ .release = viacam_release,
+ .read = viacam_read,
+ .poll = viacam_poll,
+ .mmap = viacam_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * The long list of v4l2 ioctl ops
+ */
+
+/*
+ * Only one input.
+ */
+static int viacam_enum_input(struct file *filp, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = V4L2_STD_ALL; /* Not sure what should go here */
+ strcpy(input->name, "Camera");
+ return 0;
+}
+
+static int viacam_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int viacam_s_input(struct file *filp, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int viacam_s_std(struct file *filp, void *priv, v4l2_std_id std)
+{
+ return 0;
+}
+
+static int viacam_g_std(struct file *filp, void *priv, v4l2_std_id *std)
+{
+ *std = V4L2_STD_NTSC_M;
+ return 0;
+}
+
+/*
+ * Video format stuff. Here is our default format until
+ * user space messes with things.
+ */
+static const struct v4l2_pix_format viacam_def_pix_format = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = VGA_WIDTH * 2,
+ .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+};
+
+static const u32 via_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+
+static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index >= N_VIA_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, via_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = via_formats[fmt->index].pixelformat;
+ return 0;
+}
+
+/*
+ * Figure out proper image dimensions, but always force the
+ * sensor to VGA.
+ */
+static void viacam_fmt_pre(struct v4l2_pix_format *userfmt,
+ struct v4l2_pix_format *sensorfmt)
+{
+ *sensorfmt = *userfmt;
+ if (userfmt->width < QCIF_WIDTH || userfmt->height < QCIF_HEIGHT) {
+ userfmt->width = QCIF_WIDTH;
+ userfmt->height = QCIF_HEIGHT;
+ }
+ if (userfmt->width > VGA_WIDTH || userfmt->height > VGA_HEIGHT) {
+ userfmt->width = VGA_WIDTH;
+ userfmt->height = VGA_HEIGHT;
+ }
+ sensorfmt->width = VGA_WIDTH;
+ sensorfmt->height = VGA_HEIGHT;
+}
+
+static void viacam_fmt_post(struct v4l2_pix_format *userfmt,
+ struct v4l2_pix_format *sensorfmt)
+{
+ struct via_format *f = via_find_format(userfmt->pixelformat);
+
+ sensorfmt->bytesperline = sensorfmt->width * f->bpp;
+ sensorfmt->sizeimage = sensorfmt->height * sensorfmt->bytesperline;
+ userfmt->pixelformat = sensorfmt->pixelformat;
+ userfmt->field = sensorfmt->field;
+ userfmt->bytesperline = 2 * userfmt->width;
+ userfmt->sizeimage = userfmt->bytesperline * userfmt->height;
+}
+
+
+/*
+ * The real work of figuring out a workable format.
+ */
+static int viacam_do_try_fmt(struct via_camera *cam,
+ struct v4l2_pix_format *upix, struct v4l2_pix_format *spix)
+{
+ int ret;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct via_format *f = via_find_format(upix->pixelformat);
+
+ upix->pixelformat = f->pixelformat;
+ viacam_fmt_pre(upix, spix);
+ v4l2_fill_mbus_format(&format.format, spix, f->mbus_code);
+ ret = sensor_call(cam, pad, set_fmt, &pad_cfg, &format);
+ v4l2_fill_pix_format(spix, &format.format);
+ viacam_fmt_post(upix, spix);
+ return ret;
+}
+
+
+
+static int viacam_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+ struct v4l2_format sfmt;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static int viacam_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+
+ mutex_lock(&cam->lock);
+ fmt->fmt.pix = cam->user_format;
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+ int ret;
+ struct v4l2_format sfmt;
+ struct via_format *f = via_find_format(fmt->fmt.pix.pixelformat);
+
+ /*
+ * Camera must be idle or we can't mess with the
+ * video setup.
+ */
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_IDLE) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+ * Let the sensor code look over and tweak the
+ * requested formatting.
+ */
+ ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+ if (ret)
+ goto out;
+ /*
+ * OK, let's commit to the new format.
+ */
+ cam->user_format = fmt->fmt.pix;
+ cam->sensor_format = sfmt.fmt.pix;
+ cam->mbus_code = f->mbus_code;
+ ret = viacam_configure_sensor(cam);
+ if (!ret)
+ ret = viacam_config_controller(cam);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+static int viacam_querycap(struct file *filp, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "via-camera");
+ strcpy(cap->card, "via-camera");
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/*
+ * Streaming operations - pure videobuf stuff.
+ */
+static int viacam_reqbufs(struct file *filp, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_reqbufs(&cam->vb_queue, rb);
+}
+
+static int viacam_querybuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_querybuf(&cam->vb_queue, buf);
+}
+
+static int viacam_qbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_qbuf(&cam->vb_queue, buf);
+}
+
+static int viacam_dqbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+}
+
+static int viacam_streamon(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+ struct via_camera *cam = priv;
+ int ret = 0;
+
+ if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_IDLE) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+ * Enforce the V4L2 "only one owner gets to read data" rule.
+ */
+ if (cam->owner && cam->owner != filp) {
+ ret = -EBUSY;
+ goto out;
+ }
+ cam->owner = filp;
+ /*
+ * Configure things if need be.
+ */
+ if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+ ret = viacam_configure_sensor(cam);
+ if (ret)
+ goto out;
+ ret = viacam_config_controller(cam);
+ if (ret)
+ goto out;
+ }
+ /*
+ * If the CPU goes into C3, the DMA transfer gets corrupted and
+ * users start filing unsightly bug reports. Put in a "latency"
+ * requirement which will keep the CPU out of the deeper sleep
+ * states.
+ */
+ pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+ /*
+ * Fire things up.
+ */
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ ret = videobuf_streamon(&cam->vb_queue);
+ if (!ret)
+ viacam_start_engine(cam);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+static int viacam_streamoff(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_RUNNING) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pm_qos_remove_request(&cam->qos_request);
+ viacam_stop_engine(cam);
+ /*
+ * Videobuf will recycle all of the outstanding buffers, but
+ * we should be sure we don't retain any references to
+ * any of them.
+ */
+ ret = videobuf_streamoff(&cam->vb_queue);
+ INIT_LIST_HEAD(&cam->buffer_queue);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+/* G/S_PARM */
+
+static int viacam_g_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = v4l2_g_parm_cap(video_devdata(filp), cam->sensor, parm);
+ mutex_unlock(&cam->lock);
+ parm->parm.capture.readbuffers = cam->n_cap_bufs;
+ return ret;
+}
+
+static int viacam_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = v4l2_s_parm_cap(video_devdata(filp), cam->sensor, parm);
+ mutex_unlock(&cam->lock);
+ parm->parm.capture.readbuffers = cam->n_cap_bufs;
+ return ret;
+}
+
+static int viacam_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ if (sizes->index != 0)
+ return -EINVAL;
+ sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ sizes->stepwise.min_width = QCIF_WIDTH;
+ sizes->stepwise.min_height = QCIF_HEIGHT;
+ sizes->stepwise.max_width = VGA_WIDTH;
+ sizes->stepwise.max_height = VGA_HEIGHT;
+ sizes->stepwise.step_width = sizes->stepwise.step_height = 1;
+ return 0;
+}
+
+static int viacam_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct via_camera *cam = priv;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = interval->index,
+ .code = cam->mbus_code,
+ .width = cam->sensor_format.width,
+ .height = cam->sensor_format.height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, pad, enum_frame_interval, NULL, &fie);
+ mutex_unlock(&cam->lock);
+ if (ret)
+ return ret;
+ interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ interval->discrete = fie.interval;
+ return 0;
+}
+
+
+
+static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
+ .vidioc_enum_input = viacam_enum_input,
+ .vidioc_g_input = viacam_g_input,
+ .vidioc_s_input = viacam_s_input,
+ .vidioc_s_std = viacam_s_std,
+ .vidioc_g_std = viacam_g_std,
+ .vidioc_enum_fmt_vid_cap = viacam_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = viacam_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = viacam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = viacam_s_fmt_vid_cap,
+ .vidioc_querycap = viacam_querycap,
+ .vidioc_reqbufs = viacam_reqbufs,
+ .vidioc_querybuf = viacam_querybuf,
+ .vidioc_qbuf = viacam_qbuf,
+ .vidioc_dqbuf = viacam_dqbuf,
+ .vidioc_streamon = viacam_streamon,
+ .vidioc_streamoff = viacam_streamoff,
+ .vidioc_g_parm = viacam_g_parm,
+ .vidioc_s_parm = viacam_s_parm,
+ .vidioc_enum_framesizes = viacam_enum_framesizes,
+ .vidioc_enum_frameintervals = viacam_enum_frameintervals,
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Power management.
+ */
+#ifdef CONFIG_PM
+
+static int viacam_suspend(void *priv)
+{
+ struct via_camera *cam = priv;
+ enum viacam_opstate state = cam->opstate;
+
+ if (cam->opstate != S_IDLE) {
+ viacam_stop_engine(cam);
+ cam->opstate = state; /* So resume restarts */
+ }
+
+ return 0;
+}
+
+static int viacam_resume(void *priv)
+{
+ struct via_camera *cam = priv;
+ int ret = 0;
+
+ /*
+ * Get back to a reasonable operating state.
+ */
+ via_write_reg_mask(VIASR, 0x78, 0, 0x80);
+ via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0);
+ viacam_int_disable(cam);
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ /*
+ * Make sure the sensor's power state is correct
+ */
+ if (cam->users > 0)
+ via_sensor_power_up(cam);
+ else
+ via_sensor_power_down(cam);
+ /*
+ * If it was operating, try to restart it.
+ */
+ if (cam->opstate != S_IDLE) {
+ mutex_lock(&cam->lock);
+ ret = viacam_configure_sensor(cam);
+ if (!ret)
+ ret = viacam_config_controller(cam);
+ mutex_unlock(&cam->lock);
+ if (!ret)
+ viacam_start_engine(cam);
+ }
+
+ return ret;
+}
+
+static struct viafb_pm_hooks viacam_pm_hooks = {
+ .suspend = viacam_suspend,
+ .resume = viacam_resume
+};
+
+#endif /* CONFIG_PM */
+
+/*
+ * Setup stuff.
+ */
+
+static const struct video_device viacam_v4l_template = {
+ .name = "via-camera",
+ .minor = -1,
+ .tvnorms = V4L2_STD_NTSC_M,
+ .fops = &viacam_fops,
+ .ioctl_ops = &viacam_ioctl_ops,
+ .release = video_device_release_empty, /* Check this */
+};
+
+/*
+ * The OLPC folks put the serial port on the same pin as
+ * the camera. They also get grumpy if we break the
+ * serial port and keep them from using it. So we have
+ * to check the serial enable bit and not step on it.
+ */
+#define VIACAM_SERIAL_DEVFN 0x88
+#define VIACAM_SERIAL_CREG 0x46
+#define VIACAM_SERIAL_BIT 0x40
+
+static bool viacam_serial_is_enabled(void)
+{
+ struct pci_bus *pbus = pci_find_bus(0, 0);
+ u8 cbyte;
+
+ if (!pbus)
+ return false;
+ pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+ VIACAM_SERIAL_CREG, &cbyte);
+ if ((cbyte & VIACAM_SERIAL_BIT) == 0)
+ return false; /* Not enabled */
+ if (!override_serial) {
+ printk(KERN_NOTICE "Via camera: serial port is enabled, refusing to load.\n");
+ printk(KERN_NOTICE "Specify override_serial=1 to force module loading.\n");
+ return true;
+ }
+ printk(KERN_NOTICE "Via camera: overriding serial port\n");
+ pci_bus_write_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+ VIACAM_SERIAL_CREG, cbyte & ~VIACAM_SERIAL_BIT);
+ return false;
+}
+
+static struct ov7670_config sensor_cfg = {
+ /* The XO-1.5 (only known user) clocks the camera at 90MHz. */
+ .clock_speed = 90,
+};
+
+static int viacam_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct i2c_adapter *sensor_adapter;
+ struct viafb_dev *viadev = pdev->dev.platform_data;
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+ };
+
+ /*
+ * Note that there are actually two capture channels on
+ * the device. We only deal with one for now. That
+ * is encoded here; nothing else assumes it's dealing with
+ * a unique capture device.
+ */
+ struct via_camera *cam;
+
+ /*
+ * Ensure that frame buffer memory has been set aside for
+ * this purpose. As an arbitrary limit, refuse to work
+ * with less than two frames of VGA 16-bit data.
+ *
+ * If we ever support the second port, we'll need to set
+ * aside more memory.
+ */
+ if (viadev->camera_fbmem_size < (VGA_HEIGHT*VGA_WIDTH*4)) {
+ printk(KERN_ERR "viacam: insufficient FB memory reserved\n");
+ return -ENOMEM;
+ }
+ if (viadev->engine_mmio == NULL) {
+ printk(KERN_ERR "viacam: No I/O memory, so no pictures\n");
+ return -ENOMEM;
+ }
+
+ if (machine_is_olpc() && viacam_serial_is_enabled())
+ return -EBUSY;
+
+ /*
+ * Basic structure initialization.
+ */
+ cam = kzalloc (sizeof(struct via_camera), GFP_KERNEL);
+ if (cam == NULL)
+ return -ENOMEM;
+ via_cam_info = cam;
+ cam->platdev = pdev;
+ cam->viadev = viadev;
+ cam->users = 0;
+ cam->owner = NULL;
+ cam->opstate = S_IDLE;
+ cam->user_format = cam->sensor_format = viacam_def_pix_format;
+ mutex_init(&cam->lock);
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ cam->mmio = viadev->engine_mmio;
+ cam->fbmem = viadev->fbmem;
+ cam->fb_offset = viadev->camera_fbmem_offset;
+ cam->flags = 1 << CF_CONFIG_NEEDED;
+ cam->mbus_code = via_def_mbus_code;
+ /*
+ * Tell V4L that we exist.
+ */
+ ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register v4l2 device\n");
+ goto out_free;
+ }
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (ret)
+ goto out_unregister;
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
+ /*
+ * Convince the system that we can do DMA.
+ */
+ pdev->dev.dma_mask = &viadev->pdev->dma_mask;
+ dma_set_mask(&pdev->dev, 0xffffffff);
+ /*
+ * Fire up the capture port. The write to 0x78 looks purely
+ * OLPCish; any system will need to tweak 0x1e.
+ */
+ via_write_reg_mask(VIASR, 0x78, 0, 0x80);
+ via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0);
+ /*
+ * Get the sensor powered up.
+ */
+ ret = via_sensor_power_setup(cam);
+ if (ret)
+ goto out_ctrl_hdl_free;
+ via_sensor_power_up(cam);
+
+ /*
+ * See if we can't find it on the bus. The VIA_PORT_31 assumption
+ * is OLPC-specific. 0x42 assumption is ov7670-specific.
+ */
+ sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31);
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, sensor_adapter,
+ &ov7670_info, NULL);
+ if (cam->sensor == NULL) {
+ dev_err(&pdev->dev, "Unable to find the sensor!\n");
+ ret = -ENODEV;
+ goto out_power_down;
+ }
+ /*
+ * Get the IRQ.
+ */
+ viacam_int_disable(cam);
+ ret = request_threaded_irq(viadev->pdev->irq, viacam_quick_irq,
+ viacam_irq, IRQF_SHARED, "via-camera", cam);
+ if (ret)
+ goto out_power_down;
+ /*
+ * Tell V4l2 that we exist.
+ */
+ cam->vdev = viacam_v4l_template;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto out_irq;
+ video_set_drvdata(&cam->vdev, cam);
+
+#ifdef CONFIG_PM
+ /*
+ * Hook into PM events
+ */
+ viacam_pm_hooks.private = cam;
+ viafb_pm_register(&viacam_pm_hooks);
+#endif
+
+ /* Power the sensor down until somebody opens the device */
+ via_sensor_power_down(cam);
+ return 0;
+
+out_irq:
+ free_irq(viadev->pdev->irq, cam);
+out_power_down:
+ via_sensor_power_release(cam);
+out_ctrl_hdl_free:
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+out_unregister:
+ v4l2_device_unregister(&cam->v4l2_dev);
+out_free:
+ kfree(cam);
+ return ret;
+}
+
+static int viacam_remove(struct platform_device *pdev)
+{
+ struct via_camera *cam = via_cam_info;
+ struct viafb_dev *viadev = pdev->dev.platform_data;
+
+ video_unregister_device(&cam->vdev);
+ v4l2_device_unregister(&cam->v4l2_dev);
+ free_irq(viadev->pdev->irq, cam);
+ via_sensor_power_release(cam);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ kfree(cam);
+ via_cam_info = NULL;
+ return 0;
+}
+
+static struct platform_driver viacam_driver = {
+ .driver = {
+ .name = "viafb-camera",
+ },
+ .probe = viacam_probe,
+ .remove = viacam_remove,
+};
+
+module_platform_driver(viacam_driver);
diff --git a/drivers/media/platform/via-camera.h b/drivers/media/platform/via-camera.h
new file mode 100644
index 000000000..54f16318b
--- /dev/null
+++ b/drivers/media/platform/via-camera.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VIA Camera register definitions.
+ */
+#define VCR_INTCTRL 0x300 /* Capture interrupt control */
+#define VCR_IC_EAV 0x0001 /* End of active video status */
+#define VCR_IC_EVBI 0x0002 /* End of VBI status */
+#define VCR_IC_FBOTFLD 0x0004 /* "flipping" Bottom field is active */
+#define VCR_IC_ACTBUF 0x0018 /* Active video buffer */
+#define VCR_IC_VSYNC 0x0020 /* 0 = VB, 1 = active video */
+#define VCR_IC_BOTFLD 0x0040 /* Bottom field is active */
+#define VCR_IC_FFULL 0x0080 /* FIFO full */
+#define VCR_IC_INTEN 0x0100 /* End of active video int. enable */
+#define VCR_IC_VBIINT 0x0200 /* End of VBI int enable */
+#define VCR_IC_VBIBUF 0x0400 /* Current VBI buffer */
+
+#define VCR_TSC 0x308 /* Transport stream control */
+#define VCR_TSC_ENABLE 0x000001 /* Transport stream input enable */
+#define VCR_TSC_DROPERR 0x000002 /* Drop error packets */
+#define VCR_TSC_METHOD 0x00000c /* DMA method (non-functional) */
+#define VCR_TSC_COUNT 0x07fff0 /* KByte or packet count */
+#define VCR_TSC_CBMODE 0x080000 /* Change buffer by byte count */
+#define VCR_TSC_PSSIG 0x100000 /* Packet starting signal disable */
+#define VCR_TSC_BE 0x200000 /* MSB first (serial mode) */
+#define VCR_TSC_SERIAL 0x400000 /* Serial input (0 = parallel) */
+
+#define VCR_CAPINTC 0x310 /* Capture interface control */
+#define VCR_CI_ENABLE 0x00000001 /* Capture enable */
+#define VCR_CI_BSS 0x00000002 /* WTF "bit stream selection" */
+#define VCR_CI_3BUFS 0x00000004 /* 1 = 3 buffers, 0 = 2 buffers */
+#define VCR_CI_VIPEN 0x00000008 /* VIP enable */
+#define VCR_CI_CCIR601_8 0 /* CCIR601 input stream, 8 bit */
+#define VCR_CI_CCIR656_8 0x00000010 /* ... CCIR656, 8 bit */
+#define VCR_CI_CCIR601_16 0x00000020 /* ... CCIR601, 16 bit */
+#define VCR_CI_CCIR656_16 0x00000030 /* ... CCIR656, 16 bit */
+#define VCR_CI_HDMODE 0x00000040 /* CCIR656-16 hdr decode mode; 1=16b */
+#define VCR_CI_BSWAP 0x00000080 /* Swap bytes (16-bit) */
+#define VCR_CI_YUYV 0 /* Byte order 0123 */
+#define VCR_CI_UYVY 0x00000100 /* Byte order 1032 */
+#define VCR_CI_YVYU 0x00000200 /* Byte order 0321 */
+#define VCR_CI_VYUY 0x00000300 /* Byte order 3012 */
+#define VCR_CI_VIPTYPE 0x00000400 /* VIP type */
+#define VCR_CI_IFSEN 0x00000800 /* Input field signal enable */
+#define VCR_CI_DIODD 0 /* De-interlace odd, 30fps */
+#define VCR_CI_DIEVEN 0x00001000 /* ...even field, 30fps */
+#define VCR_CI_DIBOTH 0x00002000 /* ...both fields, 60fps */
+#define VCR_CI_DIBOTH30 0x00003000 /* ...both fields, 30fps interlace */
+#define VCR_CI_CONVTYPE 0x00004000 /* 4:2:2 to 4:4:4; 1 = interpolate */
+#define VCR_CI_CFC 0x00008000 /* Capture flipping control */
+#define VCR_CI_FILTER 0x00070000 /* Horiz filter mode select
+ 000 = none
+ 001 = 2 tap
+ 010 = 3 tap
+ 011 = 4 tap
+ 100 = 5 tap */
+#define VCR_CI_CLKINV 0x00080000 /* Input CLK inverted */
+#define VCR_CI_VREFINV 0x00100000 /* VREF inverted */
+#define VCR_CI_HREFINV 0x00200000 /* HREF inverted */
+#define VCR_CI_FLDINV 0x00400000 /* Field inverted */
+#define VCR_CI_CLKPIN 0x00800000 /* Capture clock pin */
+#define VCR_CI_THRESH 0x0f000000 /* Capture fifo threshold */
+#define VCR_CI_HRLE 0x10000000 /* Positive edge of HREF */
+#define VCR_CI_VRLE 0x20000000 /* Positive edge of VREF */
+#define VCR_CI_OFLDINV 0x40000000 /* Field output inverted */
+#define VCR_CI_CLKEN 0x80000000 /* Capture clock enable */
+
+#define VCR_HORRANGE 0x314 /* Active video horizontal range */
+#define VCR_VERTRANGE 0x318 /* Active video vertical range */
+#define VCR_AVSCALE 0x31c /* Active video scaling control */
+#define VCR_AVS_HEN 0x00000800 /* Horizontal scale enable */
+#define VCR_AVS_VEN 0x04000000 /* Vertical enable */
+#define VCR_VBIHOR 0x320 /* VBI Data horizontal range */
+#define VCR_VBIVERT 0x324 /* VBI data vertical range */
+#define VCR_VBIBUF1 0x328 /* First VBI buffer */
+#define VCR_VBISTRIDE 0x32c /* VBI stride */
+#define VCR_ANCDATACNT 0x330 /* Ancillary data count setting */
+#define VCR_MAXDATA 0x334 /* Active data count of active video */
+#define VCR_MAXVBI 0x338 /* Maximum data count of VBI */
+#define VCR_CAPDATA 0x33c /* Capture data count */
+#define VCR_VBUF1 0x340 /* First video buffer */
+#define VCR_VBUF2 0x344 /* Second video buffer */
+#define VCR_VBUF3 0x348 /* Third video buffer */
+#define VCR_VBUF_MASK 0x1ffffff0 /* Bits 28:4 */
+#define VCR_VBIBUF2 0x34c /* Second VBI buffer */
+#define VCR_VSTRIDE 0x350 /* Stride of video + coring control */
+#define VCR_VS_STRIDE_SHIFT 4
+#define VCR_VS_STRIDE 0x00001ff0 /* Stride (8-byte units) */
+#define VCR_VS_CCD 0x007f0000 /* Coring compare data */
+#define VCR_VS_COREEN 0x00800000 /* Coring enable */
+#define VCR_TS0ERR 0x354 /* TS buffer 0 error indicator */
+#define VCR_TS1ERR 0x358 /* TS buffer 0 error indicator */
+#define VCR_TS2ERR 0x35c /* TS buffer 0 error indicator */
+
+/* Add 0x1000 for the second capture engine registers */
diff --git a/drivers/media/platform/vicodec/Kconfig b/drivers/media/platform/vicodec/Kconfig
new file mode 100644
index 000000000..2503bcb15
--- /dev/null
+++ b/drivers/media/platform/vicodec/Kconfig
@@ -0,0 +1,13 @@
+config VIDEO_VICODEC
+ tristate "Virtual Codec Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ help
+ Driver for a Virtual Codec
+
+ This driver can be compared to the vim2m driver for emulating
+ a video device node that exposes an emulated hardware codec.
+
+ When in doubt, say N.
diff --git a/drivers/media/platform/vicodec/Makefile b/drivers/media/platform/vicodec/Makefile
new file mode 100644
index 000000000..197229428
--- /dev/null
+++ b/drivers/media/platform/vicodec/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+vicodec-objs := vicodec-core.o vicodec-codec.o
+
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec.o
diff --git a/drivers/media/platform/vicodec/vicodec-codec.c b/drivers/media/platform/vicodec/vicodec-codec.c
new file mode 100644
index 000000000..d854b2344
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-codec.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2016 Tom aan de Wiel
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * 8x8 Fast Walsh Hadamard Transform in sequency order based on the paper:
+ *
+ * A Recursive Algorithm for Sequency-Ordered Fast Walsh Transforms,
+ * R.D. Brown, 1977
+ */
+
+#include <linux/string.h>
+#include "vicodec-codec.h"
+
+#define ALL_ZEROS 15
+#define DEADZONE_WIDTH 20
+
+static const uint8_t zigzag[64] = {
+ 0,
+ 1, 8,
+ 2, 9, 16,
+ 3, 10, 17, 24,
+ 4, 11, 18, 25, 32,
+ 5, 12, 19, 26, 33, 40,
+ 6, 13, 20, 27, 34, 41, 48,
+ 7, 14, 21, 28, 35, 42, 49, 56,
+ 15, 22, 29, 36, 43, 50, 57,
+ 23, 30, 37, 44, 51, 58,
+ 31, 38, 45, 52, 59,
+ 39, 46, 53, 60,
+ 47, 54, 61,
+ 55, 62,
+ 63,
+};
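+
+/*
+ * Example of the ordering: zigzag[1] = 1 is (row 0, column 1) and
+ * zigzag[2] = 8 is (row 1, column 0), so coefficients are visited along
+ * anti-diagonals, low frequencies first, which lets the trailing zeros
+ * of a typical block cluster at the end.
+ */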
+
+
+static int rlc(const s16 *in, __be16 *output, int blocktype)
+{
+ s16 block[8 * 8];
+ s16 *wp = block;
+ int i = 0;
+ int x, y;
+ int ret = 0;
+
+ /* read in block from framebuffer */
+ int lastzero_run = 0;
+ int to_encode;
+
+ for (y = 0; y < 8; y++) {
+ for (x = 0; x < 8; x++) {
+ *wp = in[x + y * 8];
+ wp++;
+ }
+ }
+
+ /* keep track of amount of trailing zeros */
+ for (i = 63; i >= 0 && !block[zigzag[i]]; i--)
+ lastzero_run++;
+
+ *output++ = (blocktype == PBLOCK ? htons(PFRAME_BIT) : 0);
+ ret++;
+
+ to_encode = 8 * 8 - (lastzero_run > 14 ? lastzero_run : 0);
+
+ i = 0;
+ while (i < to_encode) {
+ int cnt = 0;
+ int tmp;
+
+ /* count leading zeros */
+ while ((tmp = block[zigzag[i]]) == 0 && cnt < 14) {
+ cnt++;
+ i++;
+ if (i == to_encode) {
+ cnt--;
+ break;
+ }
+ }
+ /* 4 bits for run, 12 for coefficient (quantization by 4) */
+ *output++ = htons((cnt | tmp << 4));
+ i++;
+ ret++;
+ }
+ if (lastzero_run > 14) {
+ *output = htons(ALL_ZEROS | 0);
+ ret++;
+ }
+
+ return ret;
+}
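+
+/*
+ * Worked example: a run of three zero coefficients followed by the
+ * (already quantized) value 5 is packed by rlc() as
+ * htons(3 | (5 << 4)) = htons(0x0053); a block whose tail is all zeroes
+ * is terminated with a single code word whose run field is
+ * ALL_ZEROS (15).
+ */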
+
+/*
+ * This function will, in the worst case, advance *rlc_in by 65 * 2 bytes:
+ * one s16 value for the header and up to 8 * 8 coefficients of type s16.
+ */
+static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
+{
+ /* header */
+ const __be16 *input = *rlc_in;
+ s16 ret = ntohs(*input++);
+ int dec_count = 0;
+ s16 block[8 * 8 + 16];
+ s16 *wp = block;
+ int i;
+
+ /*
+ * Now de-compress: each code word expands into up to 15 coefficient
+ * values (a run of zeroes followed by the coefficient itself), or
+ * fills the remainder of the 64 coefficients with zeroes if it is
+ * the terminating code word.
+ *
+ * So block has to hold 8 * 8 + 16 entries; the '+ 16' is to
+ * allow for overflow if the incoming data was malformed.
+ */
+ while (dec_count < 8 * 8) {
+ s16 in = ntohs(*input++);
+ int length = in & 0xf;
+ int coeff = in >> 4;
+
+ /* fill remainder with zeros */
+ if (length == 15) {
+ for (i = 0; i < 64 - dec_count; i++)
+ *wp++ = 0;
+ break;
+ }
+
+ for (i = 0; i < length; i++)
+ *wp++ = 0;
+ *wp++ = coeff;
+ dec_count += length + 1;
+ }
+
+ wp = block;
+
+ for (i = 0; i < 64; i++) {
+ int pos = zigzag[i];
+ int y = pos / 8;
+ int x = pos % 8;
+
+ dwht_out[x + y * 8] = *wp++;
+ }
+ *rlc_in = input;
+ return ret;
+}
+
+static const int quant_table[] = {
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 2, 2, 2, 2, 2, 2, 3, 6,
+ 2, 2, 2, 2, 2, 3, 6, 6,
+ 2, 2, 2, 2, 3, 6, 6, 6,
+ 2, 2, 2, 3, 6, 6, 6, 6,
+ 2, 2, 3, 6, 6, 6, 6, 8,
+};
+
+static const int quant_table_p[] = {
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 6,
+ 3, 3, 3, 3, 3, 3, 6, 6,
+ 3, 3, 3, 3, 3, 6, 6, 9,
+ 3, 3, 3, 3, 6, 6, 9, 9,
+ 3, 3, 3, 6, 6, 9, 9, 10,
+};
+
+static void quantize_intra(s16 *coeff, s16 *de_coeff)
+{
+ const int *quant = quant_table;
+ int i, j;
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++, quant++, coeff++, de_coeff++) {
+ *coeff >>= *quant;
+ if (*coeff >= -DEADZONE_WIDTH &&
+ *coeff <= DEADZONE_WIDTH)
+ *coeff = *de_coeff = 0;
+ else
+ *de_coeff = *coeff << *quant;
+ }
+ }
+}
+
+static void dequantize_intra(s16 *coeff)
+{
+ const int *quant = quant_table;
+ int i, j;
+
+ for (j = 0; j < 8; j++)
+ for (i = 0; i < 8; i++, quant++, coeff++)
+ *coeff <<= *quant;
+}
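+
+/*
+ * Worked example: at a position where quant_table holds 2, an intra
+ * coefficient of 100 becomes 100 >> 2 = 25, which lies outside the
+ * +/-DEADZONE_WIDTH window and is kept, so the reconstructed value is
+ * 25 << 2 = 100.  A coefficient of 60 becomes 60 >> 2 = 15, which falls
+ * inside the deadzone and is zeroed in both the quantized and the
+ * reconstructed block.
+ */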
+
+static void quantize_inter(s16 *coeff, s16 *de_coeff)
+{
+ const int *quant = quant_table_p;
+ int i, j;
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++, quant++, coeff++, de_coeff++) {
+ *coeff >>= *quant;
+ if (*coeff >= -DEADZONE_WIDTH &&
+ *coeff <= DEADZONE_WIDTH)
+ *coeff = *de_coeff = 0;
+ else
+ *de_coeff = *coeff << *quant;
+ }
+ }
+}
+
+static void dequantize_inter(s16 *coeff)
+{
+ const int *quant = quant_table_p;
+ int i, j;
+
+ for (j = 0; j < 8; j++)
+ for (i = 0; i < 8; i++, quant++, coeff++)
+ *coeff <<= *quant;
+}
+
+static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
+ unsigned int input_step, bool intra)
+{
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+ const u8 *tmp = block;
+ s16 *out = output_block;
+ int add = intra ? 256 : 0;
+ unsigned int i;
+
+ /* stage 1 */
+ stride *= input_step;
+
+ for (i = 0; i < 8; i++, tmp += stride, out += 8) {
+ if (input_step == 1) {
+ workspace1[0] = tmp[0] + tmp[1] - add;
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3] - add;
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5] - add;
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7] - add;
+ workspace1[7] = tmp[6] - tmp[7];
+ } else {
+ workspace1[0] = tmp[0] + tmp[2] - add;
+ workspace1[1] = tmp[0] - tmp[2];
+
+ workspace1[2] = tmp[4] + tmp[6] - add;
+ workspace1[3] = tmp[4] - tmp[6];
+
+ workspace1[4] = tmp[8] + tmp[10] - add;
+ workspace1[5] = tmp[8] - tmp[10];
+
+ workspace1[6] = tmp[12] + tmp[14] - add;
+ workspace1[7] = tmp[12] - tmp[14];
+ }
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1 * 8];
+ workspace1[1] = out[0] - out[1 * 8];
+
+ workspace1[2] = out[2 * 8] + out[3 * 8];
+ workspace1[3] = out[2 * 8] - out[3 * 8];
+
+ workspace1[4] = out[4 * 8] + out[5 * 8];
+ workspace1[5] = out[4 * 8] - out[5 * 8];
+
+ workspace1[6] = out[6 * 8] + out[7 * 8];
+ workspace1[7] = out[6 * 8] - out[7 * 8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+ /* stage 3 */
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+ }
+}
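+
+/*
+ * Worked example: the "- add" term in stage 1 appears to remove the DC
+ * bias of the unsigned 8-bit input for intra blocks.  For a flat block
+ * with every sample equal to 128, each stage-1 pair sum is
+ * 128 + 128 - 256 = 0 and all 64 coefficients come out as 0; without
+ * the correction the DC coefficient would be 64 * 128 = 8192.
+ */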
+
+/*
+ * Not the nicest way of doing it, but P-blocks get twice the range of
+ * the I-blocks, so we need a type bigger than 8 bits. Furthermore,
+ * values can be negative... This is simply a version of fwht() that
+ * works on 16-bit signed data.
+ */
+static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
+{
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+ const s16 *tmp = block;
+ s16 *out = output_block;
+ int i;
+
+ for (i = 0; i < 8; i++, tmp += stride, out += 8) {
+ /* stage 1 */
+ workspace1[0] = tmp[0] + tmp[1];
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3];
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5];
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7];
+ workspace1[7] = tmp[6] - tmp[7];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1*8];
+ workspace1[1] = out[0] - out[1*8];
+
+ workspace1[2] = out[2*8] + out[3*8];
+ workspace1[3] = out[2*8] - out[3*8];
+
+ workspace1[4] = out[4*8] + out[5*8];
+ workspace1[5] = out[4*8] - out[5*8];
+
+ workspace1[6] = out[6*8] + out[7*8];
+ workspace1[7] = out[6*8] - out[7*8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0*8] = workspace2[0] + workspace2[4];
+ out[1*8] = workspace2[0] - workspace2[4];
+ out[2*8] = workspace2[1] - workspace2[5];
+ out[3*8] = workspace2[1] + workspace2[5];
+ out[4*8] = workspace2[2] + workspace2[6];
+ out[5*8] = workspace2[2] - workspace2[6];
+ out[6*8] = workspace2[3] - workspace2[7];
+ out[7*8] = workspace2[3] + workspace2[7];
+ }
+}
+
+static void ifwht(const s16 *block, s16 *output_block, int intra)
+{
+ /*
+ * we'll need more than 8 bits for the transformed coefficients,
+ * so use the native integer type of the CPU
+ */
+ int workspace1[8], workspace2[8];
+ int inter = intra ? 0 : 1;
+ const s16 *tmp = block;
+ s16 *out = output_block;
+ int i;
+
+ for (i = 0; i < 8; i++, tmp += 8, out += 8) {
+ /* stage 1 */
+ workspace1[0] = tmp[0] + tmp[1];
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3];
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5];
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7];
+ workspace1[7] = tmp[6] - tmp[7];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1 * 8];
+ workspace1[1] = out[0] - out[1 * 8];
+
+ workspace1[2] = out[2 * 8] + out[3 * 8];
+ workspace1[3] = out[2 * 8] - out[3 * 8];
+
+ workspace1[4] = out[4 * 8] + out[5 * 8];
+ workspace1[5] = out[4 * 8] - out[5 * 8];
+
+ workspace1[6] = out[6 * 8] + out[7 * 8];
+ workspace1[7] = out[6 * 8] - out[7 * 8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ if (inter) {
+ int d;
+
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+
+ for (d = 0; d < 8; d++)
+ out[8 * d] >>= 6;
+ } else {
+ int d;
+
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+
+ for (d = 0; d < 8; d++) {
+ out[8 * d] >>= 6;
+ out[8 * d] += 128;
+ }
+ }
+ }
+}
+
+static void fill_encoder_block(const u8 *input, s16 *dst,
+ unsigned int stride, unsigned int input_step)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++, input += input_step)
+ *dst++ = *input;
+ input += (stride - 8) * input_step;
+ }
+}
+
+static int var_intra(const s16 *input)
+{
+ int32_t mean = 0;
+ int32_t ret = 0;
+ const s16 *tmp = input;
+ int i;
+
+ for (i = 0; i < 8 * 8; i++, tmp++)
+ mean += *tmp;
+ mean /= 64;
+ tmp = input;
+ for (i = 0; i < 8 * 8; i++, tmp++)
+ ret += (*tmp - mean) < 0 ? -(*tmp - mean) : (*tmp - mean);
+ return ret;
+}
+
+static int var_inter(const s16 *old, const s16 *new)
+{
+ int32_t ret = 0;
+ int i;
+
+ for (i = 0; i < 8 * 8; i++, old++, new++)
+ ret += (*old - *new) < 0 ? -(*old - *new) : (*old - *new);
+ return ret;
+}
+
+static int decide_blocktype(const u8 *cur, const u8 *reference,
+ s16 *deltablock, unsigned int stride,
+ unsigned int input_step)
+{
+ s16 tmp[64];
+ s16 old[64];
+ s16 *work = tmp;
+ unsigned int k, l;
+ int vari;
+ int vard;
+
+ fill_encoder_block(cur, tmp, stride, input_step);
+ fill_encoder_block(reference, old, 8, 1);
+ vari = var_intra(tmp);
+
+ for (k = 0; k < 8; k++) {
+ for (l = 0; l < 8; l++) {
+ *deltablock = *work - *reference;
+ deltablock++;
+ work++;
+ reference++;
+ }
+ }
+ deltablock -= 64;
+ vard = var_inter(old, tmp);
+ return vari <= vard ? IBLOCK : PBLOCK;
+}
+
+static void fill_decoder_block(u8 *dst, const s16 *input, int stride)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++, input++, dst++) {
+ if (*input < 0)
+ *dst = 0;
+ else if (*input > 255)
+ *dst = 255;
+ else
+ *dst = *input;
+ }
+ dst += stride - 8;
+ }
+}
+
+static void add_deltas(s16 *deltas, const u8 *ref, int stride)
+{
+ int k, l;
+
+ for (k = 0; k < 8; k++) {
+ for (l = 0; l < 8; l++) {
+ *deltas += *ref++;
+ /*
+ * Due to quantizing, it is possible that the
+ * decoded coefficients are slightly out of range
+ */
+ if (*deltas < 0)
+ *deltas = 0;
+ else if (*deltas > 255)
+ *deltas = 255;
+ deltas++;
+ }
+ ref += stride - 8;
+ }
+}
+
+static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
+ struct cframe *cf, u32 height, u32 width,
+ unsigned int input_step,
+ bool is_intra, bool next_is_intra)
+{
+ u8 *input_start = input;
+ __be16 *rlco_start = *rlco;
+ s16 deltablock[64];
+ __be16 pframe_bit = htons(PFRAME_BIT);
+ u32 encoding = 0;
+ unsigned int last_size = 0;
+ unsigned int i, j;
+
+ for (j = 0; j < height / 8; j++) {
+ for (i = 0; i < width / 8; i++) {
+ /* intra coding: the first frame is always intra coded */
+ int blocktype = IBLOCK;
+ unsigned int size;
+
+ if (!is_intra)
+ blocktype = decide_blocktype(input, refp,
+ deltablock, width, input_step);
+ if (is_intra || blocktype == IBLOCK) {
+ fwht(input, cf->coeffs, width, input_step, 1);
+ quantize_intra(cf->coeffs, cf->de_coeffs);
+ blocktype = IBLOCK;
+ } else {
+ /* inter code */
+ encoding |= FRAME_PCODED;
+ fwht16(deltablock, cf->coeffs, 8, 0);
+ quantize_inter(cf->coeffs, cf->de_coeffs);
+ }
+ if (!next_is_intra) {
+ ifwht(cf->de_coeffs, cf->de_fwht, blocktype);
+
+ if (blocktype == PBLOCK)
+ add_deltas(cf->de_fwht, refp, 8);
+ fill_decoder_block(refp, cf->de_fwht, 8);
+ }
+
+ input += 8 * input_step;
+ refp += 8 * 8;
+
+ if (encoding & FRAME_UNENCODED)
+ continue;
+
+ size = rlc(cf->coeffs, *rlco, blocktype);
+ if (last_size == size &&
+ !memcmp(*rlco + 1, *rlco - size + 1, 2 * size - 2)) {
+ __be16 *last_rlco = *rlco - size;
+ s16 hdr = ntohs(*last_rlco);
+
+ if (!((*last_rlco ^ **rlco) & pframe_bit) &&
+ (hdr & DUPS_MASK) < DUPS_MASK)
+ *last_rlco = htons(hdr + 2);
+ else
+ *rlco += size;
+ } else {
+ *rlco += size;
+ }
+ if (*rlco >= rlco_max)
+ encoding |= FRAME_UNENCODED;
+ last_size = size;
+ }
+ input += width * 7 * input_step;
+ }
+ if (encoding & FRAME_UNENCODED) {
+ u8 *out = (u8 *)rlco_start;
+
+ input = input_start;
+ /*
+ * The compressed stream should never contain the magic
+ * header, so when we copy the YUV data we replace 0xff
+ * by 0xfe. Since YUV is limited range such values
+ * shouldn't appear anyway.
+ */
+ for (i = 0; i < height * width; i++, input += input_step)
+ *out++ = (*input == 0xff) ? 0xfe : *input;
+ *rlco = (__be16 *)out;
+ }
+ return encoding;
+}
+
+u32 encode_frame(struct raw_frame *frm, struct raw_frame *ref_frm,
+ struct cframe *cf, bool is_intra, bool next_is_intra)
+{
+ unsigned int size = frm->height * frm->width;
+ __be16 *rlco = cf->rlc_data;
+ __be16 *rlco_max;
+ u32 encoding;
+
+ rlco_max = rlco + size / 2 - 256;
+ encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf,
+ frm->height, frm->width,
+ 1, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= LUMA_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ rlco_max = rlco + size / 8 - 256;
+ encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max, cf,
+ frm->height / 2, frm->width / 2,
+ frm->chroma_step, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= CB_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ rlco_max = rlco + size / 8 - 256;
+ encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max, cf,
+ frm->height / 2, frm->width / 2,
+ frm->chroma_step, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= CR_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ cf->size = (rlco - cf->rlc_data) * sizeof(*rlco);
+ return encoding;
+}
+
+static void decode_plane(struct cframe *cf, const __be16 **rlco, u8 *ref,
+ u32 height, u32 width, bool uncompressed)
+{
+ unsigned int copies = 0;
+ s16 copy[8 * 8];
+ s16 stat;
+ unsigned int i, j;
+
+ if (uncompressed) {
+ memcpy(ref, *rlco, width * height);
+ *rlco += width * height / 2;
+ return;
+ }
+
+ /*
+ * When decoding each macroblock the rlco pointer is advanced by at
+ * most 65 * 2 bytes (worst case).
+ * To avoid overflow, the buffer therefore has to be at least 65/64th
+ * of the actual raw image size, in case someone feeds it malicious
+ * data.
+ */
+ for (j = 0; j < height / 8; j++) {
+ for (i = 0; i < width / 8; i++) {
+ u8 *refp = ref + j * 8 * width + i * 8;
+
+ if (copies) {
+ memcpy(cf->de_fwht, copy, sizeof(copy));
+ if (stat & PFRAME_BIT)
+ add_deltas(cf->de_fwht, refp, width);
+ fill_decoder_block(refp, cf->de_fwht, width);
+ copies--;
+ continue;
+ }
+
+ stat = derlc(rlco, cf->coeffs);
+
+ if (stat & PFRAME_BIT)
+ dequantize_inter(cf->coeffs);
+ else
+ dequantize_intra(cf->coeffs);
+
+ ifwht(cf->coeffs, cf->de_fwht,
+ (stat & PFRAME_BIT) ? 0 : 1);
+
+ copies = (stat & DUPS_MASK) >> 1;
+ if (copies)
+ memcpy(copy, cf->de_fwht, sizeof(copy));
+ if (stat & PFRAME_BIT)
+ add_deltas(cf->de_fwht, refp, width);
+ fill_decoder_block(refp, cf->de_fwht, width);
+ }
+ }
+}
+
+void decode_frame(struct cframe *cf, struct raw_frame *ref, u32 hdr_flags)
+{
+ const __be16 *rlco = cf->rlc_data;
+
+ decode_plane(cf, &rlco, ref->luma, cf->height, cf->width,
+ hdr_flags & VICODEC_FL_LUMA_IS_UNCOMPRESSED);
+ decode_plane(cf, &rlco, ref->cb, cf->height / 2, cf->width / 2,
+ hdr_flags & VICODEC_FL_CB_IS_UNCOMPRESSED);
+ decode_plane(cf, &rlco, ref->cr, cf->height / 2, cf->width / 2,
+ hdr_flags & VICODEC_FL_CR_IS_UNCOMPRESSED);
+}
diff --git a/drivers/media/platform/vicodec/vicodec-codec.h b/drivers/media/platform/vicodec/vicodec-codec.h
new file mode 100644
index 000000000..cdfad1332
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-codec.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2016 Tom aan de Wiel
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef VICODEC_RLC_H
+#define VICODEC_RLC_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * The compressed format consists of a cframe_hdr struct followed by the
+ * compressed frame data. The header contains the size of that data.
+ * Each Y, Cb and Cr plane is compressed separately. If the compressed
+ * size of a plane becomes larger than its uncompressed size, then
+ * that plane is stored uncompressed and the corresponding bit is set
+ * in the flags field of the header.
+ *
+ * Each compressed plane consists of macroblocks and each macroblock
+ * is run-length-encoded. Each macroblock starts with a 16 bit value.
+ * Bit 15 indicates if this is a P-coded macroblock (1) or not (0).
+ * P-coded macroblocks contain a delta against the previous frame.
+ *
+ * Bits 1-12 contain a number. If non-zero, then this same macroblock
+ * repeats that number of times. This results in a high degree of
+ * compression for generated images like colorbars.
+ *
+ * Following this macroblock header the MB coefficients are run-length
+ * encoded: the top 12 bits contain the coefficient, the bottom 4 bits
+ * tell how many times this coefficient occurs. The value 0xf indicates
+ * that the remainder of the macroblock should be filled with zeroes.
+ *
+ * All 16 and 32 bit values are stored in big-endian (network) order.
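+ *
+ * Purely as an illustration (not part of the driver code), a 16-bit
+ * macroblock header word w, read with ntohs(), could be unpacked
+ * along these lines:
+ *
+ *   is_pcoded = w & PFRAME_BIT;        (bit 15)
+ *   repeats   = (w & DUPS_MASK) >> 1;  (bits 1-12)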
+ *
+ * Each cframe_hdr starts with an 8 byte magic header that is
+ * guaranteed not to occur in the compressed frame data. This header
+ * can be used to sync to the next frame.
+ *
+ * This codec uses the Fast Walsh Hadamard Transform. Tom aan de Wiel
+ * developed this as part of a university project, specifically for use
+ * with this driver. His project report can be found here:
+ *
+ * https://hverkuil.home.xs4all.nl/fwht.pdf
+ */
+
+/*
+ * Note: bit 0 of the header must always be 0. Otherwise it cannot
+ * be guaranteed that the magic 8 byte sequence (see below) can
+ * never occur in the rlc output.
+ */
+#define PFRAME_BIT (1 << 15)
+#define DUPS_MASK 0x1ffe
+
+/*
+ * The magic header is a sequence of 8 bytes (4f 4f 4f 4f ff ff ff ff)
+ * in which every byte has its low 4 bits set to 0xf.
+ *
+ * Such a sequence cannot occur in the encoded data.
+ */
+#define VICODEC_MAGIC1 0x4f4f4f4f
+#define VICODEC_MAGIC2 0xffffffff
+
+#define VICODEC_VERSION 1
+
+#define VICODEC_MAX_WIDTH 3840
+#define VICODEC_MAX_HEIGHT 2160
+#define VICODEC_MIN_WIDTH 640
+#define VICODEC_MIN_HEIGHT 480
+
+#define PBLOCK 0
+#define IBLOCK 1
+
+/* Set if this is an interlaced format */
+#define VICODEC_FL_IS_INTERLACED BIT(0)
+/* Set if this is a bottom-first (NTSC) interlaced format */
+#define VICODEC_FL_IS_BOTTOM_FIRST BIT(1)
+/* Set if each 'frame' contains just one field */
+#define VICODEC_FL_IS_ALTERNATE BIT(2)
+/*
+ * If VICODEC_FL_IS_ALTERNATE was set, then this is set if this
+ * 'frame' is the bottom field, else it is the top field.
+ */
+#define VICODEC_FL_IS_BOTTOM_FIELD BIT(3)
+/* Set if the corresponding plane of this frame is stored uncompressed */
+#define VICODEC_FL_LUMA_IS_UNCOMPRESSED BIT(4)
+#define VICODEC_FL_CB_IS_UNCOMPRESSED BIT(5)
+#define VICODEC_FL_CR_IS_UNCOMPRESSED BIT(6)
+
+struct cframe_hdr {
+ u32 magic1;
+ u32 magic2;
+ __be32 version;
+ __be32 width, height;
+ __be32 flags;
+ __be32 colorspace;
+ __be32 xfer_func;
+ __be32 ycbcr_enc;
+ __be32 quantization;
+ __be32 size;
+};
+
+struct cframe {
+ unsigned int width, height;
+ __be16 *rlc_data;
+ s16 coeffs[8 * 8];
+ s16 de_coeffs[8 * 8];
+ s16 de_fwht[8 * 8];
+ u32 size;
+};
+
+struct raw_frame {
+ unsigned int width, height;
+ unsigned int chroma_step;
+ u8 *luma, *cb, *cr;
+};
+
+#define FRAME_PCODED BIT(0)
+#define FRAME_UNENCODED BIT(1)
+#define LUMA_UNENCODED BIT(2)
+#define CB_UNENCODED BIT(3)
+#define CR_UNENCODED BIT(4)
+
+u32 encode_frame(struct raw_frame *frm, struct raw_frame *ref_frm,
+ struct cframe *cf, bool is_intra, bool next_is_intra);
+void decode_frame(struct cframe *cf, struct raw_frame *ref, u32 hdr_flags);
+
+#endif
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
new file mode 100644
index 000000000..9d2e1ce53
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -0,0 +1,1507 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A virtual codec example device.
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This is a virtual codec device driver for testing the codec framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination and encodes or decodes the data.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vicodec-codec.h"
+
+MODULE_DESCRIPTION("Virtual codec device");
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_LICENSE("GPL v2");
+
+static bool multiplanar;
+module_param(multiplanar, bool, 0444);
+MODULE_PARM_DESC(multiplanar,
+ " use multi-planar API instead of single-planar API");
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, " activates debug info");
+
+#define VICODEC_NAME "vicodec"
+#define MAX_WIDTH 4096U
+#define MIN_WIDTH 640U
+#define MAX_HEIGHT 2160U
+#define MIN_HEIGHT 360U
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+static void vicodec_dev_release(struct device *dev)
+{
+}
+
+static struct platform_device vicodec_pdev = {
+ .name = VICODEC_NAME,
+ .dev.release = vicodec_dev_release,
+};
+
+/* Per-queue, driver-specific private data */
+struct vicodec_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int flags;
+ unsigned int sizeimage;
+ unsigned int sequence;
+ u32 fourcc;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct vicodec_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device enc_vfd;
+ struct video_device dec_vfd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
+
+ struct mutex enc_mutex;
+ struct mutex dec_mutex;
+ spinlock_t enc_lock;
+ spinlock_t dec_lock;
+
+ struct v4l2_m2m_dev *enc_dev;
+ struct v4l2_m2m_dev *dec_dev;
+};
+
+struct vicodec_ctx {
+ struct v4l2_fh fh;
+ struct vicodec_dev *dev;
+ bool is_enc;
+ spinlock_t *lock;
+
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *ctrl_gop_size;
+ unsigned int gop_size;
+ unsigned int gop_cnt;
+
+ /* Abort requested by m2m */
+ int aborting;
+ struct vb2_v4l2_buffer *last_src_buf;
+ struct vb2_v4l2_buffer *last_dst_buf;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quantization;
+
+ /* Source and destination queue data */
+ struct vicodec_q_data q_data[2];
+ struct raw_frame ref_frame;
+ u8 *compressed_frame;
+ u32 cur_buf_offset;
+ u32 comp_max_size;
+ u32 comp_size;
+ u32 comp_magic_cnt;
+ u32 comp_frame_size;
+ bool comp_has_frame;
+ bool comp_has_next_frame;
+};
+
+static const u32 pixfmts_yuv[] = {
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+};
+
+static inline struct vicodec_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vicodec_ctx, fh);
+}
+
+static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ WARN_ON(1);
+ break;
+ }
+ return NULL;
+}
+
+static void encode(struct vicodec_ctx *ctx,
+ struct vicodec_q_data *q_data,
+ u8 *p_in, u8 *p_out)
+{
+ unsigned int size = q_data->width * q_data->height;
+ struct cframe_hdr *p_hdr;
+ struct cframe cf;
+ struct raw_frame rf;
+ u32 encoding;
+
+ rf.width = q_data->width;
+ rf.height = q_data->height;
+ rf.luma = p_in;
+
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ rf.cb = rf.luma + size;
+ rf.cr = rf.cb + size / 4;
+ rf.chroma_step = 1;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ rf.cr = rf.luma + size;
+ rf.cb = rf.cr + size / 4;
+ rf.chroma_step = 1;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ rf.cb = rf.luma + size;
+ rf.cr = rf.cb + 1;
+ rf.chroma_step = 2;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ rf.cr = rf.luma + size;
+ rf.cb = rf.cr + 1;
+ rf.chroma_step = 2;
+ break;
+ }
+
+ cf.width = q_data->width;
+ cf.height = q_data->height;
+ cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr));
+
+ encoding = encode_frame(&rf, &ctx->ref_frame, &cf, !ctx->gop_cnt,
+ ctx->gop_cnt == ctx->gop_size - 1);
+ if (encoding != FRAME_PCODED)
+ ctx->gop_cnt = 0;
+ if (++ctx->gop_cnt == ctx->gop_size)
+ ctx->gop_cnt = 0;
+
+ p_hdr = (struct cframe_hdr *)p_out;
+ p_hdr->magic1 = VICODEC_MAGIC1;
+ p_hdr->magic2 = VICODEC_MAGIC2;
+ p_hdr->version = htonl(VICODEC_VERSION);
+ p_hdr->width = htonl(cf.width);
+ p_hdr->height = htonl(cf.height);
+ p_hdr->flags = htonl(q_data->flags);
+ if (encoding & LUMA_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_LUMA_IS_UNCOMPRESSED);
+ if (encoding & CB_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_CB_IS_UNCOMPRESSED);
+ if (encoding & CR_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_CR_IS_UNCOMPRESSED);
+ p_hdr->colorspace = htonl(ctx->colorspace);
+ p_hdr->xfer_func = htonl(ctx->xfer_func);
+ p_hdr->ycbcr_enc = htonl(ctx->ycbcr_enc);
+ p_hdr->quantization = htonl(ctx->quantization);
+ p_hdr->size = htonl(cf.size);
+ ctx->ref_frame.width = cf.width;
+ ctx->ref_frame.height = cf.height;
+}
+
+static int decode(struct vicodec_ctx *ctx,
+ struct vicodec_q_data *q_data,
+ u8 *p_in, u8 *p_out)
+{
+ unsigned int size = q_data->width * q_data->height;
+ unsigned int i;
+ struct cframe_hdr *p_hdr;
+ struct cframe cf;
+ u8 *p;
+
+ p_hdr = (struct cframe_hdr *)p_in;
+ cf.width = ntohl(p_hdr->width);
+ cf.height = ntohl(p_hdr->height);
+ q_data->flags = ntohl(p_hdr->flags);
+ ctx->colorspace = ntohl(p_hdr->colorspace);
+ ctx->xfer_func = ntohl(p_hdr->xfer_func);
+ ctx->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
+ ctx->quantization = ntohl(p_hdr->quantization);
+ cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));
+
+ if (p_hdr->magic1 != VICODEC_MAGIC1 ||
+ p_hdr->magic2 != VICODEC_MAGIC2 ||
+ ntohl(p_hdr->version) != VICODEC_VERSION ||
+ cf.width < VICODEC_MIN_WIDTH ||
+ cf.width > VICODEC_MAX_WIDTH ||
+ cf.height < VICODEC_MIN_HEIGHT ||
+ cf.height > VICODEC_MAX_HEIGHT ||
+ (cf.width & 7) || (cf.height & 7))
+ return -EINVAL;
+
+ /* TODO: support resolution changes */
+ if (cf.width != q_data->width || cf.height != q_data->height)
+ return -EINVAL;
+
+ decode_frame(&cf, &ctx->ref_frame, q_data->flags);
+ memcpy(p_out, ctx->ref_frame.luma, size);
+ p_out += size;
+
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ memcpy(p_out, ctx->ref_frame.cb, size / 4);
+ p_out += size / 4;
+ memcpy(p_out, ctx->ref_frame.cr, size / 4);
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ memcpy(p_out, ctx->ref_frame.cr, size / 4);
+ p_out += size / 4;
+ memcpy(p_out, ctx->ref_frame.cb, size / 4);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ for (i = 0, p = p_out; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cb[i];
+ for (i = 0, p = p_out + 1; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cr[i];
+ break;
+ case V4L2_PIX_FMT_NV21:
+ for (i = 0, p = p_out; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cr[i];
+ for (i = 0, p = p_out + 1; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cb[i];
+ break;
+ }
+ return 0;
+}
+
+static int device_process(struct vicodec_ctx *ctx,
+ struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
+{
+ struct vicodec_dev *dev = ctx->dev;
+ struct vicodec_q_data *q_out, *q_cap;
+ u8 *p_in, *p_out;
+ int ret;
+
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_cap = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (ctx->is_enc)
+ p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+ else
+ p_in = ctx->compressed_frame;
+ p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (ctx->is_enc) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)p_out;
+
+ encode(ctx, q_out, p_in, p_out);
+ vb2_set_plane_payload(&out_vb->vb2_buf, 0,
+ sizeof(*p_hdr) + ntohl(p_hdr->size));
+ } else {
+ ret = decode(ctx, q_cap, p_in, p_out);
+ if (ret)
+ return ret;
+ vb2_set_plane_payload(&out_vb->vb2_buf, 0,
+ q_cap->width * q_cap->height * 3 / 2);
+ }
+
+ out_vb->sequence = q_cap->sequence++;
+ out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp;
+
+ if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ out_vb->timecode = in_vb->timecode;
+ out_vb->field = in_vb->field;
+ out_vb->flags &= ~V4L2_BUF_FLAG_LAST;
+ out_vb->flags |= in_vb->flags &
+ (V4L2_BUF_FLAG_TIMECODE |
+ V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/* device_run() - prepares and starts the device */
+static void device_run(void *priv)
+{
+ static const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+ struct vicodec_ctx *ctx = priv;
+ struct vicodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct vicodec_q_data *q_out;
+ u32 state;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ state = VB2_BUF_STATE_DONE;
+ if (device_process(ctx, src_buf, dst_buf))
+ state = VB2_BUF_STATE_ERROR;
+ ctx->last_dst_buf = dst_buf;
+
+ spin_lock(ctx->lock);
+ if (!ctx->comp_has_next_frame && src_buf == ctx->last_src_buf) {
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+ if (ctx->is_enc) {
+ src_buf->sequence = q_out->sequence++;
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, state);
+ } else if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == ctx->cur_buf_offset) {
+ src_buf->sequence = q_out->sequence++;
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, state);
+ ctx->cur_buf_offset = 0;
+ ctx->comp_has_next_frame = false;
+ }
+ v4l2_m2m_buf_done(dst_buf, state);
+ ctx->comp_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = false;
+ spin_unlock(ctx->lock);
+
+ if (ctx->is_enc)
+ v4l2_m2m_job_finish(dev->enc_dev, ctx->fh.m2m_ctx);
+ else
+ v4l2_m2m_job_finish(dev->dec_dev, ctx->fh.m2m_ctx);
+}
+
+static void job_remove_out_buf(struct vicodec_ctx *ctx, u32 state)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct vicodec_q_data *q_out;
+
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ spin_lock(ctx->lock);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ src_buf->sequence = q_out->sequence++;
+ v4l2_m2m_buf_done(src_buf, state);
+ ctx->cur_buf_offset = 0;
+ spin_unlock(ctx->lock);
+}
+
+static int job_ready(void *priv)
+{
+ static const u8 magic[] = {
+ 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff
+ };
+ struct vicodec_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_buf;
+ u8 *p_out;
+ u8 *p;
+ u32 sz;
+ u32 state;
+
+ if (ctx->is_enc || ctx->comp_has_frame)
+ return 1;
+
+restart:
+ ctx->comp_has_next_frame = false;
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!src_buf)
+ return 0;
+ p_out = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ sz = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ p = p_out + ctx->cur_buf_offset;
+
+ state = VB2_BUF_STATE_DONE;
+
+ if (!ctx->comp_size) {
+ state = VB2_BUF_STATE_ERROR;
+ for (; p < p_out + sz; p++) {
+ u32 copy;
+
+ p = memchr(p, magic[ctx->comp_magic_cnt],
+ p_out + sz - p);
+ if (!p) {
+ ctx->comp_magic_cnt = 0;
+ break;
+ }
+ copy = sizeof(magic) - ctx->comp_magic_cnt;
+ if (p_out + sz - p < copy)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_magic_cnt,
+ p, copy);
+ ctx->comp_magic_cnt += copy;
+ if (!memcmp(ctx->compressed_frame, magic, ctx->comp_magic_cnt)) {
+ p += copy;
+ state = VB2_BUF_STATE_DONE;
+ break;
+ }
+ ctx->comp_magic_cnt = 0;
+ }
+ if (ctx->comp_magic_cnt < sizeof(magic)) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ ctx->comp_size = sizeof(magic);
+ }
+ if (ctx->comp_size < sizeof(struct cframe_hdr)) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)ctx->compressed_frame;
+ u32 copy = sizeof(struct cframe_hdr) - ctx->comp_size;
+
+ if (copy > p_out + sz - p)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_size,
+ p, copy);
+ p += copy;
+ ctx->comp_size += copy;
+ if (ctx->comp_size < sizeof(struct cframe_hdr)) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr);
+ if (ctx->comp_frame_size > ctx->comp_max_size)
+ ctx->comp_frame_size = ctx->comp_max_size;
+ }
+ if (ctx->comp_size < ctx->comp_frame_size) {
+ u32 copy = ctx->comp_frame_size - ctx->comp_size;
+
+ if (copy > p_out + sz - p)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_size,
+ p, copy);
+ p += copy;
+ ctx->comp_size += copy;
+ if (ctx->comp_size < ctx->comp_frame_size) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ }
+ ctx->cur_buf_offset = p - p_out;
+ ctx->comp_has_frame = true;
+ ctx->comp_has_next_frame = false;
+ if (sz - ctx->cur_buf_offset >= sizeof(struct cframe_hdr)) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)p;
+ u32 frame_size = ntohl(p_hdr->size);
+ u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr);
+
+ if (!memcmp(p, magic, sizeof(magic)))
+ ctx->comp_has_next_frame = remaining >= frame_size;
+ }
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct vicodec_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/*
+ * video ioctls
+ */
+
+static u32 find_fmt(u32 fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixfmts_yuv); i++)
+ if (pixfmts_yuv[i] == fmt)
+ return fmt;
+ return pixfmts_yuv[0];
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, VICODEC_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, VICODEC_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", VICODEC_NAME);
+ cap->device_caps = V4L2_CAP_STREAMING |
+ (multiplanar ?
+ V4L2_CAP_VIDEO_M2M_MPLANE :
+ V4L2_CAP_VIDEO_M2M);
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out)
+{
+ bool is_yuv = (is_enc && is_out) || (!is_enc && !is_out);
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar)
+ return -EINVAL;
+ if (!V4L2_TYPE_IS_MULTIPLANAR(f->type) && multiplanar)
+ return -EINVAL;
+ if (f->index >= (is_yuv ? ARRAY_SIZE(pixfmts_yuv) : 1))
+ return -EINVAL;
+
+ if (is_yuv)
+ f->pixelformat = pixfmts_yuv[f->index];
+ else
+ f->pixelformat = V4L2_PIX_FMT_FWHT;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ return enum_fmt(f, ctx->is_enc, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ return enum_fmt(f, ctx->is_enc, true);
+}
+
+static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct vicodec_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = q_data->fourcc;
+ if (q_data->fourcc == V4L2_PIX_FMT_FWHT)
+ pix->bytesperline = 0;
+ else
+ pix->bytesperline = q_data->width;
+ pix->sizeimage = q_data->sizeimage;
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->width = q_data->width;
+ pix_mp->height = q_data->height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = q_data->fourcc;
+ pix_mp->num_planes = 1;
+ if (q_data->fourcc == V4L2_PIX_FMT_FWHT)
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ else
+ pix_mp->plane_fmt[0].bytesperline = q_data->width;
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH) & ~7;
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+ pix->bytesperline = pix->width;
+ pix->sizeimage = pix->width * pix->height * 3 / 2;
+ pix->field = V4L2_FIELD_NONE;
+ if (pix->pixelformat == V4L2_PIX_FMT_FWHT) {
+ pix->bytesperline = 0;
+ pix->sizeimage += sizeof(struct cframe_hdr);
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH) & ~7;
+ pix_mp->height =
+ clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+ pix_mp->plane_fmt[0].bytesperline = pix_mp->width;
+ pix_mp->plane_fmt[0].sizeimage =
+ pix_mp->width * pix_mp->height * 3 / 2;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->num_planes = 1;
+ if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT) {
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage +=
+ sizeof(struct cframe_hdr);
+ }
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->pixelformat = ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(f->fmt.pix.pixelformat);
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->pixelformat = ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix_mp->pixelformat);
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(ctx, f);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix->pixelformat);
+ if (!pix->colorspace)
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix_mp->pixelformat);
+ if (!pix_mp->colorspace)
+ pix_mp->colorspace = V4L2_COLORSPACE_REC709;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(ctx, f);
+}
+
+static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct vicodec_q_data *q_data;
+ struct vb2_queue *vq;
+ bool fmt_changed = true;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
+ fmt_changed =
+ q_data->fourcc != pix->pixelformat ||
+ q_data->width != pix->width ||
+ q_data->height != pix->height;
+
+ if (vb2_is_busy(vq) && fmt_changed)
+ return -EBUSY;
+
+ q_data->fourcc = pix->pixelformat;
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->sizeimage = pix->sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
+ fmt_changed =
+ q_data->fourcc != pix_mp->pixelformat ||
+ q_data->width != pix_mp->width ||
+ q_data->height != pix_mp->height;
+
+ if (vb2_is_busy(vq) && fmt_changed)
+ return -EBUSY;
+
+ q_data->fourcc = pix_mp->pixelformat;
+ q_data->width = pix_mp->width;
+ q_data->height = pix_mp->height;
+ q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fourcc: %08x\n",
+ f->type, q_data->width, q_data->height, q_data->fourcc);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(file2ctx(file), f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(file2ctx(file), f);
+ if (!ret) {
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ ctx->colorspace = pix->colorspace;
+ ctx->xfer_func = pix->xfer_func;
+ ctx->ycbcr_enc = pix->ycbcr_enc;
+ ctx->quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quantization = pix_mp->quantization;
+ break;
+ default:
+ break;
+ }
+ }
+ return ret;
+}
+
+static void vicodec_mark_last_buf(struct vicodec_ctx *ctx)
+{
+ static const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ spin_lock(ctx->lock);
+ ctx->last_src_buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
+ if (!ctx->last_src_buf && ctx->last_dst_buf) {
+ ctx->last_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+ spin_unlock(ctx->lock);
+}
+
+static int vicodec_try_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP)
+ return -EINVAL;
+
+ if (ec->flags & V4L2_ENC_CMD_STOP_AT_GOP_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vicodec_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vicodec_try_encoder_cmd(file, fh, ec);
+ if (ret < 0)
+ return ret;
+
+ vicodec_mark_last_buf(ctx);
+ return 0;
+}
+
+static int vicodec_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vicodec_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vicodec_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ vicodec_mark_last_buf(ctx);
+ return 0;
+}
+
+static int vicodec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ switch (fsize->pixel_format) {
+ case V4L2_PIX_FMT_FWHT:
+ break;
+ default:
+ if (find_fmt(fsize->pixel_format) == fsize->pixel_format)
+ break;
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+
+ fsize->stepwise.min_width = MIN_WIDTH;
+ fsize->stepwise.max_width = MAX_WIDTH;
+ fsize->stepwise.step_width = 8;
+ fsize->stepwise.min_height = MIN_HEIGHT;
+ fsize->stepwise.max_height = MAX_HEIGHT;
+ fsize->stepwise.step_height = 8;
+
+ return 0;
+}
+
+static int vicodec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_encoder_cmd = vicodec_try_encoder_cmd,
+ .vidioc_encoder_cmd = vicodec_encoder_cmd,
+ .vidioc_try_decoder_cmd = vicodec_try_decoder_cmd,
+ .vidioc_decoder_cmd = vicodec_decoder_cmd,
+ .vidioc_enum_framesizes = vicodec_enum_framesizes,
+
+ .vidioc_subscribe_event = vicodec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static int vicodec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vicodec_q_data *q_data = get_q_data(ctx, vq->type);
+ unsigned int size = q_data->sizeimage;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+ return 0;
+}
+
+static int vicodec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vicodec_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vicodec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ return;
+ spin_lock(ctx->lock);
+ v4l2_m2m_buf_done(vbuf, state);
+ spin_unlock(ctx->lock);
+ }
+}
+
+static int vicodec_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vicodec_q_data *q_data = get_q_data(ctx, q->type);
+ unsigned int size = q_data->width * q_data->height;
+
+ q_data->sequence = 0;
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ return 0;
+
+ ctx->ref_frame.width = ctx->ref_frame.height = 0;
+ ctx->ref_frame.luma = kvmalloc(size * 3 / 2, GFP_KERNEL);
+ ctx->comp_max_size = size * 3 / 2 + sizeof(struct cframe_hdr);
+ ctx->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
+ if (!ctx->ref_frame.luma || !ctx->compressed_frame) {
+ kvfree(ctx->ref_frame.luma);
+ kvfree(ctx->compressed_frame);
+ vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
+ return -ENOMEM;
+ }
+ ctx->ref_frame.cb = ctx->ref_frame.luma + size;
+ ctx->ref_frame.cr = ctx->ref_frame.cb + size / 4;
+ ctx->last_src_buf = NULL;
+ ctx->last_dst_buf = NULL;
+ v4l2_ctrl_grab(ctx->ctrl_gop_size, true);
+ ctx->gop_size = v4l2_ctrl_g_ctrl(ctx->ctrl_gop_size);
+ ctx->gop_cnt = 0;
+ ctx->cur_buf_offset = 0;
+ ctx->comp_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = false;
+
+ return 0;
+}
+
+static void vicodec_stop_streaming(struct vb2_queue *q)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ return;
+
+ kvfree(ctx->ref_frame.luma);
+ kvfree(ctx->compressed_frame);
+ v4l2_ctrl_grab(ctx->ctrl_gop_size, false);
+}
+
+static const struct vb2_ops vicodec_qops = {
+ .queue_setup = vicodec_queue_setup,
+ .buf_prepare = vicodec_buf_prepare,
+ .buf_queue = vicodec_buf_queue,
+ .start_streaming = vicodec_start_streaming,
+ .stop_streaming = vicodec_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct vicodec_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = (multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &vicodec_qops;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = ctx->is_enc ? &ctx->dev->enc_mutex :
+ &ctx->dev->dec_mutex;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = (multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &vicodec_qops;
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = src_vq->lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int vicodec_open(struct file *file)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct vicodec_dev *dev = video_drvdata(file);
+ struct vicodec_ctx *ctx = NULL;
+ struct v4l2_ctrl_handler *hdl;
+ unsigned int size;
+ int rc = 0;
+
+ if (mutex_lock_interruptible(vfd->lock))
+ return -ERESTARTSYS;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto open_unlock;
+ }
+
+ if (vfd == &dev->enc_vfd)
+ ctx->is_enc = true;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ ctx->ctrl_gop_size = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, 16, 1, 10);
+ if (hdl->error) {
+ rc = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx);
+ goto open_unlock;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ ctx->q_data[V4L2_M2M_SRC].fourcc =
+ ctx->is_enc ? V4L2_PIX_FMT_YUV420 : V4L2_PIX_FMT_FWHT;
+ ctx->q_data[V4L2_M2M_SRC].width = 1280;
+ ctx->q_data[V4L2_M2M_SRC].height = 720;
+ size = 1280 * 720 * 3 / 2;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->q_data[V4L2_M2M_DST].fourcc =
+ ctx->is_enc ? V4L2_PIX_FMT_FWHT : V4L2_PIX_FMT_YUV420;
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+ size += sizeof(struct cframe_hdr);
+ if (ctx->is_enc) {
+ ctx->q_data[V4L2_M2M_DST].sizeimage = size;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->enc_dev, ctx,
+ &queue_init);
+ ctx->lock = &dev->enc_lock;
+ } else {
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->dec_dev, ctx,
+ &queue_init);
+ ctx->lock = &dev->dec_lock;
+ }
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ rc = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ goto open_unlock;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+open_unlock:
+ mutex_unlock(vfd->lock);
+ return rc;
+}
+
+static int vicodec_release(struct file *file)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ mutex_lock(vfd->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(vfd->lock);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vicodec_fops = {
+ .owner = THIS_MODULE,
+ .open = vicodec_open,
+ .release = vicodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device vicodec_videodev = {
+ .name = VICODEC_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &vicodec_fops,
+ .ioctl_ops = &vicodec_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+ .job_ready = job_ready,
+};
+
+static int vicodec_probe(struct platform_device *pdev)
+{
+ struct vicodec_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->enc_lock);
+ spin_lock_init(&dev->dec_lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &pdev->dev;
+ strlcpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model));
+ media_device_init(&dev->mdev);
+ dev->v4l2_dev.mdev = &dev->mdev;
+#endif
+
+ mutex_init(&dev->enc_mutex);
+ mutex_init(&dev->dec_mutex);
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->enc_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->enc_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
+ ret = PTR_ERR(dev->enc_dev);
+ goto unreg_dev;
+ }
+
+ dev->dec_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->dec_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
+ ret = PTR_ERR(dev->dec_dev);
+ goto err_enc_m2m;
+ }
+
+ dev->enc_vfd = vicodec_videodev;
+ vfd = &dev->enc_vfd;
+ vfd->lock = &dev->enc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ strlcpy(vfd->name, "vicodec-enc", sizeof(vfd->name));
+ v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_m2m;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->dec_vfd = vicodec_videodev;
+ vfd = &dev->dec_vfd;
+ vfd->lock = &dev->dec_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ strlcpy(vfd->name, "vicodec-dec", sizeof(vfd->name));
+ v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto unreg_enc;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = v4l2_m2m_register_media_controller(dev->enc_dev,
+ &dev->enc_vfd, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m;
+ }
+
+ ret = v4l2_m2m_register_media_controller(dev->dec_dev,
+ &dev->dec_vfd, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m_enc_mc;
+ }
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto unreg_m2m_dec_mc;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+unreg_m2m_dec_mc:
+ v4l2_m2m_unregister_media_controller(dev->dec_dev);
+unreg_m2m_enc_mc:
+ v4l2_m2m_unregister_media_controller(dev->enc_dev);
+unreg_m2m:
+ video_unregister_device(&dev->dec_vfd);
+#endif
+unreg_enc:
+ video_unregister_device(&dev->enc_vfd);
+err_dec_m2m:
+ v4l2_m2m_release(dev->dec_dev);
+err_enc_m2m:
+ v4l2_m2m_release(dev->enc_dev);
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int vicodec_remove(struct platform_device *pdev)
+{
+ struct vicodec_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " VICODEC_NAME);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->enc_dev);
+ v4l2_m2m_unregister_media_controller(dev->dec_dev);
+ media_device_cleanup(&dev->mdev);
+#endif
+
+ v4l2_m2m_release(dev->enc_dev);
+ v4l2_m2m_release(dev->dec_dev);
+ video_unregister_device(&dev->enc_vfd);
+ video_unregister_device(&dev->dec_vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver vicodec_pdrv = {
+ .probe = vicodec_probe,
+ .remove = vicodec_remove,
+ .driver = {
+ .name = VICODEC_NAME,
+ },
+};
+
+static void __exit vicodec_exit(void)
+{
+ platform_driver_unregister(&vicodec_pdrv);
+ platform_device_unregister(&vicodec_pdev);
+}
+
+static int __init vicodec_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vicodec_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&vicodec_pdrv);
+ if (ret)
+ platform_device_unregister(&vicodec_pdev);
+
+ return ret;
+}
+
+module_init(vicodec_init);
+module_exit(vicodec_exit);
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
new file mode 100644
index 000000000..c8ffe7bff
--- /dev/null
+++ b/drivers/media/platform/video-mux.c
@@ -0,0 +1,425 @@
+/*
+ * video stream multiplexer controlled via mux control
+ *
+ * Copyright (C) 2013 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ * Copyright (C) 2016-2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+struct video_mux {
+ struct v4l2_subdev subdev;
+ struct media_pad *pads;
+ struct v4l2_mbus_framefmt *format_mbus;
+ struct mux_control *mux;
+ struct mutex lock;
+ int active;
+};
+
+static const struct v4l2_mbus_framefmt video_mux_format_mbus_default = {
+ .width = 1,
+ .height = 1,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .field = V4L2_FIELD_NONE,
+};
+
+static inline struct video_mux *v4l2_subdev_to_video_mux(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct video_mux, subdev);
+}
+
+static int video_mux_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ u16 source_pad = entity->num_pads - 1;
+ int ret = 0;
+
+ /*
+ * The mux state is determined by the enabled sink pad link.
+ * Enabling or disabling the source pad link has no effect.
+ */
+ if (local->flags & MEDIA_PAD_FL_SOURCE)
+ return 0;
+
+ dev_dbg(sd->dev, "link setup '%s':%d->'%s':%d[%d]",
+ remote->entity->name, remote->index, local->entity->name,
+ local->index, flags & MEDIA_LNK_FL_ENABLED);
+
+ mutex_lock(&vmux->lock);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (vmux->active == local->index)
+ goto out;
+
+ if (vmux->active >= 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ dev_dbg(sd->dev, "setting %d active\n", local->index);
+ ret = mux_control_try_select(vmux->mux, local->index);
+ if (ret < 0)
+ goto out;
+ vmux->active = local->index;
+
+ /* Propagate the active format to the source */
+ vmux->format_mbus[source_pad] = vmux->format_mbus[vmux->active];
+ } else {
+ if (vmux->active != local->index)
+ goto out;
+
+ dev_dbg(sd->dev, "going inactive\n");
+ mux_control_deselect(vmux->mux);
+ vmux->active = -1;
+ }
+
+out:
+ mutex_unlock(&vmux->lock);
+ return ret;
+}
+
+static const struct media_entity_operations video_mux_ops = {
+ .link_setup = video_mux_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
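+/*
+ * Forward s_stream to the upstream subdev connected to the active sink
+ * pad; streaming cannot be started while no input is selected.
+ */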
+static int video_mux_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ struct v4l2_subdev *upstream_sd;
+ struct media_pad *pad;
+
+ if (vmux->active == -1) {
+ dev_err(sd->dev, "Can not start streaming on inactive mux\n");
+ return -EINVAL;
+ }
+
+ pad = media_entity_remote_pad(&sd->entity.pads[vmux->active]);
+ if (!pad) {
+ dev_err(sd->dev, "Failed to find remote source pad\n");
+ return -ENOLINK;
+ }
+
+ if (!is_media_entity_v4l2_subdev(pad->entity)) {
+ dev_err(sd->dev, "Upstream entity is not a v4l2 subdev\n");
+ return -ENODEV;
+ }
+
+ upstream_sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ return v4l2_subdev_call(upstream_sd, video, s_stream, enable);
+}
+
+static const struct v4l2_subdev_video_ops video_mux_subdev_video_ops = {
+ .s_stream = video_mux_s_stream,
+};
+
+static struct v4l2_mbus_framefmt *
+__video_mux_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &vmux->format_mbus[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int video_mux_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+
+ mutex_lock(&vmux->lock);
+
+ sdformat->format = *__video_mux_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->which);
+
+ mutex_unlock(&vmux->lock);
+
+ return 0;
+}
+
+static int video_mux_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ struct v4l2_mbus_framefmt *mbusformat, *source_mbusformat;
+ struct media_pad *pad = &vmux->pads[sdformat->pad];
+ u16 source_pad = sd->entity.num_pads - 1;
+
+ mbusformat = __video_mux_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->which);
+ if (!mbusformat)
+ return -EINVAL;
+
+ source_mbusformat = __video_mux_get_pad_format(sd, cfg, source_pad,
+ sdformat->which);
+ if (!source_mbusformat)
+ return -EINVAL;
+
+ /* No size limitations except V4L2 compliance requirements */
+ v4l_bound_align_image(&sdformat->format.width, 1, 65536, 0,
+ &sdformat->format.height, 1, 65536, 0, 0);
+
+ /* All formats except LVDS and vendor specific formats are acceptable */
+ switch (sdformat->format.code) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_GBR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_ARGB8888_1X32:
+ case MEDIA_BUS_FMT_RGB888_1X32_PADHI:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_UYVY8_1_5X8:
+ case MEDIA_BUS_FMT_VYUY8_1_5X8:
+ case MEDIA_BUS_FMT_YUYV8_1_5X8:
+ case MEDIA_BUS_FMT_YVYU8_1_5X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_VYUY10_2X10:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YVYU10_2X10:
+ case MEDIA_BUS_FMT_Y12_1X12:
+ case MEDIA_BUS_FMT_UYVY12_2X12:
+ case MEDIA_BUS_FMT_VYUY12_2X12:
+ case MEDIA_BUS_FMT_YUYV12_2X12:
+ case MEDIA_BUS_FMT_YVYU12_2X12:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VYUY10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YVYU10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ case MEDIA_BUS_FMT_VYUY12_1X24:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ case MEDIA_BUS_FMT_YVYU12_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_AYUV8_1X32:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_AHSV8888_1X32:
+ break;
+ default:
+ sdformat->format.code = MEDIA_BUS_FMT_Y8_1X8;
+ break;
+ }
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+
+ mutex_lock(&vmux->lock);
+
+ /* Source pad mirrors active sink pad, no limitations on sink pads */
+ if ((pad->flags & MEDIA_PAD_FL_SOURCE) && vmux->active >= 0)
+ sdformat->format = vmux->format_mbus[vmux->active];
+
+ *mbusformat = sdformat->format;
+
+ /* Propagate the format from an active sink to source */
+ if ((pad->flags & MEDIA_PAD_FL_SINK) && (pad->index == vmux->active))
+ *source_mbusformat = sdformat->format;
+
+ mutex_unlock(&vmux->lock);
+
+ return 0;
+}
+
+static int video_mux_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ struct v4l2_mbus_framefmt *mbusformat;
+ unsigned int i;
+
+ mutex_lock(&vmux->lock);
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ mbusformat = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mbusformat = video_mux_format_mbus_default;
+ }
+
+ mutex_unlock(&vmux->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops video_mux_pad_ops = {
+ .init_cfg = video_mux_init_cfg,
+ .get_fmt = video_mux_get_format,
+ .set_fmt = video_mux_set_format,
+};
+
+static const struct v4l2_subdev_ops video_mux_subdev_ops = {
+ .pad = &video_mux_pad_ops,
+ .video = &video_mux_subdev_video_ops,
+};
+
+static int video_mux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *ep;
+ struct video_mux *vmux;
+ unsigned int num_pads = 0;
+ unsigned int i;
+ int ret;
+
+ vmux = devm_kzalloc(dev, sizeof(*vmux), GFP_KERNEL);
+ if (!vmux)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, vmux);
+
+ v4l2_subdev_init(&vmux->subdev, &video_mux_subdev_ops);
+ snprintf(vmux->subdev.name, sizeof(vmux->subdev.name), "%s", np->name);
+ vmux->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ vmux->subdev.dev = dev;
+
+ /*
+ * The highest-numbered port is the output port; it determines the
+ * total number of pads.
+ */
+ for_each_endpoint_of_node(np, ep) {
+ struct of_endpoint endpoint;
+
+ of_graph_parse_endpoint(ep, &endpoint);
+ num_pads = max(num_pads, endpoint.port + 1);
+ }
+
+ if (num_pads < 2) {
+ dev_err(dev, "Not enough ports %d\n", num_pads);
+ return -EINVAL;
+ }
+
+ vmux->mux = devm_mux_control_get(dev, NULL);
+ if (IS_ERR(vmux->mux)) {
+ ret = PTR_ERR(vmux->mux);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get mux: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&vmux->lock);
+ vmux->active = -1;
+ vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads),
+ GFP_KERNEL);
+ if (!vmux->pads)
+ return -ENOMEM;
+
+ vmux->format_mbus = devm_kcalloc(dev, num_pads,
+ sizeof(*vmux->format_mbus),
+ GFP_KERNEL);
+ if (!vmux->format_mbus)
+ return -ENOMEM;
+
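+ /* Pads 0 .. num_pads - 2 are sinks, the last pad is the single source */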
+ for (i = 0; i < num_pads; i++) {
+ vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
+ : MEDIA_PAD_FL_SOURCE;
+ vmux->format_mbus[i] = video_mux_format_mbus_default;
+ }
+
+ vmux->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
+ ret = media_entity_pads_init(&vmux->subdev.entity, num_pads,
+ vmux->pads);
+ if (ret < 0)
+ return ret;
+
+ vmux->subdev.entity.ops = &video_mux_ops;
+
+ return v4l2_async_register_subdev(&vmux->subdev);
+}
+
+static int video_mux_remove(struct platform_device *pdev)
+{
+ struct video_mux *vmux = platform_get_drvdata(pdev);
+ struct v4l2_subdev *sd = &vmux->subdev;
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct of_device_id video_mux_dt_ids[] = {
+ { .compatible = "video-mux", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, video_mux_dt_ids);
+
+static struct platform_driver video_mux_driver = {
+ .probe = video_mux_probe,
+ .remove = video_mux_remove,
+ .driver = {
+ .of_match_table = video_mux_dt_ids,
+ .name = "video-mux",
+ },
+};
+
+module_platform_driver(video_mux_driver);
+
+MODULE_DESCRIPTION("video stream multiplexer");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_AUTHOR("Philipp Zabel, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
new file mode 100644
index 000000000..7b8cf661f
--- /dev/null
+++ b/drivers/media/platform/vim2m.c
@@ -0,0 +1,1121 @@
+/*
+ * A virtual v4l2-mem2mem example device.
+ *
+ * This is a virtual device driver for testing the mem-to-mem videobuf
+ * framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination, processes the data and issues an "irq" (simulated by a delayed
+ * workqueue).
+ * The device is capable of multi-instance, multi-buffer-per-transaction
+ * operation (via the mem2mem framework).
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
+MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.1");
+MODULE_ALIAS("mem2mem_testdev");
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 640
+#define MAX_H 480
+#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "vim2m"
+
+/* Per queue */
+#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
+
+/* Default transaction time in msec */
+#define MEM2MEM_DEF_TRANSTIME 40
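+/*
+ * Each line is processed as MEM2MEM_NUM_TILES tiles; the pixel bytes of
+ * adjacent tiles are alternately raised and lowered by MEM2MEM_COLOR_STEP,
+ * producing a visible test pattern in the output.
+ */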
+#define MEM2MEM_COLOR_STEP (0xff >> 4)
+#define MEM2MEM_NUM_TILES 8
+
+/* Flags that indicate processing mode */
+#define MEM2MEM_HFLIP (1 << 0)
+#define MEM2MEM_VFLIP (1 << 1)
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+static void vim2m_dev_release(struct device *dev)
+{}
+
+static struct platform_device vim2m_pdev = {
+ .name = MEM2MEM_NAME,
+ .dev.release = vim2m_dev_release,
+};
+
+struct vim2m_fmt {
+ u32 fourcc;
+ int depth;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct vim2m_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .depth = 16,
+ /* Both capture and output format */
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ /* Output-only format */
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Per-queue, driver-specific private data */
+struct vim2m_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ unsigned int sequence;
+ struct vim2m_fmt *fmt;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+#define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000)
+#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001)
+
+static struct vim2m_fmt *find_format(struct v4l2_format *f)
+{
+ struct vim2m_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct vim2m_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
+
+ atomic_t num_inst;
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ struct delayed_work work_run;
+
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct vim2m_ctx {
+ struct v4l2_fh fh;
+ struct vim2m_dev *dev;
+
+ struct v4l2_ctrl_handler hdl;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+ /* Transaction time (i.e. simulated processing time) in milliseconds */
+ u32 transtime;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ /* Processing mode */
+ int mode;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+
+ /* Source and destination queue data */
+ struct vim2m_q_data q_data[2];
+};
+
+static inline struct vim2m_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vim2m_ctx, fh);
+}
+
+static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+
+static int device_process(struct vim2m_ctx *ctx,
+ struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
+{
+ struct vim2m_dev *dev = ctx->dev;
+ struct vim2m_q_data *q_data;
+ u8 *p_in, *p_out;
+ int x, y, t, w;
+ int tile_w, bytes_left;
+ int width, height, bytesperline;
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ width = q_data->width;
+ height = q_data->height;
+ bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+
+ p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+ p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (vb2_plane_size(&in_vb->vb2_buf, 0) >
+ vb2_plane_size(&out_vb->vb2_buf, 0)) {
+ v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
+ return -EINVAL;
+ }
+
+ tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
+ / MEM2MEM_NUM_TILES;
+ bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
+ w = 0;
+
+ out_vb->sequence =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
+ in_vb->sequence = q_data->sequence++;
+ out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp;
+
+ if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ out_vb->timecode = in_vb->timecode;
+ out_vb->field = in_vb->field;
+ out_vb->flags = in_vb->flags &
+ (V4L2_BUF_FLAG_TIMECODE |
+ V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
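+ /*
+ * Copy the input frame to the output buffer tile by tile, alternately
+ * adding and subtracting MEM2MEM_COLOR_STEP to each tile's pixel bytes.
+ * Depending on ctx->mode the output is written in normal order,
+ * mirrored horizontally, flipped vertically, or both.
+ */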
+ switch (ctx->mode) {
+ case MEM2MEM_HFLIP | MEM2MEM_VFLIP:
+ p_out += bytesperline * height - bytes_left;
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out -= bytes_left;
+ }
+ break;
+
+ case MEM2MEM_HFLIP:
+ for (y = 0; y < height; ++y) {
+ p_out += MEM2MEM_NUM_TILES * tile_w;
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x01) {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytesperline;
+ }
+ break;
+
+ case MEM2MEM_VFLIP:
+ p_out += bytesperline * (height - 1);
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left - 2 * bytesperline;
+ }
+ break;
+
+ default:
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/*
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct vim2m_ctx *ctx = priv;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
+ dprintk(ctx->dev, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct vim2m_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This simulates all the immediate preparations required before starting
+ * a device. This will be called by the framework when it decides to schedule
+ * a particular instance.
+ */
+static void device_run(void *priv)
+{
+ struct vim2m_ctx *ctx = priv;
+ struct vim2m_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ device_process(ctx, src_buf, dst_buf);
+
+ /* Run delayed work, which simulates a hardware irq */
+ schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
+}
+
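+/*
+ * device_work() - simulates the interrupt handler: completes the current
+ * source/destination buffer pair and either finishes the transaction or
+ * processes the next buffer pair.
+ */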
+static void device_work(struct work_struct *w)
+{
+ struct vim2m_dev *vim2m_dev =
+ container_of(w, struct vim2m_dev, work_run.work);
+ struct vim2m_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ unsigned long flags;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
+
+ if (!curr_ctx) {
+ pr_err("Instance released before the end of transaction\n");
+ return;
+ }
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ curr_ctx->num_processed++;
+
+ spin_lock_irqsave(&vim2m_dev->irqlock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&vim2m_dev->irqlock, flags);
+
+ if (curr_ctx->num_processed == curr_ctx->translen
+ || curr_ctx->aborting) {
+ dprintk(curr_ctx->dev, "Finishing transaction\n");
+ curr_ctx->num_processed = 0;
+ v4l2_m2m_job_finish(vim2m_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
+ } else {
+ device_run(curr_ctx);
+ }
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", MEM2MEM_NAME);
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct vim2m_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+ /* Is this the index-th format of the requested type? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct vim2m_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+ f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt)
+{
+ /* The V4L2 specification suggests that the driver corrects the format
+ * struct if any of the dimensions is unsupported */
+ if (f->fmt.pix.height < MIN_H)
+ f->fmt.pix.height = MIN_H;
+ else if (f->fmt.pix.height > MAX_H)
+ f->fmt.pix.height = MAX_H;
+
+ if (f->fmt.pix.width < MIN_W)
+ f->fmt.pix.width = MIN_W;
+ else if (f->fmt.pix.width > MAX_W)
+ f->fmt.pix.width = MAX_W;
+
+ f->fmt.pix.width &= ~DIM_ALIGN_MASK;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vim2m_fmt *fmt;
+ struct vim2m_ctx *ctx = file2ctx(file);
+
+ fmt = find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_format(f);
+ }
+ if (!(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vim2m_fmt *fmt;
+ struct vim2m_ctx *ctx = file2ctx(file);
+
+ fmt = find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_format(f);
+ }
+ if (!(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+ if (!f->fmt.pix.colorspace)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
+{
+ struct vim2m_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = find_format(f);
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->sizeimage = q_data->width * q_data->height
+ * q_data->fmt->depth >> 3;
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(file2ctx(file), f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vim2m_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(file2ctx(file), f);
+ if (!ret) {
+ ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quant = f->fmt.pix.quantization;
+ }
+ return ret;
+}
+
+static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vim2m_ctx *ctx =
+ container_of(ctrl->handler, struct vim2m_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ ctx->mode |= MEM2MEM_HFLIP;
+ else
+ ctx->mode &= ~MEM2MEM_HFLIP;
+ break;
+
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ ctx->mode |= MEM2MEM_VFLIP;
+ else
+ ctx->mode &= ~MEM2MEM_VFLIP;
+ break;
+
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctx->transtime = ctrl->val;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctx->translen = ctrl->val;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vim2m_ctrl_ops = {
+ .s_ctrl = vim2m_s_ctrl,
+};
+
+
+static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static int vim2m_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vim2m_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
+
+ while (size * count > MEM2MEM_VID_MEM_LIMIT)
+ count--;
+ *nbuffers = count;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
+
+static int vim2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vim2m_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+ return 0;
+}
+
+static void vim2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
+{
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct vim2m_q_data *q_data = get_q_data(ctx, q->type);
+
+ q_data->sequence = 0;
+ return 0;
+}
+
+static void vim2m_stop_streaming(struct vb2_queue *q)
+{
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct vim2m_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned long flags;
+
+ if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
+ cancel_delayed_work_sync(&dev->work_run);
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ return;
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ }
+}
+
+static const struct vb2_ops vim2m_qops = {
+ .queue_setup = vim2m_queue_setup,
+ .buf_prepare = vim2m_buf_prepare,
+ .buf_queue = vim2m_buf_queue,
+ .start_streaming = vim2m_start_streaming,
+ .stop_streaming = vim2m_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct vim2m_ctx *ctx = priv;
+ int ret;
+
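+ /*
+ * The source queue is the OUTPUT queue (buffers supplied by userspace),
+ * the destination queue is the CAPTURE queue (processed buffers returned
+ * to userspace); both use vmalloc'ed memory and copy timestamps from
+ * source to destination buffers.
+ */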
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &vim2m_qops;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &vim2m_qops;
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = {
+ .ops = &vim2m_ctrl_ops,
+ .id = V4L2_CID_TRANS_TIME_MSEC,
+ .name = "Transaction Time (msec)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = MEM2MEM_DEF_TRANSTIME,
+ .min = 1,
+ .max = 10001,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vim2m_ctrl_trans_num_bufs = {
+ .ops = &vim2m_ctrl_ops,
+ .id = V4L2_CID_TRANS_NUM_BUFS,
+ .name = "Buffers Per Transaction",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = 1,
+ .min = 1,
+ .max = MEM2MEM_DEF_NUM_BUFS,
+ .step = 1,
+};
+
+/*
+ * File operations
+ */
+static int vim2m_open(struct file *file)
+{
+ struct vim2m_dev *dev = video_drvdata(file);
+ struct vim2m_ctx *ctx = NULL;
+ struct v4l2_ctrl_handler *hdl;
+ int rc = 0;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto open_unlock;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL);
+ v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL);
+ if (hdl->error) {
+ rc = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx);
+ goto open_unlock;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+ ctx->q_data[V4L2_M2M_SRC].width = 640;
+ ctx->q_data[V4L2_M2M_SRC].height = 480;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage =
+ ctx->q_data[V4L2_M2M_SRC].width *
+ ctx->q_data[V4L2_M2M_SRC].height *
+ (ctx->q_data[V4L2_M2M_SRC].fmt->depth >> 3);
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ rc = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ goto open_unlock;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+ atomic_inc(&dev->num_inst);
+
+ dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
+
+open_unlock:
+ mutex_unlock(&dev->dev_mutex);
+ return rc;
+}
+
+static int vim2m_release(struct file *file)
+{
+ struct vim2m_dev *dev = video_drvdata(file);
+ struct vim2m_ctx *ctx = file2ctx(file);
+
+ dprintk(dev, "Releasing instance %p\n", ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(&dev->dev_mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->dev_mutex);
+ kfree(ctx);
+
+ atomic_dec(&dev->num_inst);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vim2m_fops = {
+ .owner = THIS_MODULE,
+ .open = vim2m_open,
+ .release = vim2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device vim2m_videodev = {
+ .name = MEM2MEM_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &vim2m_fops,
+ .ioctl_ops = &vim2m_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static int vim2m_probe(struct platform_device *pdev)
+{
+ struct vim2m_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->irqlock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ atomic_set(&dev->num_inst, 0);
+ mutex_init(&dev->dev_mutex);
+
+ dev->vfd = vim2m_videodev;
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ INIT_DELAYED_WORK(&dev->work_run, device_work);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto unreg_v4l2;
+ }
+
+ video_set_drvdata(vfd, dev);
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto unreg_dev;
+ }
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &pdev->dev;
+ strlcpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
+ media_device_init(&dev->mdev);
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev,
+ vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m;
+ }
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto unreg_m2m_mc;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+unreg_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+unreg_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+#endif
+unreg_dev:
+ video_unregister_device(&dev->vfd);
+unreg_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int vim2m_remove(struct platform_device *pdev)
+{
+ struct vim2m_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ media_device_cleanup(&dev->mdev);
+#endif
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver vim2m_pdrv = {
+ .probe = vim2m_probe,
+ .remove = vim2m_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ },
+};
+
+static void __exit vim2m_exit(void)
+{
+ platform_driver_unregister(&vim2m_pdrv);
+ platform_device_unregister(&vim2m_pdev);
+}
+
+static int __init vim2m_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vim2m_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&vim2m_pdrv);
+ if (ret)
+ platform_device_unregister(&vim2m_pdev);
+
+ return ret;
+}
+
+module_init(vim2m_init);
+module_exit(vim2m_exit);
diff --git a/drivers/media/platform/vimc/Kconfig b/drivers/media/platform/vimc/Kconfig
new file mode 100644
index 000000000..71c9fe7d3
--- /dev/null
+++ b/drivers/media/platform/vimc/Kconfig
@@ -0,0 +1,15 @@
+config VIDEO_VIMC
+ tristate "Virtual Media Controller Driver (VIMC)"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_VMALLOC
+ select VIDEO_V4L2_TPG
+ default n
+ ---help---
+ Skeleton driver for Virtual Media Controller
+
+ This driver can be compared to the vivid driver for emulating
+ a media node that exposes a complex media topology. The topology
+ is hard coded for now but is meant to be highly configurable in
+ the future.
+
+ When in doubt, say N.
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
new file mode 100644
index 000000000..c4fc8e7d3
--- /dev/null
+++ b/drivers/media/platform/vimc/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+vimc-objs := vimc-core.o
+vimc_capture-objs := vimc-capture.o
+vimc_common-objs := vimc-common.o
+vimc_debayer-objs := vimc-debayer.o
+vimc_scaler-objs := vimc-scaler.o
+vimc_sensor-objs := vimc-sensor.o
+vimc_streamer-objs := vimc-streamer.o
+
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc_debayer.o \
+ vimc_scaler.o vimc_sensor.o vimc_streamer.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
new file mode 100644
index 000000000..8e014cc48
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -0,0 +1,546 @@
+/*
+ * vimc-capture.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vimc-common.h"
+#include "vimc-streamer.h"
+
+#define VIMC_CAP_DRV_NAME "vimc-capture"
+
+struct vimc_cap_device {
+ struct vimc_ent_device ved;
+ struct video_device vdev;
+ struct device *dev;
+ struct v4l2_pix_format format;
+ struct vb2_queue queue;
+ struct list_head buf_list;
+ /*
+ * NOTE: in a real driver, a spin lock must be used to access the
+ * queue because the frames are generated from a hardware interrupt
+ * and the ISR is not allowed to sleep.
+ * Although a spinlock is not strictly necessary in the vimc driver,
+ * we use it here as a code reference.
+ */
+ spinlock_t qlock;
+ struct mutex lock;
+ u32 sequence;
+ struct vimc_stream stream;
+};
+
+static const struct v4l2_pix_format fmt_default = {
+ .width = 640,
+ .height = 480,
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+struct vimc_cap_buffer {
+ /*
+ * struct vb2_v4l2_buffer must be the first element; the videobuf2
+ * framework will allocate this struct based on buf_struct_size and use
+ * the first sizeof(struct vb2_buffer) bytes of memory as a vb2_buffer.
+ */
+ struct vb2_v4l2_buffer vb2;
+ struct list_head list;
+};
+
+static int vimc_cap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vimc_cap_device *vcap = video_drvdata(file);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", vcap->vdev.v4l2_dev->name);
+
+ return 0;
+}
+
+static void vimc_cap_get_format(struct vimc_ent_device *ved,
+ struct v4l2_pix_format *fmt)
+{
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+
+ *fmt = vcap->format;
+}
+
+static int vimc_cap_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vimc_cap_device *vcap = video_drvdata(file);
+
+ f->fmt.pix = vcap->format;
+
+ return 0;
+}
+
+static int vimc_cap_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *format = &f->fmt.pix;
+ const struct vimc_pix_map *vpix;
+
+ format->width = clamp_t(u32, format->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ format->height = clamp_t(u32, format->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ /* Don't accept a pixelformat that is not on the table */
+ vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
+ if (!vpix) {
+ format->pixelformat = fmt_default.pixelformat;
+ vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
+ }
+ /* TODO: Add support for custom bytesperline values */
+ format->bytesperline = format->width * vpix->bpp;
+ format->sizeimage = format->bytesperline * format->height;
+
+ if (format->field == V4L2_FIELD_ANY)
+ format->field = fmt_default.field;
+
+ vimc_colorimetry_clamp(format);
+
+ return 0;
+}
+
+static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vimc_cap_device *vcap = video_drvdata(file);
+ int ret;
+
+ /* Do not change the format while stream is on */
+ if (vb2_is_busy(&vcap->queue))
+ return -EBUSY;
+
+ ret = vimc_cap_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ dev_dbg(vcap->dev, "%s: format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcap->vdev.name,
+ /* old */
+ vcap->format.width, vcap->format.height,
+ vcap->format.pixelformat, vcap->format.colorspace,
+ vcap->format.quantization, vcap->format.xfer_func,
+ vcap->format.ycbcr_enc,
+ /* new */
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
+ f->fmt.pix.quantization, f->fmt.pix.xfer_func,
+ f->fmt.pix.ycbcr_enc);
+
+ vcap->format = f->fmt.pix;
+
+ return 0;
+}
+
+static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(f->index);
+
+ if (!vpix)
+ return -EINVAL;
+
+ f->pixelformat = vpix->pixelformat;
+
+ return 0;
+}
+
+static int vimc_cap_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct vimc_pix_map *vpix;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ /* Only accept pixel formats that are in the pix map table */
+ vpix = vimc_pix_map_by_pixelformat(fsize->pixel_format);
+ if (!vpix)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = VIMC_FRAME_MIN_WIDTH;
+ fsize->stepwise.max_width = VIMC_FRAME_MAX_WIDTH;
+ fsize->stepwise.min_height = VIMC_FRAME_MIN_HEIGHT;
+ fsize->stepwise.max_height = VIMC_FRAME_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vimc_cap_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops vimc_cap_ioctl_ops = {
+ .vidioc_querycap = vimc_cap_querycap,
+
+ .vidioc_g_fmt_vid_cap = vimc_cap_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vimc_cap_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vimc_cap_try_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = vimc_cap_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = vimc_cap_enum_framesizes,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static void vimc_cap_return_all_buffers(struct vimc_cap_device *vcap,
+ enum vb2_buffer_state state)
+{
+ struct vimc_cap_buffer *vbuf, *node;
+
+ spin_lock(&vcap->qlock);
+
+ list_for_each_entry_safe(vbuf, node, &vcap->buf_list, list) {
+ list_del(&vbuf->list);
+ vb2_buffer_done(&vbuf->vb2.vb2_buf, state);
+ }
+
+ spin_unlock(&vcap->qlock);
+}
+
+static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+ struct media_entity *entity = &vcap->vdev.entity;
+ int ret;
+
+ vcap->sequence = 0;
+
+ /* Start the media pipeline */
+ ret = media_pipeline_start(entity, &vcap->stream.pipe);
+ if (ret) {
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
+ if (ret) {
+ media_pipeline_stop(entity);
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Stop the stream engine. Any remaining buffers in the stream queue are
+ * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
+ */
+static void vimc_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+
+ vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
+
+ /* Stop the media pipeline */
+ media_pipeline_stop(&vcap->vdev.entity);
+
+ /* Release all active buffers */
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_ERROR);
+}
+
+static void vimc_cap_buf_queue(struct vb2_buffer *vb2_buf)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vb2_buf->vb2_queue);
+ struct vimc_cap_buffer *buf = container_of(vb2_buf,
+ struct vimc_cap_buffer,
+ vb2.vb2_buf);
+
+ spin_lock(&vcap->qlock);
+ list_add_tail(&buf->list, &vcap->buf_list);
+ spin_unlock(&vcap->qlock);
+}
+
+static int vimc_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+
+ if (*nplanes)
+ return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;
+ /* We don't support multiplanes for now */
+ *nplanes = 1;
+ sizes[0] = vcap->format.sizeimage;
+
+ return 0;
+}
+
+static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = vcap->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(vcap->dev, "%s: buffer too small (%lu < %lu)\n",
+ vcap->vdev.name, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct vb2_ops vimc_cap_qops = {
+ .start_streaming = vimc_cap_start_streaming,
+ .stop_streaming = vimc_cap_stop_streaming,
+ .buf_queue = vimc_cap_buf_queue,
+ .queue_setup = vimc_cap_queue_setup,
+ .buf_prepare = vimc_cap_buffer_prepare,
+ /*
+ * Since q->lock is set we can use the standard
+ * vb2_ops_wait_prepare/finish helper functions.
+ */
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static const struct media_entity_operations vimc_cap_mops = {
+ .link_validate = vimc_link_validate,
+};
+
+static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+
+ vb2_queue_release(&vcap->queue);
+ media_entity_cleanup(ved->ent);
+ video_unregister_device(&vcap->vdev);
+ vimc_pads_cleanup(vcap->ved.pads);
+ kfree(vcap);
+}
+
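+/*
+ * Called for each frame produced by the pipeline: copy the frame into the
+ * first queued capture buffer and mark it done, or return ERR_PTR(-EAGAIN)
+ * when no buffer is queued.
+ */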
+static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
+ const void *frame)
+{
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+ struct vimc_cap_buffer *vimc_buf;
+ void *vbuf;
+
+ spin_lock(&vcap->qlock);
+
+ /* Get the first entry of the list */
+ vimc_buf = list_first_entry_or_null(&vcap->buf_list,
+ typeof(*vimc_buf), list);
+ if (!vimc_buf) {
+ spin_unlock(&vcap->qlock);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ /* Remove this entry from the list */
+ list_del(&vimc_buf->list);
+
+ spin_unlock(&vcap->qlock);
+
+ /* Fill the buffer */
+ vimc_buf->vb2.vb2_buf.timestamp = ktime_get_ns();
+ vimc_buf->vb2.sequence = vcap->sequence++;
+ vimc_buf->vb2.field = vcap->format.field;
+
+ vbuf = vb2_plane_vaddr(&vimc_buf->vb2.vb2_buf, 0);
+
+ memcpy(vbuf, frame, vcap->format.sizeimage);
+
+ /* Set it as ready */
+ vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
+ vcap->format.sizeimage);
+ vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
+ return NULL;
+}
+
+static int vimc_cap_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
+ const struct vimc_pix_map *vpix;
+ struct vimc_cap_device *vcap;
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ int ret;
+
+ /* Allocate the vimc_cap_device struct */
+ vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
+ if (!vcap)
+ return -ENOMEM;
+
+ /* Allocate the pads */
+ vcap->ved.pads =
+ vimc_pads_init(1, (const unsigned long[1]) {MEDIA_PAD_FL_SINK});
+ if (IS_ERR(vcap->ved.pads)) {
+ ret = PTR_ERR(vcap->ved.pads);
+ goto err_free_vcap;
+ }
+
+ /* Initialize the media entity */
+ vcap->vdev.entity.name = pdata->entity_name;
+ vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ ret = media_entity_pads_init(&vcap->vdev.entity,
+ 1, vcap->ved.pads);
+ if (ret)
+ goto err_clean_pads;
+
+ /* Initialize the lock */
+ mutex_init(&vcap->lock);
+
+ /* Initialize the vb2 queue */
+ q = &vcap->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = vcap;
+ q->buf_struct_size = sizeof(struct vimc_cap_buffer);
+ q->ops = &vimc_cap_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &vcap->lock;
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ dev_err(comp, "%s: vb2 queue init failed (err=%d)\n",
+ pdata->entity_name, ret);
+ goto err_clean_m_ent;
+ }
+
+ /* Initialize buffer list and its lock */
+ INIT_LIST_HEAD(&vcap->buf_list);
+ spin_lock_init(&vcap->qlock);
+
+ /* Set default frame format */
+ vcap->format = fmt_default;
+ vpix = vimc_pix_map_by_pixelformat(vcap->format.pixelformat);
+ vcap->format.bytesperline = vcap->format.width * vpix->bpp;
+ vcap->format.sizeimage = vcap->format.bytesperline *
+ vcap->format.height;
+
+ /* Fill the vimc_ent_device struct */
+ vcap->ved.ent = &vcap->vdev.entity;
+ vcap->ved.process_frame = vimc_cap_process_frame;
+ vcap->ved.vdev_get_format = vimc_cap_get_format;
+ dev_set_drvdata(comp, &vcap->ved);
+ vcap->dev = comp;
+
+ /* Initialize the video_device struct */
+ vdev = &vcap->vdev;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->entity.ops = &vimc_cap_mops;
+ vdev->release = video_device_release_empty;
+ vdev->fops = &vimc_cap_fops;
+ vdev->ioctl_ops = &vimc_cap_ioctl_ops;
+ vdev->lock = &vcap->lock;
+ vdev->queue = q;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ strlcpy(vdev->name, pdata->entity_name, sizeof(vdev->name));
+ video_set_drvdata(vdev, &vcap->ved);
+
+ /* Register the video_device with the v4l2 and the media framework */
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(comp, "%s: video register failed (err=%d)\n",
+ vcap->vdev.name, ret);
+ goto err_release_queue;
+ }
+
+ return 0;
+
+err_release_queue:
+ vb2_queue_release(q);
+err_clean_m_ent:
+ media_entity_cleanup(&vcap->vdev.entity);
+err_clean_pads:
+ vimc_pads_cleanup(vcap->ved.pads);
+err_free_vcap:
+ kfree(vcap);
+
+ return ret;
+}
+
+static const struct component_ops vimc_cap_comp_ops = {
+ .bind = vimc_cap_comp_bind,
+ .unbind = vimc_cap_comp_unbind,
+};
+
+static int vimc_cap_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_cap_comp_ops);
+}
+
+static int vimc_cap_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_cap_comp_ops);
+
+ return 0;
+}
+
+static const struct platform_device_id vimc_cap_driver_ids[] = {
+ {
+ .name = VIMC_CAP_DRV_NAME,
+ },
+ { }
+};
+
+static struct platform_driver vimc_cap_pdrv = {
+ .probe = vimc_cap_probe,
+ .remove = vimc_cap_remove,
+ .id_table = vimc_cap_driver_ids,
+ .driver = {
+ .name = VIMC_CAP_DRV_NAME,
+ },
+};
+
+module_platform_driver(vimc_cap_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Capture");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
new file mode 100644
index 000000000..fa8435ac2
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -0,0 +1,442 @@
+/*
+ * vimc-common.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "vimc-common.h"
+
+/*
+ * NOTE: non-bayer formats need to come first (necessary for enum_mbus_code
+ * in the scaler)
+ */
+static const struct vimc_pix_map vimc_pix_map_list[] = {
+ /* TODO: add all missing formats */
+
+ /* RGB formats */
+ {
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .pixelformat = V4L2_PIX_FMT_BGR24,
+ .bpp = 3,
+ .bayer = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .bpp = 3,
+ .bayer = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .pixelformat = V4L2_PIX_FMT_ARGB32,
+ .bpp = 4,
+ .bayer = false,
+ },
+
+ /* Bayer formats */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .bpp = 2,
+ .bayer = true,
+ },
+
+ /* 10bit raw bayer a-law compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+
+ /* 10bit raw bayer DPCM compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
+ .bpp = 2,
+ .bayer = true,
+ },
+};
+
+const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
+{
+ if (i >= ARRAY_SIZE(vimc_pix_map_list))
+ return NULL;
+
+ return &vimc_pix_map_list[i];
+}
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
+
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].code == code)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
+
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].pixelformat == pixelformat)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
+
+/* Helper function to allocate and initialize pads */
+struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
+{
+ struct media_pad *pads;
+ unsigned int i;
+
+ /* Allocate memory for the pads */
+ pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the pads */
+ for (i = 0; i < num_pads; i++) {
+ pads[i].index = i;
+ pads[i].flags = pads_flag[i];
+ }
+
+ return pads;
+}
+EXPORT_SYMBOL_GPL(vimc_pads_init);
+
+int vimc_pipeline_s_stream(struct media_entity *ent, int enable)
+{
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ent->num_pads; i++) {
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ continue;
+
+		/* Start the stream in the directly connected subdevice */
+ pad = media_entity_remote_pad(&ent->pads[i]);
+ if (!pad)
+ continue;
+
+ if (!is_media_entity_v4l2_subdev(pad->entity))
+ return -EINVAL;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ ret = v4l2_subdev_call(sd, video, s_stream, enable);
+ if (ret && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_pipeline_s_stream);
+
+static int vimc_get_mbus_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+ int ret;
+
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ if (ret)
+ return ret;
+
+ } else if (is_media_entity_v4l2_video_device(pad->entity)) {
+ struct video_device *vdev = container_of(pad->entity,
+ struct video_device,
+ entity);
+ struct vimc_ent_device *ved = video_get_drvdata(vdev);
+ const struct vimc_pix_map *vpix;
+ struct v4l2_pix_format vdev_fmt;
+
+ if (!ved->vdev_get_format)
+ return -ENOIOCTLCMD;
+
+ ved->vdev_get_format(ved, &vdev_fmt);
+ vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
+ v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vimc_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev_format source_fmt, sink_fmt;
+ int ret;
+
+ ret = vimc_get_mbus_format(link->source, &source_fmt);
+ if (ret)
+ return ret;
+
+ ret = vimc_get_mbus_format(link->sink, &sink_fmt);
+ if (ret)
+ return ret;
+
+ pr_info("vimc link validate: "
+ "%s:src:%dx%d (0x%x, %d, %d, %d, %d) "
+ "%s:snk:%dx%d (0x%x, %d, %d, %d, %d)\n",
+ /* src */
+ link->source->entity->name,
+ source_fmt.format.width, source_fmt.format.height,
+ source_fmt.format.code, source_fmt.format.colorspace,
+ source_fmt.format.quantization, source_fmt.format.xfer_func,
+ source_fmt.format.ycbcr_enc,
+ /* sink */
+ link->sink->entity->name,
+ sink_fmt.format.width, sink_fmt.format.height,
+ sink_fmt.format.code, sink_fmt.format.colorspace,
+ sink_fmt.format.quantization, sink_fmt.format.xfer_func,
+ sink_fmt.format.ycbcr_enc);
+
+ /* The width, height and code must match. */
+ if (source_fmt.format.width != sink_fmt.format.width
+ || source_fmt.format.height != sink_fmt.format.height
+ || source_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+
+ /*
+ * The field order must match, or the sink field order must be NONE
+ * to support interlaced hardware connected to bridges that support
+ * progressive formats only.
+ */
+ if (source_fmt.format.field != sink_fmt.format.field &&
+ sink_fmt.format.field != V4L2_FIELD_NONE)
+ return -EPIPE;
+
+ /*
+ * If colorspace is DEFAULT, then assume all the colorimetry is also
+ * DEFAULT, return 0 to skip comparing the other colorimetry parameters
+ */
+ if (source_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT
+ || sink_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ return 0;
+
+ /* Colorspace must match. */
+ if (source_fmt.format.colorspace != sink_fmt.format.colorspace)
+ return -EPIPE;
+
+ /* Colorimetry must match if they are not set to DEFAULT */
+ if (source_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
+ && sink_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
+ && source_fmt.format.ycbcr_enc != sink_fmt.format.ycbcr_enc)
+ return -EPIPE;
+
+ if (source_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
+ && sink_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
+ && source_fmt.format.quantization != sink_fmt.format.quantization)
+ return -EPIPE;
+
+ if (source_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
+ && sink_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
+ && source_fmt.format.xfer_func != sink_fmt.format.xfer_func)
+ return -EPIPE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_link_validate);
+
+static const struct media_entity_operations vimc_ent_sd_mops = {
+ .link_validate = vimc_link_validate,
+};
+
+int vimc_ent_sd_register(struct vimc_ent_device *ved,
+ struct v4l2_subdev *sd,
+ struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u32 function,
+ u16 num_pads,
+ const unsigned long *pads_flag,
+ const struct v4l2_subdev_ops *sd_ops)
+{
+ int ret;
+
+ /* Allocate the pads */
+ ved->pads = vimc_pads_init(num_pads, pads_flag);
+ if (IS_ERR(ved->pads))
+ return PTR_ERR(ved->pads);
+
+ /* Fill the vimc_ent_device struct */
+ ved->ent = &sd->entity;
+
+ /* Initialize the subdev */
+ v4l2_subdev_init(sd, sd_ops);
+ sd->entity.function = function;
+ sd->entity.ops = &vimc_ent_sd_mops;
+ sd->owner = THIS_MODULE;
+ strlcpy(sd->name, name, sizeof(sd->name));
+ v4l2_set_subdevdata(sd, ved);
+
+ /* Expose this subdev to user space */
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ if (sd->ctrl_handler)
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* Initialize the media entity */
+ ret = media_entity_pads_init(&sd->entity, num_pads, ved->pads);
+ if (ret)
+ goto err_clean_pads;
+
+ /* Register the subdev with the v4l2 and the media framework */
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret) {
+ dev_err(v4l2_dev->dev,
+ "%s: subdev register failed (err=%d)\n",
+ name, ret);
+ goto err_clean_m_ent;
+ }
+
+ return 0;
+
+err_clean_m_ent:
+ media_entity_cleanup(&sd->entity);
+err_clean_pads:
+ vimc_pads_cleanup(ved->pads);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
+
+void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
+{
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(ved->ent);
+ vimc_pads_cleanup(ved->pads);
+}
+EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Common");
+MODULE_AUTHOR("Helen Koike <helen.fornazier@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
new file mode 100644
index 000000000..6ed969d9e
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -0,0 +1,223 @@
+/*
+ * vimc-common.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIMC_COMMON_H_
+#define _VIMC_COMMON_H_
+
+#include <linux/slab.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+
+/* VIMC-specific controls */
+#define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000)
+#define VIMC_CID_VIMC_CLASS (0x00f00000 | 1)
+#define VIMC_CID_TEST_PATTERN (VIMC_CID_VIMC_BASE + 0)
+
+#define VIMC_FRAME_MAX_WIDTH 4096
+#define VIMC_FRAME_MAX_HEIGHT 2160
+#define VIMC_FRAME_MIN_WIDTH 16
+#define VIMC_FRAME_MIN_HEIGHT 16
+
+#define VIMC_FRAME_INDEX(lin, col, width, bpp) ((lin * width + col) * bpp)
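+
+/*
+ * Example: in a 640-pixel-wide RGB24 frame (bpp = 3) stored line after line,
+ * the pixel at line 1, column 2 starts at byte
+ * VIMC_FRAME_INDEX(1, 2, 640, 3) = (1 * 640 + 2) * 3 = 1926.
+ */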
+
+/**
+ * vimc_colorimetry_clamp - Adjust colorimetry parameters
+ *
+ * @fmt: the pointer to struct v4l2_pix_format or
+ * struct v4l2_mbus_framefmt
+ *
+ * Entities must check if the colorimetry given by userspace is valid; if it
+ * is not, set it to DEFAULT
+ */
+#define vimc_colorimetry_clamp(fmt) \
+do { \
+ if ((fmt)->colorspace == V4L2_COLORSPACE_DEFAULT \
+ || (fmt)->colorspace > V4L2_COLORSPACE_DCI_P3) { \
+ (fmt)->colorspace = V4L2_COLORSPACE_DEFAULT; \
+ (fmt)->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; \
+ (fmt)->quantization = V4L2_QUANTIZATION_DEFAULT; \
+ (fmt)->xfer_func = V4L2_XFER_FUNC_DEFAULT; \
+ } \
+ if ((fmt)->ycbcr_enc > V4L2_YCBCR_ENC_SMPTE240M) \
+ (fmt)->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; \
+ if ((fmt)->quantization > V4L2_QUANTIZATION_LIM_RANGE) \
+ (fmt)->quantization = V4L2_QUANTIZATION_DEFAULT; \
+ if ((fmt)->xfer_func > V4L2_XFER_FUNC_SMPTE2084) \
+ (fmt)->xfer_func = V4L2_XFER_FUNC_DEFAULT; \
+} while (0)
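+
+/*
+ * For instance, the debayer and scaler sink-format adjust helpers call this
+ * macro with a pointer to the format received from userspace, so that
+ * out-of-range colorimetry fields fall back to their DEFAULT values.
+ */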
+
+/**
+ * struct vimc_platform_data - platform data to components
+ *
+ * @entity_name: The name of the entity to be created
+ *
+ * Board setup code often uses the device's platform_data field to pass
+ * additional information to the driver.
+ * When injecting a new platform_device into the component system, the core
+ * needs to provide the corresponding submodule with the name of the entity
+ * that should be used when registering the subdevice in the Media Controller
+ * system.
+ */
+struct vimc_platform_data {
+ char entity_name[32];
+};
+
+/**
+ * struct vimc_pix_map - maps media bus code with v4l2 pixel format
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ * @bpp: number of bytes each pixel occupies
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ *
+ * Struct which matches the MEDIA_BUS_FMT_* codes with the corresponding
+ * V4L2_PIX_FMT_* fourcc pixelformat and its bytes per pixel (bpp)
+ */
+struct vimc_pix_map {
+ unsigned int code;
+ unsigned int bpp;
+ u32 pixelformat;
+ bool bayer;
+};
+
+/**
+ * struct vimc_ent_device - core struct that represents a node in the topology
+ *
+ * @ent: the pointer to struct media_entity for the node
+ * @pads: the list of pads of the node
+ * @process_frame: callback to send a frame to that node
+ * @vdev_get_format: callback that returns the current format of a pad, used
+ * only when is_media_entity_v4l2_video_device(ent) returns
+ * true
+ *
+ * Each node of the topology must create a vimc_ent_device struct. Depending on
+ * the node it will be an instance of the v4l2_subdev or video_device struct,
+ * both of which contain a struct media_entity.
+ * Those structures should embed the vimc_ent_device struct through
+ * v4l2_set_subdevdata() and video_set_drvdata() respectively, allowing the
+ * vimc_ent_device struct to be retrieved from the corresponding struct
+ * media_entity
+ */
+struct vimc_ent_device {
+ struct media_entity *ent;
+ struct media_pad *pads;
+ void * (*process_frame)(struct vimc_ent_device *ved,
+ const void *frame);
+ void (*vdev_get_format)(struct vimc_ent_device *ved,
+ struct v4l2_pix_format *fmt);
+};
+
+/**
+ * vimc_pads_init - initialize pads
+ *
+ * @num_pads: number of pads to initialize
+ * @pads_flags: flags to use in each pad
+ *
+ * Helper function to allocate and initialize pads
+ */
+struct media_pad *vimc_pads_init(u16 num_pads,
+ const unsigned long *pads_flag);
+
+/**
+ * vimc_pads_cleanup - free pads
+ *
+ * @pads: pointer to the pads
+ *
+ * Helper function to free the pads initialized with vimc_pads_init
+ */
+static inline void vimc_pads_cleanup(struct media_pad *pads)
+{
+ kfree(pads);
+}
+
+/**
+ * vimc_pipeline_s_stream - start stream through the pipeline
+ *
+ * @ent: the pointer to struct media_entity for the node
+ * @enable: 1 to start the stream and 0 to stop
+ *
+ * Helper function to call the s_stream operation of the subdevices connected
+ * to all the sink pads of the entity
+ */
+int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
+
+/**
+ * vimc_pix_map_by_index - get vimc_pix_map struct by its index
+ *
+ * @i: index of the vimc_pix_map struct in vimc_pix_map_list
+ */
+const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i);
+
+/**
+ * vimc_pix_map_by_code - get vimc_pix_map struct by media bus code
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code);
+
+/**
+ * vimc_pix_map_by_pixelformat - get vimc_pix_map struct by v4l2 pixel format
+ *
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
+
+/**
+ * vimc_ent_sd_register - initialize and register a subdev node
+ *
+ * @ved: the vimc_ent_device struct to be initialized
+ * @sd: the v4l2_subdev struct to be initialized and registered
+ * @v4l2_dev: the v4l2 device to register the v4l2_subdev with
+ * @name: name of the sub-device. Please note that the name must be
+ * unique.
+ * @function: media entity function defined by MEDIA_ENT_F_* macros
+ * @num_pads: number of pads to initialize
+ * @pads_flag: flags to use in each pad
+ * @sd_ops: pointer to &struct v4l2_subdev_ops.
+ *
+ * Helper function to initialize and register the struct vimc_ent_device and
+ * struct v4l2_subdev which represent a subdev node in the topology
+ */
+int vimc_ent_sd_register(struct vimc_ent_device *ved,
+ struct v4l2_subdev *sd,
+ struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u32 function,
+ u16 num_pads,
+ const unsigned long *pads_flag,
+ const struct v4l2_subdev_ops *sd_ops);
+
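+/*
+ * Example (taken from the vimc-debayer bind code): registering a processing
+ * subdev with one sink and one source pad:
+ *
+ *	ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
+ *				   pdata->entity_name,
+ *				   MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
+ *				   (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
+ *							     MEDIA_PAD_FL_SOURCE},
+ *				   &vimc_deb_ops);
+ */
+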
+/**
+ * vimc_ent_sd_unregister - cleanup and unregister a subdev node
+ *
+ * @ved: the vimc_ent_device struct to be cleaned up
+ * @sd: the v4l2_subdev struct to be unregistered
+ *
+ * Helper function to clean up and unregister the struct vimc_ent_device and
+ * struct v4l2_subdev which represent a subdev node in the topology
+ */
+void vimc_ent_sd_unregister(struct vimc_ent_device *ved,
+ struct v4l2_subdev *sd);
+
+/**
+ * vimc_link_validate - validates a media link
+ *
+ * @link: pointer to &struct media_link
+ *
+ * This function checks whether a media link is valid for streaming.
+ */
+int vimc_link_validate(struct media_link *link);
+
+#endif
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
new file mode 100644
index 000000000..8548fa93b
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -0,0 +1,402 @@
+/*
+ * vimc-core.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/component.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+
+#include "vimc-common.h"
+
+#define VIMC_PDEV_NAME "vimc"
+#define VIMC_MDEV_MODEL_NAME "VIMC MDEV"
+
+#define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \
+ .src_ent = src, \
+ .src_pad = srcpad, \
+ .sink_ent = sink, \
+ .sink_pad = sinkpad, \
+ .flags = link_flags, \
+}
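+
+/*
+ * For instance, VIMC_ENT_LINK(0, 0, 2, 0, MEDIA_LNK_FL_ENABLED) describes an
+ * enabled link from pad 0 of entity 0 to pad 0 of entity 2, the entity
+ * indexes referring to positions in the ent_config[] table below
+ * (Sensor A and Debayer A respectively).
+ */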
+
+struct vimc_device {
+ /* The platform device */
+ struct platform_device pdev;
+
+ /* The pipeline configuration */
+ const struct vimc_pipeline_config *pipe_cfg;
+
+ /* The Associated media_device parent */
+ struct media_device mdev;
+
+	/* Internal v4l2 parent device */
+ struct v4l2_device v4l2_dev;
+
+ /* Subdevices */
+ struct platform_device **subdevs;
+};
+
+/* Structure which describes individual configuration for each entity */
+struct vimc_ent_config {
+ const char *name;
+ const char *drv;
+};
+
+/* Structure which describes links between entities */
+struct vimc_ent_link {
+ unsigned int src_ent;
+ u16 src_pad;
+ unsigned int sink_ent;
+ u16 sink_pad;
+ u32 flags;
+};
+
+/* Structure which describes the whole topology */
+struct vimc_pipeline_config {
+ const struct vimc_ent_config *ents;
+ size_t num_ents;
+ const struct vimc_ent_link *links;
+ size_t num_links;
+};
+
+/* --------------------------------------------------------------------------
+ * Topology Configuration
+ */
+
+static const struct vimc_ent_config ent_config[] = {
+ {
+ .name = "Sensor A",
+ .drv = "vimc-sensor",
+ },
+ {
+ .name = "Sensor B",
+ .drv = "vimc-sensor",
+ },
+ {
+ .name = "Debayer A",
+ .drv = "vimc-debayer",
+ },
+ {
+ .name = "Debayer B",
+ .drv = "vimc-debayer",
+ },
+ {
+ .name = "Raw Capture 0",
+ .drv = "vimc-capture",
+ },
+ {
+ .name = "Raw Capture 1",
+ .drv = "vimc-capture",
+ },
+ {
+ .name = "RGB/YUV Input",
+ /* TODO: change this to vimc-input when it is implemented */
+ .drv = "vimc-sensor",
+ },
+ {
+ .name = "Scaler",
+ .drv = "vimc-scaler",
+ },
+ {
+ .name = "RGB/YUV Capture",
+ .drv = "vimc-capture",
+ },
+};
+
+static const struct vimc_ent_link ent_links[] = {
+ /* Link: Sensor A (Pad 0)->(Pad 0) Debayer A */
+ VIMC_ENT_LINK(0, 0, 2, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Sensor A (Pad 0)->(Pad 0) Raw Capture 0 */
+ VIMC_ENT_LINK(0, 0, 4, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Sensor B (Pad 0)->(Pad 0) Debayer B */
+ VIMC_ENT_LINK(1, 0, 3, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Sensor B (Pad 0)->(Pad 0) Raw Capture 1 */
+ VIMC_ENT_LINK(1, 0, 5, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Debayer A (Pad 1)->(Pad 0) Scaler */
+ VIMC_ENT_LINK(2, 1, 7, 0, MEDIA_LNK_FL_ENABLED),
+ /* Link: Debayer B (Pad 1)->(Pad 0) Scaler */
+ VIMC_ENT_LINK(3, 1, 7, 0, 0),
+ /* Link: RGB/YUV Input (Pad 0)->(Pad 0) Scaler */
+ VIMC_ENT_LINK(6, 0, 7, 0, 0),
+ /* Link: Scaler (Pad 1)->(Pad 0) RGB/YUV Capture */
+ VIMC_ENT_LINK(7, 1, 8, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+};
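+
+/*
+ * Resulting topology: each sensor feeds its raw capture node and its debayer
+ * through immutable links; the scaler sink can be linked to either debayer or
+ * to the RGB/YUV input (Debayer A is enabled by default), and the scaler
+ * always feeds the RGB/YUV capture.
+ */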
+
+static const struct vimc_pipeline_config pipe_cfg = {
+ .ents = ent_config,
+ .num_ents = ARRAY_SIZE(ent_config),
+ .links = ent_links,
+ .num_links = ARRAY_SIZE(ent_links)
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int vimc_create_links(struct vimc_device *vimc)
+{
+ unsigned int i;
+ int ret;
+
+ /* Initialize the links between entities */
+ for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
+ const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
+ /*
+ * TODO: Check another way of retrieving ved struct without
+ * relying on platform_get_drvdata
+ */
+ struct vimc_ent_device *ved_src =
+ platform_get_drvdata(vimc->subdevs[link->src_ent]);
+ struct vimc_ent_device *ved_sink =
+ platform_get_drvdata(vimc->subdevs[link->sink_ent]);
+
+ ret = media_create_pad_link(ved_src->ent, link->src_pad,
+ ved_sink->ent, link->sink_pad,
+ link->flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vimc_comp_bind(struct device *master)
+{
+ struct vimc_device *vimc = container_of(to_platform_device(master),
+ struct vimc_device, pdev);
+ int ret;
+
+ dev_dbg(master, "bind");
+
+ /* Register the v4l2 struct */
+ ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
+ if (ret) {
+ dev_err(vimc->mdev.dev,
+ "v4l2 device register failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ /* Bind subdevices */
+ ret = component_bind_all(master, &vimc->v4l2_dev);
+ if (ret)
+ goto err_v4l2_unregister;
+
+ /* Initialize links */
+ ret = vimc_create_links(vimc);
+ if (ret)
+ goto err_comp_unbind_all;
+
+ /* Register the media device */
+ ret = media_device_register(&vimc->mdev);
+ if (ret) {
+ dev_err(vimc->mdev.dev,
+ "media device register failed (err=%d)\n", ret);
+ goto err_comp_unbind_all;
+ }
+
+	/* Expose all subdev device nodes */
+ ret = v4l2_device_register_subdev_nodes(&vimc->v4l2_dev);
+ if (ret) {
+ dev_err(vimc->mdev.dev,
+ "vimc subdev nodes registration failed (err=%d)\n",
+ ret);
+ goto err_mdev_unregister;
+ }
+
+ return 0;
+
+err_mdev_unregister:
+ media_device_unregister(&vimc->mdev);
+err_comp_unbind_all:
+ component_unbind_all(master, NULL);
+err_v4l2_unregister:
+ v4l2_device_unregister(&vimc->v4l2_dev);
+
+ return ret;
+}
+
+static void vimc_comp_unbind(struct device *master)
+{
+ struct vimc_device *vimc = container_of(to_platform_device(master),
+ struct vimc_device, pdev);
+
+ dev_dbg(master, "unbind");
+
+ media_device_unregister(&vimc->mdev);
+ component_unbind_all(master, NULL);
+ v4l2_device_unregister(&vimc->v4l2_dev);
+}
+
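+/*
+ * Match callback for the component framework: a component matches when its
+ * struct device is the one passed as data, i.e. the subdevice registered
+ * through component_match_add() in vimc_add_subdevs().
+ */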
+static int vimc_comp_compare(struct device *comp, void *data)
+{
+ return comp == data;
+}
+
+static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
+{
+ struct component_match *match = NULL;
+ struct vimc_platform_data pdata;
+ int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ dev_dbg(&vimc->pdev.dev, "new pdev for %s\n",
+ vimc->pipe_cfg->ents[i].drv);
+
+ strlcpy(pdata.entity_name, vimc->pipe_cfg->ents[i].name,
+ sizeof(pdata.entity_name));
+
+ vimc->subdevs[i] = platform_device_register_data(&vimc->pdev.dev,
+ vimc->pipe_cfg->ents[i].drv,
+ PLATFORM_DEVID_AUTO,
+ &pdata,
+ sizeof(pdata));
+ if (IS_ERR(vimc->subdevs[i])) {
+ match = ERR_CAST(vimc->subdevs[i]);
+ while (--i >= 0)
+ platform_device_unregister(vimc->subdevs[i]);
+
+ return match;
+ }
+
+ component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
+ &vimc->subdevs[i]->dev);
+ }
+
+ return match;
+}
+
+static void vimc_rm_subdevs(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ platform_device_unregister(vimc->subdevs[i]);
+}
+
+static const struct component_master_ops vimc_comp_ops = {
+ .bind = vimc_comp_bind,
+ .unbind = vimc_comp_unbind,
+};
+
+static int vimc_probe(struct platform_device *pdev)
+{
+ struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
+ struct component_match *match = NULL;
+ int ret;
+
+ dev_dbg(&pdev->dev, "probe");
+
+ memset(&vimc->mdev, 0, sizeof(vimc->mdev));
+
+	/* Create a platform_device for each entity in the topology */
+ vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->subdevs), GFP_KERNEL);
+ if (!vimc->subdevs)
+ return -ENOMEM;
+
+ match = vimc_add_subdevs(vimc);
+ if (IS_ERR(match))
+ return PTR_ERR(match);
+
+ /* Link the media device within the v4l2_device */
+ vimc->v4l2_dev.mdev = &vimc->mdev;
+
+ /* Initialize media device */
+ strlcpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
+ sizeof(vimc->mdev.model));
+ vimc->mdev.dev = &pdev->dev;
+ media_device_init(&vimc->mdev);
+
+ /* Add self to the component system */
+ ret = component_master_add_with_match(&pdev->dev, &vimc_comp_ops,
+ match);
+ if (ret) {
+ media_device_cleanup(&vimc->mdev);
+ vimc_rm_subdevs(vimc);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vimc_remove(struct platform_device *pdev)
+{
+ struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
+
+ dev_dbg(&pdev->dev, "remove");
+
+ component_master_del(&pdev->dev, &vimc_comp_ops);
+ vimc_rm_subdevs(vimc);
+
+ return 0;
+}
+
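+/*
+ * The vimc platform device below is declared statically, so there is nothing
+ * to free here; an empty release callback is provided only to keep the device
+ * core from warning about a missing release function.
+ */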
+static void vimc_dev_release(struct device *dev)
+{
+}
+
+static struct vimc_device vimc_dev = {
+ .pipe_cfg = &pipe_cfg,
+ .pdev = {
+ .name = VIMC_PDEV_NAME,
+ .dev.release = vimc_dev_release,
+ }
+};
+
+static struct platform_driver vimc_pdrv = {
+ .probe = vimc_probe,
+ .remove = vimc_remove,
+ .driver = {
+ .name = VIMC_PDEV_NAME,
+ },
+};
+
+static int __init vimc_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vimc_dev.pdev);
+ if (ret) {
+ dev_err(&vimc_dev.pdev.dev,
+ "platform device registration failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&vimc_pdrv);
+ if (ret) {
+ dev_err(&vimc_dev.pdev.dev,
+ "platform driver registration failed (err=%d)\n", ret);
+		platform_device_unregister(&vimc_dev.pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit vimc_exit(void)
+{
+ platform_driver_unregister(&vimc_pdrv);
+
+ platform_device_unregister(&vimc_dev.pdev);
+}
+
+module_init(vimc_init);
+module_exit(vimc_exit);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC)");
+MODULE_AUTHOR("Helen Fornazier <helen.fornazier@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
new file mode 100644
index 000000000..7d77c63b9
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -0,0 +1,585 @@
+/*
+ * vimc-debayer.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#include "vimc-common.h"
+
+#define VIMC_DEB_DRV_NAME "vimc-debayer"
+
+static unsigned int deb_mean_win_size = 3;
+module_param(deb_mean_win_size, uint, 0000);
+MODULE_PARM_DESC(deb_mean_win_size, " the window size to calculate the mean.\n"
+	"NOTE: the window size needs to be an odd number, as the main pixel "
+ "stays in the center of the window, otherwise the next odd number "
+ "is considered");
+
+#define IS_SINK(pad) (!pad)
+#define IS_SRC(pad) (pad)
+
+enum vimc_deb_rgb_colors {
+ VIMC_DEB_RED = 0,
+ VIMC_DEB_GREEN = 1,
+ VIMC_DEB_BLUE = 2,
+};
+
+struct vimc_deb_pix_map {
+ u32 code;
+ enum vimc_deb_rgb_colors order[2][2];
+};
+
+struct vimc_deb_device {
+ struct vimc_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ /* The active format */
+ struct v4l2_mbus_framefmt sink_fmt;
+ u32 src_code;
+ void (*set_rgb_src)(struct vimc_deb_device *vdeb, unsigned int lin,
+ unsigned int col, unsigned int rgb[3]);
+ /* Values calculated when the stream starts */
+ u8 *src_frame;
+ const struct vimc_deb_pix_map *sink_pix_map;
+ unsigned int sink_bpp;
+};
+
+static const struct v4l2_mbus_framefmt sink_fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+static const struct vimc_deb_pix_map vimc_deb_pix_map_list[] = {
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
+ { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
+ { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
+ { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
+ { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
+ { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
+ { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ },
+};
+
+static const struct vimc_deb_pix_map *vimc_deb_pix_map_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_deb_pix_map_list); i++)
+ if (vimc_deb_pix_map_list[i].code == code)
+ return &vimc_deb_pix_map_list[i];
+
+ return NULL;
+}
+
+static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+ unsigned int i;
+
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ *mf = sink_fmt_default;
+
+ for (i = 1; i < sd->entity.num_pads; i++) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mf = sink_fmt_default;
+ mf->code = vdeb->src_code;
+ }
+
+ return 0;
+}
+
+static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* We only support one format for source pads */
+ if (IS_SRC(code->pad)) {
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = vdeb->src_code;
+ } else {
+ if (code->index >= ARRAY_SIZE(vimc_deb_pix_map_list))
+ return -EINVAL;
+
+ code->code = vimc_deb_pix_map_list[code->index].code;
+ }
+
+ return 0;
+}
+
+static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ if (fse->index)
+ return -EINVAL;
+
+ if (IS_SINK(fse->pad)) {
+ const struct vimc_deb_pix_map *vpix =
+ vimc_deb_pix_map_by_code(fse->code);
+
+ if (!vpix)
+ return -EINVAL;
+ } else if (fse->code != vdeb->src_code) {
+ return -EINVAL;
+ }
+
+ fse->min_width = VIMC_FRAME_MIN_WIDTH;
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->min_height = VIMC_FRAME_MIN_HEIGHT;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
+
+ return 0;
+}
+
+static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ /* Get the current sink format */
+ fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
+ *v4l2_subdev_get_try_format(sd, cfg, 0) :
+ vdeb->sink_fmt;
+
+ /* Set the right code for the source pad */
+ if (IS_SRC(fmt->pad))
+ fmt->format.code = vdeb->src_code;
+
+ return 0;
+}
+
+static void vimc_deb_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ const struct vimc_deb_pix_map *vpix;
+
+ /* Don't accept a code that is not on the debayer table */
+ vpix = vimc_deb_pix_map_by_code(fmt->code);
+ if (!vpix)
+ fmt->code = sink_fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY)
+ fmt->field = sink_fmt_default.field;
+
+ vimc_colorimetry_clamp(fmt);
+}
+
+static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Do not change the format while stream is on */
+ if (vdeb->src_frame)
+ return -EBUSY;
+
+ sink_fmt = &vdeb->sink_fmt;
+ } else {
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ }
+
+ /*
+ * Do not change the format of the source pad,
+ * it is propagated from the sink
+ */
+ if (IS_SRC(fmt->pad)) {
+ fmt->format = *sink_fmt;
+ /* TODO: Add support for other formats */
+ fmt->format.code = vdeb->src_code;
+ } else {
+ /* Set the new format in the sink pad */
+ vimc_deb_adjust_sink_fmt(&fmt->format);
+
+ dev_dbg(vdeb->dev, "%s: sink format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdeb->sd.name,
+ /* old */
+ sink_fmt->width, sink_fmt->height, sink_fmt->code,
+ sink_fmt->colorspace, sink_fmt->quantization,
+ sink_fmt->xfer_func, sink_fmt->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *sink_fmt = fmt->format;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops vimc_deb_pad_ops = {
+ .init_cfg = vimc_deb_init_cfg,
+ .enum_mbus_code = vimc_deb_enum_mbus_code,
+ .enum_frame_size = vimc_deb_enum_frame_size,
+ .get_fmt = vimc_deb_get_fmt,
+ .set_fmt = vimc_deb_set_fmt,
+};
+
+static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
+ unsigned int lin,
+ unsigned int col,
+ unsigned int rgb[3])
+{
+ unsigned int i, index;
+
+ index = VIMC_FRAME_INDEX(lin, col, vdeb->sink_fmt.width, 3);
+ for (i = 0; i < 3; i++)
+ vdeb->src_frame[index + i] = rgb[i];
+}
+
+static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+ unsigned int frame_size;
+
+ if (vdeb->src_frame)
+ return 0;
+
+ /* Calculate the frame size of the source pad */
+ vpix = vimc_pix_map_by_code(vdeb->src_code);
+ frame_size = vdeb->sink_fmt.width * vdeb->sink_fmt.height *
+ vpix->bpp;
+
+ /* Save the bytes per pixel of the sink */
+ vpix = vimc_pix_map_by_code(vdeb->sink_fmt.code);
+ vdeb->sink_bpp = vpix->bpp;
+
+ /* Get the corresponding pixel map from the table */
+ vdeb->sink_pix_map =
+ vimc_deb_pix_map_by_code(vdeb->sink_fmt.code);
+
+ /*
+ * Allocate the frame buffer. Use vmalloc to be able to
+ * allocate a large amount of memory
+ */
+ vdeb->src_frame = vmalloc(frame_size);
+ if (!vdeb->src_frame)
+ return -ENOMEM;
+
+ } else {
+ if (!vdeb->src_frame)
+ return 0;
+
+ vfree(vdeb->src_frame);
+ vdeb->src_frame = NULL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops vimc_deb_video_ops = {
+ .s_stream = vimc_deb_s_stream,
+};
+
+static const struct v4l2_subdev_ops vimc_deb_ops = {
+ .pad = &vimc_deb_pad_ops,
+ .video = &vimc_deb_video_ops,
+};
+
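+/*
+ * Assemble a raw sample value from its n_bytes bytes, least significant byte
+ * first (e.g. a 10/12-bit bayer sample stored in 2 bytes becomes
+ * bytes[0] | bytes[1] << 8).
+ */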
+static unsigned int vimc_deb_get_val(const u8 *bytes,
+ const unsigned int n_bytes)
+{
+ unsigned int i;
+ unsigned int acc = 0;
+
+ for (i = 0; i < n_bytes; i++)
+ acc = acc + (bytes[i] << (8 * i));
+
+ return acc;
+}
+
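+/*
+ * Compute the demosaiced RGB value of the sink pixel at (lin, col) by
+ * averaging, per color, the raw samples inside a deb_mean_win_size x
+ * deb_mean_win_size window centered on that pixel and clipped at the frame
+ * borders.
+ */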
+static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
+ const u8 *frame,
+ const unsigned int lin,
+ const unsigned int col,
+ unsigned int rgb[3])
+{
+ unsigned int i, seek, wlin, wcol;
+ unsigned int n_rgb[3] = {0, 0, 0};
+
+ for (i = 0; i < 3; i++)
+ rgb[i] = 0;
+
+ /*
+ * Calculate how many we need to subtract to get to the pixel in
+ * the top left corner of the mean window (considering the current
+ * pixel as the center)
+ */
+ seek = deb_mean_win_size / 2;
+
+ /* Sum the values of the colors in the mean window */
+
+ dev_dbg(vdeb->dev,
+ "deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n",
+		vdeb->sd.name, lin, col, deb_mean_win_size, seek);
+
+ /*
+ * Iterate through all the lines in the mean window, start
+ * with zero if the pixel is outside the frame and don't pass
+ * the height when the pixel is in the bottom border of the
+ * frame
+ */
+ for (wlin = seek > lin ? 0 : lin - seek;
+ wlin < lin + seek + 1 && wlin < vdeb->sink_fmt.height;
+ wlin++) {
+
+ /*
+ * Iterate through all the columns in the mean window, start
+ * with zero if the pixel is outside the frame and don't pass
+ * the width when the pixel is in the right border of the
+ * frame
+ */
+ for (wcol = seek > col ? 0 : col - seek;
+ wcol < col + seek + 1 && wcol < vdeb->sink_fmt.width;
+ wcol++) {
+ enum vimc_deb_rgb_colors color;
+ unsigned int index;
+
+ /* Check which color this pixel is */
+ color = vdeb->sink_pix_map->order[wlin % 2][wcol % 2];
+
+ index = VIMC_FRAME_INDEX(wlin, wcol,
+ vdeb->sink_fmt.width,
+ vdeb->sink_bpp);
+
+ dev_dbg(vdeb->dev,
+ "deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n",
+ vdeb->sd.name, index, wlin, wcol, color);
+
+ /* Get its value */
+ rgb[color] = rgb[color] +
+ vimc_deb_get_val(&frame[index], vdeb->sink_bpp);
+
+ /* Save how many values we already added */
+ n_rgb[color]++;
+
+ dev_dbg(vdeb->dev, "deb: %s: RGB CALC: val %d, n %d\n",
+ vdeb->sd.name, rgb[color], n_rgb[color]);
+ }
+ }
+
+ /* Calculate the mean */
+ for (i = 0; i < 3; i++) {
+ dev_dbg(vdeb->dev,
+ "deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n",
+ vdeb->sd.name, lin, col, i, rgb[i], n_rgb[i]);
+
+ if (n_rgb[i])
+ rgb[i] = rgb[i] / n_rgb[i];
+
+ dev_dbg(vdeb->dev,
+ "deb: %s: FINAL CALC: %dx%d Color %d, val %d\n",
+ vdeb->sd.name, lin, col, i, rgb[i]);
+ }
+}
+
+static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
+{
+ struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
+ ved);
+ unsigned int rgb[3];
+ unsigned int i, j;
+
+ /* If the stream in this node is not active, just return */
+ if (!vdeb->src_frame)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < vdeb->sink_fmt.height; i++)
+ for (j = 0; j < vdeb->sink_fmt.width; j++) {
+ vimc_deb_calc_rgb_sink(vdeb, sink_frame, i, j, rgb);
+ vdeb->set_rgb_src(vdeb, i, j, rgb);
+ }
+
+ return vdeb->src_frame;
+
+}
+
+static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
+ struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
+ ved);
+
+ vimc_ent_sd_unregister(ved, &vdeb->sd);
+ kfree(vdeb);
+}
+
+static int vimc_deb_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
+ struct vimc_deb_device *vdeb;
+ int ret;
+
+ /* Allocate the vdeb struct */
+ vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
+ if (!vdeb)
+ return -ENOMEM;
+
+ /* Initialize ved and sd */
+ ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
+ pdata->entity_name,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
+ (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
+ MEDIA_PAD_FL_SOURCE},
+ &vimc_deb_ops);
+ if (ret) {
+ kfree(vdeb);
+ return ret;
+ }
+
+ vdeb->ved.process_frame = vimc_deb_process_frame;
+ dev_set_drvdata(comp, &vdeb->ved);
+ vdeb->dev = comp;
+
+ /* Initialize the frame format */
+ vdeb->sink_fmt = sink_fmt_default;
+ /*
+ * TODO: Add support for more output formats, we only support
+ * RGB888 for now
+ * NOTE: the src format is always the same as the sink, except
+ * for the code
+ */
+ vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
+ vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
+
+ return 0;
+}
+
+static const struct component_ops vimc_deb_comp_ops = {
+ .bind = vimc_deb_comp_bind,
+ .unbind = vimc_deb_comp_unbind,
+};
+
+static int vimc_deb_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_deb_comp_ops);
+}
+
+static int vimc_deb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_deb_comp_ops);
+
+ return 0;
+}
+
+static const struct platform_device_id vimc_deb_driver_ids[] = {
+ {
+ .name = VIMC_DEB_DRV_NAME,
+ },
+ { }
+};
+
+static struct platform_driver vimc_deb_pdrv = {
+ .probe = vimc_deb_probe,
+ .remove = vimc_deb_remove,
+ .id_table = vimc_deb_driver_ids,
+ .driver = {
+ .name = VIMC_DEB_DRV_NAME,
+ },
+};
+
+module_platform_driver(vimc_deb_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Debayer");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
new file mode 100644
index 000000000..39b2a73df
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -0,0 +1,437 @@
+/*
+ * vimc-scaler.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#include "vimc-common.h"
+
+#define VIMC_SCA_DRV_NAME "vimc-scaler"
+
+static unsigned int sca_mult = 3;
+module_param(sca_mult, uint, 0000);
+MODULE_PARM_DESC(sca_mult, " the image size multiplier");
+
+#define IS_SINK(pad) (!pad)
+#define IS_SRC(pad) (pad)
+#define MAX_ZOOM 8
+
+struct vimc_sca_device {
+ struct vimc_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ /* NOTE: the source fmt is the same as the sink
+	 * with the width and height multiplied by sca_mult
+ */
+ struct v4l2_mbus_framefmt sink_fmt;
+ /* Values calculated when the stream starts */
+ u8 *src_frame;
+ unsigned int src_line_size;
+ unsigned int bpp;
+};
+
+static const struct v4l2_mbus_framefmt sink_fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_mbus_framefmt *mf;
+ unsigned int i;
+
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ *mf = sink_fmt_default;
+
+ for (i = 1; i < sd->entity.num_pads; i++) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mf = sink_fmt_default;
+ mf->width = mf->width * sca_mult;
+ mf->height = mf->height * sca_mult;
+ }
+
+ return 0;
+}
+
+static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
+
+ /* We don't support bayer format */
+ if (!vpix || vpix->bayer)
+ return -EINVAL;
+
+ code->code = vpix->code;
+
+ return 0;
+}
+
+static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct vimc_pix_map *vpix;
+
+ if (fse->index)
+ return -EINVAL;
+
+ /* Only accept code in the pix map table in non bayer format */
+ vpix = vimc_pix_map_by_code(fse->code);
+ if (!vpix || vpix->bayer)
+ return -EINVAL;
+
+ fse->min_width = VIMC_FRAME_MIN_WIDTH;
+ fse->min_height = VIMC_FRAME_MIN_HEIGHT;
+
+ if (IS_SINK(fse->pad)) {
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
+ } else {
+ fse->max_width = VIMC_FRAME_MAX_WIDTH * MAX_ZOOM;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT * MAX_ZOOM;
+ }
+
+ return 0;
+}
+
+static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+
+ /* Get the current sink format */
+ format->format = (format->which == V4L2_SUBDEV_FORMAT_TRY) ?
+ *v4l2_subdev_get_try_format(sd, cfg, 0) :
+ vsca->sink_fmt;
+
+ /* Scale the frame size for the source pad */
+ if (IS_SRC(format->pad)) {
+ format->format.width = vsca->sink_fmt.width * sca_mult;
+ format->format.height = vsca->sink_fmt.height * sca_mult;
+ }
+
+ return 0;
+}
+
+static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table in non bayer format */
+ vpix = vimc_pix_map_by_code(fmt->code);
+ if (!vpix || vpix->bayer)
+ fmt->code = sink_fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY)
+ fmt->field = sink_fmt_default.field;
+
+ vimc_colorimetry_clamp(fmt);
+}
+
+static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Do not change the format while stream is on */
+ if (vsca->src_frame)
+ return -EBUSY;
+
+ sink_fmt = &vsca->sink_fmt;
+ } else {
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ }
+
+ /*
+ * Do not change the format of the source pad,
+ * it is propagated from the sink
+ */
+ if (IS_SRC(fmt->pad)) {
+ fmt->format = *sink_fmt;
+ fmt->format.width = sink_fmt->width * sca_mult;
+ fmt->format.height = sink_fmt->height * sca_mult;
+ } else {
+ /* Set the new format in the sink pad */
+ vimc_sca_adjust_sink_fmt(&fmt->format);
+
+ dev_dbg(vsca->dev, "%s: sink format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
+ /* old */
+ sink_fmt->width, sink_fmt->height, sink_fmt->code,
+ sink_fmt->colorspace, sink_fmt->quantization,
+ sink_fmt->xfer_func, sink_fmt->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *sink_fmt = fmt->format;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
+ .init_cfg = vimc_sca_init_cfg,
+ .enum_mbus_code = vimc_sca_enum_mbus_code,
+ .enum_frame_size = vimc_sca_enum_frame_size,
+ .get_fmt = vimc_sca_get_fmt,
+ .set_fmt = vimc_sca_set_fmt,
+};
+
+static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+ unsigned int frame_size;
+
+ if (vsca->src_frame)
+ return 0;
+
+ /* Save the bytes per pixel of the sink */
+ vpix = vimc_pix_map_by_code(vsca->sink_fmt.code);
+ vsca->bpp = vpix->bpp;
+
+ /* Calculate the width in bytes of the src frame */
+ vsca->src_line_size = vsca->sink_fmt.width *
+ sca_mult * vsca->bpp;
+
+ /* Calculate the frame size of the source pad */
+ frame_size = vsca->src_line_size * vsca->sink_fmt.height *
+ sca_mult;
+
+ /* Allocate the frame buffer. Use vmalloc to be able to
+ * allocate a large amount of memory
+ */
+ vsca->src_frame = vmalloc(frame_size);
+ if (!vsca->src_frame)
+ return -ENOMEM;
+
+ } else {
+ if (!vsca->src_frame)
+ return 0;
+
+ vfree(vsca->src_frame);
+ vsca->src_frame = NULL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops vimc_sca_video_ops = {
+ .s_stream = vimc_sca_s_stream,
+};
+
+static const struct v4l2_subdev_ops vimc_sca_ops = {
+ .pad = &vimc_sca_pad_ops,
+ .video = &vimc_sca_video_ops,
+};
+
+static void vimc_sca_fill_pix(u8 *const ptr,
+ const u8 *const pixel,
+ const unsigned int bpp)
+{
+ unsigned int i;
+
+ /* copy the pixel to the pointer */
+ for (i = 0; i < bpp; i++)
+ ptr[i] = pixel[i];
+}
+
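+/*
+ * Nearest-neighbour upscaling: replicate the sink pixel at (lin, col) into an
+ * sca_mult x sca_mult block of the source frame.
+ */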
+static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
+ const unsigned int lin, const unsigned int col,
+ const u8 *const sink_frame)
+{
+ unsigned int i, j, index;
+ const u8 *pixel;
+
+ /* Point to the pixel value in position (lin, col) in the sink frame */
+ index = VIMC_FRAME_INDEX(lin, col,
+ vsca->sink_fmt.width,
+ vsca->bpp);
+ pixel = &sink_frame[index];
+
+ dev_dbg(vsca->dev,
+ "sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
+ vsca->sd.name, lin, col, index);
+
+ /* point to the place we are going to put the first pixel
+ * in the scaled src frame
+ */
+ index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
+ vsca->sink_fmt.width * sca_mult, vsca->bpp);
+
+ dev_dbg(vsca->dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
+ vsca->sd.name, lin * sca_mult, col * sca_mult, index);
+
+ /* Repeat this pixel mult times */
+ for (i = 0; i < sca_mult; i++) {
+ /* Iterate through each beginning of a
+ * pixel repetition in a line
+ */
+ for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
+ dev_dbg(vsca->dev,
+ "sca: %s: sca: scale_pix src pos %d\n",
+ vsca->sd.name, index + j);
+
+ /* copy the pixel to the position index + j */
+ vimc_sca_fill_pix(&vsca->src_frame[index + j],
+ pixel, vsca->bpp);
+ }
+
+ /* move the index to the next line */
+ index += vsca->src_line_size;
+ }
+}
+
+static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
+ const u8 *const sink_frame)
+{
+ unsigned int i, j;
+
+ /* Scale each pixel from the original sink frame */
+ /* TODO: implement scale down, only scale up is supported for now */
+ for (i = 0; i < vsca->sink_fmt.height; i++)
+ for (j = 0; j < vsca->sink_fmt.width; j++)
+ vimc_sca_scale_pix(vsca, i, j, sink_frame);
+}
+
+static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
+{
+ struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
+ ved);
+
+ /* If the stream in this node is not active, just return */
+ if (!vsca->src_frame)
+ return ERR_PTR(-EINVAL);
+
+ vimc_sca_fill_src_frame(vsca, sink_frame);
+
+ return vsca->src_frame;
+}
+
+static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
+ struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
+ ved);
+
+ vimc_ent_sd_unregister(ved, &vsca->sd);
+ kfree(vsca);
+}
+
+static int vimc_sca_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
+ struct vimc_sca_device *vsca;
+ int ret;
+
+ /* Allocate the vsca struct */
+ vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
+ if (!vsca)
+ return -ENOMEM;
+
+ /* Initialize ved and sd */
+ ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
+ pdata->entity_name,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
+ (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
+ MEDIA_PAD_FL_SOURCE},
+ &vimc_sca_ops);
+ if (ret) {
+ kfree(vsca);
+ return ret;
+ }
+
+ vsca->ved.process_frame = vimc_sca_process_frame;
+ dev_set_drvdata(comp, &vsca->ved);
+ vsca->dev = comp;
+
+ /* Initialize the frame format */
+ vsca->sink_fmt = sink_fmt_default;
+
+ return 0;
+}
+
+static const struct component_ops vimc_sca_comp_ops = {
+ .bind = vimc_sca_comp_bind,
+ .unbind = vimc_sca_comp_unbind,
+};
+
+static int vimc_sca_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_sca_comp_ops);
+}
+
+static int vimc_sca_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_sca_comp_ops);
+
+ return 0;
+}
+
+static const struct platform_device_id vimc_sca_driver_ids[] = {
+ {
+ .name = VIMC_SCA_DRV_NAME,
+ },
+ { }
+};
+
+static struct platform_driver vimc_sca_pdrv = {
+ .probe = vimc_sca_probe,
+ .remove = vimc_sca_remove,
+ .id_table = vimc_sca_driver_ids,
+ .driver = {
+ .name = VIMC_SCA_DRV_NAME,
+ },
+};
+
+module_platform_driver(vimc_sca_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Scaler");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
new file mode 100644
index 000000000..3f0ffd491
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -0,0 +1,423 @@
+/*
+ * vimc-sensor.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/tpg/v4l2-tpg.h>
+
+#include "vimc-common.h"
+
+#define VIMC_SEN_DRV_NAME "vimc-sensor"
+
+struct vimc_sen_device {
+ struct vimc_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ struct tpg_data tpg;
+ struct task_struct *kthread_sen;
+ u8 *frame;
+ /* The active format */
+ struct v4l2_mbus_framefmt mbus_format;
+ struct v4l2_ctrl_handler hdl;
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ unsigned int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mf = fmt_default;
+ }
+
+ return 0;
+}
+
+static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
+
+ if (!vpix)
+ return -EINVAL;
+
+ code->code = vpix->code;
+
+ return 0;
+}
+
+static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct vimc_pix_map *vpix;
+
+ if (fse->index)
+ return -EINVAL;
+
+ /* Only accept code in the pix map table */
+ vpix = vimc_pix_map_by_code(fse->code);
+ if (!vpix)
+ return -EINVAL;
+
+ fse->min_width = VIMC_FRAME_MIN_WIDTH;
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->min_height = VIMC_FRAME_MIN_HEIGHT;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
+
+ return 0;
+}
+
+static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+
+ fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) :
+ vsen->mbus_format;
+
+ return 0;
+}
+
+static void vimc_sen_tpg_s_format(struct vimc_sen_device *vsen)
+{
+ const struct vimc_pix_map *vpix =
+ vimc_pix_map_by_code(vsen->mbus_format.code);
+
+ tpg_reset_source(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height, vsen->mbus_format.field);
+ tpg_s_bytesperline(&vsen->tpg, 0, vsen->mbus_format.width * vpix->bpp);
+ tpg_s_buf_height(&vsen->tpg, vsen->mbus_format.height);
+ tpg_s_fourcc(&vsen->tpg, vpix->pixelformat);
+ /* TODO: add support for V4L2_FIELD_ALTERNATE */
+ tpg_s_field(&vsen->tpg, vsen->mbus_format.field, false);
+ tpg_s_colorspace(&vsen->tpg, vsen->mbus_format.colorspace);
+ tpg_s_ycbcr_enc(&vsen->tpg, vsen->mbus_format.ycbcr_enc);
+ tpg_s_quantization(&vsen->tpg, vsen->mbus_format.quantization);
+ tpg_s_xfer_func(&vsen->tpg, vsen->mbus_format.xfer_func);
+}
+
+static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table */
+ vpix = vimc_pix_map_by_code(fmt->code);
+ if (!vpix)
+ fmt->code = fmt_default.code;
+
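+ /* Clamp the resolution to the supported range, rounding down to even dimensions */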
+ fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ /* TODO: add support for V4L2_FIELD_ALTERNATE */
+ if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
+ fmt->field = fmt_default.field;
+
+ vimc_colorimetry_clamp(fmt);
+}
+
+static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_sen_device *vsen = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Do not change the format while stream is on */
+ if (vsen->frame)
+ return -EBUSY;
+
+ mf = &vsen->mbus_format;
+ } else {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ }
+
+ /* Set the new format */
+ vimc_sen_adjust_fmt(&fmt->format);
+
+ dev_dbg(vsen->dev, "%s: format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsen->sd.name,
+ /* old */
+ mf->width, mf->height, mf->code,
+ mf->colorspace, mf->quantization,
+ mf->xfer_func, mf->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *mf = fmt->format;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
+ .init_cfg = vimc_sen_init_cfg,
+ .enum_mbus_code = vimc_sen_enum_mbus_code,
+ .enum_frame_size = vimc_sen_enum_frame_size,
+ .get_fmt = vimc_sen_get_fmt,
+ .set_fmt = vimc_sen_set_fmt,
+};
+
+static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
+{
+ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
+ ved);
+
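+ /* Fill the preallocated frame buffer with a new test pattern image */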
+ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
+ return vsen->frame;
+}
+
+static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+ unsigned int frame_size;
+
+ if (vsen->kthread_sen)
+ /* tpg is already executing */
+ return 0;
+
+ /* Calculate the frame size */
+ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
+ frame_size = vsen->mbus_format.width * vpix->bpp *
+ vsen->mbus_format.height;
+
+ /*
+ * Allocate the frame buffer. Use vmalloc to be able to
+ * allocate a large amount of memory
+ */
+ vsen->frame = vmalloc(frame_size);
+ if (!vsen->frame)
+ return -ENOMEM;
+
+ /* configure the test pattern generator */
+ vimc_sen_tpg_s_format(vsen);
+
+ } else {
+ vfree(vsen->frame);
+ vsen->frame = NULL;
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops vimc_sen_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops vimc_sen_video_ops = {
+ .s_stream = vimc_sen_s_stream,
+};
+
+static const struct v4l2_subdev_ops vimc_sen_ops = {
+ .core = &vimc_sen_core_ops,
+ .pad = &vimc_sen_pad_ops,
+ .video = &vimc_sen_video_ops,
+};
+
+static int vimc_sen_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vimc_sen_device *vsen =
+ container_of(ctrl->handler, struct vimc_sen_device, hdl);
+
+ switch (ctrl->id) {
+ case VIMC_CID_TEST_PATTERN:
+ tpg_s_pattern(&vsen->tpg, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ tpg_s_hflip(&vsen->tpg, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ tpg_s_vflip(&vsen->tpg, ctrl->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vimc_sen_ctrl_ops = {
+ .s_ctrl = vimc_sen_s_ctrl,
+};
+
+static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
+ struct vimc_sen_device *vsen =
+ container_of(ved, struct vimc_sen_device, ved);
+
+ vimc_ent_sd_unregister(ved, &vsen->sd);
+ v4l2_ctrl_handler_free(&vsen->hdl);
+ tpg_free(&vsen->tpg);
+ kfree(vsen);
+}
+
+/* Image Processing Controls */
+static const struct v4l2_ctrl_config vimc_sen_ctrl_class = {
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
+ .id = VIMC_CID_VIMC_CLASS,
+ .name = "VIMC Controls",
+ .type = V4L2_CTRL_TYPE_CTRL_CLASS,
+};
+
+static const struct v4l2_ctrl_config vimc_sen_ctrl_test_pattern = {
+ .ops = &vimc_sen_ctrl_ops,
+ .id = VIMC_CID_TEST_PATTERN,
+ .name = "Test Pattern",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = TPG_PAT_NOISE,
+ .qmenu = tpg_pattern_strings,
+};
+
+static int vimc_sen_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
+ struct vimc_sen_device *vsen;
+ int ret;
+
+ /* Allocate the vsen struct */
+ vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
+ if (!vsen)
+ return -ENOMEM;
+
+ v4l2_ctrl_handler_init(&vsen->hdl, 4);
+
+ v4l2_ctrl_new_custom(&vsen->hdl, &vimc_sen_ctrl_class, NULL);
+ v4l2_ctrl_new_custom(&vsen->hdl, &vimc_sen_ctrl_test_pattern, NULL);
+ v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&vsen->hdl, &vimc_sen_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ vsen->sd.ctrl_handler = &vsen->hdl;
+ if (vsen->hdl.error) {
+ ret = vsen->hdl.error;
+ goto err_free_vsen;
+ }
+
+ /* Initialize ved and sd */
+ ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
+ pdata->entity_name,
+ MEDIA_ENT_F_CAM_SENSOR, 1,
+ (const unsigned long[1]) {MEDIA_PAD_FL_SOURCE},
+ &vimc_sen_ops);
+ if (ret)
+ goto err_free_hdl;
+
+ vsen->ved.process_frame = vimc_sen_process_frame;
+ dev_set_drvdata(comp, &vsen->ved);
+ vsen->dev = comp;
+
+ /* Initialize the frame format */
+ vsen->mbus_format = fmt_default;
+
+ /* Initialize the test pattern generator */
+ tpg_init(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height);
+ ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
+ if (ret)
+ goto err_unregister_ent_sd;
+
+ return 0;
+
+err_unregister_ent_sd:
+ vimc_ent_sd_unregister(&vsen->ved, &vsen->sd);
+err_free_hdl:
+ v4l2_ctrl_handler_free(&vsen->hdl);
+err_free_vsen:
+ kfree(vsen);
+
+ return ret;
+}
+
+static const struct component_ops vimc_sen_comp_ops = {
+ .bind = vimc_sen_comp_bind,
+ .unbind = vimc_sen_comp_unbind,
+};
+
+static int vimc_sen_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_sen_comp_ops);
+}
+
+static int vimc_sen_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_sen_comp_ops);
+
+ return 0;
+}
+
+static const struct platform_device_id vimc_sen_driver_ids[] = {
+ {
+ .name = VIMC_SEN_DRV_NAME,
+ },
+ { }
+};
+
+static struct platform_driver vimc_sen_pdrv = {
+ .probe = vimc_sen_probe,
+ .remove = vimc_sen_remove,
+ .id_table = vimc_sen_driver_ids,
+ .driver = {
+ .name = VIMC_SEN_DRV_NAME,
+ },
+};
+
+module_platform_driver(vimc_sen_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Sensor");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
new file mode 100644
index 000000000..392754c18
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vimc-streamer.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include "vimc-streamer.h"
+
+/**
+ * vimc_get_source_entity - get the entity connected with the first sink pad
+ *
+ * @ent: reference media_entity
+ *
+ * Helper function that returns the media entity containing the source pad
+ * linked with the first sink pad from the given media entity pad list.
+ */
+static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
+{
+ struct media_pad *pad;
+ int i;
+
+ for (i = 0; i < ent->num_pads; i++) {
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ continue;
+ pad = media_entity_remote_pad(&ent->pads[i]);
+ return pad ? pad->entity : NULL;
+ }
+ return NULL;
+}
+
+/*
+ * vimc_streamer_pipeline_terminate - Disable the stream in all entities of the pipeline
+ *
+ * @stream: the pointer to the stream structure with the pipeline to be
+ * disabled.
+ *
+ * Calls s_stream to disable the stream in each entity of the pipeline
+ *
+ */
+static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
+{
+ struct media_entity *entity;
+ struct v4l2_subdev *sd;
+
+ while (stream->pipe_size) {
+ stream->pipe_size--;
+ entity = stream->ved_pipeline[stream->pipe_size]->ent;
+ entity = vimc_get_source_entity(entity);
+ stream->ved_pipeline[stream->pipe_size] = NULL;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ }
+}
+
+/*
+ * vimc_streamer_pipeline_init - initializes the stream structure
+ *
+ * @stream: the pointer to the stream structure to be initialized
+ * @ved: the pointer to the vimc entity initializing the stream
+ *
+ * Initializes the stream structure. Walks through the entity graph to
+ * construct the pipeline used later by the streamer thread.
+ * Calls s_stream to enable stream in all entities of the pipeline.
+ */
+static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
+ struct vimc_ent_device *ved)
+{
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ int ret = 0;
+
+ stream->pipe_size = 0;
+ while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
+ if (!ved) {
+ vimc_streamer_pipeline_terminate(stream);
+ return -EINVAL;
+ }
+ stream->ved_pipeline[stream->pipe_size++] = ved;
+
+ entity = vimc_get_source_entity(ved->ent);
+ /* Check if the end of the pipeline was reached */
+ if (!entity)
+ return 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ sd = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ vimc_streamer_pipeline_terminate(stream);
+ return ret;
+ }
+ ved = v4l2_get_subdevdata(sd);
+ } else {
+ vdev = container_of(entity,
+ struct video_device,
+ entity);
+ ved = video_get_drvdata(vdev);
+ }
+ }
+
+ vimc_streamer_pipeline_terminate(stream);
+ return -EINVAL;
+}
+
+static int vimc_streamer_thread(void *data)
+{
+ struct vimc_stream *stream = data;
+ int i;
+
+ set_freezable();
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
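+ /* Run the pipeline from the source entity (last in the array) down to the
+ * capture device (index 0), passing each produced frame to the next entity */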
+ for (i = stream->pipe_size - 1; i >= 0; i--) {
+ stream->frame = stream->ved_pipeline[i]->process_frame(
+ stream->ved_pipeline[i],
+ stream->frame);
+ if (!stream->frame)
+ break;
+ if (IS_ERR(stream->frame))
+ break;
+ }
+ /* Sleep until the next frame period, emulating a 60 Hz source */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 60);
+ }
+
+ return 0;
+}
+
+int vimc_streamer_s_stream(struct vimc_stream *stream,
+ struct vimc_ent_device *ved,
+ int enable)
+{
+ int ret;
+
+ if (!stream || !ved)
+ return -EINVAL;
+
+ if (enable) {
+ if (stream->kthread)
+ return 0;
+
+ ret = vimc_streamer_pipeline_init(stream, ved);
+ if (ret)
+ return ret;
+
+ stream->kthread = kthread_run(vimc_streamer_thread, stream,
+ "vimc-streamer thread");
+
+ if (IS_ERR(stream->kthread))
+ return PTR_ERR(stream->kthread);
+
+ } else {
+ if (!stream->kthread)
+ return 0;
+
+ ret = kthread_stop(stream->kthread);
+ if (ret)
+ return ret;
+
+ stream->kthread = NULL;
+
+ vimc_streamer_pipeline_terminate(stream);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
+MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
new file mode 100644
index 000000000..752af2e2d
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-streamer.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vimc-streamer.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
+ *
+ */
+
+#ifndef _VIMC_STREAMER_H_
+#define _VIMC_STREAMER_H_
+
+#include <media/media-device.h>
+
+#include "vimc-common.h"
+
+#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
+
+struct vimc_stream {
+ struct media_pipeline pipe;
+ struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
+ unsigned int pipe_size;
+ u8 *frame;
+ struct task_struct *kthread;
+};
+
+/**
+ * vimc_streamer_s_stream - start/stop the stream
+ *
+ * @stream: the pointer to the stream to start or stop
+ * @ved: The last entity of the streamer pipeline
+ * @enable: any non-zero value starts the stream, zero stops it
+ *
+ */
+int vimc_streamer_s_stream(struct vimc_stream *stream,
+ struct vimc_ent_device *ved,
+ int enable);
+
+#endif /* _VIMC_STREAMER_H_ */
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
new file mode 100644
index 000000000..154de92dd
--- /dev/null
+++ b/drivers/media/platform/vivid/Kconfig
@@ -0,0 +1,41 @@
+config VIDEO_VIVID
+ tristate "Virtual Video Test Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FB
+ depends on HAS_DMA
+ select FONT_SUPPORT
+ select FONT_8x16
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_V4L2_TPG
+ default n
+ ---help---
+ Enables a virtual video driver. This driver emulates webcam,
+ TV, S-Video and HDMI capture hardware, including VBI support for
+ the SDTV inputs. Video output, VBI output, radio receivers,
+ transmitters and software defined radio capture are also emulated.
+
+ It is highly configurable and is ideal for testing applications.
+ Error injection is supported to test rare errors that are hard
+ to reproduce in real hardware.
+
+ Say Y here if you want to test video apps or debug V4L devices.
+ When in doubt, say N.
+
+config VIDEO_VIVID_CEC
+ bool "Enable CEC emulation support"
+ depends on VIDEO_VIVID
+ select CEC_CORE
+ ---help---
+ When selected the vivid module will emulate the optional
+ HDMI CEC feature.
+
+config VIDEO_VIVID_MAX_DEVS
+ int "Maximum number of devices"
+ depends on VIDEO_VIVID
+ default "64"
+ ---help---
+ This allows you to specify the maximum number of devices supported
+ by the vivid driver.
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
new file mode 100644
index 000000000..2f5762e33
--- /dev/null
+++ b/drivers/media/platform/vivid/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
+ vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
+ vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
+ vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
+ vivid-osd.o
+ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
+ vivid-objs += vivid-cec.o
+endif
+
+obj-$(CONFIG_VIDEO_VIVID) += vivid.o
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
new file mode 100644
index 000000000..71105fa4c
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-cec.c - A Virtual Video Test Driver, cec emulation
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <media/cec.h>
+
+#include "vivid-core.h"
+#include "vivid-cec.h"
+
+#define CEC_TIM_START_BIT_TOTAL 4500
+#define CEC_TIM_START_BIT_LOW 3700
+#define CEC_TIM_START_BIT_HIGH 800
+#define CEC_TIM_DATA_BIT_TOTAL 2400
+#define CEC_TIM_DATA_BIT_0_LOW 1500
+#define CEC_TIM_DATA_BIT_0_HIGH 900
+#define CEC_TIM_DATA_BIT_1_LOW 600
+#define CEC_TIM_DATA_BIT_1_HIGH 1800
+
+void vivid_cec_bus_free_work(struct vivid_dev *dev)
+{
+ spin_lock(&dev->cec_slock);
+ while (!list_empty(&dev->cec_work_list)) {
+ struct vivid_cec_work *cw =
+ list_first_entry(&dev->cec_work_list,
+ struct vivid_cec_work, list);
+
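+ /* Drop the lock while cancelling: the work handlers take cec_slock
+ * and cancel_delayed_work_sync() may sleep */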
+ spin_unlock(&dev->cec_slock);
+ cancel_delayed_work_sync(&cw->work);
+ spin_lock(&dev->cec_slock);
+ list_del(&cw->list);
+ cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_LOW_DRIVE);
+ kfree(cw);
+ }
+ spin_unlock(&dev->cec_slock);
+}
+
+static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
+ struct cec_adapter *adap, u8 dest)
+{
+ unsigned int i;
+
+ if (dest >= 0xf)
+ return false;
+
+ if (adap != dev->cec_rx_adap && dev->cec_rx_adap &&
+ dev->cec_rx_adap->is_configured &&
+ cec_has_log_addr(dev->cec_rx_adap, dest))
+ return true;
+
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++) {
+ if (adap == dev->cec_tx_adap[i])
+ continue;
+ if (!dev->cec_tx_adap[i]->is_configured)
+ continue;
+ if (cec_has_log_addr(dev->cec_tx_adap[i], dest))
+ return true;
+ }
+ return false;
+}
+
+static void vivid_cec_pin_adap_events(struct cec_adapter *adap, ktime_t ts,
+ const struct cec_msg *msg, bool nacked)
+{
+ unsigned int len = nacked ? 1 : msg->len;
+ unsigned int i;
+ bool bit;
+
+ if (adap == NULL)
+ return;
+
+ /*
+ * Suffix ULL on constant 10 makes the expression
+ * CEC_TIM_START_BIT_TOTAL + 10ULL * len * CEC_TIM_DATA_BIT_TOTAL
+ * to be evaluated using 64-bit unsigned arithmetic (u64), which
+ * is what ktime_sub_us expects as second argument.
+ */
+ ts = ktime_sub_us(ts, CEC_TIM_START_BIT_TOTAL +
+ 10ULL * len * CEC_TIM_DATA_BIT_TOTAL);
+ cec_queue_pin_cec_event(adap, false, false, ts);
+ ts = ktime_add_us(ts, CEC_TIM_START_BIT_LOW);
+ cec_queue_pin_cec_event(adap, true, false, ts);
+ ts = ktime_add_us(ts, CEC_TIM_START_BIT_HIGH);
+
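+ /* Each transmitted byte occupies 10 bit periods: 8 data bits plus EOM and ACK */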
+ for (i = 0; i < 10 * len; i++) {
+ switch (i % 10) {
+ case 0 ... 7:
+ bit = msg->msg[i / 10] & (0x80 >> (i % 10));
+ break;
+ case 8: /* EOM */
+ bit = i / 10 == msg->len - 1;
+ break;
+ case 9: /* ACK */
+ bit = cec_msg_is_broadcast(msg) ^ nacked;
+ break;
+ }
+ cec_queue_pin_cec_event(adap, false, false, ts);
+ if (bit)
+ ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_LOW);
+ else
+ ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_LOW);
+ cec_queue_pin_cec_event(adap, true, false, ts);
+ if (bit)
+ ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_HIGH);
+ else
+ ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_HIGH);
+ }
+}
+
+static void vivid_cec_pin_events(struct vivid_dev *dev,
+ const struct cec_msg *msg, bool nacked)
+{
+ ktime_t ts = ktime_get();
+ unsigned int i;
+
+ vivid_cec_pin_adap_events(dev->cec_rx_adap, ts, msg, nacked);
+ for (i = 0; i < MAX_OUTPUTS; i++)
+ vivid_cec_pin_adap_events(dev->cec_tx_adap[i], ts, msg, nacked);
+}
+
+static void vivid_cec_xfer_done_worker(struct work_struct *work)
+{
+ struct vivid_cec_work *cw =
+ container_of(work, struct vivid_cec_work, work.work);
+ struct vivid_dev *dev = cw->dev;
+ struct cec_adapter *adap = cw->adap;
+ u8 dest = cec_msg_destination(&cw->msg);
+ bool valid_dest;
+ unsigned int i;
+
+ valid_dest = cec_msg_is_broadcast(&cw->msg);
+ if (!valid_dest)
+ valid_dest = vivid_cec_find_dest_adap(dev, adap, dest);
+
+ cw->tx_status = valid_dest ? CEC_TX_STATUS_OK : CEC_TX_STATUS_NACK;
+ spin_lock(&dev->cec_slock);
+ dev->cec_xfer_time_jiffies = 0;
+ dev->cec_xfer_start_jiffies = 0;
+ list_del(&cw->list);
+ spin_unlock(&dev->cec_slock);
+ vivid_cec_pin_events(dev, &cw->msg, !valid_dest);
+ cec_transmit_attempt_done(cw->adap, cw->tx_status);
+
+ /* Deliver the message to every other emulated CEC adapter */
+ if (adap != dev->cec_rx_adap)
+ cec_received_msg(dev->cec_rx_adap, &cw->msg);
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
+ if (adap != dev->cec_tx_adap[i])
+ cec_received_msg(dev->cec_tx_adap[i], &cw->msg);
+ kfree(cw);
+}
+
+static void vivid_cec_xfer_try_worker(struct work_struct *work)
+{
+ struct vivid_cec_work *cw =
+ container_of(work, struct vivid_cec_work, work.work);
+ struct vivid_dev *dev = cw->dev;
+
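+ /* If another message grabbed the bus in the meantime, emulate losing
+ * arbitration; otherwise this message becomes the active transfer */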
+ spin_lock(&dev->cec_slock);
+ if (dev->cec_xfer_time_jiffies) {
+ list_del(&cw->list);
+ spin_unlock(&dev->cec_slock);
+ cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_ARB_LOST);
+ kfree(cw);
+ } else {
+ INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
+ dev->cec_xfer_start_jiffies = jiffies;
+ dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
+ spin_unlock(&dev->cec_slock);
+ schedule_delayed_work(&cw->work, dev->cec_xfer_time_jiffies);
+ }
+}
+
+static int vivid_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ adap->cec_pin_is_high = true;
+ return 0;
+}
+
+static int vivid_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ return 0;
+}
+
+/*
+ * One data bit takes 2400 us, each byte needs 10 bits so that's 24000 us
+ * per byte.
+ */
+#define USECS_PER_BYTE 24000
+
+static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct vivid_dev *dev = cec_get_drvdata(adap);
+ struct vivid_cec_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);
+ long delta_jiffies = 0;
+
+ if (cw == NULL)
+ return -ENOMEM;
+ cw->dev = dev;
+ cw->adap = adap;
+ cw->usecs = CEC_FREE_TIME_TO_USEC(signal_free_time) +
+ msg->len * USECS_PER_BYTE;
+ cw->msg = *msg;
+
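+ /* If the bus is idle, schedule completion after the emulated transfer
+ * time; otherwise queue a retry for when the current transfer ends */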
+ spin_lock(&dev->cec_slock);
+ list_add(&cw->list, &dev->cec_work_list);
+ if (dev->cec_xfer_time_jiffies == 0) {
+ INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
+ dev->cec_xfer_start_jiffies = jiffies;
+ dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
+ delta_jiffies = dev->cec_xfer_time_jiffies;
+ } else {
+ INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_try_worker);
+ delta_jiffies = dev->cec_xfer_start_jiffies +
+ dev->cec_xfer_time_jiffies - jiffies;
+ }
+ spin_unlock(&dev->cec_slock);
+ schedule_delayed_work(&cw->work, delta_jiffies < 0 ? 0 : delta_jiffies);
+ return 0;
+}
+
+static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct vivid_dev *dev = cec_get_drvdata(adap);
+ struct cec_msg reply;
+ u8 dest = cec_msg_destination(msg);
+ u8 disp_ctl;
+ char osd[14];
+
+ if (cec_msg_is_broadcast(msg))
+ dest = adap->log_addrs.log_addr[0];
+ cec_msg_init(&reply, dest, cec_msg_initiator(msg));
+
+ switch (cec_msg_opcode(msg)) {
+ case CEC_MSG_SET_OSD_STRING:
+ if (!cec_is_sink(adap))
+ return -ENOMSG;
+ cec_ops_set_osd_string(msg, &disp_ctl, osd);
+ switch (disp_ctl) {
+ case CEC_OP_DISP_CTL_DEFAULT:
+ strcpy(dev->osd, osd);
+ dev->osd_jiffies = jiffies;
+ break;
+ case CEC_OP_DISP_CTL_UNTIL_CLEARED:
+ strcpy(dev->osd, osd);
+ dev->osd_jiffies = 0;
+ break;
+ case CEC_OP_DISP_CTL_CLEAR:
+ dev->osd[0] = 0;
+ dev->osd_jiffies = 0;
+ break;
+ default:
+ cec_msg_feature_abort(&reply, cec_msg_opcode(msg),
+ CEC_OP_ABORT_INVALID_OP);
+ cec_transmit_msg(adap, &reply, false);
+ break;
+ }
+ break;
+ default:
+ return -ENOMSG;
+ }
+ return 0;
+}
+
+static const struct cec_adap_ops vivid_cec_adap_ops = {
+ .adap_enable = vivid_cec_adap_enable,
+ .adap_log_addr = vivid_cec_adap_log_addr,
+ .adap_transmit = vivid_cec_adap_transmit,
+ .received = vivid_received,
+};
+
+struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
+ unsigned int idx,
+ bool is_source)
+{
+ char name[sizeof(dev->vid_out_dev.name) + 2];
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
+
+ snprintf(name, sizeof(name), "%s%d",
+ is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
+ idx);
+ return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
+ name, caps, 1);
+}
diff --git a/drivers/media/platform/vivid/vivid-cec.h b/drivers/media/platform/vivid/vivid-cec.h
new file mode 100644
index 000000000..7524ed48a
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-cec.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-cec.h - A Virtual Video Test Driver, cec emulation
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifdef CONFIG_VIDEO_VIVID_CEC
+struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
+ unsigned int idx,
+ bool is_source);
+void vivid_cec_bus_free_work(struct vivid_dev *dev);
+
+#else
+
+static inline void vivid_cec_bus_free_work(struct vivid_dev *dev)
+{
+}
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
new file mode 100644
index 000000000..b603ca412
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -0,0 +1,1541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-core.c - A Virtual Video Test Driver, core initialization
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-rx.h"
+#include "vivid-radio-tx.h"
+#include "vivid-sdr-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-out.h"
+#include "vivid-osd.h"
+#include "vivid-cec.h"
+#include "vivid-ctrls.h"
+
+#define VIVID_MODULE_NAME "vivid"
+
+/* The maximum number of vivid devices */
+#define VIVID_MAX_DEVS CONFIG_VIDEO_VIVID_MAX_DEVS
+
+MODULE_DESCRIPTION("Virtual Video Test Driver");
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_LICENSE("GPL");
+
+static unsigned n_devs = 1;
+module_param(n_devs, uint, 0444);
+MODULE_PARM_DESC(n_devs, " number of driver instances to create");
+
+static int vid_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vid_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vid_cap_nr, " videoX start number, -1 is autodetect");
+
+static int vid_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vid_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vid_out_nr, " videoX start number, -1 is autodetect");
+
+static int vbi_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vbi_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vbi_cap_nr, " vbiX start number, -1 is autodetect");
+
+static int vbi_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vbi_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vbi_out_nr, " vbiX start number, -1 is autodetect");
+
+static int sdr_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(sdr_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(sdr_cap_nr, " swradioX start number, -1 is autodetect");
+
+static int radio_rx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(radio_rx_nr, int, NULL, 0444);
+MODULE_PARM_DESC(radio_rx_nr, " radioX start number, -1 is autodetect");
+
+static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(radio_tx_nr, int, NULL, 0444);
+MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect");
+
+static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(ccs_cap_mode, int, NULL, 0444);
+MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
+ "\t\t bit 0=crop, 1=compose, 2=scale,\n"
+ "\t\t -1=user-controlled (default)");
+
+static int ccs_out_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(ccs_out_mode, int, NULL, 0444);
+MODULE_PARM_DESC(ccs_out_mode, " output crop/compose/scale mode:\n"
+ "\t\t bit 0=crop, 1=compose, 2=scale,\n"
+ "\t\t -1=user-controlled (default)");
+
+static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 };
+module_param_array(multiplanar, uint, NULL, 0444);
+MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");
+
+/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
+static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
+module_param_array(node_types, uint, NULL, 0444);
+MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the following meaning:\n"
+ "\t\t bit 0: Video Capture node\n"
+ "\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
+ "\t\t bit 4: Radio Receiver node\n"
+ "\t\t bit 5: Software Defined Radio Receiver node\n"
+ "\t\t bit 8: Video Output node\n"
+ "\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
+ "\t\t bit 12: Radio Transmitter node\n"
+ "\t\t bit 16: Framebuffer for testing overlays");
+
+/* Default: 4 inputs */
+static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
+module_param_array(num_inputs, uint, NULL, 0444);
+MODULE_PARM_DESC(num_inputs, " number of inputs, default is 4");
+
+/* Default: input 0 = WEBCAM, 1 = TV, 2 = SVID, 3 = HDMI */
+static unsigned input_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0xe4 };
+module_param_array(input_types, uint, NULL, 0444);
+MODULE_PARM_DESC(input_types, " input types, default is 0xe4. Two bits per input,\n"
+ "\t\t bits 0-1 == input 0, bits 31-30 == input 15.\n"
+ "\t\t Type 0 == webcam, 1 == TV, 2 == S-Video, 3 == HDMI");
+
+/* Default: 2 outputs */
+static unsigned num_outputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 };
+module_param_array(num_outputs, uint, NULL, 0444);
+MODULE_PARM_DESC(num_outputs, " number of outputs, default is 2");
+
+/* Default: output 0 = SVID, 1 = HDMI */
+static unsigned output_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 };
+module_param_array(output_types, uint, NULL, 0444);
+MODULE_PARM_DESC(output_types, " output types, default is 0x02. One bit per output,\n"
+ "\t\t bit 0 == output 0, bit 15 == output 15.\n"
+ "\t\t Type 0 == S-Video, 1 == HDMI");
+
+unsigned vivid_debug;
+module_param(vivid_debug, uint, 0644);
+MODULE_PARM_DESC(vivid_debug, " activates debug info");
+
+static bool no_error_inj;
+module_param(no_error_inj, bool, 0444);
+MODULE_PARM_DESC(no_error_inj, " if set disable the error injecting controls");
+
+static unsigned int allocators[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0 };
+module_param_array(allocators, uint, NULL, 0444);
+MODULE_PARM_DESC(allocators, " memory allocator selection, default is 0.\n"
+ "\t\t 0 == vmalloc\n"
+ "\t\t 1 == dma-contig");
+
+static struct vivid_dev *vivid_devs[VIVID_MAX_DEVS];
+
+const struct v4l2_rect vivid_min_rect = {
+ 0, 0, MIN_WIDTH, MIN_HEIGHT
+};
+
+const struct v4l2_rect vivid_max_rect = {
+ 0, 0, MAX_WIDTH * MAX_ZOOM, MAX_HEIGHT * MAX_ZOOM
+};
+
+static const u8 vivid_hdmi_edid[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x1a, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59,
+ 0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40,
+ 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x08, 0xe8,
+ 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58,
+ 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x76,
+ 0x69, 0x76, 0x69, 0x64, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
+
+ 0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
+ 0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
+ 0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
+ 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
+ 0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
+ 0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
+ 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
+ 0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
+ 0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
+ 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
+ 0x1e, 0x1a, 0x36, 0x80, 0xa0, 0x70, 0x38, 0x1f,
+ 0x40, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32,
+ 0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
+ 0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
+ 0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
+};
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ strcpy(cap->driver, "vivid");
+ strcpy(cap->card, "vivid");
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev->v4l2_dev.name);
+
+ cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
+ dev->vbi_cap_caps | dev->vbi_out_caps |
+ dev->radio_rx_caps | dev->radio_tx_caps |
+ dev->sdr_cap_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_s_hw_freq_seek(file, fh, a);
+ return -ENOTTY;
+}
+
+static int vidioc_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_enum_freq_bands(file, fh, band);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_enum_freq_bands(file, fh, band);
+ return -ENOTTY;
+}
+
+static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_g_tuner(file, fh, vt);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_g_tuner(file, fh, vt);
+ return vivid_video_g_tuner(file, fh, vt);
+}
+
+static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_s_tuner(file, fh, vt);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_s_tuner(file, fh, vt);
+ return vivid_video_s_tuner(file, fh, vt);
+}
+
+static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_g_frequency(file,
+ vdev->vfl_dir == VFL_DIR_RX ?
+ &dev->radio_rx_freq : &dev->radio_tx_freq, vf);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_g_frequency(file, fh, vf);
+ return vivid_video_g_frequency(file, fh, vf);
+}
+
+static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_s_frequency(file,
+ vdev->vfl_dir == VFL_DIR_RX ?
+ &dev->radio_rx_freq : &dev->radio_tx_freq, vf);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_s_frequency(file, fh, vf);
+ return vivid_video_s_frequency(file, fh, vf);
+}
+
+static int vidioc_overlay(struct file *file, void *fh, unsigned i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_overlay(file, fh, i);
+ return vivid_vid_out_overlay(file, fh, i);
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_g_fbuf(file, fh, a);
+ return vivid_vid_out_g_fbuf(file, fh, a);
+}
+
+static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_fbuf(file, fh, a);
+ return vivid_vid_out_s_fbuf(file, fh, a);
+}
+
+static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id id)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_std(file, fh, id);
+ return vivid_vid_out_s_std(file, fh, id);
+}
+
+static int vidioc_s_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_dv_timings(file, fh, timings);
+ return vivid_vid_out_s_dv_timings(file, fh, timings);
+}
+
+static int vidioc_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cc)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_cropcap(file, fh, cc);
+ return vivid_vid_out_cropcap(file, fh, cc);
+}
+
+static int vidioc_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_g_selection(file, fh, sel);
+ return vivid_vid_out_g_selection(file, fh, sel);
+}
+
+static int vidioc_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_selection(file, fh, sel);
+ return vivid_vid_out_s_selection(file, fh, sel);
+}
+
+static int vidioc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_g_parm(file, fh, parm);
+ return vivid_vid_out_g_parm(file, fh, parm);
+}
+
+static int vidioc_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_parm(file, fh, parm);
+ return vivid_vid_out_g_parm(file, fh, parm);
+}
+
+static int vidioc_log_status(struct file *file, void *fh)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ v4l2_ctrl_log_status(file, fh);
+ if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_GRABBER)
+ tpg_log_status(&dev->tpg);
+ return 0;
+}
+
+static ssize_t vivid_radio_read(struct file *file, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_TX)
+ return -EINVAL;
+ return vivid_radio_rx_read(file, buf, size, offset);
+}
+
+static ssize_t vivid_radio_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return -EINVAL;
+ return vivid_radio_tx_write(file, buf, size, offset);
+}
+
+static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_radio_rx_poll(file, wait);
+ return vivid_radio_tx_poll(file, wait);
+}
+
+static bool vivid_is_in_use(struct video_device *vdev)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+ res = !list_empty(&vdev->fh_list);
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+ return res;
+}
+
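+/* Return true if only one of this instance's device nodes currently has open file handles */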
+static bool vivid_is_last_user(struct vivid_dev *dev)
+{
+ unsigned uses = vivid_is_in_use(&dev->vid_cap_dev) +
+ vivid_is_in_use(&dev->vid_out_dev) +
+ vivid_is_in_use(&dev->vbi_cap_dev) +
+ vivid_is_in_use(&dev->vbi_out_dev) +
+ vivid_is_in_use(&dev->sdr_cap_dev) +
+ vivid_is_in_use(&dev->radio_rx_dev) +
+ vivid_is_in_use(&dev->radio_tx_dev);
+
+ return uses == 1;
+}
+
+static int vivid_fop_release(struct file *file)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ mutex_lock(&dev->mutex);
+ if (!no_error_inj && v4l2_fh_is_singular_file(file) &&
+ !video_is_registered(vdev) && vivid_is_last_user(dev)) {
+ /*
+ * I am the last user of this driver, and a disconnect
+ * was forced (since this video_device is unregistered),
+ * so re-register all video_device's again.
+ */
+ v4l2_info(&dev->v4l2_dev, "reconnect\n");
+ set_bit(V4L2_FL_REGISTERED, &dev->vid_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->vid_out_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->vbi_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->vbi_out_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ }
+ mutex_unlock(&dev->mutex);
+ if (file->private_data == dev->overlay_cap_owner)
+ dev->overlay_cap_owner = NULL;
+ if (file->private_data == dev->radio_rx_rds_owner) {
+ dev->radio_rx_rds_last_block = 0;
+ dev->radio_rx_rds_owner = NULL;
+ }
+ if (file->private_data == dev->radio_tx_rds_owner) {
+ dev->radio_tx_rds_last_block = 0;
+ dev->radio_tx_rds_owner = NULL;
+ }
+ if (vdev->queue)
+ return vb2_fop_release(file);
+ return v4l2_fh_release(file);
+}
+
+static const struct v4l2_file_operations vivid_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vivid_fop_release,
+ .read = vb2_fop_read,
+ .write = vb2_fop_write,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_file_operations vivid_radio_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vivid_fop_release,
+ .read = vivid_radio_read,
+ .write = vivid_radio_write,
+ .poll = vivid_radio_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out_mplane,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+ .vidioc_cropcap = vidioc_cropcap,
+
+ .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
+
+ .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap,
+ .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_fmt_sliced_vbi_cap,
+ .vidioc_s_fmt_sliced_vbi_cap = vidioc_s_fmt_sliced_vbi_cap,
+ .vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap,
+
+ .vidioc_g_fmt_vbi_out = vidioc_g_fmt_vbi_out,
+ .vidioc_try_fmt_vbi_out = vidioc_g_fmt_vbi_out,
+ .vidioc_s_fmt_vbi_out = vidioc_s_fmt_vbi_out,
+
+ .vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out,
+ .vidioc_try_fmt_sliced_vbi_out = vidioc_try_fmt_sliced_vbi_out,
+ .vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out,
+
+ .vidioc_enum_fmt_sdr_cap = vidioc_enum_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = vidioc_g_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = vidioc_try_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = vidioc_s_fmt_sdr_cap,
+
+ .vidioc_overlay = vidioc_overlay,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
+
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_out_overlay,
+ .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_out_overlay,
+ .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_out_overlay,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_s_audio = vidioc_s_audio,
+ .vidioc_g_audio = vidioc_g_audio,
+ .vidioc_enumaudio = vidioc_enumaudio,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_modulator = vidioc_s_modulator,
+ .vidioc_g_modulator = vidioc_g_modulator,
+ .vidioc_s_hw_freq_seek = vidioc_s_hw_freq_seek,
+ .vidioc_enum_freq_bands = vidioc_enum_freq_bands,
+
+ .vidioc_enum_output = vidioc_enum_output,
+ .vidioc_g_output = vidioc_g_output,
+ .vidioc_s_output = vidioc_s_output,
+ .vidioc_s_audout = vidioc_s_audout,
+ .vidioc_g_audout = vidioc_g_audout,
+ .vidioc_enumaudout = vidioc_enumaudout,
+
+ .vidioc_querystd = vidioc_querystd,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_s_dv_timings = vidioc_s_dv_timings,
+ .vidioc_g_dv_timings = vidioc_g_dv_timings,
+ .vidioc_query_dv_timings = vidioc_query_dv_timings,
+ .vidioc_enum_dv_timings = vidioc_enum_dv_timings,
+ .vidioc_dv_timings_cap = vidioc_dv_timings_cap,
+ .vidioc_g_edid = vidioc_g_edid,
+ .vidioc_s_edid = vidioc_s_edid,
+
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------
+ Initialization and module stuff
+ ------------------------------------------------------------------*/
+
+static void vivid_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct vivid_dev *dev = container_of(v4l2_dev, struct vivid_dev, v4l2_dev);
+
+ vivid_free_controls(dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vfree(dev->scaled_line);
+ vfree(dev->blended_line);
+ vfree(dev->edid);
+ vfree(dev->bitmap_cap);
+ vfree(dev->bitmap_out);
+ tpg_free(&dev->tpg);
+ kfree(dev->query_dv_timings_qmenu);
+ kfree(dev);
+}
+
+static int vivid_create_instance(struct platform_device *pdev, int inst)
+{
+ static const struct v4l2_dv_timings def_dv_timings =
+ V4L2_DV_BT_CEA_1280X720P60;
+ static const struct vb2_mem_ops * const vivid_mem_ops[2] = {
+ &vb2_vmalloc_memops,
+ &vb2_dma_contig_memops,
+ };
+ unsigned in_type_counter[4] = { 0, 0, 0, 0 };
+ unsigned out_type_counter[4] = { 0, 0, 0, 0 };
+ int ccs_cap = ccs_cap_mode[inst];
+ int ccs_out = ccs_out_mode[inst];
+ bool has_tuner;
+ bool has_modulator;
+ struct vivid_dev *dev;
+ struct video_device *vfd;
+ struct vb2_queue *q;
+ unsigned node_type = node_types[inst];
+ unsigned int allocator = allocators[inst];
+ v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
+ int ret;
+ int i;
+
+ /* allocate main vivid state structure */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->inst = inst;
+
+ /* register v4l2_device */
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
+ "%s-%03d", VIVID_MODULE_NAME, inst);
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ kfree(dev);
+ return ret;
+ }
+ dev->v4l2_dev.release = vivid_dev_release;
+
+ /* start detecting feature set */
+
+ /* do we use single- or multi-planar? */
+ dev->multiplanar = multiplanar[inst] > 1;
+ v4l2_info(&dev->v4l2_dev, "using %splanar format API\n",
+ dev->multiplanar ? "multi" : "single ");
+
+ /* how many inputs do we have and of what type? */
+ dev->num_inputs = num_inputs[inst];
+ if (dev->num_inputs < 1)
+ dev->num_inputs = 1;
+ if (dev->num_inputs >= MAX_INPUTS)
+ dev->num_inputs = MAX_INPUTS;
+ for (i = 0; i < dev->num_inputs; i++) {
+ dev->input_type[i] = (input_types[inst] >> (i * 2)) & 0x3;
+ dev->input_name_counter[i] = in_type_counter[dev->input_type[i]]++;
+ }
+ dev->has_audio_inputs = in_type_counter[TV] && in_type_counter[SVID];
+ if (in_type_counter[HDMI] == 16) {
+ /* The CEC physical address only allows for max 15 inputs */
+ in_type_counter[HDMI]--;
+ dev->num_inputs--;
+ }
+
+ /* how many outputs do we have and of what type? */
+ dev->num_outputs = num_outputs[inst];
+ if (dev->num_outputs < 1)
+ dev->num_outputs = 1;
+ if (dev->num_outputs >= MAX_OUTPUTS)
+ dev->num_outputs = MAX_OUTPUTS;
+ for (i = 0; i < dev->num_outputs; i++) {
+ dev->output_type[i] = ((output_types[inst] >> i) & 1) ? HDMI : SVID;
+ dev->output_name_counter[i] = out_type_counter[dev->output_type[i]]++;
+ }
+ dev->has_audio_outputs = out_type_counter[SVID];
+ if (out_type_counter[HDMI] == 16) {
+ /*
+ * The CEC physical address only allows for max 15 inputs,
+ * so outputs are also limited to 15 to allow for easy
+ * CEC output to input mapping.
+ */
+ out_type_counter[HDMI]--;
+ dev->num_outputs--;
+ }
+
+ /* do we create a video capture device? */
+ dev->has_vid_cap = node_type & 0x0001;
+
+ /* do we create a vbi capture device? */
+ if (in_type_counter[TV] || in_type_counter[SVID]) {
+ dev->has_raw_vbi_cap = node_type & 0x0004;
+ dev->has_sliced_vbi_cap = node_type & 0x0008;
+ dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap;
+ }
+
+ /* do we create a video output device? */
+ dev->has_vid_out = node_type & 0x0100;
+
+ /* do we create a vbi output device? */
+ if (out_type_counter[SVID]) {
+ dev->has_raw_vbi_out = node_type & 0x0400;
+ dev->has_sliced_vbi_out = node_type & 0x0800;
+ dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out;
+ }
+
+ /* do we create a radio receiver device? */
+ dev->has_radio_rx = node_type & 0x0010;
+
+ /* do we create a radio transmitter device? */
+ dev->has_radio_tx = node_type & 0x1000;
+
+ /* do we create a software defined radio capture device? */
+ dev->has_sdr_cap = node_type & 0x0020;
+
+ /* do we have a tuner? */
+ has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
+ dev->has_radio_rx || dev->has_sdr_cap;
+
+ /* do we have a modulator? */
+ has_modulator = dev->has_radio_tx;
+
+ if (dev->has_vid_cap)
+ /* do we have a framebuffer for overlay testing? */
+ dev->has_fb = node_type & 0x10000;
+
+ /* can we do crop/compose/scaling while capturing? */
+ if (no_error_inj && ccs_cap == -1)
+ ccs_cap = 7;
+
+ /* if ccs_cap == -1, then the user can select it using controls */
+ if (ccs_cap != -1) {
+ dev->has_crop_cap = ccs_cap & 1;
+ dev->has_compose_cap = ccs_cap & 2;
+ dev->has_scaler_cap = ccs_cap & 4;
+ v4l2_info(&dev->v4l2_dev, "Capture Crop: %c Compose: %c Scaler: %c\n",
+ dev->has_crop_cap ? 'Y' : 'N',
+ dev->has_compose_cap ? 'Y' : 'N',
+ dev->has_scaler_cap ? 'Y' : 'N');
+ }
+
+ /* can we do crop/compose/scaling with video output? */
+ if (no_error_inj && ccs_out == -1)
+ ccs_out = 7;
+
+ /* if ccs_out == -1, then the user can select it using controls */
+ if (ccs_out != -1) {
+ dev->has_crop_out = ccs_out & 1;
+ dev->has_compose_out = ccs_out & 2;
+ dev->has_scaler_out = ccs_out & 4;
+ v4l2_info(&dev->v4l2_dev, "Output Crop: %c Compose: %c Scaler: %c\n",
+ dev->has_crop_out ? 'Y' : 'N',
+ dev->has_compose_out ? 'Y' : 'N',
+ dev->has_scaler_out ? 'Y' : 'N');
+ }
+
+ /* end detecting feature set */
+
+ if (dev->has_vid_cap) {
+ /* set up the capabilities of the video capture device */
+ dev->vid_cap_caps = dev->multiplanar ?
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE :
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY;
+ dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->vid_cap_caps |= V4L2_CAP_AUDIO;
+ if (in_type_counter[TV])
+ dev->vid_cap_caps |= V4L2_CAP_TUNER;
+ }
+ if (dev->has_vid_out) {
+ /* set up the capabilities of the video output device */
+ dev->vid_out_caps = dev->multiplanar ?
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE :
+ V4L2_CAP_VIDEO_OUTPUT;
+ if (dev->has_fb)
+ dev->vid_out_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ dev->vid_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->vid_out_caps |= V4L2_CAP_AUDIO;
+ }
+ if (dev->has_vbi_cap) {
+ /* set up the capabilities of the vbi capture device */
+ dev->vbi_cap_caps = (dev->has_raw_vbi_cap ? V4L2_CAP_VBI_CAPTURE : 0) |
+ (dev->has_sliced_vbi_cap ? V4L2_CAP_SLICED_VBI_CAPTURE : 0);
+ dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->vbi_cap_caps |= V4L2_CAP_AUDIO;
+ if (in_type_counter[TV])
+ dev->vbi_cap_caps |= V4L2_CAP_TUNER;
+ }
+ if (dev->has_vbi_out) {
+ /* set up the capabilities of the vbi output device */
+ dev->vbi_out_caps = (dev->has_raw_vbi_out ? V4L2_CAP_VBI_OUTPUT : 0) |
+ (dev->has_sliced_vbi_out ? V4L2_CAP_SLICED_VBI_OUTPUT : 0);
+ dev->vbi_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->vbi_out_caps |= V4L2_CAP_AUDIO;
+ }
+ if (dev->has_sdr_cap) {
+ /* set up the capabilities of the sdr capture device */
+ dev->sdr_cap_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER;
+ dev->sdr_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ }
+ /* set up the capabilities of the radio receiver device */
+ if (dev->has_radio_rx)
+ dev->radio_rx_caps = V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE |
+ V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
+ V4L2_CAP_READWRITE;
+ /* set up the capabilities of the radio transmitter device */
+ if (dev->has_radio_tx)
+ dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
+ V4L2_CAP_READWRITE;
+
+ ret = -ENOMEM;
+ /* initialize the test pattern generator */
+ tpg_init(&dev->tpg, 640, 360);
+ if (tpg_alloc(&dev->tpg, MAX_ZOOM * MAX_WIDTH))
+ goto free_dev;
+ dev->scaled_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM));
+ if (!dev->scaled_line)
+ goto free_dev;
+ dev->blended_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM));
+ if (!dev->blended_line)
+ goto free_dev;
+
+ /* allocate the EDID buffer: room for a maximum of 256 blocks of 128 bytes each */
+ dev->edid = vmalloc(256 * 128);
+ if (!dev->edid)
+ goto free_dev;
+
+ /* create a string array containing the names of all the preset timings */
+ while (v4l2_dv_timings_presets[dev->query_dv_timings_size].bt.width)
+ dev->query_dv_timings_size++;
+ dev->query_dv_timings_qmenu = kmalloc_array(dev->query_dv_timings_size,
+ (sizeof(void *) + 32),
+ GFP_KERNEL);
+ if (dev->query_dv_timings_qmenu == NULL)
+ goto free_dev;
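+ /*
+ * The qmenu is a single allocation: an array of query_dv_timings_size
+ * char pointers followed by one 32-byte name buffer per entry. The loop
+ * below points each pointer at its buffer and fills in the
+ * "<width>x<height><i|p><fps>" name.
+ */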
+ for (i = 0; i < dev->query_dv_timings_size; i++) {
+ const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
+ char *p = (char *)&dev->query_dv_timings_qmenu[dev->query_dv_timings_size];
+ u32 htot, vtot;
+
+ p += i * 32;
+ dev->query_dv_timings_qmenu[i] = p;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ snprintf(p, 32, "%ux%u%s%u",
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ (u32)bt->pixelclock / (htot * vtot));
+ }
+
+ /* disable invalid ioctls based on the feature set */
+ if (!dev->has_audio_inputs) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMAUDIO);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO);
+ }
+ if (!dev->has_audio_outputs) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMAUDOUT);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT);
+ }
+ if (!in_type_counter[TV] && !in_type_counter[SVID]) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_STD);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMSTD);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERYSTD);
+ }
+ if (!out_type_counter[SVID]) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_STD);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMSTD);
+ }
+ if (!has_tuner && !has_modulator) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY);
+ }
+ if (!has_tuner) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER);
+ }
+ if (in_type_counter[HDMI] == 0) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_EDID);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_DV_TIMINGS_CAP);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUM_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERY_DV_TIMINGS);
+ }
+ if (out_type_counter[HDMI] == 0) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_EDID);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_DV_TIMINGS_CAP);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_DV_TIMINGS);
+ }
+ if (!dev->has_fb) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FBUF);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FBUF);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_OVERLAY);
+ }
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
+
+ /* configure internal data */
+ dev->fmt_cap = &vivid_formats[0];
+ dev->fmt_out = &vivid_formats[0];
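+ /* data_offset is only meaningful for the multiplanar API, so clear it for singleplanar devices */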
+ if (!dev->multiplanar)
+ vivid_formats[0].data_offset[0] = 0;
+ dev->webcam_size_idx = 1;
+ dev->webcam_ival_idx = 3;
+ tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
+ dev->std_cap = V4L2_STD_PAL;
+ dev->std_out = V4L2_STD_PAL;
+ if (dev->input_type[0] == TV || dev->input_type[0] == SVID)
+ tvnorms_cap = V4L2_STD_ALL;
+ if (dev->output_type[0] == SVID)
+ tvnorms_out = V4L2_STD_ALL;
+ dev->dv_timings_cap = def_dv_timings;
+ dev->dv_timings_out = def_dv_timings;
+ dev->tv_freq = 2804 /* 175.25 * 16 */;
+ dev->tv_audmode = V4L2_TUNER_MODE_STEREO;
+ dev->tv_field_cap = V4L2_FIELD_INTERLACED;
+ dev->tv_field_out = V4L2_FIELD_INTERLACED;
+ dev->radio_rx_freq = 95000 * 16;
+ dev->radio_rx_audmode = V4L2_TUNER_MODE_STEREO;
+ if (dev->has_radio_tx) {
+ dev->radio_tx_freq = 95500 * 16;
+ dev->radio_rds_loop = false;
+ }
+ dev->radio_tx_subchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_RDS;
+ dev->sdr_adc_freq = 300000;
+ dev->sdr_fm_freq = 50000000;
+ dev->sdr_pixelformat = V4L2_SDR_FMT_CU8;
+ dev->sdr_buffersize = SDR_CAP_SAMPLES_PER_BUF * 2;
+
+ dev->edid_max_blocks = dev->edid_blocks = 2;
+ memcpy(dev->edid, vivid_hdmi_edid, sizeof(vivid_hdmi_edid));
+ dev->radio_rds_init_time = ktime_get();
+
+ /* create all controls */
+ ret = vivid_create_controls(dev, ccs_cap == -1, ccs_out == -1, no_error_inj,
+ in_type_counter[TV] || in_type_counter[SVID] ||
+ out_type_counter[SVID],
+ in_type_counter[HDMI] || out_type_counter[HDMI]);
+ if (ret)
+ goto unreg_dev;
+
+ /*
+ * update the capture and output formats to do a proper initial
+ * configuration.
+ */
+ vivid_update_format_cap(dev, false);
+ vivid_update_format_out(dev);
+
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+
+ /* initialize overlay */
+ dev->fb_cap.fmt.width = dev->src_rect.width;
+ dev->fb_cap.fmt.height = dev->src_rect.height;
+ dev->fb_cap.fmt.pixelformat = dev->fmt_cap->fourcc;
+ dev->fb_cap.fmt.bytesperline = dev->src_rect.width * tpg_g_twopixelsize(&dev->tpg, 0) / 2;
+ dev->fb_cap.fmt.sizeimage = dev->src_rect.height * dev->fb_cap.fmt.bytesperline;
+
+ /* initialize locks */
+ spin_lock_init(&dev->slock);
+ mutex_init(&dev->mutex);
+
+ /* init dma queues */
+ INIT_LIST_HEAD(&dev->vid_cap_active);
+ INIT_LIST_HEAD(&dev->vid_out_active);
+ INIT_LIST_HEAD(&dev->vbi_cap_active);
+ INIT_LIST_HEAD(&dev->vbi_out_active);
+ INIT_LIST_HEAD(&dev->sdr_cap_active);
+
+ INIT_LIST_HEAD(&dev->cec_work_list);
+ spin_lock_init(&dev->cec_slock);
+ /*
+ * Equivalent to create_singlethread_workqueue(), but
+ * alloc_ordered_workqueue() allows printf-style formatting of the
+ * workqueue name.
+ */
+ dev->cec_workqueue =
+ alloc_ordered_workqueue("vivid-%03d-cec", WQ_MEM_RECLAIM, inst);
+ if (!dev->cec_workqueue) {
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
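+ /*
+ * Select the vb2 memory ops used for all queues below. Allocator 1
+ * needs a coherent 32-bit DMA mask on the platform device (it
+ * presumably maps to a DMA-based allocator); out-of-range values fall
+ * back to allocator 0.
+ */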
+ if (allocator == 1)
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ else if (allocator >= ARRAY_SIZE(vivid_mem_ops))
+ allocator = 0;
+
+ /* start creating the vb2 queues */
+ if (dev->has_vid_cap) {
+ /* initialize vid_cap queue */
+ q = &dev->vb_vid_cap_q;
+ q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vid_cap_qops;
+ q->mem_ops = vivid_mem_ops[allocator];
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_vid_out) {
+ /* initialize vid_out queue */
+ q = &dev->vb_vid_out_q;
+ q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vid_out_qops;
+ q->mem_ops = vivid_mem_ops[allocator];
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_vbi_cap) {
+ /* initialize vbi_cap queue */
+ q = &dev->vb_vbi_cap_q;
+ q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
+ V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vbi_cap_qops;
+ q->mem_ops = vivid_mem_ops[allocator];
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_vbi_out) {
+ /* initialize vbi_out queue */
+ q = &dev->vb_vbi_out_q;
+ q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
+ V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vbi_out_qops;
+ q->mem_ops = vivid_mem_ops[allocator];
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_sdr_cap) {
+ /* initialize sdr_cap queue */
+ q = &dev->vb_sdr_cap_q;
+ q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_sdr_cap_qops;
+ q->mem_ops = vivid_mem_ops[allocator];
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 8;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_fb) {
+ /* Create framebuffer for testing capture/output overlay */
+ ret = vivid_fb_init(dev);
+ if (ret)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
+ dev->fb_info.node);
+ }
+
+ /* finally start creating the device nodes */
+ if (dev->has_vid_cap) {
+ vfd = &dev->vid_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vid_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vid_cap_q;
+ vfd->tvnorms = tvnorms_cap;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ if (in_type_counter[HDMI]) {
+ struct cec_adapter *adap;
+
+ adap = vivid_cec_alloc_adap(dev, 0, false);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0)
+ goto unreg_dev;
+ dev->cec_rx_adap = adap;
+ ret = cec_register_adapter(adap, &pdev->dev);
+ if (ret < 0) {
+ cec_delete_adapter(adap);
+ dev->cec_rx_adap = NULL;
+ goto unreg_dev;
+ }
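+ /* the capture side models an HDMI sink, so its CEC adapter is the root device (0.0.0.0) */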
+ cec_s_phys_addr(adap, 0, false);
+ v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input 0\n",
+ dev_name(&adap->devnode.dev));
+ }
+#endif
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_vid_out) {
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ unsigned int bus_cnt = 0;
+#endif
+
+ vfd = &dev->vid_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-out", inst);
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vid_out_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vid_out_q;
+ vfd->tvnorms = tvnorms_out;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ for (i = 0; i < dev->num_outputs; i++) {
+ struct cec_adapter *adap;
+
+ if (dev->output_type[i] != HDMI)
+ continue;
+ dev->cec_output2bus_map[i] = bus_cnt;
+ adap = vivid_cec_alloc_adap(dev, bus_cnt, true);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0)
+ goto unreg_dev;
+ dev->cec_tx_adap[bus_cnt] = adap;
+ ret = cec_register_adapter(adap, &pdev->dev);
+ if (ret < 0) {
+ cec_delete_adapter(adap);
+ dev->cec_tx_adap[bus_cnt] = NULL;
+ goto unreg_dev;
+ }
+ v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n",
+ dev_name(&adap->devnode.dev), bus_cnt);
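+ /*
+ * Assign physical address <bus_cnt>.0.0.0 to this adapter:
+ * 1.0.0.0 for the first HDMI output, 2.0.0.0 for the second,
+ * and so on.
+ */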
+ bus_cnt++;
+ if (bus_cnt <= out_type_counter[HDMI])
+ cec_s_phys_addr(adap, bus_cnt << 12, false);
+ else
+ cec_s_phys_addr(adap, 0x1000, false);
+ }
+#endif
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_vbi_cap) {
+ vfd = &dev->vbi_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vbi-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vbi_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vbi_cap_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_cap;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s, supports %s VBI\n",
+ video_device_node_name(vfd),
+ (dev->has_raw_vbi_cap && dev->has_sliced_vbi_cap) ?
+ "raw and sliced" :
+ (dev->has_raw_vbi_cap ? "raw" : "sliced"));
+ }
+
+ if (dev->has_vbi_out) {
+ vfd = &dev->vbi_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vbi-out", inst);
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vbi_out_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vbi_out_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_out;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s, supports %s VBI\n",
+ video_device_node_name(vfd),
+ (dev->has_raw_vbi_out && dev->has_sliced_vbi_out) ?
+ "raw and sliced" :
+ (dev->has_raw_vbi_out ? "raw" : "sliced"));
+ }
+
+ if (dev->has_sdr_cap) {
+ vfd = &dev->sdr_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-sdr-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->sdr_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_sdr_cap_q;
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_radio_rx) {
+ vfd = &dev->radio_rx_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-rad-rx", inst);
+ vfd->fops = &vivid_radio_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->radio_rx_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_rx_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 receiver device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_radio_tx) {
+ vfd = &dev->radio_tx_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-rad-tx", inst);
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_radio_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->radio_tx_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_tx_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 transmitter device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ /* Now that everything is fine, let's add it to the device list */
+ vivid_devs[inst] = dev;
+
+ return 0;
+
+unreg_dev:
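+ /*
+ * video_unregister_device() and cec_unregister_adapter() are no-ops
+ * for devices that were never registered, so the error path can simply
+ * unwind everything unconditionally.
+ */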
+ video_unregister_device(&dev->radio_tx_dev);
+ video_unregister_device(&dev->radio_rx_dev);
+ video_unregister_device(&dev->sdr_cap_dev);
+ video_unregister_device(&dev->vbi_out_dev);
+ video_unregister_device(&dev->vbi_cap_dev);
+ video_unregister_device(&dev->vid_out_dev);
+ video_unregister_device(&dev->vid_cap_dev);
+ cec_unregister_adapter(dev->cec_rx_adap);
+ for (i = 0; i < MAX_OUTPUTS; i++)
+ cec_unregister_adapter(dev->cec_tx_adap[i]);
+ if (dev->cec_workqueue) {
+ vivid_cec_bus_free_work(dev);
+ destroy_workqueue(dev->cec_workqueue);
+ }
+free_dev:
+ v4l2_device_put(&dev->v4l2_dev);
+ return ret;
+}
+
+/* This routine allocates from 1 to n_devs virtual device instances.
+
+   The actual number of instances depends on how many calls to
+   vivid_create_instance() succeed. It is further limited by the maximum
+   number of devices that videodev supports, i.e. VIDEO_NUM_DEVICES.
+ */
+static int vivid_probe(struct platform_device *pdev)
+{
+ const struct font_desc *font = find_font("VGA8x16");
+ int ret = 0, i;
+
+ if (font == NULL) {
+ pr_err("vivid: could not find font\n");
+ return -ENODEV;
+ }
+
+ tpg_set_font(font->data);
+
+ n_devs = clamp_t(unsigned, n_devs, 1, VIVID_MAX_DEVS);
+
+ for (i = 0; i < n_devs; i++) {
+ ret = vivid_create_instance(pdev, i);
+ if (ret) {
+ /* If some instantiations succeeded, keep driver */
+ if (i)
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret < 0) {
+ pr_err("vivid: error %d while loading driver\n", ret);
+ return ret;
+ }
+
+ /* n_devs will reflect the actual number of allocated devices */
+ n_devs = i;
+
+ return ret;
+}
+
+static int vivid_remove(struct platform_device *pdev)
+{
+ struct vivid_dev *dev;
+ unsigned int i, j;
+
+ for (i = 0; i < n_devs; i++) {
+ dev = vivid_devs[i];
+ if (!dev)
+ continue;
+
+ if (dev->has_vid_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vid_cap_dev));
+ video_unregister_device(&dev->vid_cap_dev);
+ }
+ if (dev->has_vid_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vid_out_dev));
+ video_unregister_device(&dev->vid_out_dev);
+ }
+ if (dev->has_vbi_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vbi_cap_dev));
+ video_unregister_device(&dev->vbi_cap_dev);
+ }
+ if (dev->has_vbi_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vbi_out_dev));
+ video_unregister_device(&dev->vbi_out_dev);
+ }
+ if (dev->has_sdr_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->sdr_cap_dev));
+ video_unregister_device(&dev->sdr_cap_dev);
+ }
+ if (dev->has_radio_rx) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->radio_rx_dev));
+ video_unregister_device(&dev->radio_rx_dev);
+ }
+ if (dev->has_radio_tx) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->radio_tx_dev));
+ video_unregister_device(&dev->radio_tx_dev);
+ }
+ if (dev->has_fb) {
+ v4l2_info(&dev->v4l2_dev, "unregistering fb%d\n",
+ dev->fb_info.node);
+ unregister_framebuffer(&dev->fb_info);
+ vivid_fb_release_buffers(dev);
+ }
+ cec_unregister_adapter(dev->cec_rx_adap);
+ for (j = 0; j < MAX_OUTPUTS; j++)
+ cec_unregister_adapter(dev->cec_tx_adap[j]);
+ if (dev->cec_workqueue) {
+ vivid_cec_bus_free_work(dev);
+ destroy_workqueue(dev->cec_workqueue);
+ }
+ v4l2_device_put(&dev->v4l2_dev);
+ vivid_devs[i] = NULL;
+ }
+ return 0;
+}
+
+static void vivid_pdev_release(struct device *dev)
+{
+}
+
+static struct platform_device vivid_pdev = {
+ .name = "vivid",
+ .dev.release = vivid_pdev_release,
+};
+
+static struct platform_driver vivid_pdrv = {
+ .probe = vivid_probe,
+ .remove = vivid_remove,
+ .driver = {
+ .name = "vivid",
+ },
+};
+
+static int __init vivid_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vivid_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&vivid_pdrv);
+ if (ret)
+ platform_device_unregister(&vivid_pdev);
+
+ return ret;
+}
+
+static void __exit vivid_exit(void)
+{
+ platform_driver_unregister(&vivid_pdrv);
+ platform_device_unregister(&vivid_pdev);
+}
+
+module_init(vivid_init);
+module_exit(vivid_exit);
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
new file mode 100644
index 000000000..cd4c82305
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -0,0 +1,554 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-core.h - core data structures
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_CORE_H_
+#define _VIVID_CORE_H_
+
+#include <linux/fb.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ctrls.h>
+#include <media/tpg/v4l2-tpg.h>
+#include "vivid-rds-gen.h"
+#include "vivid-vbi-gen.h"
+
+#define dprintk(dev, level, fmt, arg...) \
+ v4l2_dbg(level, vivid_debug, &dev->v4l2_dev, fmt, ## arg)
+
+/* Maximum allowed frame rate
+ *
+ * vivid allows setting the time-per-frame anywhere in the range
+ * [1/FPS_MAX, FPS_MAX/1].
+ *
+ * Ideally FPS_MAX would be unbounded (practically UINT_MAX), but such large
+ * values risk triggering errors in applications that manipulate them.
+ *
+ * Besides, for a time-per-frame below 10 ms the image-generation logic would
+ * have to change to avoid producing successive frames with identical content.
+ */
+#define FPS_MAX 100
+
+/* The maximum number of clip rectangles */
+#define MAX_CLIPS 16
+/* The maximum number of inputs */
+#define MAX_INPUTS 16
+/* The maximum number of outputs */
+#define MAX_OUTPUTS 16
+/* The maximum up or down scaling factor is 4 */
+#define MAX_ZOOM 4
+/* The maximum image width/height are set to 4K DMT */
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 2160
+/* The minimum image width/height */
+#define MIN_WIDTH 16
+#define MIN_HEIGHT 16
+/* The data_offset of plane 0 for the multiplanar formats */
+#define PLANE0_DATA_OFFSET 128
+
+/* The supported TV frequency range in MHz */
+#define MIN_TV_FREQ (44U * 16U)
+#define MAX_TV_FREQ (958U * 16U)
+
+/* The number of samples returned in every SDR buffer */
+#define SDR_CAP_SAMPLES_PER_BUF 0x4000
+
+/* used by the threads to know when to resync internal counters */
+#define JIFFIES_PER_DAY (3600U * 24U * HZ)
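+/* the largest multiple of JIFFIES_PER_DAY that does not exceed 0xf0000000 */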
+#define JIFFIES_RESYNC (JIFFIES_PER_DAY * (0xf0000000U / JIFFIES_PER_DAY))
+
+extern const struct v4l2_rect vivid_min_rect;
+extern const struct v4l2_rect vivid_max_rect;
+extern unsigned vivid_debug;
+
+struct vivid_fmt {
+ u32 fourcc; /* v4l2 format id */
+ enum tgp_color_enc color_enc;
+ bool can_do_overlay;
+ u8 vdownsampling[TPG_MAX_PLANES];
+ u32 alpha_mask;
+ u8 planes;
+ u8 buffers;
+ u32 data_offset[TPG_MAX_PLANES];
+ u32 bit_depth[TPG_MAX_PLANES];
+};
+
+extern struct vivid_fmt vivid_formats[];
+
+/* buffer for one video frame */
+struct vivid_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+enum vivid_input {
+ WEBCAM,
+ TV,
+ SVID,
+ HDMI,
+};
+
+enum vivid_signal_mode {
+ CURRENT_DV_TIMINGS,
+ CURRENT_STD = CURRENT_DV_TIMINGS,
+ NO_SIGNAL,
+ NO_LOCK,
+ OUT_OF_RANGE,
+ SELECTED_DV_TIMINGS,
+ SELECTED_STD = SELECTED_DV_TIMINGS,
+ CYCLE_DV_TIMINGS,
+ CYCLE_STD = CYCLE_DV_TIMINGS,
+ CUSTOM_DV_TIMINGS,
+};
+
+enum vivid_colorspace {
+ VIVID_CS_170M,
+ VIVID_CS_709,
+ VIVID_CS_SRGB,
+ VIVID_CS_OPRGB,
+ VIVID_CS_2020,
+ VIVID_CS_DCI_P3,
+ VIVID_CS_240M,
+ VIVID_CS_SYS_M,
+ VIVID_CS_SYS_BG,
+};
+
+#define VIVID_INVALID_SIGNAL(mode) \
+ ((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
+
+struct vivid_cec_work {
+ struct list_head list;
+ struct delayed_work work;
+ struct cec_adapter *adap;
+ struct vivid_dev *dev;
+ unsigned int usecs;
+ unsigned int timeout_ms;
+ u8 tx_status;
+ struct cec_msg msg;
+};
+
+struct vivid_dev {
+ unsigned inst;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_user_gen;
+ struct v4l2_ctrl_handler ctrl_hdl_user_vid;
+ struct v4l2_ctrl_handler ctrl_hdl_user_aud;
+ struct v4l2_ctrl_handler ctrl_hdl_streaming;
+ struct v4l2_ctrl_handler ctrl_hdl_sdtv_cap;
+ struct v4l2_ctrl_handler ctrl_hdl_loop_cap;
+ struct v4l2_ctrl_handler ctrl_hdl_fb;
+ struct video_device vid_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vid_cap;
+ struct video_device vid_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vid_out;
+ struct video_device vbi_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vbi_cap;
+ struct video_device vbi_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vbi_out;
+ struct video_device radio_rx_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_radio_rx;
+ struct video_device radio_tx_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_radio_tx;
+ struct video_device sdr_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_sdr_cap;
+ spinlock_t slock;
+ struct mutex mutex;
+
+ /* capabilities */
+ u32 vid_cap_caps;
+ u32 vid_out_caps;
+ u32 vbi_cap_caps;
+ u32 vbi_out_caps;
+ u32 sdr_cap_caps;
+ u32 radio_rx_caps;
+ u32 radio_tx_caps;
+
+ /* supported features */
+ bool multiplanar;
+ unsigned num_inputs;
+ u8 input_type[MAX_INPUTS];
+ u8 input_name_counter[MAX_INPUTS];
+ unsigned num_outputs;
+ u8 output_type[MAX_OUTPUTS];
+ u8 output_name_counter[MAX_OUTPUTS];
+ bool has_audio_inputs;
+ bool has_audio_outputs;
+ bool has_vid_cap;
+ bool has_vid_out;
+ bool has_vbi_cap;
+ bool has_raw_vbi_cap;
+ bool has_sliced_vbi_cap;
+ bool has_vbi_out;
+ bool has_raw_vbi_out;
+ bool has_sliced_vbi_out;
+ bool has_radio_rx;
+ bool has_radio_tx;
+ bool has_sdr_cap;
+ bool has_fb;
+
+ bool can_loop_video;
+
+ /* controls */
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ struct {
+ /* autogain/gain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *mute;
+ struct v4l2_ctrl *alpha;
+ struct v4l2_ctrl *button;
+ struct v4l2_ctrl *boolean;
+ struct v4l2_ctrl *int32;
+ struct v4l2_ctrl *int64;
+ struct v4l2_ctrl *menu;
+ struct v4l2_ctrl *string;
+ struct v4l2_ctrl *bitmask;
+ struct v4l2_ctrl *int_menu;
+ struct v4l2_ctrl *test_pattern;
+ struct v4l2_ctrl *colorspace;
+ struct v4l2_ctrl *rgb_range_cap;
+ struct v4l2_ctrl *real_rgb_range_cap;
+ struct {
+ /* std_signal_mode/standard cluster */
+ struct v4l2_ctrl *ctrl_std_signal_mode;
+ struct v4l2_ctrl *ctrl_standard;
+ };
+ struct {
+ /* dv_timings_signal_mode/timings cluster */
+ struct v4l2_ctrl *ctrl_dv_timings_signal_mode;
+ struct v4l2_ctrl *ctrl_dv_timings;
+ };
+ struct v4l2_ctrl *ctrl_has_crop_cap;
+ struct v4l2_ctrl *ctrl_has_compose_cap;
+ struct v4l2_ctrl *ctrl_has_scaler_cap;
+ struct v4l2_ctrl *ctrl_has_crop_out;
+ struct v4l2_ctrl *ctrl_has_compose_out;
+ struct v4l2_ctrl *ctrl_has_scaler_out;
+ struct v4l2_ctrl *ctrl_tx_mode;
+ struct v4l2_ctrl *ctrl_tx_rgb_range;
+
+ struct v4l2_ctrl *radio_tx_rds_pi;
+ struct v4l2_ctrl *radio_tx_rds_pty;
+ struct v4l2_ctrl *radio_tx_rds_mono_stereo;
+ struct v4l2_ctrl *radio_tx_rds_art_head;
+ struct v4l2_ctrl *radio_tx_rds_compressed;
+ struct v4l2_ctrl *radio_tx_rds_dyn_pty;
+ struct v4l2_ctrl *radio_tx_rds_ta;
+ struct v4l2_ctrl *radio_tx_rds_tp;
+ struct v4l2_ctrl *radio_tx_rds_ms;
+ struct v4l2_ctrl *radio_tx_rds_psname;
+ struct v4l2_ctrl *radio_tx_rds_radiotext;
+
+ struct v4l2_ctrl *radio_rx_rds_pty;
+ struct v4l2_ctrl *radio_rx_rds_ta;
+ struct v4l2_ctrl *radio_rx_rds_tp;
+ struct v4l2_ctrl *radio_rx_rds_ms;
+ struct v4l2_ctrl *radio_rx_rds_psname;
+ struct v4l2_ctrl *radio_rx_rds_radiotext;
+
+ unsigned input_brightness[MAX_INPUTS];
+ unsigned osd_mode;
+ unsigned button_pressed;
+ bool sensor_hflip;
+ bool sensor_vflip;
+ bool hflip;
+ bool vflip;
+ bool vbi_cap_interlaced;
+ bool loop_video;
+ bool reduced_fps;
+
+ /* Framebuffer */
+ unsigned long video_pbase;
+ void *video_vbase;
+ u32 video_buffer_size;
+ int display_width;
+ int display_height;
+ int display_byte_stride;
+ int bits_per_pixel;
+ int bytes_per_pixel;
+ struct fb_info fb_info;
+ struct fb_var_screeninfo fb_defined;
+ struct fb_fix_screeninfo fb_fix;
+
+ /* Error injection */
+ bool queue_setup_error;
+ bool buf_prepare_error;
+ bool start_streaming_error;
+ bool dqbuf_error;
+ bool seq_wrap;
+ bool time_wrap;
+ u64 time_wrap_offset;
+ unsigned perc_dropped_buffers;
+ enum vivid_signal_mode std_signal_mode;
+ unsigned query_std_last;
+ v4l2_std_id query_std;
+ enum tpg_video_aspect std_aspect_ratio;
+
+ enum vivid_signal_mode dv_timings_signal_mode;
+ char **query_dv_timings_qmenu;
+ unsigned query_dv_timings_size;
+ unsigned query_dv_timings_last;
+ unsigned query_dv_timings;
+ enum tpg_video_aspect dv_timings_aspect_ratio;
+
+ /* Input */
+ unsigned input;
+ v4l2_std_id std_cap;
+ struct v4l2_dv_timings dv_timings_cap;
+ u32 service_set_cap;
+ struct vivid_vbi_gen_data vbi_gen;
+ u8 *edid;
+ unsigned edid_blocks;
+ unsigned edid_max_blocks;
+ unsigned webcam_size_idx;
+ unsigned webcam_ival_idx;
+ unsigned tv_freq;
+ unsigned tv_audmode;
+ unsigned tv_field_cap;
+ unsigned tv_audio_input;
+
+ /* Capture Overlay */
+ struct v4l2_framebuffer fb_cap;
+ struct v4l2_fh *overlay_cap_owner;
+ void *fb_vbase_cap;
+ int overlay_cap_top, overlay_cap_left;
+ enum v4l2_field overlay_cap_field;
+ void *bitmap_cap;
+ struct v4l2_clip clips_cap[MAX_CLIPS];
+ struct v4l2_clip try_clips_cap[MAX_CLIPS];
+ unsigned clipcount_cap;
+
+ /* Output */
+ unsigned output;
+ v4l2_std_id std_out;
+ struct v4l2_dv_timings dv_timings_out;
+ u32 colorspace_out;
+ u32 ycbcr_enc_out;
+ u32 hsv_enc_out;
+ u32 quantization_out;
+ u32 xfer_func_out;
+ u32 service_set_out;
+ unsigned bytesperline_out[TPG_MAX_PLANES];
+ unsigned tv_field_out;
+ unsigned tv_audio_output;
+ bool vbi_out_have_wss;
+ u8 vbi_out_wss[2];
+ bool vbi_out_have_cc[2];
+ u8 vbi_out_cc[2][2];
+ bool dvi_d_out;
+ u8 *scaled_line;
+ u8 *blended_line;
+ unsigned cur_scaled_line;
+
+ /* Output Overlay */
+ void *fb_vbase_out;
+ bool overlay_out_enabled;
+ int overlay_out_top, overlay_out_left;
+ void *bitmap_out;
+ struct v4l2_clip clips_out[MAX_CLIPS];
+ struct v4l2_clip try_clips_out[MAX_CLIPS];
+ unsigned clipcount_out;
+ unsigned fbuf_out_flags;
+ u32 chromakey_out;
+ u8 global_alpha_out;
+
+ /* video capture */
+ struct tpg_data tpg;
+ unsigned ms_vid_cap;
+ bool must_blank[VIDEO_MAX_FRAME];
+
+ const struct vivid_fmt *fmt_cap;
+ struct v4l2_fract timeperframe_vid_cap;
+ enum v4l2_field field_cap;
+ struct v4l2_rect src_rect;
+ struct v4l2_rect fmt_cap_rect;
+ struct v4l2_rect crop_cap;
+ struct v4l2_rect compose_cap;
+ struct v4l2_rect crop_bounds_cap;
+ struct vb2_queue vb_vid_cap_q;
+ struct list_head vid_cap_active;
+ struct vb2_queue vb_vbi_cap_q;
+ struct list_head vbi_cap_active;
+
+ /* thread for generating video capture stream */
+ struct task_struct *kthread_vid_cap;
+ unsigned long jiffies_vid_cap;
+ u32 cap_seq_offset;
+ u32 cap_seq_count;
+ bool cap_seq_resync;
+ u32 vid_cap_seq_start;
+ u32 vid_cap_seq_count;
+ bool vid_cap_streaming;
+ u32 vbi_cap_seq_start;
+ u32 vbi_cap_seq_count;
+ bool vbi_cap_streaming;
+ bool stream_sliced_vbi_cap;
+
+ /* video output */
+ const struct vivid_fmt *fmt_out;
+ struct v4l2_fract timeperframe_vid_out;
+ enum v4l2_field field_out;
+ struct v4l2_rect sink_rect;
+ struct v4l2_rect fmt_out_rect;
+ struct v4l2_rect crop_out;
+ struct v4l2_rect compose_out;
+ struct v4l2_rect compose_bounds_out;
+ struct vb2_queue vb_vid_out_q;
+ struct list_head vid_out_active;
+ struct vb2_queue vb_vbi_out_q;
+ struct list_head vbi_out_active;
+
+ /* video loop precalculated rectangles */
+
+ /*
+ * Intersection between what the output side composes and the capture side
+ * crops. I.e., what actually needs to be copied from the output buffer to
+ * the capture buffer.
+ */
+ struct v4l2_rect loop_vid_copy;
+ /* The part of the output buffer that (after scaling) corresponds to loop_vid_copy. */
+ struct v4l2_rect loop_vid_out;
+ /* The part of the capture buffer that (after scaling) corresponds to loop_vid_copy. */
+ struct v4l2_rect loop_vid_cap;
+ /*
+ * The intersection of the framebuffer, the overlay output window and
+ * loop_vid_copy. I.e., the part of the framebuffer that actually should be
+ * blended with the compose_out rectangle. This uses the framebuffer origin.
+ */
+ struct v4l2_rect loop_fb_copy;
+ /* The same as loop_fb_copy but with compose_out origin. */
+ struct v4l2_rect loop_vid_overlay;
+ /*
+ * The part of the capture buffer that (after scaling) corresponds
+ * to loop_vid_overlay.
+ */
+ struct v4l2_rect loop_vid_overlay_cap;
+
+ /* thread for generating video output stream */
+ struct task_struct *kthread_vid_out;
+ unsigned long jiffies_vid_out;
+ u32 out_seq_offset;
+ u32 out_seq_count;
+ bool out_seq_resync;
+ u32 vid_out_seq_start;
+ u32 vid_out_seq_count;
+ bool vid_out_streaming;
+ u32 vbi_out_seq_start;
+ u32 vbi_out_seq_count;
+ bool vbi_out_streaming;
+ bool stream_sliced_vbi_out;
+
+ /* SDR capture */
+ struct vb2_queue vb_sdr_cap_q;
+ struct list_head sdr_cap_active;
+ u32 sdr_pixelformat; /* v4l2 format id */
+ unsigned sdr_buffersize;
+ unsigned sdr_adc_freq;
+ unsigned sdr_fm_freq;
+ unsigned sdr_fm_deviation;
+ int sdr_fixp_src_phase;
+ int sdr_fixp_mod_phase;
+
+ bool tstamp_src_is_soe;
+ bool has_crop_cap;
+ bool has_compose_cap;
+ bool has_scaler_cap;
+ bool has_crop_out;
+ bool has_compose_out;
+ bool has_scaler_out;
+
+ /* thread for generating SDR stream */
+ struct task_struct *kthread_sdr_cap;
+ unsigned long jiffies_sdr_cap;
+ u32 sdr_cap_seq_offset;
+ u32 sdr_cap_seq_count;
+ bool sdr_cap_seq_resync;
+
+ /* RDS generator */
+ struct vivid_rds_gen rds_gen;
+
+ /* Radio receiver */
+ unsigned radio_rx_freq;
+ unsigned radio_rx_audmode;
+ int radio_rx_sig_qual;
+ unsigned radio_rx_hw_seek_mode;
+ bool radio_rx_hw_seek_prog_lim;
+ bool radio_rx_rds_controls;
+ bool radio_rx_rds_enabled;
+ unsigned radio_rx_rds_use_alternates;
+ unsigned radio_rx_rds_last_block;
+ struct v4l2_fh *radio_rx_rds_owner;
+
+ /* Radio transmitter */
+ unsigned radio_tx_freq;
+ unsigned radio_tx_subchans;
+ bool radio_tx_rds_controls;
+ unsigned radio_tx_rds_last_block;
+ struct v4l2_fh *radio_tx_rds_owner;
+
+ /* Shared between radio receiver and transmitter */
+ bool radio_rds_loop;
+ ktime_t radio_rds_init_time;
+
+ /* CEC */
+ struct cec_adapter *cec_rx_adap;
+ struct cec_adapter *cec_tx_adap[MAX_OUTPUTS];
+ struct workqueue_struct *cec_workqueue;
+ spinlock_t cec_slock;
+ struct list_head cec_work_list;
+ unsigned int cec_xfer_time_jiffies;
+ unsigned long cec_xfer_start_jiffies;
+ u8 cec_output2bus_map[MAX_OUTPUTS];
+
+ /* CEC OSD String */
+ char osd[14];
+ unsigned long osd_jiffies;
+};
+
+static inline bool vivid_is_webcam(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == WEBCAM;
+}
+
+static inline bool vivid_is_tv_cap(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == TV;
+}
+
+static inline bool vivid_is_svid_cap(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == SVID;
+}
+
+static inline bool vivid_is_hdmi_cap(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == HDMI;
+}
+
+static inline bool vivid_is_sdtv_cap(const struct vivid_dev *dev)
+{
+ return vivid_is_tv_cap(dev) || vivid_is_svid_cap(dev);
+}
+
+static inline bool vivid_is_svid_out(const struct vivid_dev *dev)
+{
+ return dev->output_type[dev->output] == SVID;
+}
+
+static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
+{
+ return dev->output_type[dev->output] == HDMI;
+}
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
new file mode 100644
index 000000000..999aa101b
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -0,0 +1,1741 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-ctrls.c - control support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-vid-common.h"
+#include "vivid-radio-common.h"
+#include "vivid-osd.h"
+#include "vivid-ctrls.h"
+
+#define VIVID_CID_CUSTOM_BASE (V4L2_CID_USER_BASE | 0xf000)
+#define VIVID_CID_BUTTON (VIVID_CID_CUSTOM_BASE + 0)
+#define VIVID_CID_BOOLEAN (VIVID_CID_CUSTOM_BASE + 1)
+#define VIVID_CID_INTEGER (VIVID_CID_CUSTOM_BASE + 2)
+#define VIVID_CID_INTEGER64 (VIVID_CID_CUSTOM_BASE + 3)
+#define VIVID_CID_MENU (VIVID_CID_CUSTOM_BASE + 4)
+#define VIVID_CID_STRING (VIVID_CID_CUSTOM_BASE + 5)
+#define VIVID_CID_BITMASK (VIVID_CID_CUSTOM_BASE + 6)
+#define VIVID_CID_INTMENU (VIVID_CID_CUSTOM_BASE + 7)
+#define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8)
+#define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9)
+#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
+
+#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
+#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
+#define VIVID_CID_TEST_PATTERN (VIVID_CID_VIVID_BASE + 0)
+#define VIVID_CID_OSD_TEXT_MODE (VIVID_CID_VIVID_BASE + 1)
+#define VIVID_CID_HOR_MOVEMENT (VIVID_CID_VIVID_BASE + 2)
+#define VIVID_CID_VERT_MOVEMENT (VIVID_CID_VIVID_BASE + 3)
+#define VIVID_CID_SHOW_BORDER (VIVID_CID_VIVID_BASE + 4)
+#define VIVID_CID_SHOW_SQUARE (VIVID_CID_VIVID_BASE + 5)
+#define VIVID_CID_INSERT_SAV (VIVID_CID_VIVID_BASE + 6)
+#define VIVID_CID_INSERT_EAV (VIVID_CID_VIVID_BASE + 7)
+#define VIVID_CID_VBI_CAP_INTERLACED (VIVID_CID_VIVID_BASE + 8)
+
+#define VIVID_CID_HFLIP (VIVID_CID_VIVID_BASE + 20)
+#define VIVID_CID_VFLIP (VIVID_CID_VIVID_BASE + 21)
+#define VIVID_CID_STD_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 22)
+#define VIVID_CID_DV_TIMINGS_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 23)
+#define VIVID_CID_TSTAMP_SRC (VIVID_CID_VIVID_BASE + 24)
+#define VIVID_CID_COLORSPACE (VIVID_CID_VIVID_BASE + 25)
+#define VIVID_CID_XFER_FUNC (VIVID_CID_VIVID_BASE + 26)
+#define VIVID_CID_YCBCR_ENC (VIVID_CID_VIVID_BASE + 27)
+#define VIVID_CID_QUANTIZATION (VIVID_CID_VIVID_BASE + 28)
+#define VIVID_CID_LIMITED_RGB_RANGE (VIVID_CID_VIVID_BASE + 29)
+#define VIVID_CID_ALPHA_MODE (VIVID_CID_VIVID_BASE + 30)
+#define VIVID_CID_HAS_CROP_CAP (VIVID_CID_VIVID_BASE + 31)
+#define VIVID_CID_HAS_COMPOSE_CAP (VIVID_CID_VIVID_BASE + 32)
+#define VIVID_CID_HAS_SCALER_CAP (VIVID_CID_VIVID_BASE + 33)
+#define VIVID_CID_HAS_CROP_OUT (VIVID_CID_VIVID_BASE + 34)
+#define VIVID_CID_HAS_COMPOSE_OUT (VIVID_CID_VIVID_BASE + 35)
+#define VIVID_CID_HAS_SCALER_OUT (VIVID_CID_VIVID_BASE + 36)
+#define VIVID_CID_LOOP_VIDEO (VIVID_CID_VIVID_BASE + 37)
+#define VIVID_CID_SEQ_WRAP (VIVID_CID_VIVID_BASE + 38)
+#define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 39)
+#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 40)
+#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 41)
+#define VIVID_CID_REDUCED_FPS (VIVID_CID_VIVID_BASE + 42)
+#define VIVID_CID_HSV_ENC (VIVID_CID_VIVID_BASE + 43)
+
+#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60)
+#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61)
+#define VIVID_CID_DV_TIMINGS_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 62)
+#define VIVID_CID_DV_TIMINGS (VIVID_CID_VIVID_BASE + 63)
+#define VIVID_CID_PERC_DROPPED (VIVID_CID_VIVID_BASE + 64)
+#define VIVID_CID_DISCONNECT (VIVID_CID_VIVID_BASE + 65)
+#define VIVID_CID_DQBUF_ERROR (VIVID_CID_VIVID_BASE + 66)
+#define VIVID_CID_QUEUE_SETUP_ERROR (VIVID_CID_VIVID_BASE + 67)
+#define VIVID_CID_BUF_PREPARE_ERROR (VIVID_CID_VIVID_BASE + 68)
+#define VIVID_CID_START_STR_ERROR (VIVID_CID_VIVID_BASE + 69)
+#define VIVID_CID_QUEUE_ERROR (VIVID_CID_VIVID_BASE + 70)
+#define VIVID_CID_CLEAR_FB (VIVID_CID_VIVID_BASE + 71)
+
+#define VIVID_CID_RADIO_SEEK_MODE (VIVID_CID_VIVID_BASE + 90)
+#define VIVID_CID_RADIO_SEEK_PROG_LIM (VIVID_CID_VIVID_BASE + 91)
+#define VIVID_CID_RADIO_RX_RDS_RBDS (VIVID_CID_VIVID_BASE + 92)
+#define VIVID_CID_RADIO_RX_RDS_BLOCKIO (VIVID_CID_VIVID_BASE + 93)
+
+#define VIVID_CID_RADIO_TX_RDS_BLOCKIO (VIVID_CID_VIVID_BASE + 94)
+
+#define VIVID_CID_SDR_CAP_FM_DEVIATION (VIVID_CID_VIVID_BASE + 110)
+
+/* General User Controls */
+
+static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_gen);
+
+ switch (ctrl->id) {
+ case VIVID_CID_DISCONNECT:
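+ /*
+ * Simulate a disconnect: clearing V4L2_FL_REGISTERED makes the V4L2
+ * core treat these nodes as unregistered, so further file operations
+ * on them fail as if the hardware was unplugged.
+ */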
+ v4l2_info(&dev->v4l2_dev, "disconnect\n");
+ clear_bit(V4L2_FL_REGISTERED, &dev->vid_cap_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->vid_out_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->vbi_cap_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->vbi_out_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ break;
+ case VIVID_CID_BUTTON:
+ dev->button_pressed = 30;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_user_gen_ctrl_ops = {
+ .s_ctrl = vivid_user_gen_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_button = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_BUTTON,
+ .name = "Button",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_boolean = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_BOOLEAN,
+ .name = "Boolean",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_int32 = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_INTEGER,
+ .name = "Integer 32 Bits",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0xffffffff80000000ULL,
+ .max = 0x7fffffff,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_int64 = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_INTEGER64,
+ .name = "Integer 64 Bits",
+ .type = V4L2_CTRL_TYPE_INTEGER64,
+ .min = 0x8000000000000000ULL,
+ .max = 0x7fffffffffffffffLL,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_u32_array = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U32_ARRAY,
+ .name = "U32 1 Element Array",
+ .type = V4L2_CTRL_TYPE_U32,
+ .def = 0x18,
+ .min = 0x10,
+ .max = 0x20000,
+ .step = 1,
+ .dims = { 1 },
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_u16_matrix = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U16_MATRIX,
+ .name = "U16 8x16 Matrix",
+ .type = V4L2_CTRL_TYPE_U16,
+ .def = 0x18,
+ .min = 0x10,
+ .max = 0x2000,
+ .step = 1,
+ .dims = { 8, 16 },
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_u8_4d_array = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U8_4D_ARRAY,
+ .name = "U8 2x3x4x5 Array",
+ .type = V4L2_CTRL_TYPE_U8,
+ .def = 0x18,
+ .min = 0x10,
+ .max = 0x20,
+ .step = 1,
+ .dims = { 2, 3, 4, 5 },
+};
+
+static const char * const vivid_ctrl_menu_strings[] = {
+ "Menu Item 0 (Skipped)",
+ "Menu Item 1",
+ "Menu Item 2 (Skipped)",
+ "Menu Item 3",
+ "Menu Item 4",
+ "Menu Item 5 (Skipped)",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_menu = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_MENU,
+ .name = "Menu",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .max = 4,
+ .def = 3,
+ .menu_skip_mask = 0x04,
+ .qmenu = vivid_ctrl_menu_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_string = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_STRING,
+ .name = "String",
+ .type = V4L2_CTRL_TYPE_STRING,
+ .min = 2,
+ .max = 4,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_bitmask = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_BITMASK,
+ .name = "Bitmask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .def = 0x80002000,
+ .min = 0,
+ .max = 0x80402010,
+ .step = 0,
+};
+
+static const s64 vivid_ctrl_int_menu_values[] = {
+ 1, 1, 2, 3, 5, 8, 13, 21, 42,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_int_menu = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_INTMENU,
+ .name = "Integer Menu",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 1,
+ .max = 8,
+ .def = 4,
+ .menu_skip_mask = 0x02,
+ .qmenu_int = vivid_ctrl_int_menu_values,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_disconnect = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_DISCONNECT,
+ .name = "Disconnect",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+
+/* Framebuffer Controls */
+
+static int vivid_fb_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler,
+ struct vivid_dev, ctrl_hdl_fb);
+
+ switch (ctrl->id) {
+ case VIVID_CID_CLEAR_FB:
+ vivid_clear_fb(dev);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_fb_ctrl_ops = {
+ .s_ctrl = vivid_fb_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_clear_fb = {
+ .ops = &vivid_fb_ctrl_ops,
+ .id = VIVID_CID_CLEAR_FB,
+ .name = "Clear Framebuffer",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+
+/* Video User Controls */
+
+static int vivid_user_vid_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_vid);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
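+ /* report a simulated gain that changes once per second, wrapping at 255 */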
+ dev->gain->val = (jiffies_to_msecs(jiffies) / 1000) & 0xff;
+ break;
+ }
+ return 0;
+}
+
+static int vivid_user_vid_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_vid);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
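+ /*
+ * Each input keeps its own brightness; the control value is presumably
+ * offset by 128 per input elsewhere, so remove that offset before
+ * programming the TPG.
+ */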
+ dev->input_brightness[dev->input] = ctrl->val - dev->input * 128;
+ tpg_s_brightness(&dev->tpg, dev->input_brightness[dev->input]);
+ break;
+ case V4L2_CID_CONTRAST:
+ tpg_s_contrast(&dev->tpg, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ tpg_s_saturation(&dev->tpg, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ tpg_s_hue(&dev->tpg, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev->hflip = ctrl->val;
+ tpg_s_hflip(&dev->tpg, dev->sensor_hflip ^ dev->hflip);
+ break;
+ case V4L2_CID_VFLIP:
+ dev->vflip = ctrl->val;
+ tpg_s_vflip(&dev->tpg, dev->sensor_vflip ^ dev->vflip);
+ break;
+ case V4L2_CID_ALPHA_COMPONENT:
+ tpg_s_alpha_component(&dev->tpg, ctrl->val);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_user_vid_ctrl_ops = {
+ .g_volatile_ctrl = vivid_user_vid_g_volatile_ctrl,
+ .s_ctrl = vivid_user_vid_s_ctrl,
+};
+
+
+/* Video Capture Controls */
+
+static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ static const u32 colorspaces[] = {
+ V4L2_COLORSPACE_SMPTE170M,
+ V4L2_COLORSPACE_REC709,
+ V4L2_COLORSPACE_SRGB,
+ V4L2_COLORSPACE_OPRGB,
+ V4L2_COLORSPACE_BT2020,
+ V4L2_COLORSPACE_DCI_P3,
+ V4L2_COLORSPACE_SMPTE240M,
+ V4L2_COLORSPACE_470_SYSTEM_M,
+ V4L2_COLORSPACE_470_SYSTEM_BG,
+ };
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_cap);
+ unsigned i;
+
+ switch (ctrl->id) {
+ case VIVID_CID_TEST_PATTERN:
+ vivid_update_quality(dev);
+ tpg_s_pattern(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_COLORSPACE:
+ tpg_s_colorspace(&dev->tpg, colorspaces[ctrl->val]);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case VIVID_CID_XFER_FUNC:
+ tpg_s_xfer_func(&dev->tpg, ctrl->val);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case VIVID_CID_YCBCR_ENC:
+ tpg_s_ycbcr_enc(&dev->tpg, ctrl->val);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case VIVID_CID_HSV_ENC:
+ tpg_s_hsv_enc(&dev->tpg, ctrl->val ? V4L2_HSV_ENC_256 :
+ V4L2_HSV_ENC_180);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case VIVID_CID_QUANTIZATION:
+ tpg_s_quantization(&dev->tpg, ctrl->val);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ if (!vivid_is_hdmi_cap(dev))
+ break;
+ tpg_s_rgb_range(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_LIMITED_RGB_RANGE:
+ tpg_s_real_rgb_range(&dev->tpg, ctrl->val ?
+ V4L2_DV_RGB_RANGE_LIMITED : V4L2_DV_RGB_RANGE_FULL);
+ break;
+ case VIVID_CID_ALPHA_MODE:
+ tpg_s_alpha_mode(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_HOR_MOVEMENT:
+ tpg_s_mv_hor_mode(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_VERT_MOVEMENT:
+ tpg_s_mv_vert_mode(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_OSD_TEXT_MODE:
+ dev->osd_mode = ctrl->val;
+ break;
+ case VIVID_CID_PERCENTAGE_FILL:
+ tpg_s_perc_fill(&dev->tpg, ctrl->val);
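+ /* if less than 100% of the frame is filled, buffers must be blanked first to avoid stale data showing through */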
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ dev->must_blank[i] = ctrl->val < 100;
+ break;
+ case VIVID_CID_INSERT_SAV:
+ tpg_s_insert_sav(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_INSERT_EAV:
+ tpg_s_insert_eav(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_HFLIP:
+ dev->sensor_hflip = ctrl->val;
+ tpg_s_hflip(&dev->tpg, dev->sensor_hflip ^ dev->hflip);
+ break;
+ case VIVID_CID_VFLIP:
+ dev->sensor_vflip = ctrl->val;
+ tpg_s_vflip(&dev->tpg, dev->sensor_vflip ^ dev->vflip);
+ break;
+ case VIVID_CID_REDUCED_FPS:
+ dev->reduced_fps = ctrl->val;
+ vivid_update_format_cap(dev, true);
+ break;
+ case VIVID_CID_HAS_CROP_CAP:
+ dev->has_crop_cap = ctrl->val;
+ vivid_update_format_cap(dev, true);
+ break;
+ case VIVID_CID_HAS_COMPOSE_CAP:
+ dev->has_compose_cap = ctrl->val;
+ vivid_update_format_cap(dev, true);
+ break;
+ case VIVID_CID_HAS_SCALER_CAP:
+ dev->has_scaler_cap = ctrl->val;
+ vivid_update_format_cap(dev, true);
+ break;
+ case VIVID_CID_SHOW_BORDER:
+ tpg_s_show_border(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_SHOW_SQUARE:
+ tpg_s_show_square(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_STD_ASPECT_RATIO:
+ dev->std_aspect_ratio = ctrl->val;
+ tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
+ break;
+ case VIVID_CID_DV_TIMINGS_SIGNAL_MODE:
+ dev->dv_timings_signal_mode = dev->ctrl_dv_timings_signal_mode->val;
+ if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS)
+ dev->query_dv_timings = dev->ctrl_dv_timings->val;
+ v4l2_ctrl_activate(dev->ctrl_dv_timings,
+ dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS);
+ vivid_update_quality(dev);
+ vivid_send_source_change(dev, HDMI);
+ break;
+ case VIVID_CID_DV_TIMINGS_ASPECT_RATIO:
+ dev->dv_timings_aspect_ratio = ctrl->val;
+ tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
+ break;
+ case VIVID_CID_TSTAMP_SRC:
+ dev->tstamp_src_is_soe = ctrl->val;
+ dev->vb_vid_cap_q.timestamp_flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ if (dev->tstamp_src_is_soe)
+ dev->vb_vid_cap_q.timestamp_flags |= V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
+ break;
+ case VIVID_CID_MAX_EDID_BLOCKS:
+ dev->edid_max_blocks = ctrl->val;
+ if (dev->edid_blocks > dev->edid_max_blocks)
+ dev->edid_blocks = dev->edid_max_blocks;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_vid_cap_ctrl_ops = {
+ .s_ctrl = vivid_vid_cap_s_ctrl,
+};
+
+static const char * const vivid_ctrl_hor_movement_strings[] = {
+ "Move Left Fast",
+ "Move Left",
+ "Move Left Slow",
+ "No Movement",
+ "Move Right Slow",
+ "Move Right",
+ "Move Right Fast",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hor_movement = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HOR_MOVEMENT,
+ .name = "Horizontal Movement",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = TPG_MOVE_POS_FAST,
+ .def = TPG_MOVE_NONE,
+ .qmenu = vivid_ctrl_hor_movement_strings,
+};
+
+static const char * const vivid_ctrl_vert_movement_strings[] = {
+ "Move Up Fast",
+ "Move Up",
+ "Move Up Slow",
+ "No Movement",
+ "Move Down Slow",
+ "Move Down",
+ "Move Down Fast",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_vert_movement = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_VERT_MOVEMENT,
+ .name = "Vertical Movement",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = TPG_MOVE_POS_FAST,
+ .def = TPG_MOVE_NONE,
+ .qmenu = vivid_ctrl_vert_movement_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_show_border = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_SHOW_BORDER,
+ .name = "Show Border",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_show_square = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_SHOW_SQUARE,
+ .name = "Show Square",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_osd_mode_strings[] = {
+ "All",
+ "Counters Only",
+ "None",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_osd_mode = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_OSD_TEXT_MODE,
+ .name = "OSD Text Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_osd_mode_strings) - 2,
+ .qmenu = vivid_ctrl_osd_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_perc_fill = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_PERCENTAGE_FILL,
+ .name = "Fill Percentage of Frame",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .def = 100,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_insert_sav = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_INSERT_SAV,
+ .name = "Insert SAV Code in Image",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_insert_eav = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_INSERT_EAV,
+ .name = "Insert EAV Code in Image",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hflip = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HFLIP,
+ .name = "Sensor Flipped Horizontally",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_vflip = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_VFLIP,
+ .name = "Sensor Flipped Vertically",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_reduced_fps = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_REDUCED_FPS,
+ .name = "Reduced Framerate",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_crop_cap = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HAS_CROP_CAP,
+ .name = "Enable Capture Cropping",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_compose_cap = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HAS_COMPOSE_CAP,
+ .name = "Enable Capture Composing",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_cap = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HAS_SCALER_CAP,
+ .name = "Enable Capture Scaler",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_tstamp_src_strings[] = {
+ "End of Frame",
+ "Start of Exposure",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_tstamp_src = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_TSTAMP_SRC,
+ .name = "Timestamp Source",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_tstamp_src_strings) - 2,
+ .qmenu = vivid_ctrl_tstamp_src_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_std_aspect_ratio = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_STD_ASPECT_RATIO,
+ .name = "Standard Aspect Ratio",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .max = 4,
+ .def = 1,
+ .qmenu = tpg_aspect_strings,
+};
+
+static const char * const vivid_ctrl_dv_timings_signal_mode_strings[] = {
+ "Current DV Timings",
+ "No Signal",
+ "No Lock",
+ "Out of Range",
+ "Selected DV Timings",
+ "Cycle Through All DV Timings",
+ "Custom DV Timings",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_dv_timings_signal_mode = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_DV_TIMINGS_SIGNAL_MODE,
+ .name = "DV Timings Signal Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 5,
+ .qmenu = vivid_ctrl_dv_timings_signal_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_dv_timings_aspect_ratio = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_DV_TIMINGS_ASPECT_RATIO,
+ .name = "DV Timings Aspect Ratio",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 3,
+ .qmenu = tpg_aspect_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_max_edid_blocks = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_MAX_EDID_BLOCKS,
+ .name = "Maximum EDID Blocks",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 256,
+ .def = 2,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_colorspace_strings[] = {
+ "SMPTE 170M",
+ "Rec. 709",
+ "sRGB",
+ "opRGB",
+ "BT.2020",
+ "DCI-P3",
+ "SMPTE 240M",
+ "470 System M",
+ "470 System BG",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_colorspace = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_COLORSPACE,
+ .name = "Colorspace",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_colorspace_strings) - 2,
+ .def = 2,
+ .qmenu = vivid_ctrl_colorspace_strings,
+};
+
+static const char * const vivid_ctrl_xfer_func_strings[] = {
+ "Default",
+ "Rec. 709",
+ "sRGB",
+ "opRGB",
+ "SMPTE 240M",
+ "None",
+ "DCI-P3",
+ "SMPTE 2084",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_xfer_func = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_XFER_FUNC,
+ .name = "Transfer Function",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_xfer_func_strings) - 2,
+ .qmenu = vivid_ctrl_xfer_func_strings,
+};
+
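+/*
+ * Entry 5 below is intentionally left empty; it is hidden from userspace
+ * via the menu_skip_mask of the Y'CbCr Encoding control that uses this
+ * table.
+ */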
+static const char * const vivid_ctrl_ycbcr_enc_strings[] = {
+ "Default",
+ "ITU-R 601",
+ "Rec. 709",
+ "xvYCC 601",
+ "xvYCC 709",
+ "",
+ "BT.2020",
+ "BT.2020 Constant Luminance",
+ "SMPTE 240M",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_YCBCR_ENC,
+ .name = "Y'CbCr Encoding",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .menu_skip_mask = 1 << 5,
+ .max = ARRAY_SIZE(vivid_ctrl_ycbcr_enc_strings) - 2,
+ .qmenu = vivid_ctrl_ycbcr_enc_strings,
+};
+
+static const char * const vivid_ctrl_hsv_enc_strings[] = {
+ "Hue 0-179",
+ "Hue 0-256",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hsv_enc = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HSV_ENC,
+ .name = "HSV Encoding",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_hsv_enc_strings) - 2,
+ .qmenu = vivid_ctrl_hsv_enc_strings,
+};
+
+static const char * const vivid_ctrl_quantization_strings[] = {
+ "Default",
+ "Full Range",
+ "Limited Range",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_quantization = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_QUANTIZATION,
+ .name = "Quantization",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_quantization_strings) - 2,
+ .qmenu = vivid_ctrl_quantization_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_alpha_mode = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_ALPHA_MODE,
+ .name = "Apply Alpha To Red Only",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_limited_rgb_range = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_LIMITED_RGB_RANGE,
+ .name = "Limited RGB Range (16-235)",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* Video Loop Control */
+
+static int vivid_loop_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_loop_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_LOOP_VIDEO:
+ dev->loop_video = ctrl->val;
+ vivid_update_quality(dev);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_loop_cap_ctrl_ops = {
+ .s_ctrl = vivid_loop_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_loop_video = {
+ .ops = &vivid_loop_cap_ctrl_ops,
+ .id = VIVID_CID_LOOP_VIDEO,
+ .name = "Loop Video",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* VBI Capture Control */
+
+static int vivid_vbi_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vbi_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_VBI_CAP_INTERLACED:
+ dev->vbi_cap_interlaced = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_vbi_cap_ctrl_ops = {
+ .s_ctrl = vivid_vbi_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_vbi_cap_interlaced = {
+ .ops = &vivid_vbi_cap_ctrl_ops,
+ .id = VIVID_CID_VBI_CAP_INTERLACED,
+ .name = "Interlaced VBI Format",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* Video Output Controls */
+
+static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_out);
+ struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
+
+ switch (ctrl->id) {
+ case VIVID_CID_HAS_CROP_OUT:
+ dev->has_crop_out = ctrl->val;
+ vivid_update_format_out(dev);
+ break;
+ case VIVID_CID_HAS_COMPOSE_OUT:
+ dev->has_compose_out = ctrl->val;
+ vivid_update_format_out(dev);
+ break;
+ case VIVID_CID_HAS_SCALER_OUT:
+ dev->has_scaler_out = ctrl->val;
+ vivid_update_format_out(dev);
+ break;
+ case V4L2_CID_DV_TX_MODE:
+ dev->dvi_d_out = ctrl->val == V4L2_DV_TX_MODE_DVI_D;
+ if (!vivid_is_hdmi_out(dev))
+ break;
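+		/*
+		 * For HDMI with CE video timings pick SMPTE 170M (SD) or
+		 * Rec. 709 (HD); for DVI-D or non-CE timings fall back to
+		 * sRGB.
+		 */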
+ if (!dev->dvi_d_out && (bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) {
+ if (bt->width == 720 && bt->height <= 576)
+ dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
+ else
+ dev->colorspace_out = V4L2_COLORSPACE_REC709;
+ dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
+ } else {
+ dev->colorspace_out = V4L2_COLORSPACE_SRGB;
+ dev->quantization_out = dev->dvi_d_out ?
+ V4L2_QUANTIZATION_LIM_RANGE :
+ V4L2_QUANTIZATION_DEFAULT;
+ }
+ if (dev->loop_video)
+ vivid_send_source_change(dev, HDMI);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_vid_out_ctrl_ops = {
+ .s_ctrl = vivid_vid_out_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_crop_out = {
+ .ops = &vivid_vid_out_ctrl_ops,
+ .id = VIVID_CID_HAS_CROP_OUT,
+ .name = "Enable Output Cropping",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_compose_out = {
+ .ops = &vivid_vid_out_ctrl_ops,
+ .id = VIVID_CID_HAS_COMPOSE_OUT,
+ .name = "Enable Output Composing",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_out = {
+ .ops = &vivid_vid_out_ctrl_ops,
+ .id = VIVID_CID_HAS_SCALER_OUT,
+ .name = "Enable Output Scaler",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+
+/* Streaming Controls */
+
+static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_streaming);
+ u64 rem;
+
+ switch (ctrl->id) {
+ case VIVID_CID_DQBUF_ERROR:
+ dev->dqbuf_error = true;
+ break;
+ case VIVID_CID_PERC_DROPPED:
+ dev->perc_dropped_buffers = ctrl->val;
+ break;
+ case VIVID_CID_QUEUE_SETUP_ERROR:
+ dev->queue_setup_error = true;
+ break;
+ case VIVID_CID_BUF_PREPARE_ERROR:
+ dev->buf_prepare_error = true;
+ break;
+ case VIVID_CID_START_STR_ERROR:
+ dev->start_streaming_error = true;
+ break;
+ case VIVID_CID_QUEUE_ERROR:
+ if (vb2_start_streaming_called(&dev->vb_vid_cap_q))
+ vb2_queue_error(&dev->vb_vid_cap_q);
+ if (vb2_start_streaming_called(&dev->vb_vbi_cap_q))
+ vb2_queue_error(&dev->vb_vbi_cap_q);
+ if (vb2_start_streaming_called(&dev->vb_vid_out_q))
+ vb2_queue_error(&dev->vb_vid_out_q);
+ if (vb2_start_streaming_called(&dev->vb_vbi_out_q))
+ vb2_queue_error(&dev->vb_vbi_out_q);
+ if (vb2_start_streaming_called(&dev->vb_sdr_cap_q))
+ vb2_queue_error(&dev->vb_sdr_cap_q);
+ break;
+ case VIVID_CID_SEQ_WRAP:
+ dev->seq_wrap = ctrl->val;
+ break;
+ case VIVID_CID_TIME_WRAP:
+ dev->time_wrap = ctrl->val;
+ if (ctrl->val == 0) {
+ dev->time_wrap_offset = 0;
+ break;
+ }
+ /*
+ * We want to set the time 16 seconds before the 32 bit tv_sec
+ * value of struct timeval would wrap around. So first we
+ * calculate ktime_get_ns() % ((1 << 32) * NSEC_PER_SEC), and
+	 * then we set the offset to ((1 << 32) - 16) * NSEC_PER_SEC.
+ */
+ div64_u64_rem(ktime_get_ns(),
+ 0x100000000ULL * NSEC_PER_SEC, &rem);
+ dev->time_wrap_offset =
+ (0x100000000ULL - 16) * NSEC_PER_SEC - rem;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_streaming_ctrl_ops = {
+ .s_ctrl = vivid_streaming_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_dqbuf_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_DQBUF_ERROR,
+ .name = "Inject V4L2_BUF_FLAG_ERROR",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_perc_dropped = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_PERC_DROPPED,
+ .name = "Percentage of Dropped Buffers",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_queue_setup_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_QUEUE_SETUP_ERROR,
+ .name = "Inject VIDIOC_REQBUFS Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_buf_prepare_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_BUF_PREPARE_ERROR,
+ .name = "Inject VIDIOC_QBUF Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_start_streaming_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_START_STR_ERROR,
+ .name = "Inject VIDIOC_STREAMON Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_queue_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_QUEUE_ERROR,
+ .name = "Inject Fatal Streaming Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_seq_wrap = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_SEQ_WRAP,
+ .name = "Wrap Sequence Number",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_time_wrap = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_TIME_WRAP,
+ .name = "Wrap Timestamp",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* SDTV Capture Controls */
+
+static int vivid_sdtv_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_sdtv_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_STD_SIGNAL_MODE:
+ dev->std_signal_mode = dev->ctrl_std_signal_mode->val;
+ if (dev->std_signal_mode == SELECTED_STD)
+ dev->query_std = vivid_standard[dev->ctrl_standard->val];
+ v4l2_ctrl_activate(dev->ctrl_standard, dev->std_signal_mode == SELECTED_STD);
+ vivid_update_quality(dev);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_sdtv_cap_ctrl_ops = {
+ .s_ctrl = vivid_sdtv_cap_s_ctrl,
+};
+
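+/*
+ * Entry 3 below is a placeholder; it is hidden from userspace via the
+ * menu_skip_mask of the Standard Signal Mode control.
+ */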
+static const char * const vivid_ctrl_std_signal_mode_strings[] = {
+ "Current Standard",
+ "No Signal",
+ "No Lock",
+ "",
+ "Selected Standard",
+ "Cycle Through All Standards",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_std_signal_mode = {
+ .ops = &vivid_sdtv_cap_ctrl_ops,
+ .id = VIVID_CID_STD_SIGNAL_MODE,
+ .name = "Standard Signal Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_std_signal_mode_strings) - 2,
+ .menu_skip_mask = 1 << 3,
+ .qmenu = vivid_ctrl_std_signal_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_standard = {
+ .ops = &vivid_sdtv_cap_ctrl_ops,
+ .id = VIVID_CID_STANDARD,
+ .name = "Standard",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 14,
+ .qmenu = vivid_ctrl_standard_strings,
+};
+
+
+
+/* Radio Receiver Controls */
+
+static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_radio_rx);
+
+ switch (ctrl->id) {
+ case VIVID_CID_RADIO_SEEK_MODE:
+ dev->radio_rx_hw_seek_mode = ctrl->val;
+ break;
+ case VIVID_CID_RADIO_SEEK_PROG_LIM:
+ dev->radio_rx_hw_seek_prog_lim = ctrl->val;
+ break;
+ case VIVID_CID_RADIO_RX_RDS_RBDS:
+ dev->rds_gen.use_rbds = ctrl->val;
+ break;
+ case VIVID_CID_RADIO_RX_RDS_BLOCKIO:
+ dev->radio_rx_rds_controls = ctrl->val;
+ dev->radio_rx_caps &= ~V4L2_CAP_READWRITE;
+ dev->radio_rx_rds_use_alternates = false;
+ if (!dev->radio_rx_rds_controls) {
+ dev->radio_rx_caps |= V4L2_CAP_READWRITE;
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, 0);
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, 0);
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, 0);
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, 0);
+ __v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, "");
+ __v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, "");
+ }
+ v4l2_ctrl_activate(dev->radio_rx_rds_pty, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_psname, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_radiotext, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls);
+ dev->radio_rx_dev.device_caps = dev->radio_rx_caps;
+ break;
+ case V4L2_CID_RDS_RECEPTION:
+ dev->radio_rx_rds_enabled = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_radio_rx_ctrl_ops = {
+ .s_ctrl = vivid_radio_rx_s_ctrl,
+};
+
+static const char * const vivid_ctrl_radio_rds_mode_strings[] = {
+ "Block I/O",
+ "Controls",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_rx_rds_blockio = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_RX_RDS_BLOCKIO,
+ .name = "RDS Rx I/O Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .qmenu = vivid_ctrl_radio_rds_mode_strings,
+ .max = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_rx_rds_rbds = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_RX_RDS_RBDS,
+ .name = "Generate RBDS Instead of RDS",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_radio_hw_seek_mode_strings[] = {
+ "Bounded",
+ "Wrap Around",
+ "Both",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_hw_seek_mode = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_SEEK_MODE,
+ .name = "Radio HW Seek Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 2,
+ .qmenu = vivid_ctrl_radio_hw_seek_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_hw_seek_prog_lim = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_SEEK_PROG_LIM,
+ .name = "Radio Programmable HW Seek",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* Radio Transmitter Controls */
+
+static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_radio_tx);
+
+ switch (ctrl->id) {
+ case VIVID_CID_RADIO_TX_RDS_BLOCKIO:
+ dev->radio_tx_rds_controls = ctrl->val;
+ dev->radio_tx_caps &= ~V4L2_CAP_READWRITE;
+ if (!dev->radio_tx_rds_controls)
+ dev->radio_tx_caps |= V4L2_CAP_READWRITE;
+ dev->radio_tx_dev.device_caps = dev->radio_tx_caps;
+ break;
+ case V4L2_CID_RDS_TX_PTY:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, ctrl->val);
+ break;
+ case V4L2_CID_RDS_TX_PS_NAME:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, ctrl->p_new.p_char);
+ break;
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, ctrl->p_new.p_char);
+ break;
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, ctrl->val);
+ break;
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, ctrl->val);
+ break;
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, ctrl->val);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_radio_tx_ctrl_ops = {
+ .s_ctrl = vivid_radio_tx_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_tx_rds_blockio = {
+ .ops = &vivid_radio_tx_ctrl_ops,
+ .id = VIVID_CID_RADIO_TX_RDS_BLOCKIO,
+ .name = "RDS Tx I/O Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .qmenu = vivid_ctrl_radio_rds_mode_strings,
+ .max = 1,
+ .def = 1,
+};
+
+
+/* SDR Capture Controls */
+
+static int vivid_sdr_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_sdr_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_SDR_CAP_FM_DEVIATION:
+ dev->sdr_fm_deviation = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_sdr_cap_ctrl_ops = {
+ .s_ctrl = vivid_sdr_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_sdr_cap_fm_deviation = {
+ .ops = &vivid_sdr_cap_ctrl_ops,
+ .id = VIVID_CID_SDR_CAP_FM_DEVIATION,
+ .name = "FM Deviation",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 100,
+ .max = 200000,
+ .def = 75000,
+ .step = 1,
+};
+
+
+static const struct v4l2_ctrl_config vivid_ctrl_class = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
+ .id = VIVID_CID_VIVID_CLASS,
+ .name = "Vivid Controls",
+ .type = V4L2_CTRL_TYPE_CTRL_CLASS,
+};
+
+int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
+ bool show_ccs_out, bool no_error_inj,
+ bool has_sdtv, bool has_hdmi)
+{
+ struct v4l2_ctrl_handler *hdl_user_gen = &dev->ctrl_hdl_user_gen;
+ struct v4l2_ctrl_handler *hdl_user_vid = &dev->ctrl_hdl_user_vid;
+ struct v4l2_ctrl_handler *hdl_user_aud = &dev->ctrl_hdl_user_aud;
+ struct v4l2_ctrl_handler *hdl_streaming = &dev->ctrl_hdl_streaming;
+ struct v4l2_ctrl_handler *hdl_sdtv_cap = &dev->ctrl_hdl_sdtv_cap;
+ struct v4l2_ctrl_handler *hdl_loop_cap = &dev->ctrl_hdl_loop_cap;
+ struct v4l2_ctrl_handler *hdl_fb = &dev->ctrl_hdl_fb;
+ struct v4l2_ctrl_handler *hdl_vid_cap = &dev->ctrl_hdl_vid_cap;
+ struct v4l2_ctrl_handler *hdl_vid_out = &dev->ctrl_hdl_vid_out;
+ struct v4l2_ctrl_handler *hdl_vbi_cap = &dev->ctrl_hdl_vbi_cap;
+ struct v4l2_ctrl_handler *hdl_vbi_out = &dev->ctrl_hdl_vbi_out;
+ struct v4l2_ctrl_handler *hdl_radio_rx = &dev->ctrl_hdl_radio_rx;
+ struct v4l2_ctrl_handler *hdl_radio_tx = &dev->ctrl_hdl_radio_tx;
+ struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
+ struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_DV_TIMINGS,
+ .name = "DV Timings",
+ .type = V4L2_CTRL_TYPE_MENU,
+ };
+ int i;
+
+ v4l2_ctrl_handler_init(hdl_user_gen, 10);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_user_vid, 9);
+ v4l2_ctrl_new_custom(hdl_user_vid, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_user_aud, 2);
+ v4l2_ctrl_new_custom(hdl_user_aud, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_streaming, 8);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_sdtv_cap, 2);
+ v4l2_ctrl_new_custom(hdl_sdtv_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_loop_cap, 1);
+ v4l2_ctrl_new_custom(hdl_loop_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_fb, 1);
+ v4l2_ctrl_new_custom(hdl_fb, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vid_cap, 55);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vid_out, 26);
+ if (!no_error_inj || dev->has_fb)
+ v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vbi_cap, 21);
+ v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vbi_out, 19);
+ if (!no_error_inj)
+ v4l2_ctrl_new_custom(hdl_vbi_out, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_radio_rx, 17);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_radio_tx, 17);
+ v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_sdr_cap, 19);
+ v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL);
+
+ /* User Controls */
+ dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
+ V4L2_CID_AUDIO_VOLUME, 0, 255, 1, 200);
+ dev->mute = v4l2_ctrl_new_std(hdl_user_aud, NULL,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ if (dev->has_vid_cap) {
+ dev->brightness = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ for (i = 0; i < MAX_INPUTS; i++)
+ dev->input_brightness[i] = 128;
+ dev->contrast = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 128);
+ dev->saturation = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ dev->hue = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_HUE, -128, 128, 1, 0);
+ v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ dev->autogain = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ dev->gain = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 100);
+ dev->alpha = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
+ }
+ dev->button = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_button, NULL);
+ dev->int32 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int32, NULL);
+ dev->int64 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int64, NULL);
+ dev->boolean = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_boolean, NULL);
+ dev->menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_menu, NULL);
+ dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
+ dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
+ dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
+
+ if (dev->has_vid_cap) {
+ /* Image Processing Controls */
+ struct v4l2_ctrl_config vivid_ctrl_test_pattern = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_TEST_PATTERN,
+ .name = "Test Pattern",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = TPG_PAT_NOISE,
+ .qmenu = tpg_pattern_strings,
+ };
+
+ dev->test_pattern = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_test_pattern, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_perc_fill, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hor_movement, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vert_movement, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_osd_mode, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_show_border, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_show_square, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hflip, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vflip, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_sav, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_eav, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_reduced_fps, NULL);
+ if (show_ccs_cap) {
+ dev->ctrl_has_crop_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_has_crop_cap, NULL);
+ dev->ctrl_has_compose_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_has_compose_cap, NULL);
+ dev->ctrl_has_scaler_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_has_scaler_cap, NULL);
+ }
+
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_tstamp_src, NULL);
+ dev->colorspace = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_colorspace, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_xfer_func, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_ycbcr_enc, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hsv_enc, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_quantization, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL);
+ }
+
+ if (dev->has_vid_out && show_ccs_out) {
+ dev->ctrl_has_crop_out = v4l2_ctrl_new_custom(hdl_vid_out,
+ &vivid_ctrl_has_crop_out, NULL);
+ dev->ctrl_has_compose_out = v4l2_ctrl_new_custom(hdl_vid_out,
+ &vivid_ctrl_has_compose_out, NULL);
+ dev->ctrl_has_scaler_out = v4l2_ctrl_new_custom(hdl_vid_out,
+ &vivid_ctrl_has_scaler_out, NULL);
+ }
+
+ /*
+ * Testing this driver with v4l2-compliance will trigger the error
+ * injection controls, and after that nothing will work as expected.
+	 * So we have a module option to drop these error-injecting controls,
+	 * allowing us to run v4l2-compliance again.
+ */
+ if (!no_error_inj) {
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_disconnect, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_dqbuf_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_perc_dropped, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_queue_setup_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_buf_prepare_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_start_streaming_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_queue_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_seq_wrap, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_time_wrap, NULL);
+ }
+
+ if (has_sdtv && (dev->has_vid_cap || dev->has_vbi_cap)) {
+ if (dev->has_vid_cap)
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_std_aspect_ratio, NULL);
+ dev->ctrl_std_signal_mode = v4l2_ctrl_new_custom(hdl_sdtv_cap,
+ &vivid_ctrl_std_signal_mode, NULL);
+ dev->ctrl_standard = v4l2_ctrl_new_custom(hdl_sdtv_cap,
+ &vivid_ctrl_standard, NULL);
+ if (dev->ctrl_std_signal_mode)
+ v4l2_ctrl_cluster(2, &dev->ctrl_std_signal_mode);
+ if (dev->has_raw_vbi_cap)
+ v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_vbi_cap_interlaced, NULL);
+ }
+
+ if (has_hdmi && dev->has_vid_cap) {
+ dev->ctrl_dv_timings_signal_mode = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_dv_timings_signal_mode, NULL);
+
+ vivid_ctrl_dv_timings.max = dev->query_dv_timings_size - 1;
+ vivid_ctrl_dv_timings.qmenu =
+ (const char * const *)dev->query_dv_timings_qmenu;
+ dev->ctrl_dv_timings = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_dv_timings, NULL);
+ if (dev->ctrl_dv_timings_signal_mode)
+ v4l2_ctrl_cluster(2, &dev->ctrl_dv_timings_signal_mode);
+
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_dv_timings_aspect_ratio, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_max_edid_blocks, NULL);
+ dev->real_rgb_range_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_limited_rgb_range, NULL);
+ dev->rgb_range_cap = v4l2_ctrl_new_std_menu(hdl_vid_cap,
+ &vivid_vid_cap_ctrl_ops,
+ V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+ 0, V4L2_DV_RGB_RANGE_AUTO);
+ }
+ if (has_hdmi && dev->has_vid_out) {
+ /*
+ * We aren't doing anything with this at the moment, but
+		 * HDMI outputs typically have these controls.
+ */
+ dev->ctrl_tx_rgb_range = v4l2_ctrl_new_std_menu(hdl_vid_out, NULL,
+ V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+ 0, V4L2_DV_RGB_RANGE_AUTO);
+ dev->ctrl_tx_mode = v4l2_ctrl_new_std_menu(hdl_vid_out, NULL,
+ V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
+ 0, V4L2_DV_TX_MODE_HDMI);
+ }
+ if ((dev->has_vid_cap && dev->has_vid_out) ||
+ (dev->has_vbi_cap && dev->has_vbi_out))
+ v4l2_ctrl_new_custom(hdl_loop_cap, &vivid_ctrl_loop_video, NULL);
+
+ if (dev->has_fb)
+ v4l2_ctrl_new_custom(hdl_fb, &vivid_ctrl_clear_fb, NULL);
+
+ if (dev->has_radio_rx) {
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_hw_seek_mode, NULL);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_hw_seek_prog_lim, NULL);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_rx_rds_blockio, NULL);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_rx_rds_rbds, NULL);
+ v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RECEPTION, 0, 1, 1, 1);
+ dev->radio_rx_rds_pty = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_PTY, 0, 31, 1, 0);
+ dev->radio_rx_rds_psname = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_PS_NAME, 0, 8, 8, 0);
+ dev->radio_rx_rds_radiotext = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_RADIO_TEXT, 0, 64, 64, 0);
+ dev->radio_rx_rds_ta = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT, 0, 1, 1, 0);
+ dev->radio_rx_rds_tp = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_TRAFFIC_PROGRAM, 0, 1, 1, 0);
+ dev->radio_rx_rds_ms = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_MUSIC_SPEECH, 0, 1, 1, 1);
+ }
+ if (dev->has_radio_tx) {
+ v4l2_ctrl_new_custom(hdl_radio_tx,
+ &vivid_ctrl_radio_tx_rds_blockio, NULL);
+ dev->radio_tx_rds_pi = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_PI, 0, 0xffff, 1, 0x8088);
+ dev->radio_tx_rds_pty = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_PTY, 0, 31, 1, 3);
+ dev->radio_tx_rds_psname = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_PS_NAME, 0, 8, 8, 0);
+ if (dev->radio_tx_rds_psname)
+ v4l2_ctrl_s_ctrl_string(dev->radio_tx_rds_psname, "VIVID-TX");
+ dev->radio_tx_rds_radiotext = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_RADIO_TEXT, 0, 64 * 2, 64, 0);
+ if (dev->radio_tx_rds_radiotext)
+ v4l2_ctrl_s_ctrl_string(dev->radio_tx_rds_radiotext,
+ "This is a VIVID default Radio Text template text, change at will");
+ dev->radio_tx_rds_mono_stereo = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_MONO_STEREO, 0, 1, 1, 1);
+ dev->radio_tx_rds_art_head = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_ARTIFICIAL_HEAD, 0, 1, 1, 0);
+ dev->radio_tx_rds_compressed = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_COMPRESSED, 0, 1, 1, 0);
+ dev->radio_tx_rds_dyn_pty = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_DYNAMIC_PTY, 0, 1, 1, 0);
+ dev->radio_tx_rds_ta = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT, 0, 1, 1, 0);
+ dev->radio_tx_rds_tp = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_TRAFFIC_PROGRAM, 0, 1, 1, 1);
+ dev->radio_tx_rds_ms = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_MUSIC_SPEECH, 0, 1, 1, 1);
+ }
+ if (dev->has_sdr_cap) {
+ v4l2_ctrl_new_custom(hdl_sdr_cap,
+ &vivid_ctrl_sdr_cap_fm_deviation, NULL);
+ }
+ if (hdl_user_gen->error)
+ return hdl_user_gen->error;
+ if (hdl_user_vid->error)
+ return hdl_user_vid->error;
+ if (hdl_user_aud->error)
+ return hdl_user_aud->error;
+ if (hdl_streaming->error)
+ return hdl_streaming->error;
+ if (hdl_sdr_cap->error)
+ return hdl_sdr_cap->error;
+ if (hdl_loop_cap->error)
+ return hdl_loop_cap->error;
+
+ if (dev->autogain)
+ v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
+
+ if (dev->has_vid_cap) {
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL);
+ if (hdl_vid_cap->error)
+ return hdl_vid_cap->error;
+ dev->vid_cap_dev.ctrl_handler = hdl_vid_cap;
+ }
+ if (dev->has_vid_out) {
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL);
+ if (hdl_vid_out->error)
+ return hdl_vid_out->error;
+ dev->vid_out_dev.ctrl_handler = hdl_vid_out;
+ }
+ if (dev->has_vbi_cap) {
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL);
+ if (hdl_vbi_cap->error)
+ return hdl_vbi_cap->error;
+ dev->vbi_cap_dev.ctrl_handler = hdl_vbi_cap;
+ }
+ if (dev->has_vbi_out) {
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL);
+ if (hdl_vbi_out->error)
+ return hdl_vbi_out->error;
+ dev->vbi_out_dev.ctrl_handler = hdl_vbi_out;
+ }
+ if (dev->has_radio_rx) {
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL);
+ if (hdl_radio_rx->error)
+ return hdl_radio_rx->error;
+ dev->radio_rx_dev.ctrl_handler = hdl_radio_rx;
+ }
+ if (dev->has_radio_tx) {
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL);
+ if (hdl_radio_tx->error)
+ return hdl_radio_tx->error;
+ dev->radio_tx_dev.ctrl_handler = hdl_radio_tx;
+ }
+ if (dev->has_sdr_cap) {
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL);
+ if (hdl_sdr_cap->error)
+ return hdl_sdr_cap->error;
+ dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
+ }
+ return 0;
+}
+
+void vivid_free_controls(struct vivid_dev *dev)
+{
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vid_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vbi_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_radio_rx);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_radio_tx);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_gen);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_vid);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_aud);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_streaming);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
+}
diff --git a/drivers/media/platform/vivid/vivid-ctrls.h b/drivers/media/platform/vivid/vivid-ctrls.h
new file mode 100644
index 000000000..6fad5f5d0
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-ctrls.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-ctrls.h - control support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_CTRLS_H_
+#define _VIVID_CTRLS_H_
+
+enum vivid_hw_seek_modes {
+ VIVID_HW_SEEK_BOUNDED,
+ VIVID_HW_SEEK_WRAP,
+ VIVID_HW_SEEK_BOTH,
+};
+
+int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
+ bool show_ccs_out, bool no_error_inj,
+ bool has_sdtv, bool has_hdmi);
+void vivid_free_controls(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
new file mode 100644
index 000000000..ac17883a0
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-kthread-cap.c - video/vbi capture thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/random.h>
+#include <linux/v4l2-dv-timings.h>
+#include <asm/div64.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-rect.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-rx.h"
+#include "vivid-radio-tx.h"
+#include "vivid-sdr-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-out.h"
+#include "vivid-osd.h"
+#include "vivid-ctrls.h"
+#include "vivid-kthread-cap.h"
+
+static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
+{
+ if (vivid_is_sdtv_cap(dev))
+ return dev->std_cap;
+ return 0;
+}
+
+static void copy_pix(struct vivid_dev *dev, int win_y, int win_x,
+ u16 *cap, const u16 *osd)
+{
+ u16 out;
+ int left = dev->overlay_out_left;
+ int top = dev->overlay_out_top;
+ int fb_x = win_x + left;
+ int fb_y = win_y + top;
+ int i;
+
+ out = *cap;
+ *cap = *osd;
+ if (dev->bitmap_out) {
+ const u8 *p = dev->bitmap_out;
+ unsigned stride = (dev->compose_out.width + 7) / 8;
+
+ win_x -= dev->compose_out.left;
+ win_y -= dev->compose_out.top;
+ if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
+ return;
+ }
+
+ for (i = 0; i < dev->clipcount_out; i++) {
+ struct v4l2_rect *r = &dev->clips_out[i].c;
+
+ if (fb_y >= r->top && fb_y < r->top + r->height &&
+ fb_x >= r->left && fb_x < r->left + r->width)
+ return;
+ }
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ *osd != dev->chromakey_out)
+ return;
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ out == dev->chromakey_out)
+ return;
+ if (dev->fmt_cap->alpha_mask) {
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) &&
+ dev->global_alpha_out)
+ return;
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) &&
+ *cap & dev->fmt_cap->alpha_mask)
+ return;
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_INV_ALPHA) &&
+ !(*cap & dev->fmt_cap->alpha_mask))
+ return;
+ }
+ *cap = out;
+}
+
+static void blend_line(struct vivid_dev *dev, unsigned y_offset, unsigned x_offset,
+ u8 *vcapbuf, const u8 *vosdbuf,
+ unsigned width, unsigned pixsize)
+{
+ unsigned x;
+
+ for (x = 0; x < width; x++, vcapbuf += pixsize, vosdbuf += pixsize) {
+ copy_pix(dev, y_offset, x_offset + x,
+ (u16 *)vcapbuf, (const u16 *)vosdbuf);
+ }
+}
+
+static void scale_line(const u8 *src, u8 *dst, unsigned srcw, unsigned dstw, unsigned twopixsize)
+{
+ /* Coarse scaling with Bresenham */
+ unsigned int_part;
+ unsigned fract_part;
+ unsigned src_x = 0;
+ unsigned error = 0;
+ unsigned x;
+
+ /*
+ * We always combine two pixels to prevent color bleed in the packed
+ * yuv case.
+ */
+ srcw /= 2;
+ dstw /= 2;
+ int_part = srcw / dstw;
+ fract_part = srcw % dstw;
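+	/*
+	 * Each destination pixel pair advances the source position by
+	 * int_part pixel pairs; the remainder accumulates in 'error' and an
+	 * extra source pixel pair is skipped whenever it reaches dstw.
+	 */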
+ for (x = 0; x < dstw; x++, dst += twopixsize) {
+ memcpy(dst, src + src_x * twopixsize, twopixsize);
+ src_x += int_part;
+ error += fract_part;
+ if (error >= dstw) {
+ error -= dstw;
+ src_x++;
+ }
+ }
+}
+
+/*
+ * Precalculate the rectangles needed to perform video looping:
+ *
+ * The nominal pipeline is that the video output buffer is cropped by
+ * crop_out, scaled to compose_out, overlaid with the output overlay,
+ * cropped on the capture side by crop_cap and scaled again to the video
+ * capture buffer using compose_cap.
+ *
+ * To keep things efficient we calculate the intersection of compose_out
+ * and crop_cap (since that's the only part of the video that will
+ * actually end up in the capture buffer), determine which part of the
+ * video output buffer that is and which part of the video capture buffer
+ * so we can scale the video straight from the output buffer to the capture
+ * buffer without any intermediate steps.
+ *
+ * If we need to deal with an output overlay, then there is no choice and
+ * that intermediate step still has to be taken. For the output overlay
+ * support we calculate the intersection of the framebuffer and the overlay
+ * window (which may be partially or wholly outside of the framebuffer
+ * itself) and the intersection of that with loop_vid_copy (i.e. the part of
+ * the actual looped video that will be overlaid). The result is calculated
+ * both in framebuffer coordinates (loop_fb_copy) and compose_out coordinates
+ * (loop_vid_overlay). Finally calculate the part of the capture buffer that
+ * will receive that overlaid video.
+ */
+static void vivid_precalc_copy_rects(struct vivid_dev *dev)
+{
+ /* Framebuffer rectangle */
+ struct v4l2_rect r_fb = {
+ 0, 0, dev->display_width, dev->display_height
+ };
+ /* Overlay window rectangle in framebuffer coordinates */
+ struct v4l2_rect r_overlay = {
+ dev->overlay_out_left, dev->overlay_out_top,
+ dev->compose_out.width, dev->compose_out.height
+ };
+
+ v4l2_rect_intersect(&dev->loop_vid_copy, &dev->crop_cap, &dev->compose_out);
+
+ dev->loop_vid_out = dev->loop_vid_copy;
+ v4l2_rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
+ dev->loop_vid_out.left += dev->crop_out.left;
+ dev->loop_vid_out.top += dev->crop_out.top;
+
+ dev->loop_vid_cap = dev->loop_vid_copy;
+ v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);
+
+ dprintk(dev, 1,
+ "loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
+ dev->loop_vid_copy.width, dev->loop_vid_copy.height,
+ dev->loop_vid_copy.left, dev->loop_vid_copy.top,
+ dev->loop_vid_out.width, dev->loop_vid_out.height,
+ dev->loop_vid_out.left, dev->loop_vid_out.top,
+ dev->loop_vid_cap.width, dev->loop_vid_cap.height,
+ dev->loop_vid_cap.left, dev->loop_vid_cap.top);
+
+ v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay);
+
+ /* shift r_overlay to the same origin as compose_out */
+ r_overlay.left += dev->compose_out.left - dev->overlay_out_left;
+ r_overlay.top += dev->compose_out.top - dev->overlay_out_top;
+
+ v4l2_rect_intersect(&dev->loop_vid_overlay, &r_overlay, &dev->loop_vid_copy);
+ dev->loop_fb_copy = dev->loop_vid_overlay;
+
+ /* shift dev->loop_fb_copy back again to the fb origin */
+ dev->loop_fb_copy.left -= dev->compose_out.left - dev->overlay_out_left;
+ dev->loop_fb_copy.top -= dev->compose_out.top - dev->overlay_out_top;
+
+ dev->loop_vid_overlay_cap = dev->loop_vid_overlay;
+ v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);
+
+ dprintk(dev, 1,
+ "loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
+ dev->loop_fb_copy.width, dev->loop_fb_copy.height,
+ dev->loop_fb_copy.left, dev->loop_fb_copy.top,
+ dev->loop_vid_overlay.width, dev->loop_vid_overlay.height,
+ dev->loop_vid_overlay.left, dev->loop_vid_overlay.top,
+ dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height,
+ dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top);
+}
+
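+/*
+ * Return the starting address of plane p of a buffer. For formats that
+ * store all planes in a single buffer, the sizes of the planes preceding
+ * p are added to the start of plane 0.
+ */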
+static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
+ unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h)
+{
+ unsigned i;
+ void *vbuf;
+
+ if (p == 0 || tpg_g_buffers(tpg) > 1)
+ return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ for (i = 0; i < p; i++)
+ vbuf += bpl[i] * h / tpg->vdownsampling[i];
+ return vbuf;
+}
+
+static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
+ struct vivid_buffer *vid_cap_buf)
+{
+ bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
+ struct tpg_data *tpg = &dev->tpg;
+ struct vivid_buffer *vid_out_buf = NULL;
+ unsigned vdiv = dev->fmt_out->vdownsampling[p];
+ unsigned twopixsize = tpg_g_twopixelsize(tpg, p);
+ unsigned img_width = tpg_hdiv(tpg, p, dev->compose_cap.width);
+ unsigned img_height = dev->compose_cap.height;
+ unsigned stride_cap = tpg->bytesperline[p];
+ unsigned stride_out = dev->bytesperline_out[p];
+ unsigned stride_osd = dev->display_byte_stride;
+ unsigned hmax = (img_height * tpg->perc_fill) / 100;
+ u8 *voutbuf;
+ u8 *vosdbuf = NULL;
+ unsigned y;
+ bool blend = dev->bitmap_out || dev->clipcount_out || dev->fbuf_out_flags;
+ /* Coarse scaling with Bresenham */
+ unsigned vid_out_int_part;
+ unsigned vid_out_fract_part;
+ unsigned vid_out_y = 0;
+ unsigned vid_out_error = 0;
+ unsigned vid_overlay_int_part = 0;
+ unsigned vid_overlay_fract_part = 0;
+ unsigned vid_overlay_y = 0;
+ unsigned vid_overlay_error = 0;
+ unsigned vid_cap_left = tpg_hdiv(tpg, p, dev->loop_vid_cap.left);
+ unsigned vid_cap_right;
+ bool quick;
+
+ vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height;
+ vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height;
+
+ if (!list_empty(&dev->vid_out_active))
+ vid_out_buf = list_entry(dev->vid_out_active.next,
+ struct vivid_buffer, list);
+ if (vid_out_buf == NULL)
+ return -ENODATA;
+
+ vid_cap_buf->vb.field = vid_out_buf->vb.field;
+
+ voutbuf = plane_vaddr(tpg, vid_out_buf, p,
+ dev->bytesperline_out, dev->fmt_out_rect.height);
+ if (p < dev->fmt_out->buffers)
+ voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
+ voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
+ (dev->loop_vid_out.top / vdiv) * stride_out;
+ vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
+ (dev->compose_cap.top / vdiv) * stride_cap;
+
+ if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
+ /*
+ * If there is nothing to copy, then just fill the capture window
+ * with black.
+ */
+ for (y = 0; y < hmax / vdiv; y++, vcapbuf += stride_cap)
+ memcpy(vcapbuf, tpg->black_line[p], img_width);
+ return 0;
+ }
+
+ if (dev->overlay_out_enabled &&
+ dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
+ vosdbuf = dev->video_vbase;
+ vosdbuf += (dev->loop_fb_copy.left * twopixsize) / 2 +
+ dev->loop_fb_copy.top * stride_osd;
+ vid_overlay_int_part = dev->loop_vid_overlay.height /
+ dev->loop_vid_overlay_cap.height;
+ vid_overlay_fract_part = dev->loop_vid_overlay.height %
+ dev->loop_vid_overlay_cap.height;
+ }
+
+ vid_cap_right = tpg_hdiv(tpg, p, dev->loop_vid_cap.left + dev->loop_vid_cap.width);
+ /* quick is true if no video scaling is needed */
+ quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;
+
+ dev->cur_scaled_line = dev->loop_vid_out.height;
+ for (y = 0; y < hmax; y += vdiv, vcapbuf += stride_cap) {
+ /* osdline is true if this line requires overlay blending */
+ bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
+ y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;
+
+ /*
+ * If this line of the capture buffer doesn't get any video, then
+ * just fill with black.
+ */
+ if (y < dev->loop_vid_cap.top ||
+ y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
+ memcpy(vcapbuf, tpg->black_line[p], img_width);
+ continue;
+ }
+
+ /* fill the left border with black */
+ if (dev->loop_vid_cap.left)
+ memcpy(vcapbuf, tpg->black_line[p], vid_cap_left);
+
+ /* fill the right border with black */
+ if (vid_cap_right < img_width)
+ memcpy(vcapbuf + vid_cap_right, tpg->black_line[p],
+ img_width - vid_cap_right);
+
+ if (quick && !osdline) {
+ memcpy(vcapbuf + vid_cap_left,
+ voutbuf + vid_out_y * stride_out,
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
+ goto update_vid_out_y;
+ }
+ if (dev->cur_scaled_line == vid_out_y) {
+ memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
+ goto update_vid_out_y;
+ }
+ if (!osdline) {
+ scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
+ tpg_hdiv(tpg, p, dev->loop_vid_out.width),
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width),
+ tpg_g_twopixelsize(tpg, p));
+ } else {
+ /*
+ * Offset in bytes within loop_vid_copy to the start of the
+ * loop_vid_overlay rectangle.
+ */
+ unsigned offset =
+ ((dev->loop_vid_overlay.left - dev->loop_vid_copy.left) *
+ twopixsize) / 2;
+ u8 *osd = vosdbuf + vid_overlay_y * stride_osd;
+
+ scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
+ dev->loop_vid_out.width, dev->loop_vid_copy.width,
+ tpg_g_twopixelsize(tpg, p));
+ if (blend)
+ blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
+ dev->loop_vid_overlay.left,
+ dev->blended_line + offset, osd,
+ dev->loop_vid_overlay.width, twopixsize / 2);
+ else
+ memcpy(dev->blended_line + offset,
+ osd, (dev->loop_vid_overlay.width * twopixsize) / 2);
+ scale_line(dev->blended_line, dev->scaled_line,
+ dev->loop_vid_copy.width, dev->loop_vid_cap.width,
+ tpg_g_twopixelsize(tpg, p));
+ }
+ dev->cur_scaled_line = vid_out_y;
+ memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
+
+update_vid_out_y:
+ if (osdline) {
+ vid_overlay_y += vid_overlay_int_part;
+ vid_overlay_error += vid_overlay_fract_part;
+ if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) {
+ vid_overlay_error -= dev->loop_vid_overlay_cap.height;
+ vid_overlay_y++;
+ }
+ }
+ vid_out_y += vid_out_int_part;
+ vid_out_error += vid_out_fract_part;
+ if (vid_out_error >= dev->loop_vid_cap.height / vdiv) {
+ vid_out_error -= dev->loop_vid_cap.height / vdiv;
+ vid_out_y++;
+ }
+ }
+
+ if (!blank)
+ return 0;
+ for (; y < img_height; y += vdiv, vcapbuf += stride_cap)
+ memcpy(vcapbuf, tpg->contrast_line[p], img_width);
+ return 0;
+}
+
+static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct tpg_data *tpg = &dev->tpg;
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
+ unsigned line_height = 16 / factor;
+ bool is_tv = vivid_is_sdtv_cap(dev);
+ bool is_60hz = is_tv && (dev->std_cap & V4L2_STD_525_60);
+ unsigned p;
+ int line = 1;
+ u8 *basep[TPG_MAX_PLANES][2];
+ unsigned ms;
+ char str[100];
+ s32 gain;
+ bool is_loop = false;
+
+ if (dev->loop_video && dev->can_loop_video &&
+ ((vivid_is_svid_cap(dev) &&
+ !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
+ (vivid_is_hdmi_cap(dev) &&
+ !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
+ is_loop = true;
+
+ buf->vb.sequence = dev->vid_cap_seq_count;
+ /*
+ * Take the timestamp now if the timestamp source is set to
+ * "Start of Exposure".
+ */
+ if (dev->tstamp_src_is_soe)
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
+ /*
+ * 60 Hz standards start with the bottom field, 50 Hz standards
+ * with the top field. So if the 0-based seq_count is even,
+ * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
+ * standards.
+ */
+ buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
+ V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+ /*
+ * The sequence counter counts frames, not fields. So divide
+ * by two.
+ */
+ buf->vb.sequence /= 2;
+ } else {
+ buf->vb.field = dev->field_cap;
+ }
+ tpg_s_field(tpg, buf->vb.field,
+ dev->field_cap == V4L2_FIELD_ALTERNATE);
+ tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]);
+
+ vivid_precalc_copy_rects(dev);
+
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ void *vbuf = plane_vaddr(tpg, buf, p,
+ tpg->bytesperline, tpg->buf_height);
+
+ /*
+ * The first plane of a multiplanar format has a non-zero
+		 * data_offset. This helps test whether the application
+ * correctly supports non-zero data offsets.
+ */
+ if (p < tpg_g_buffers(tpg) && dev->fmt_cap->data_offset[p]) {
+ memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
+ dev->fmt_cap->data_offset[p]);
+ vbuf += dev->fmt_cap->data_offset[p];
+ }
+ tpg_calc_text_basep(tpg, basep, p, vbuf);
+ if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
+ tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev),
+ p, vbuf);
+ }
+ dev->must_blank[buf->vb.vb2_buf.index] = false;
+
+	/* Update the stream time; only do so at the start of a new frame. */
+ if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
+ (dev->vid_cap_seq_count & 1) == 0)
+ dev->ms_vid_cap =
+ jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
+
+ ms = dev->ms_vid_cap;
+ if (dev->osd_mode <= 1) {
+ snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s",
+ (ms / (60 * 60 * 1000)) % 24,
+ (ms / (60 * 1000)) % 60,
+ (ms / 1000) % 60,
+ ms % 1000,
+ buf->vb.sequence,
+ (dev->field_cap == V4L2_FIELD_ALTERNATE) ?
+ (buf->vb.field == V4L2_FIELD_TOP ?
+ " top" : " bottom") : "");
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ }
+ if (dev->osd_mode == 0) {
+ snprintf(str, sizeof(str), " %dx%d, input %d ",
+ dev->src_rect.width, dev->src_rect.height, dev->input);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+
+ gain = v4l2_ctrl_g_ctrl(dev->gain);
+ mutex_lock(dev->ctrl_hdl_user_vid.lock);
+ snprintf(str, sizeof(str),
+ " brightness %3d, contrast %3d, saturation %3d, hue %d ",
+ dev->brightness->cur.val,
+ dev->contrast->cur.val,
+ dev->saturation->cur.val,
+ dev->hue->cur.val);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ snprintf(str, sizeof(str),
+ " autogain %d, gain %3d, alpha 0x%02x ",
+ dev->autogain->cur.val, gain, dev->alpha->cur.val);
+ mutex_unlock(dev->ctrl_hdl_user_vid.lock);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ mutex_lock(dev->ctrl_hdl_user_aud.lock);
+ snprintf(str, sizeof(str),
+ " volume %3d, mute %d ",
+ dev->volume->cur.val, dev->mute->cur.val);
+ mutex_unlock(dev->ctrl_hdl_user_aud.lock);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ mutex_lock(dev->ctrl_hdl_user_gen.lock);
+ snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
+ dev->int32->cur.val,
+ *dev->int64->p_cur.p_s64,
+ dev->bitmask->cur.val);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
+ dev->boolean->cur.val,
+ dev->menu->qmenu[dev->menu->cur.val],
+ dev->string->p_cur.p_char);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
+ dev->int_menu->qmenu_int[dev->int_menu->cur.val],
+ dev->int_menu->cur.val);
+ mutex_unlock(dev->ctrl_hdl_user_gen.lock);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ if (dev->button_pressed) {
+ dev->button_pressed--;
+ snprintf(str, sizeof(str), " button pressed!");
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
+ }
+ if (dev->osd[0]) {
+ if (vivid_is_hdmi_cap(dev)) {
+ snprintf(str, sizeof(str),
+ " OSD \"%s\"", dev->osd);
+ tpg_gen_text(tpg, basep, line++ * line_height,
+ 16, str);
+ }
+ if (dev->osd_jiffies &&
+ time_is_before_jiffies(dev->osd_jiffies + 5 * HZ)) {
+ dev->osd[0] = 0;
+ dev->osd_jiffies = 0;
+ }
+ }
+ }
+
+ /*
+	 * If "End of Frame" is selected as the timestamp source, then take
+ * the timestamp now.
+ */
+ if (!dev->tstamp_src_is_soe)
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.vb2_buf.timestamp += dev->time_wrap_offset;
+}
+
+/*
+ * Return true if this pixel coordinate is a valid video pixel.
+ */
+static bool valid_pix(struct vivid_dev *dev, int win_y, int win_x, int fb_y, int fb_x)
+{
+ int i;
+
+ if (dev->bitmap_cap) {
+ /*
+ * Only if the corresponding bit in the bitmap is set can
+ * the video pixel be shown. Coordinates are relative to
+ * the overlay window set by VIDIOC_S_FMT.
+ */
+ const u8 *p = dev->bitmap_cap;
+ unsigned stride = (dev->compose_cap.width + 7) / 8;
+
+ if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
+ return false;
+ }
+
+ for (i = 0; i < dev->clipcount_cap; i++) {
+ /*
+ * Only if the framebuffer coordinate is not in any of the
+		 * clip rectangles will the video pixel be shown.
+ */
+ struct v4l2_rect *r = &dev->clips_cap[i].c;
+
+ if (fb_y >= r->top && fb_y < r->top + r->height &&
+ fb_x >= r->left && fb_x < r->left + r->width)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Draw the image into the overlay buffer.
+ * Note that the combination of overlay and multiplanar is not supported.
+ */
+static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct tpg_data *tpg = &dev->tpg;
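+	/* Size in bytes of a single pixel (half of the two-pixel size) */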
+ unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
+ void *vbase = dev->fb_vbase_cap;
+ void *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ unsigned img_width = dev->compose_cap.width;
+ unsigned img_height = dev->compose_cap.height;
+ unsigned stride = tpg->bytesperline[0];
+ /* if quick is true, then valid_pix() doesn't have to be called */
+ bool quick = dev->bitmap_cap == NULL && dev->clipcount_cap == 0;
+ int x, y, w, out_x = 0;
+
+ /*
+	 * Overlay is only supported for formats that have a twopixelsize
+	 * of at least 2. Warn and bail out if that's not the case.
+ */
+ if (WARN_ON(pixsize == 0))
+ return;
+ if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
+ dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
+ dev->overlay_cap_field != buf->vb.field)
+ return;
+
+ vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
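+	/*
+	 * Clip the overlay window horizontally against the framebuffer:
+	 * out_x is the number of source pixels to skip when the window
+	 * starts to the left of the framebuffer.
+	 */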
+ x = dev->overlay_cap_left;
+ w = img_width;
+ if (x < 0) {
+ out_x = -x;
+ w = w - out_x;
+ x = 0;
+ } else {
+ w = dev->fb_cap.fmt.width - x;
+ if (w > img_width)
+ w = img_width;
+ }
+ if (w <= 0)
+ return;
+ if (dev->overlay_cap_top >= 0)
+ vbase += dev->overlay_cap_top * dev->fb_cap.fmt.bytesperline;
+ for (y = dev->overlay_cap_top;
+ y < dev->overlay_cap_top + (int)img_height;
+ y++, vbuf += stride) {
+ int px;
+
+ if (y < 0 || y > dev->fb_cap.fmt.height)
+ continue;
+ if (quick) {
+ memcpy(vbase + x * pixsize,
+ vbuf + out_x * pixsize, w * pixsize);
+ vbase += dev->fb_cap.fmt.bytesperline;
+ continue;
+ }
+ for (px = 0; px < w; px++) {
+ if (!valid_pix(dev, y - dev->overlay_cap_top,
+ px + out_x, y, px + x))
+ continue;
+ memcpy(vbase + (px + x) * pixsize,
+ vbuf + (px + out_x) * pixsize,
+ pixsize);
+ }
+ vbase += dev->fb_cap.fmt.bytesperline;
+ }
+}
+
+static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
+{
+ struct vivid_buffer *vid_cap_buf = NULL;
+ struct vivid_buffer *vbi_cap_buf = NULL;
+
+ dprintk(dev, 1, "Video Capture Thread Tick\n");
+
+ while (dropped_bufs-- > 1)
+ tpg_update_mv_count(&dev->tpg,
+ dev->field_cap == V4L2_FIELD_NONE ||
+ dev->field_cap == V4L2_FIELD_ALTERNATE);
+
+ /* Drop a certain percentage of buffers. */
+ if (dev->perc_dropped_buffers &&
+ prandom_u32_max(100) < dev->perc_dropped_buffers)
+ goto update_mv;
+
+ spin_lock(&dev->slock);
+ if (!list_empty(&dev->vid_cap_active)) {
+ vid_cap_buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list);
+ list_del(&vid_cap_buf->list);
+ }
+ if (!list_empty(&dev->vbi_cap_active)) {
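+		/*
+		 * In ALTERNATE field mode only dequeue a VBI buffer every
+		 * other field, i.e. once per frame.
+		 */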
+ if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
+ (dev->vbi_cap_seq_count & 1)) {
+ vbi_cap_buf = list_entry(dev->vbi_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&vbi_cap_buf->list);
+ }
+ }
+ spin_unlock(&dev->slock);
+
+ if (!vid_cap_buf && !vbi_cap_buf)
+ goto update_mv;
+
+ if (vid_cap_buf) {
+ /* Fill buffer */
+ vivid_fillbuff(dev, vid_cap_buf);
+ dprintk(dev, 1, "filled buffer %d\n",
+ vid_cap_buf->vb.vb2_buf.index);
+
+ /* Handle overlay */
+ if (dev->overlay_cap_owner && dev->fb_cap.base &&
+ dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
+ vivid_overlay(dev, vid_cap_buf);
+
+ vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vid_cap buffer %d done\n",
+ vid_cap_buf->vb.vb2_buf.index);
+ }
+
+ if (vbi_cap_buf) {
+ if (dev->stream_sliced_vbi_cap)
+ vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
+ else
+ vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
+ vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vbi_cap %d done\n",
+ vbi_cap_buf->vb.vb2_buf.index);
+ }
+ dev->dqbuf_error = false;
+
+update_mv:
+ /* Update the test pattern movement counters */
+ tpg_update_mv_count(&dev->tpg, dev->field_cap == V4L2_FIELD_NONE ||
+ dev->field_cap == V4L2_FIELD_ALTERNATE);
+}
+
+static int vivid_thread_vid_cap(void *data)
+{
+ struct vivid_dev *dev = data;
+ u64 numerators_since_start;
+ u64 buffers_since_start;
+ u64 next_jiffies_since_start;
+ unsigned long jiffies_since_start;
+ unsigned long cur_jiffies;
+ unsigned wait_jiffies;
+ unsigned numerator;
+ unsigned denominator;
+ int dropped_bufs;
+
+ dprintk(dev, 1, "Video Capture Thread Start\n");
+
+ set_freezable();
+
+ /* Resets frame counters */
+ dev->cap_seq_offset = 0;
+ dev->cap_seq_count = 0;
+ dev->cap_seq_resync = false;
+ dev->jiffies_vid_cap = jiffies;
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
+ cur_jiffies = jiffies;
+ if (dev->cap_seq_resync) {
+ dev->jiffies_vid_cap = cur_jiffies;
+ dev->cap_seq_offset = dev->cap_seq_count + 1;
+ dev->cap_seq_count = 0;
+ dev->cap_seq_resync = false;
+ }
+ numerator = dev->timeperframe_vid_cap.numerator;
+ denominator = dev->timeperframe_vid_cap.denominator;
+
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ denominator *= 2;
+
+ /* Calculate the number of jiffies since we started streaming */
+ jiffies_since_start = cur_jiffies - dev->jiffies_vid_cap;
+ /* Get the number of buffers streamed since the start */
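+		/* Adding half the divisor makes the division below round to nearest */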
+ buffers_since_start = (u64)jiffies_since_start * denominator +
+ (HZ * numerator) / 2;
+ do_div(buffers_since_start, HZ * numerator);
+
+ /*
+ * After more than 0xf0000000 (rounded down to a multiple of
+ * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
+ * jiffies have passed since we started streaming reset the
+ * counters and keep track of the sequence offset.
+ */
+ if (jiffies_since_start > JIFFIES_RESYNC) {
+ dev->jiffies_vid_cap = cur_jiffies;
+ dev->cap_seq_offset = buffers_since_start;
+ buffers_since_start = 0;
+ }
+ dropped_bufs = buffers_since_start + dev->cap_seq_offset - dev->cap_seq_count;
+ dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
+ dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
+ dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
+
+ vivid_thread_vid_cap_tick(dev, dropped_bufs);
+
+ /*
+ * Calculate the number of 'numerators' streamed since we started,
+ * including the current buffer.
+ */
+ numerators_since_start = ++buffers_since_start * numerator;
+
+ /* And the number of jiffies since we started */
+ jiffies_since_start = jiffies - dev->jiffies_vid_cap;
+
+ mutex_unlock(&dev->mutex);
+
+ /*
+ * Calculate when that next buffer is supposed to start
+ * in jiffies since we started streaming.
+ */
+ next_jiffies_since_start = numerators_since_start * HZ +
+ denominator / 2;
+ do_div(next_jiffies_since_start, denominator);
+ /* If it is in the past, then just schedule asap */
+ if (next_jiffies_since_start < jiffies_since_start)
+ next_jiffies_since_start = jiffies_since_start;
+
+ wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+ schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+ }
+ dprintk(dev, 1, "Video Capture Thread End\n");
+ return 0;
+}
+
+static void vivid_grab_controls(struct vivid_dev *dev, bool grab)
+{
+ v4l2_ctrl_grab(dev->ctrl_has_crop_cap, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_compose_cap, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_scaler_cap, grab);
+}
+
+int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_cap) {
+ u32 seq_count = dev->cap_seq_count + dev->seq_wrap * 128;
+
+ if (pstreaming == &dev->vid_cap_streaming)
+ dev->vid_cap_seq_start = seq_count;
+ else
+ dev->vbi_cap_seq_start = seq_count;
+ *pstreaming = true;
+ return 0;
+ }
+
+ /* Resets frame counters */
+ tpg_init_mv_count(&dev->tpg);
+
+ dev->vid_cap_seq_start = dev->seq_wrap * 128;
+ dev->vbi_cap_seq_start = dev->seq_wrap * 128;
+
+ dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
+ "%s-vid-cap", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_cap)) {
+ int err = PTR_ERR(dev->kthread_vid_cap);
+
+ dev->kthread_vid_cap = NULL;
+		v4l2_err(&dev->v4l2_dev, "kthread_run() failed\n");
+ return err;
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
+
+ dprintk(dev, 1, "returning from %s\n", __func__);
+ return 0;
+}
+
+void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_cap == NULL)
+ return;
+
+ *pstreaming = false;
+ if (pstreaming == &dev->vid_cap_streaming) {
+ /* Release all active buffers */
+ while (!list_empty(&dev->vid_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vid_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vid_cap buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (pstreaming == &dev->vbi_cap_streaming) {
+ while (!list_empty(&dev->vbi_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vbi_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vbi_cap buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_cap_streaming || dev->vbi_cap_streaming)
+ return;
+
+ /* shutdown control thread */
+ vivid_grab_controls(dev, false);
+ kthread_stop(dev->kthread_vid_cap);
+ dev->kthread_vid_cap = NULL;
+}
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.h b/drivers/media/platform/vivid/vivid-kthread-cap.h
new file mode 100644
index 000000000..0f4301530
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-kthread-cap.h - video/vbi capture thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_KTHREAD_CAP_H_
+#define _VIVID_KTHREAD_CAP_H_
+
+int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming);
+void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
new file mode 100644
index 000000000..c5f466a73
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-kthread-out.c - video/vbi output thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/random.h>
+#include <linux/v4l2-dv-timings.h>
+#include <asm/div64.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-rx.h"
+#include "vivid-radio-tx.h"
+#include "vivid-sdr-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-out.h"
+#include "vivid-osd.h"
+#include "vivid-ctrls.h"
+#include "vivid-kthread-out.h"
+
+static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
+{
+ struct vivid_buffer *vid_out_buf = NULL;
+ struct vivid_buffer *vbi_out_buf = NULL;
+
+ dprintk(dev, 1, "Video Output Thread Tick\n");
+
+ /* Drop a certain percentage of buffers. */
+ if (dev->perc_dropped_buffers &&
+ prandom_u32_max(100) < dev->perc_dropped_buffers)
+ return;
+
+ spin_lock(&dev->slock);
+ /*
+ * Only dequeue buffer if there is at least one more pending.
+ * This makes video loopback possible.
+ */
+ if (!list_empty(&dev->vid_out_active) &&
+ !list_is_singular(&dev->vid_out_active)) {
+ vid_out_buf = list_entry(dev->vid_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&vid_out_buf->list);
+ }
+ if (!list_empty(&dev->vbi_out_active) &&
+ (dev->field_out != V4L2_FIELD_ALTERNATE ||
+ (dev->vbi_out_seq_count & 1))) {
+ vbi_out_buf = list_entry(dev->vbi_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&vbi_out_buf->list);
+ }
+ spin_unlock(&dev->slock);
+
+ if (!vid_out_buf && !vbi_out_buf)
+ return;
+
+ if (vid_out_buf) {
+ vid_out_buf->vb.sequence = dev->vid_out_seq_count;
+ if (dev->field_out == V4L2_FIELD_ALTERNATE) {
+ /*
+ * The sequence counter counts frames, not fields.
+ * So divide by two.
+ */
+ vid_out_buf->vb.sequence /= 2;
+ }
+ vid_out_buf->vb.vb2_buf.timestamp =
+ ktime_get_ns() + dev->time_wrap_offset;
+ vb2_buffer_done(&vid_out_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vid_out buffer %d done\n",
+ vid_out_buf->vb.vb2_buf.index);
+ }
+
+ if (vbi_out_buf) {
+ if (dev->stream_sliced_vbi_out)
+ vivid_sliced_vbi_out_process(dev, vbi_out_buf);
+
+ vbi_out_buf->vb.sequence = dev->vbi_out_seq_count;
+ vbi_out_buf->vb.vb2_buf.timestamp =
+ ktime_get_ns() + dev->time_wrap_offset;
+ vb2_buffer_done(&vbi_out_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vbi_out buffer %d done\n",
+ vbi_out_buf->vb.vb2_buf.index);
+ }
+ dev->dqbuf_error = false;
+}
+
+static int vivid_thread_vid_out(void *data)
+{
+ struct vivid_dev *dev = data;
+ u64 numerators_since_start;
+ u64 buffers_since_start;
+ u64 next_jiffies_since_start;
+ unsigned long jiffies_since_start;
+ unsigned long cur_jiffies;
+ unsigned wait_jiffies;
+ unsigned numerator;
+ unsigned denominator;
+
+ dprintk(dev, 1, "Video Output Thread Start\n");
+
+ set_freezable();
+
+ /* Resets frame counters */
+ dev->out_seq_offset = 0;
+ if (dev->seq_wrap)
+ dev->out_seq_count = 0xffffff80U;
+ dev->jiffies_vid_out = jiffies;
+ dev->vid_out_seq_start = dev->vbi_out_seq_start = 0;
+ dev->out_seq_resync = false;
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
+ cur_jiffies = jiffies;
+ if (dev->out_seq_resync) {
+ dev->jiffies_vid_out = cur_jiffies;
+ dev->out_seq_offset = dev->out_seq_count + 1;
+ dev->out_seq_count = 0;
+ dev->out_seq_resync = false;
+ }
+ numerator = dev->timeperframe_vid_out.numerator;
+ denominator = dev->timeperframe_vid_out.denominator;
+
+ if (dev->field_out == V4L2_FIELD_ALTERNATE)
+ denominator *= 2;
+
+ /* Calculate the number of jiffies since we started streaming */
+ jiffies_since_start = cur_jiffies - dev->jiffies_vid_out;
+ /* Get the number of buffers streamed since the start */
+ buffers_since_start = (u64)jiffies_since_start * denominator +
+ (HZ * numerator) / 2;
+ do_div(buffers_since_start, HZ * numerator);
+
+ /*
+ * After more than 0xf0000000 (rounded down to a multiple of
+ * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
+ * jiffies have passed since we started streaming reset the
+ * counters and keep track of the sequence offset.
+ */
+ if (jiffies_since_start > JIFFIES_RESYNC) {
+ dev->jiffies_vid_out = cur_jiffies;
+ dev->out_seq_offset = buffers_since_start;
+ buffers_since_start = 0;
+ }
+ dev->out_seq_count = buffers_since_start + dev->out_seq_offset;
+ dev->vid_out_seq_count = dev->out_seq_count - dev->vid_out_seq_start;
+ dev->vbi_out_seq_count = dev->out_seq_count - dev->vbi_out_seq_start;
+
+ vivid_thread_vid_out_tick(dev);
+ mutex_unlock(&dev->mutex);
+
+ /*
+ * Calculate the number of 'numerators' streamed since we started,
+ * not including the current buffer.
+ */
+ numerators_since_start = buffers_since_start * numerator;
+
+ /* And the number of jiffies since we started */
+ jiffies_since_start = jiffies - dev->jiffies_vid_out;
+
+ /* Increase by the 'numerator' of one buffer */
+ numerators_since_start += numerator;
+ /*
+ * Calculate when that next buffer is supposed to start
+ * in jiffies since we started streaming.
+ */
+ next_jiffies_since_start = numerators_since_start * HZ +
+ denominator / 2;
+ do_div(next_jiffies_since_start, denominator);
+ /* If it is in the past, then just schedule asap */
+ if (next_jiffies_since_start < jiffies_since_start)
+ next_jiffies_since_start = jiffies_since_start;
+
+ wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+ schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+ }
+ dprintk(dev, 1, "Video Output Thread End\n");
+ return 0;
+}
+
+static void vivid_grab_controls(struct vivid_dev *dev, bool grab)
+{
+ v4l2_ctrl_grab(dev->ctrl_has_crop_out, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_compose_out, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_scaler_out, grab);
+ v4l2_ctrl_grab(dev->ctrl_tx_mode, grab);
+ v4l2_ctrl_grab(dev->ctrl_tx_rgb_range, grab);
+}
+
+int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_out) {
+ u32 seq_count = dev->out_seq_count + dev->seq_wrap * 128;
+
+ if (pstreaming == &dev->vid_out_streaming)
+ dev->vid_out_seq_start = seq_count;
+ else
+ dev->vbi_out_seq_start = seq_count;
+ *pstreaming = true;
+ return 0;
+ }
+
+ /* Resets frame counters */
+ dev->jiffies_vid_out = jiffies;
+ dev->vid_out_seq_start = dev->seq_wrap * 128;
+ dev->vbi_out_seq_start = dev->seq_wrap * 128;
+
+ dev->kthread_vid_out = kthread_run(vivid_thread_vid_out, dev,
+ "%s-vid-out", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_out)) {
+ int err = PTR_ERR(dev->kthread_vid_out);
+
+ dev->kthread_vid_out = NULL;
+		v4l2_err(&dev->v4l2_dev, "kthread_run() failed\n");
+ return err;
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
+
+ dprintk(dev, 1, "returning from %s\n", __func__);
+ return 0;
+}
+
+void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_out == NULL)
+ return;
+
+ *pstreaming = false;
+ if (pstreaming == &dev->vid_out_streaming) {
+ /* Release all active buffers */
+ while (!list_empty(&dev->vid_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vid_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vid_out buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (pstreaming == &dev->vbi_out_streaming) {
+ while (!list_empty(&dev->vbi_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vbi_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vbi_out buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_out_streaming || dev->vbi_out_streaming)
+ return;
+
+ /* shutdown control thread */
+ vivid_grab_controls(dev, false);
+ kthread_stop(dev->kthread_vid_out);
+ dev->kthread_vid_out = NULL;
+}
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.h b/drivers/media/platform/vivid/vivid-kthread-out.h
new file mode 100644
index 000000000..d5bcf44bb
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-out.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-kthread-out.h - video/vbi output thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_KTHREAD_OUT_H_
+#define _VIVID_KTHREAD_OUT_H_
+
+int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming);
+void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
new file mode 100644
index 000000000..b24596697
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-osd.c - osd support for testing overlays.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/fb.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-osd.h"
+
+#define MAX_OSD_WIDTH 720
+#define MAX_OSD_HEIGHT 576
+
+/*
+ * Order: white, yellow, cyan, green, magenta, red, blue, black,
+ * and same again with the alpha bit set (if any)
+ */
+static const u16 rgb555[16] = {
+ 0x7fff, 0x7fe0, 0x03ff, 0x03e0, 0x7c1f, 0x7c00, 0x001f, 0x0000,
+ 0xffff, 0xffe0, 0x83ff, 0x83e0, 0xfc1f, 0xfc00, 0x801f, 0x8000
+};
+
+static const u16 rgb565[16] = {
+ 0xffff, 0xffe0, 0x07ff, 0x07e0, 0xf81f, 0xf800, 0x001f, 0x0000,
+ 0xffff, 0xffe0, 0x07ff, 0x07e0, 0xf81f, 0xf800, 0x001f, 0x0000
+};
+
+void vivid_clear_fb(struct vivid_dev *dev)
+{
+ void *p = dev->video_vbase;
+ const u16 *rgb = rgb555;
+ unsigned x, y;
+
+ if (dev->fb_defined.green.length == 6)
+ rgb = rgb565;
+
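+	/* Fill a checkerboard of 16x16 pixel blocks cycling through the 16 colors */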
+ for (y = 0; y < dev->display_height; y++) {
+ u16 *d = p;
+
+ for (x = 0; x < dev->display_width; x++)
+ d[x] = rgb[(y / 16 + x / 16) % 16];
+ p += dev->display_byte_stride;
+ }
+}
+
+/* --------------------------------------------------------------------- */
+
+static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
+{
+ struct vivid_dev *dev = (struct vivid_dev *)info->par;
+
+ switch (cmd) {
+ case FBIOGET_VBLANK: {
+ struct fb_vblank vblank;
+
+ memset(&vblank, 0, sizeof(vblank));
+ vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
+ FB_VBLANK_HAVE_VSYNC;
+ vblank.count = 0;
+ vblank.vcount = 0;
+ vblank.hcount = 0;
+ if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank)))
+ return -EFAULT;
+ return 0;
+ }
+
+ default:
+ dprintk(dev, 1, "Unknown ioctl %08x\n", cmd);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Framebuffer device handling */
+
+static int vivid_fb_set_var(struct vivid_dev *dev, struct fb_var_screeninfo *var)
+{
+ dprintk(dev, 1, "vivid_fb_set_var\n");
+
+ if (var->bits_per_pixel != 16) {
+ dprintk(dev, 1, "vivid_fb_set_var - Invalid bpp\n");
+ return -EINVAL;
+ }
+ dev->display_byte_stride = var->xres * dev->bytes_per_pixel;
+
+ return 0;
+}
+
+static int vivid_fb_get_fix(struct vivid_dev *dev, struct fb_fix_screeninfo *fix)
+{
+ dprintk(dev, 1, "vivid_fb_get_fix\n");
+ memset(fix, 0, sizeof(struct fb_fix_screeninfo));
+ strlcpy(fix->id, "vioverlay fb", sizeof(fix->id));
+ fix->smem_start = dev->video_pbase;
+ fix->smem_len = dev->video_buffer_size;
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ fix->visual = FB_VISUAL_TRUECOLOR;
+ fix->xpanstep = 1;
+ fix->ypanstep = 1;
+ fix->ywrapstep = 0;
+ fix->line_length = dev->display_byte_stride;
+ fix->accel = FB_ACCEL_NONE;
+ return 0;
+}
+
+/* Check the requested display mode, returning -EINVAL if we can't
+ handle it. */
+
+static int _vivid_fb_check_var(struct fb_var_screeninfo *var, struct vivid_dev *dev)
+{
+ dprintk(dev, 1, "vivid_fb_check_var\n");
+
+ var->bits_per_pixel = 16;
+ if (var->green.length == 5) {
+ var->red.offset = 10;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 5;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 15;
+ var->transp.length = 1;
+ } else {
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ }
+ var->xoffset = var->yoffset = 0;
+ var->left_margin = var->upper_margin = 0;
+ var->nonstd = 0;
+
+ var->vmode &= ~FB_VMODE_MASK;
+ var->vmode |= FB_VMODE_NONINTERLACED;
+
+ /* Dummy values */
+ var->hsync_len = 24;
+ var->vsync_len = 2;
+ var->pixclock = 84316;
+ var->right_margin = 776;
+ var->lower_margin = 591;
+ return 0;
+}
+
+static int vivid_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct vivid_dev *dev = (struct vivid_dev *) info->par;
+
+ dprintk(dev, 1, "vivid_fb_check_var\n");
+ return _vivid_fb_check_var(var, dev);
+}
+
+static int vivid_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ return 0;
+}
+
+static int vivid_fb_set_par(struct fb_info *info)
+{
+ int rc = 0;
+ struct vivid_dev *dev = (struct vivid_dev *) info->par;
+
+ dprintk(dev, 1, "vivid_fb_set_par\n");
+
+ rc = vivid_fb_set_var(dev, &info->var);
+ vivid_fb_get_fix(dev, &info->fix);
+ return rc;
+}
+
+static int vivid_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ u32 color, *palette;
+
+ if (regno >= info->cmap.len)
+ return -EINVAL;
+
+ color = ((transp & 0xFF00) << 16) | ((red & 0xFF00) << 8) |
+ (green & 0xFF00) | ((blue & 0xFF00) >> 8);
+ if (regno >= 16)
+ return -EINVAL;
+
+ palette = info->pseudo_palette;
+ if (info->var.bits_per_pixel == 16) {
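+		/* Pack the components into RGB565 or ARGB1555 based on the green field length */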
+ switch (info->var.green.length) {
+ case 6:
+ color = (red & 0xf800) |
+ ((green & 0xfc00) >> 5) |
+ ((blue & 0xf800) >> 11);
+ break;
+ case 5:
+ color = ((red & 0xf800) >> 1) |
+ ((green & 0xf800) >> 6) |
+ ((blue & 0xf800) >> 11) |
+ (transp ? 0x8000 : 0);
+ break;
+ }
+ }
+ palette[regno] = color;
+ return 0;
+}
+
+/* We don't really support blanking. All this does is enable or
+ disable the OSD. */
+static int vivid_fb_blank(int blank_mode, struct fb_info *info)
+{
+ struct vivid_dev *dev = (struct vivid_dev *)info->par;
+
+ dprintk(dev, 1, "Set blanking mode : %d\n", blank_mode);
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ break;
+ case FB_BLANK_NORMAL:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ break;
+ }
+ return 0;
+}
+
+static struct fb_ops vivid_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = vivid_fb_check_var,
+ .fb_set_par = vivid_fb_set_par,
+ .fb_setcolreg = vivid_fb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_cursor = NULL,
+ .fb_ioctl = vivid_fb_ioctl,
+ .fb_pan_display = vivid_fb_pan_display,
+ .fb_blank = vivid_fb_blank,
+};
+
+/* Initialization */
+
+
+/* Setup our initial video mode */
+static int vivid_fb_init_vidmode(struct vivid_dev *dev)
+{
+ struct v4l2_rect start_window;
+
+ /* Color mode */
+
+ dev->bits_per_pixel = 16;
+ dev->bytes_per_pixel = dev->bits_per_pixel / 8;
+
+ start_window.width = MAX_OSD_WIDTH;
+ start_window.left = 0;
+
+ dev->display_byte_stride = start_window.width * dev->bytes_per_pixel;
+
+ /* Vertical size & position */
+
+ start_window.height = MAX_OSD_HEIGHT;
+ start_window.top = 0;
+
+ dev->display_width = start_window.width;
+ dev->display_height = start_window.height;
+
+ /* Generate a valid fb_var_screeninfo */
+
+ dev->fb_defined.xres = dev->display_width;
+ dev->fb_defined.yres = dev->display_height;
+ dev->fb_defined.xres_virtual = dev->display_width;
+ dev->fb_defined.yres_virtual = dev->display_height;
+ dev->fb_defined.bits_per_pixel = dev->bits_per_pixel;
+ dev->fb_defined.vmode = FB_VMODE_NONINTERLACED;
+ dev->fb_defined.left_margin = start_window.left + 1;
+ dev->fb_defined.upper_margin = start_window.top + 1;
+ dev->fb_defined.accel_flags = FB_ACCEL_NONE;
+ dev->fb_defined.nonstd = 0;
+ /* set default to 1:5:5:5 */
+ dev->fb_defined.green.length = 5;
+
+	/* We've filled in most of the data, let the usual mode check
+	   routine fill in the rest. */
+ _vivid_fb_check_var(&dev->fb_defined, dev);
+
+ /* Generate valid fb_fix_screeninfo */
+
+ vivid_fb_get_fix(dev, &dev->fb_fix);
+
+ /* Generate valid fb_info */
+
+ dev->fb_info.node = -1;
+ dev->fb_info.flags = FBINFO_FLAG_DEFAULT;
+ dev->fb_info.fbops = &vivid_fb_ops;
+ dev->fb_info.par = dev;
+ dev->fb_info.var = dev->fb_defined;
+ dev->fb_info.fix = dev->fb_fix;
+ dev->fb_info.screen_base = (u8 __iomem *)dev->video_vbase;
+ dev->fb_info.fbops = &vivid_fb_ops;
+
+ /* Supply some monitor specs. Bogus values will do for now */
+ dev->fb_info.monspecs.hfmin = 8000;
+ dev->fb_info.monspecs.hfmax = 70000;
+ dev->fb_info.monspecs.vfmin = 10;
+ dev->fb_info.monspecs.vfmax = 100;
+
+ /* Allocate color map */
+ if (fb_alloc_cmap(&dev->fb_info.cmap, 256, 1)) {
+ pr_err("abort, unable to alloc cmap\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate the pseudo palette */
+ dev->fb_info.pseudo_palette = kmalloc_array(16, sizeof(u32), GFP_KERNEL);
+
+ return dev->fb_info.pseudo_palette ? 0 : -ENOMEM;
+}
+
+/* Release any memory we've grabbed */
+void vivid_fb_release_buffers(struct vivid_dev *dev)
+{
+ if (dev->video_vbase == NULL)
+ return;
+
+ /* Release cmap */
+ if (dev->fb_info.cmap.len)
+ fb_dealloc_cmap(&dev->fb_info.cmap);
+
+ /* Release pseudo palette */
+ kfree(dev->fb_info.pseudo_palette);
+ kfree(dev->video_vbase);
+}
+
+/* Initialize the specified card */
+
+int vivid_fb_init(struct vivid_dev *dev)
+{
+ int ret;
+
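+	/* One 16 bpp (2 bytes per pixel) buffer of the maximum OSD size */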
+ dev->video_buffer_size = MAX_OSD_HEIGHT * MAX_OSD_WIDTH * 2;
+ dev->video_vbase = kzalloc(dev->video_buffer_size, GFP_KERNEL | GFP_DMA32);
+ if (dev->video_vbase == NULL)
+ return -ENOMEM;
+ dev->video_pbase = virt_to_phys(dev->video_vbase);
+
+ pr_info("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
+ dev->video_pbase, dev->video_vbase,
+ dev->video_buffer_size / 1024);
+
+ /* Set the startup video mode information */
+ ret = vivid_fb_init_vidmode(dev);
+ if (ret) {
+ vivid_fb_release_buffers(dev);
+ return ret;
+ }
+
+ vivid_clear_fb(dev);
+
+ /* Register the framebuffer */
+ if (register_framebuffer(&dev->fb_info) < 0) {
+ vivid_fb_release_buffers(dev);
+ return -EINVAL;
+ }
+
+ /* Set the card to the requested mode */
+ vivid_fb_set_par(&dev->fb_info);
+ return 0;
+
+}
diff --git a/drivers/media/platform/vivid/vivid-osd.h b/drivers/media/platform/vivid/vivid-osd.h
new file mode 100644
index 000000000..f9ac1af25
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-osd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-osd.h - output overlay support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_OSD_H_
+#define _VIVID_OSD_H_
+
+int vivid_fb_init(struct vivid_dev *dev);
+void vivid_fb_release_buffers(struct vivid_dev *dev);
+void vivid_clear_fb(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-radio-common.c b/drivers/media/platform/vivid/vivid-radio-common.c
new file mode 100644
index 000000000..7c8efe38f
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-common.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-radio-common.c - common radio rx/tx support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-radio-common.h"
+#include "vivid-rds-gen.h"
+
+/*
+ * These functions are shared between the vivid receiver and transmitter
+ * since both use the same frequency bands.
+ */
+
+const struct v4l2_frequency_band vivid_radio_bands[TOT_BANDS] = {
+ /* Band FM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = FM_FREQ_RANGE_LOW,
+ .rangehigh = FM_FREQ_RANGE_HIGH,
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+ /* Band AM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = AM_FREQ_RANGE_LOW,
+ .rangehigh = AM_FREQ_RANGE_HIGH,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+ /* Band SW */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = SW_FREQ_RANGE_LOW,
+ .rangehigh = SW_FREQ_RANGE_HIGH,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+};
+
+/*
+ * Initialize the RDS generator. If we can loop, then the RDS generator
+ * is set up with the values from the RDS TX controls, otherwise it
+ * will fill in standard values using one of two alternates.
+ */
+void vivid_radio_rds_init(struct vivid_dev *dev)
+{
+ struct vivid_rds_gen *rds = &dev->rds_gen;
+ bool alt = dev->radio_rx_rds_use_alternates;
+
+ /* Do nothing, blocks will be filled by the transmitter */
+ if (dev->radio_rds_loop && !dev->radio_tx_rds_controls)
+ return;
+
+ if (dev->radio_rds_loop) {
+ v4l2_ctrl_lock(dev->radio_tx_rds_pi);
+ rds->picode = dev->radio_tx_rds_pi->cur.val;
+ rds->pty = dev->radio_tx_rds_pty->cur.val;
+ rds->mono_stereo = dev->radio_tx_rds_mono_stereo->cur.val;
+ rds->art_head = dev->radio_tx_rds_art_head->cur.val;
+ rds->compressed = dev->radio_tx_rds_compressed->cur.val;
+ rds->dyn_pty = dev->radio_tx_rds_dyn_pty->cur.val;
+ rds->ta = dev->radio_tx_rds_ta->cur.val;
+ rds->tp = dev->radio_tx_rds_tp->cur.val;
+ rds->ms = dev->radio_tx_rds_ms->cur.val;
+ strlcpy(rds->psname,
+ dev->radio_tx_rds_psname->p_cur.p_char,
+ sizeof(rds->psname));
+ strlcpy(rds->radiotext,
+ dev->radio_tx_rds_radiotext->p_cur.p_char + alt * 64,
+ sizeof(rds->radiotext));
+ v4l2_ctrl_unlock(dev->radio_tx_rds_pi);
+ } else {
+ vivid_rds_gen_fill(rds, dev->radio_rx_freq, alt);
+ }
+ if (dev->radio_rx_rds_controls) {
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, rds->pty);
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, rds->ta);
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, rds->tp);
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, rds->ms);
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, rds->psname);
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, rds->radiotext);
+ if (!dev->radio_rds_loop)
+ dev->radio_rx_rds_use_alternates = !dev->radio_rx_rds_use_alternates;
+ }
+ vivid_rds_generate(rds);
+}
+
+/*
+ * Calculate the emulated signal quality taking into account the frequency
+ * the transmitter is using.
+ */
+static void vivid_radio_calc_sig_qual(struct vivid_dev *dev)
+{
+ int mod = 16000;
+ int delta = 800;
+ int sig_qual, sig_qual_tx = mod;
+
+ /*
+ * For SW and FM there is a channel every 1000 kHz, for AM there is one
+ * every 100 kHz.
+ */
+ if (dev->radio_rx_freq <= AM_FREQ_RANGE_HIGH) {
+ mod /= 10;
+ delta /= 10;
+ }
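+	/*
+	 * Within +/- delta of a channel this is the signed offset from
+	 * that channel; 0 means we are tuned exactly to a channel.
+	 */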
+ sig_qual = (dev->radio_rx_freq + delta) % mod - delta;
+ if (dev->has_radio_tx)
+ sig_qual_tx = dev->radio_rx_freq - dev->radio_tx_freq;
+ if (abs(sig_qual_tx) <= abs(sig_qual)) {
+ sig_qual = sig_qual_tx;
+ /*
+ * Zero the internal rds buffer if we are going to loop
+ * rds blocks.
+ */
+ if (!dev->radio_rds_loop && !dev->radio_tx_rds_controls)
+ memset(dev->rds_gen.data, 0,
+ sizeof(dev->rds_gen.data));
+ dev->radio_rds_loop = dev->radio_rx_freq >= FM_FREQ_RANGE_LOW;
+ } else {
+ dev->radio_rds_loop = false;
+ }
+ if (dev->radio_rx_freq <= AM_FREQ_RANGE_HIGH)
+ sig_qual *= 10;
+ dev->radio_rx_sig_qual = sig_qual;
+}
+
+int vivid_radio_g_frequency(struct file *file, const unsigned *pfreq, struct v4l2_frequency *vf)
+{
+ if (vf->tuner != 0)
+ return -EINVAL;
+ vf->frequency = *pfreq;
+ return 0;
+}
+
+int vivid_radio_s_frequency(struct file *file, unsigned *pfreq, const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned freq;
+ unsigned band;
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+
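+	/*
+	 * Map the frequency to a band: above the SW/FM midpoint is FM,
+	 * below the AM/SW midpoint is AM, anything in between is SW.
+	 */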
+ if (vf->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) / 2)
+ band = BAND_FM;
+ else if (vf->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) / 2)
+ band = BAND_AM;
+ else
+ band = BAND_SW;
+
+ freq = clamp_t(u32, vf->frequency, vivid_radio_bands[band].rangelow,
+ vivid_radio_bands[band].rangehigh);
+ *pfreq = freq;
+
+ /*
+ * For both receiver and transmitter recalculate the signal quality
+ * (since that depends on both frequencies) and re-init the rds
+ * generator.
+ */
+ vivid_radio_calc_sig_qual(dev);
+ vivid_radio_rds_init(dev);
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-radio-common.h b/drivers/media/platform/vivid/vivid-radio-common.h
new file mode 100644
index 000000000..30a9900e5
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-common.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-radio-common.h - common radio rx/tx support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_RADIO_COMMON_H_
+#define _VIVID_RADIO_COMMON_H_
+
+/* The supported radio frequency ranges in kHz */
+#define FM_FREQ_RANGE_LOW (64000U * 16U)
+#define FM_FREQ_RANGE_HIGH (108000U * 16U)
+#define AM_FREQ_RANGE_LOW (520U * 16U)
+#define AM_FREQ_RANGE_HIGH (1710U * 16U)
+#define SW_FREQ_RANGE_LOW (2300U * 16U)
+#define SW_FREQ_RANGE_HIGH (26100U * 16U)
+
+enum { BAND_FM, BAND_AM, BAND_SW, TOT_BANDS };
+
+extern const struct v4l2_frequency_band vivid_radio_bands[TOT_BANDS];
+
+int vivid_radio_g_frequency(struct file *file, const unsigned *freq, struct v4l2_frequency *vf);
+int vivid_radio_s_frequency(struct file *file, unsigned *freq, const struct v4l2_frequency *vf);
+
+void vivid_radio_rds_init(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c
new file mode 100644
index 000000000..1f86d7d4f
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-rx.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-radio-rx.c - radio receiver support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/sched/signal.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-radio-common.h"
+#include "vivid-rds-gen.h"
+#include "vivid-radio-rx.h"
+
+ssize_t vivid_radio_rx_read(struct file *file, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rds_data *data = dev->rds_gen.data;
+ bool use_alternates;
+ ktime_t timestamp;
+ unsigned blk;
+ int perc;
+ int i;
+
+ if (dev->radio_rx_rds_controls)
+ return -EINVAL;
+ if (size < sizeof(*data))
+ return 0;
+ size = sizeof(*data) * (size / sizeof(*data));
+
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ if (dev->radio_rx_rds_owner &&
+ file->private_data != dev->radio_rx_rds_owner) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+ if (dev->radio_rx_rds_owner == NULL) {
+ vivid_radio_rds_init(dev);
+ dev->radio_rx_rds_owner = file->private_data;
+ }
+
+retry:
+ timestamp = ktime_sub(ktime_get(), dev->radio_rds_init_time);
+ blk = ktime_divns(timestamp, VIVID_RDS_NSEC_PER_BLK);
+ use_alternates = (blk % VIVID_RDS_GEN_BLOCKS) & 1;
+
+ if (dev->radio_rx_rds_last_block == 0 ||
+ dev->radio_rx_rds_use_alternates != use_alternates) {
+ dev->radio_rx_rds_use_alternates = use_alternates;
+ /* Re-init the RDS generator */
+ vivid_radio_rds_init(dev);
+ }
+ if (blk >= dev->radio_rx_rds_last_block + VIVID_RDS_GEN_BLOCKS)
+ dev->radio_rx_rds_last_block = blk - VIVID_RDS_GEN_BLOCKS + 1;
+
+ /*
+ * No data is available if there hasn't been time to get new data,
+ * or if the RDS receiver has been disabled, or if we use the data
+ * from the RDS transmitter and that RDS transmitter has been disabled,
+ * or if the signal quality is too weak.
+ */
+ if (blk == dev->radio_rx_rds_last_block || !dev->radio_rx_rds_enabled ||
+ (dev->radio_rds_loop && !(dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) ||
+ abs(dev->radio_rx_sig_qual) > 200) {
+ mutex_unlock(&dev->mutex);
+ if (file->f_flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+ if (msleep_interruptible(20) && signal_pending(current))
+ return -EINTR;
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ goto retry;
+ }
+
+ /* abs(dev->radio_rx_sig_qual) <= 200, map that to a 0-50% range */
+ perc = abs(dev->radio_rx_sig_qual) / 4;
+
+ for (i = 0; i < size && blk > dev->radio_rx_rds_last_block;
+ dev->radio_rx_rds_last_block++) {
+ unsigned data_blk = dev->radio_rx_rds_last_block % VIVID_RDS_GEN_BLOCKS;
+ struct v4l2_rds_data rds = data[data_blk];
+
+ if (data_blk == 0 && dev->radio_rds_loop)
+ vivid_radio_rds_init(dev);
+ if (perc && prandom_u32_max(100) < perc) {
+ switch (prandom_u32_max(4)) {
+ case 0:
+ rds.block |= V4L2_RDS_BLOCK_CORRECTED;
+ break;
+ case 1:
+ rds.block |= V4L2_RDS_BLOCK_INVALID;
+ break;
+ case 2:
+ rds.block |= V4L2_RDS_BLOCK_ERROR;
+ rds.lsb = prandom_u32_max(256);
+ rds.msb = prandom_u32_max(256);
+ break;
+ case 3: /* Skip block altogether */
+ if (i)
+ continue;
+ /*
+ * Must make sure at least one block is
+ * returned, otherwise the application
+ * might think that end-of-file occurred.
+ */
+ break;
+ }
+ }
+ if (copy_to_user(buf + i, &rds, sizeof(rds))) {
+ i = -EFAULT;
+ break;
+ }
+ i += sizeof(rds);
+ }
+ mutex_unlock(&dev->mutex);
+ return i;
+}
+
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
+{
+ return EPOLLIN | EPOLLRDNORM | v4l2_ctrl_poll(file, wait);
+}
+
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+{
+ if (band->tuner != 0)
+ return -EINVAL;
+
+ if (band->index >= TOT_BANDS)
+ return -EINVAL;
+
+ *band = vivid_radio_bands[band->index];
+ return 0;
+}
+
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned low, high;
+ unsigned freq;
+ unsigned spacing;
+ unsigned band;
+
+ if (a->tuner)
+ return -EINVAL;
+ if (a->wrap_around && dev->radio_rx_hw_seek_mode == VIVID_HW_SEEK_BOUNDED)
+ return -EINVAL;
+
+ if (!a->wrap_around && dev->radio_rx_hw_seek_mode == VIVID_HW_SEEK_WRAP)
+ return -EINVAL;
+ if (!a->rangelow ^ !a->rangehigh)
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+
+ if (a->rangelow) {
+ for (band = 0; band < TOT_BANDS; band++)
+ if (a->rangelow >= vivid_radio_bands[band].rangelow &&
+ a->rangehigh <= vivid_radio_bands[band].rangehigh)
+ break;
+ if (band == TOT_BANDS)
+ return -EINVAL;
+ if (!dev->radio_rx_hw_seek_prog_lim &&
+ (a->rangelow != vivid_radio_bands[band].rangelow ||
+ a->rangehigh != vivid_radio_bands[band].rangehigh))
+ return -EINVAL;
+ low = a->rangelow;
+ high = a->rangehigh;
+ } else {
+ for (band = 0; band < TOT_BANDS; band++)
+ if (dev->radio_rx_freq >= vivid_radio_bands[band].rangelow &&
+ dev->radio_rx_freq <= vivid_radio_bands[band].rangehigh)
+ break;
+ if (band == TOT_BANDS)
+ return -EINVAL;
+ low = vivid_radio_bands[band].rangelow;
+ high = vivid_radio_bands[band].rangehigh;
+ }
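+	/*
+	 * Frequencies are in units of 62.5 Hz, so seek in steps of
+	 * 100 kHz for AM and 1 MHz for FM/SW.
+	 */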
+ spacing = band == BAND_AM ? 1600 : 16000;
+ freq = clamp(dev->radio_rx_freq, low, high);
+
+ if (a->seek_upward) {
+ freq = spacing * (freq / spacing) + spacing;
+ if (freq > high) {
+ if (!a->wrap_around)
+ return -ENODATA;
+ freq = spacing * (low / spacing) + spacing;
+ if (freq >= dev->radio_rx_freq)
+ return -ENODATA;
+ }
+ } else {
+ freq = spacing * ((freq + spacing - 1) / spacing) - spacing;
+ if (freq < low) {
+ if (!a->wrap_around)
+ return -ENODATA;
+ freq = spacing * ((high + spacing - 1) / spacing) - spacing;
+ if (freq <= dev->radio_rx_freq)
+ return -ENODATA;
+ }
+ }
+ return 0;
+}
+
+int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ int delta = 800;
+ int sig_qual;
+
+ if (vt->index > 0)
+ return -EINVAL;
+
+ strlcpy(vt->name, "AM/FM/SW Receiver", sizeof(vt->name));
+ vt->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_RDS |
+ (dev->radio_rx_rds_controls ?
+ V4L2_TUNER_CAP_RDS_CONTROLS :
+ V4L2_TUNER_CAP_RDS_BLOCK_IO) |
+ (dev->radio_rx_hw_seek_prog_lim ?
+ V4L2_TUNER_CAP_HWSEEK_PROG_LIM : 0);
+ switch (dev->radio_rx_hw_seek_mode) {
+ case VIVID_HW_SEEK_BOUNDED:
+ vt->capability |= V4L2_TUNER_CAP_HWSEEK_BOUNDED;
+ break;
+ case VIVID_HW_SEEK_WRAP:
+ vt->capability |= V4L2_TUNER_CAP_HWSEEK_WRAP;
+ break;
+ case VIVID_HW_SEEK_BOTH:
+ vt->capability |= V4L2_TUNER_CAP_HWSEEK_WRAP |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED;
+ break;
+ }
+ vt->rangelow = AM_FREQ_RANGE_LOW;
+ vt->rangehigh = FM_FREQ_RANGE_HIGH;
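+	/*
+	 * Map |sig_qual| <= delta linearly onto a signal strength going
+	 * from 0xffff (perfect) down to 0.
+	 */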
+ sig_qual = dev->radio_rx_sig_qual;
+ vt->signal = abs(sig_qual) > delta ? 0 :
+ 0xffff - ((unsigned)abs(sig_qual) * 0xffff) / delta;
+ vt->afc = sig_qual > delta ? 0 : sig_qual;
+ if (abs(sig_qual) > delta)
+ vt->rxsubchans = 0;
+ else if (dev->radio_rx_freq < FM_FREQ_RANGE_LOW || vt->signal < 0x8000)
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ else if (dev->radio_rds_loop && !(dev->radio_tx_subchans & V4L2_TUNER_SUB_STEREO))
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ else
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ if (dev->radio_rx_rds_enabled &&
+ (!dev->radio_rds_loop || (dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) &&
+ dev->radio_rx_freq >= FM_FREQ_RANGE_LOW && vt->signal >= 0xc000)
+ vt->rxsubchans |= V4L2_TUNER_SUB_RDS;
+ if (dev->radio_rx_rds_controls)
+ vivid_radio_rds_init(dev);
+ vt->audmode = dev->radio_rx_audmode;
+ return 0;
+}
+
+int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vt->index)
+ return -EINVAL;
+ dev->radio_rx_audmode = vt->audmode >= V4L2_TUNER_MODE_STEREO;
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.h b/drivers/media/platform/vivid/vivid-radio-rx.h
new file mode 100644
index 000000000..c9c7849f6
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-radio-rx.h - radio receiver support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_RADIO_RX_H_
+#define _VIVID_RADIO_RX_H_
+
+ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
+
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
+int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c
new file mode 100644
index 000000000..1a3749ba5
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-tx.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-radio-tx.c - radio transmitter support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-tx.h"
+
+ssize_t vivid_radio_tx_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rds_data *data = dev->rds_gen.data;
+ ktime_t timestamp;
+ unsigned blk;
+ int i;
+
+ if (dev->radio_tx_rds_controls)
+ return -EINVAL;
+
+ if (size < sizeof(*data))
+ return -EINVAL;
+ size = sizeof(*data) * (size / sizeof(*data));
+
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ if (dev->radio_tx_rds_owner &&
+ file->private_data != dev->radio_tx_rds_owner) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+ dev->radio_tx_rds_owner = file->private_data;
+
+retry:
+ timestamp = ktime_sub(ktime_get(), dev->radio_rds_init_time);
+ blk = ktime_divns(timestamp, VIVID_RDS_NSEC_PER_BLK);
+ if (blk - VIVID_RDS_GEN_BLOCKS >= dev->radio_tx_rds_last_block)
+ dev->radio_tx_rds_last_block = blk - VIVID_RDS_GEN_BLOCKS + 1;
+
+ /*
+	 * No data can be written if there hasn't been time to transmit a
+	 * new block, or if the RDS transmitter has been disabled.
+ */
+ if (blk == dev->radio_tx_rds_last_block ||
+ !(dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) {
+ mutex_unlock(&dev->mutex);
+ if (file->f_flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+ if (msleep_interruptible(20) && signal_pending(current))
+ return -EINTR;
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ goto retry;
+ }
+
+ for (i = 0; i < size && blk > dev->radio_tx_rds_last_block;
+ dev->radio_tx_rds_last_block++) {
+ unsigned data_blk = dev->radio_tx_rds_last_block % VIVID_RDS_GEN_BLOCKS;
+ struct v4l2_rds_data rds;
+
+ if (copy_from_user(&rds, buf + i, sizeof(rds))) {
+ i = -EFAULT;
+ break;
+ }
+ i += sizeof(rds);
+ if (!dev->radio_rds_loop)
+ continue;
+ if ((rds.block & V4L2_RDS_BLOCK_MSK) == V4L2_RDS_BLOCK_INVALID ||
+ (rds.block & V4L2_RDS_BLOCK_ERROR))
+ continue;
+ rds.block &= V4L2_RDS_BLOCK_MSK;
+ data[data_blk] = rds;
+ }
+ mutex_unlock(&dev->mutex);
+ return i;
+}
+
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
+{
+ return EPOLLOUT | EPOLLWRNORM | v4l2_ctrl_poll(file, wait);
+}
+
+int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (a->index > 0)
+ return -EINVAL;
+
+ strlcpy(a->name, "AM/FM/SW Transmitter", sizeof(a->name));
+ a->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_RDS |
+ (dev->radio_tx_rds_controls ?
+ V4L2_TUNER_CAP_RDS_CONTROLS :
+ V4L2_TUNER_CAP_RDS_BLOCK_IO);
+ a->rangelow = AM_FREQ_RANGE_LOW;
+ a->rangehigh = FM_FREQ_RANGE_HIGH;
+ a->txsubchans = dev->radio_tx_subchans;
+ return 0;
+}
+
+int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (a->index)
+ return -EINVAL;
+ if (a->txsubchans & ~0x13)
+ return -EINVAL;
+ dev->radio_tx_subchans = a->txsubchans;
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.h b/drivers/media/platform/vivid/vivid-radio-tx.h
new file mode 100644
index 000000000..c2bf1e7e6
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-tx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-radio-tx.h - radio transmitter support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_RADIO_TX_H_
+#define _VIVID_RADIO_TX_H_
+
+ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
+
+int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
+int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.c b/drivers/media/platform/vivid/vivid-rds-gen.c
new file mode 100644
index 000000000..39ca9a564
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-rds-gen.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-rds-gen.c - rds (radio data system) generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include "vivid-rds-gen.h"
+
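+/*
+ * Return the Decoder Information (DI) bit for this PS name segment in
+ * bit 2 together with the two-bit segment address in bits 0-1.
+ */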
+static u8 vivid_get_di(const struct vivid_rds_gen *rds, unsigned grp)
+{
+ switch (grp) {
+ case 0:
+ return (rds->dyn_pty << 2) | (grp & 3);
+ case 1:
+ return (rds->compressed << 2) | (grp & 3);
+ case 2:
+ return (rds->art_head << 2) | (grp & 3);
+ case 3:
+ return (rds->mono_stereo << 2) | (grp & 3);
+ }
+ return 0;
+}
+
+/*
+ * This RDS generator creates 57 RDS groups (one group == four RDS blocks).
+ * Groups 0-3, 22-25 and 44-47 (spaced 22 groups apart) are filled with a
+ * standard 0B group containing the PI code and PS name.
+ *
+ * Groups 4-19 and 26-41 use group 2A for the radio text.
+ *
+ * Group 56 contains the time (group 4A).
+ *
+ * All remaining groups use a filler group 15B block that just repeats
+ * the PI and PTY codes.
+ */
+void vivid_rds_generate(struct vivid_rds_gen *rds)
+{
+ struct v4l2_rds_data *data = rds->data;
+ unsigned grp;
+ unsigned idx;
+ struct tm tm;
+ unsigned date;
+ unsigned time;
+ int l;
+
+ for (grp = 0; grp < VIVID_RDS_GEN_GROUPS; grp++, data += VIVID_RDS_GEN_BLKS_PER_GRP) {
+ data[0].lsb = rds->picode & 0xff;
+ data[0].msb = rds->picode >> 8;
+ data[0].block = V4L2_RDS_BLOCK_A | (V4L2_RDS_BLOCK_A << 3);
+ data[1].lsb = rds->pty << 5;
+ data[1].msb = (rds->pty >> 3) | (rds->tp << 2);
+ data[1].block = V4L2_RDS_BLOCK_B | (V4L2_RDS_BLOCK_B << 3);
+ data[3].block = V4L2_RDS_BLOCK_D | (V4L2_RDS_BLOCK_D << 3);
+
+ switch (grp) {
+ case 0 ... 3:
+ case 22 ... 25:
+ case 44 ... 47: /* Group 0B */
+ idx = (grp % 22) % 4;
+ data[1].lsb |= (rds->ta << 4) | (rds->ms << 3);
+ data[1].lsb |= vivid_get_di(rds, idx);
+ data[1].msb |= 1 << 3;
+ data[2].lsb = rds->picode & 0xff;
+ data[2].msb = rds->picode >> 8;
+ data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
+ data[3].lsb = rds->psname[2 * idx + 1];
+ data[3].msb = rds->psname[2 * idx];
+ break;
+ case 4 ... 19:
+ case 26 ... 41: /* Group 2A */
+ idx = ((grp - 4) % 22) % 16;
+ data[1].lsb |= idx;
+ data[1].msb |= 4 << 3;
+ data[2].msb = rds->radiotext[4 * idx];
+ data[2].lsb = rds->radiotext[4 * idx + 1];
+ data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
+ data[3].msb = rds->radiotext[4 * idx + 2];
+ data[3].lsb = rds->radiotext[4 * idx + 3];
+ break;
+ case 56:
+ /*
+ * Group 4A
+ *
+ * Uses the algorithm from Annex G of the RDS standard
+ * EN 50067:1998 to convert a UTC date to an RDS Modified
+ * Julian Day.
+ */
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+ l = tm.tm_mon <= 1;
+ date = 14956 + tm.tm_mday + ((tm.tm_year - l) * 1461) / 4 +
+ ((tm.tm_mon + 2 + l * 12) * 306001) / 10000;
+ time = (tm.tm_hour << 12) |
+ (tm.tm_min << 6) |
+ (sys_tz.tz_minuteswest >= 0 ? 0x20 : 0) |
+ (abs(sys_tz.tz_minuteswest) / 30);
+ data[1].lsb &= ~3;
+ data[1].lsb |= date >> 15;
+ data[1].msb |= 8 << 3;
+ data[2].lsb = (date << 1) & 0xfe;
+ data[2].lsb |= (time >> 16) & 1;
+ data[2].msb = (date >> 7) & 0xff;
+ data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
+ data[3].lsb = time & 0xff;
+ data[3].msb = (time >> 8) & 0xff;
+ break;
+ default: /* Group 15B */
+ data[1].lsb |= (rds->ta << 4) | (rds->ms << 3);
+ data[1].lsb |= vivid_get_di(rds, grp % 22);
+ data[1].msb |= 0x1f << 3;
+ data[2].lsb = rds->picode & 0xff;
+ data[2].msb = rds->picode >> 8;
+ data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
+ data[3].lsb = rds->pty << 5;
+ data[3].lsb |= (rds->ta << 4) | (rds->ms << 3);
+ data[3].lsb |= vivid_get_di(rds, grp % 22);
+ data[3].msb |= rds->pty >> 3;
+ data[3].msb |= 0x1f << 3;
+ break;
+ }
+ }
+}
+
+void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ bool alt)
+{
+ /* Alternate PTY between Info and Weather */
+ if (rds->use_rbds) {
+ rds->picode = 0x2e75; /* 'KLNX' call sign */
+ rds->pty = alt ? 29 : 2;
+ } else {
+ rds->picode = 0x8088;
+ rds->pty = alt ? 16 : 3;
+ }
+ rds->mono_stereo = true;
+ rds->art_head = false;
+ rds->compressed = false;
+ rds->dyn_pty = false;
+ rds->tp = true;
+ rds->ta = alt;
+ rds->ms = true;
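+	/* freq is in units of 1/16th kHz: print it in kHz with one decimal */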
+ snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
+ freq / 16, ((freq & 0xf) * 10) / 16);
+ if (alt)
+ strlcpy(rds->radiotext,
+ " The Radio Data System can switch between different Radio Texts ",
+ sizeof(rds->radiotext));
+ else
+ strlcpy(rds->radiotext,
+ "An example of Radio Text as transmitted by the Radio Data System",
+ sizeof(rds->radiotext));
+}
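
The Group 4A case above converts the current UTC date with the Annex G formula from EN 50067:1998 (tm_year counts from 1900 and tm_mon from 0, which is where the +2 and the -l terms come from). A minimal userspace sketch of the same arithmetic, useful for sanity-checking the constants 14956, 1461/4 and 306001/10000 — illustration only, not part of the patch:

	#include <stdio.h>

	/* Annex G: Modified Julian Day from a calendar date (mon is 1-12). */
	static unsigned rds_mjd(int year, int mon, int day)
	{
		int y = year - 1900;
		int l = mon <= 2;	/* Jan/Feb count towards the previous year */

		return 14956 + day + ((y - l) * 1461) / 4 +
		       ((mon + 1 + l * 12) * 306001) / 10000;
	}

	int main(void)
	{
		/* prints 57754, the MJD of 1 January 2017 */
		printf("%u\n", rds_mjd(2017, 1, 1));
		return 0;
	}
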
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.h b/drivers/media/platform/vivid/vivid-rds-gen.h
new file mode 100644
index 000000000..35ac57423
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-rds-gen.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-rds-gen.h - rds (radio data system) generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_RDS_GEN_H_
+#define _VIVID_RDS_GEN_H_
+
+/*
+ * It takes almost exactly 5 seconds to transmit 57 RDS groups.
+ * Each group has 4 blocks and each block has a payload of 16 bits + a
+ * block identification. The driver will generate the contents of these
+ * 57 groups only when necessary and it will just be played continuously.
+ */
+#define VIVID_RDS_GEN_GROUPS 57
+#define VIVID_RDS_GEN_BLKS_PER_GRP 4
+#define VIVID_RDS_GEN_BLOCKS (VIVID_RDS_GEN_BLKS_PER_GRP * VIVID_RDS_GEN_GROUPS)
+#define VIVID_RDS_NSEC_PER_BLK (u32)(5ull * NSEC_PER_SEC / VIVID_RDS_GEN_BLOCKS)
+
+struct vivid_rds_gen {
+ struct v4l2_rds_data data[VIVID_RDS_GEN_BLOCKS];
+ bool use_rbds;
+ u16 picode;
+ u8 pty;
+ bool mono_stereo;
+ bool art_head;
+ bool compressed;
+ bool dyn_pty;
+ bool ta;
+ bool tp;
+ bool ms;
+ char psname[8 + 1];
+ char radiotext[64 + 1];
+};
+
+void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ bool use_alternate);
+void vivid_rds_generate(struct vivid_rds_gen *rds);
+
+#endif
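
For reference, the 5-second figure in the comment above follows from the RDS bit rate: the subcarrier carries 1187.5 bit/s and each block occupies 26 bits on air (16 data bits plus a 10-bit checkword), so the 57 * 4 = 228 blocks defined here take about 228 * 26 / 1187.5 ≈ 4.99 s, and VIVID_RDS_NSEC_PER_BLK works out to 5e9 / 228 ≈ 21.9 ms per block.
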
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
new file mode 100644
index 000000000..e1794f868
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-sdr-cap.c - software defined radio support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/math64.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+#include <linux/fixp-arith.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-sdr-cap.h"
+
+/* stream formats */
+struct vivid_format {
+ u32 pixelformat;
+ u32 buffersize;
+};
+
+/* format descriptions for capture and preview */
+static const struct vivid_format formats[] = {
+ {
+ .pixelformat = V4L2_SDR_FMT_CU8,
+ .buffersize = SDR_CAP_SAMPLES_PER_BUF * 2,
+ }, {
+ .pixelformat = V4L2_SDR_FMT_CS8,
+ .buffersize = SDR_CAP_SAMPLES_PER_BUF * 2,
+ },
+};
+
+static const struct v4l2_frequency_band bands_adc[] = {
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 300000,
+ .rangehigh = 300000,
+ },
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 900001,
+ .rangehigh = 2800000,
+ },
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 3200000,
+ .rangehigh = 3200000,
+ },
+};
+
+/* ADC band midpoints */
+#define BAND_ADC_0 ((bands_adc[0].rangehigh + bands_adc[1].rangelow) / 2)
+#define BAND_ADC_1 ((bands_adc[1].rangehigh + bands_adc[2].rangelow) / 2)
+
+static const struct v4l2_frequency_band bands_fm[] = {
+ {
+ .tuner = 1,
+ .type = V4L2_TUNER_RF,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 50000000,
+ .rangehigh = 2000000000,
+ },
+};
+
+static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev)
+{
+ struct vivid_buffer *sdr_cap_buf = NULL;
+
+ dprintk(dev, 1, "SDR Capture Thread Tick\n");
+
+ /* Drop a certain percentage of buffers. */
+ if (dev->perc_dropped_buffers &&
+ prandom_u32_max(100) < dev->perc_dropped_buffers)
+ return;
+
+ spin_lock(&dev->slock);
+ if (!list_empty(&dev->sdr_cap_active)) {
+ sdr_cap_buf = list_entry(dev->sdr_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&sdr_cap_buf->list);
+ }
+ spin_unlock(&dev->slock);
+
+ if (sdr_cap_buf) {
+ sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count;
+ vivid_sdr_cap_process(dev, sdr_cap_buf);
+ sdr_cap_buf->vb.vb2_buf.timestamp =
+ ktime_get_ns() + dev->time_wrap_offset;
+ vb2_buffer_done(&sdr_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dev->dqbuf_error = false;
+ }
+}
+
+static int vivid_thread_sdr_cap(void *data)
+{
+ struct vivid_dev *dev = data;
+ u64 samples_since_start;
+ u64 buffers_since_start;
+ u64 next_jiffies_since_start;
+ unsigned long jiffies_since_start;
+ unsigned long cur_jiffies;
+ unsigned wait_jiffies;
+
+ dprintk(dev, 1, "SDR Capture Thread Start\n");
+
+ set_freezable();
+
+ /* Resets frame counters */
+ dev->sdr_cap_seq_offset = 0;
+ if (dev->seq_wrap)
+ dev->sdr_cap_seq_offset = 0xffffff80U;
+ dev->jiffies_sdr_cap = jiffies;
+ dev->sdr_cap_seq_resync = false;
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
+ cur_jiffies = jiffies;
+ if (dev->sdr_cap_seq_resync) {
+ dev->jiffies_sdr_cap = cur_jiffies;
+ dev->sdr_cap_seq_offset = dev->sdr_cap_seq_count + 1;
+ dev->sdr_cap_seq_count = 0;
+ dev->sdr_cap_seq_resync = false;
+ }
+ /* Calculate the number of jiffies since we started streaming */
+ jiffies_since_start = cur_jiffies - dev->jiffies_sdr_cap;
+ /* Get the number of buffers streamed since the start */
+ buffers_since_start =
+ (u64)jiffies_since_start * dev->sdr_adc_freq +
+ (HZ * SDR_CAP_SAMPLES_PER_BUF) / 2;
+ do_div(buffers_since_start, HZ * SDR_CAP_SAMPLES_PER_BUF);
+
+ /*
+ * After more than 0xf0000000 (rounded down to a multiple of
+ * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
+ * jiffies have passed since we started streaming reset the
+ * counters and keep track of the sequence offset.
+ */
+ if (jiffies_since_start > JIFFIES_RESYNC) {
+ dev->jiffies_sdr_cap = cur_jiffies;
+ dev->sdr_cap_seq_offset = buffers_since_start;
+ buffers_since_start = 0;
+ }
+ dev->sdr_cap_seq_count =
+ buffers_since_start + dev->sdr_cap_seq_offset;
+
+ vivid_thread_sdr_cap_tick(dev);
+ mutex_unlock(&dev->mutex);
+
+ /*
+ * Calculate the number of samples streamed since we started,
+ * not including the current buffer.
+ */
+ samples_since_start = buffers_since_start * SDR_CAP_SAMPLES_PER_BUF;
+
+ /* And the number of jiffies since we started */
+ jiffies_since_start = jiffies - dev->jiffies_sdr_cap;
+
+ /* Increase by the number of samples in one buffer */
+ samples_since_start += SDR_CAP_SAMPLES_PER_BUF;
+ /*
+ * Calculate when that next buffer is supposed to start
+ * in jiffies since we started streaming.
+ */
+ next_jiffies_since_start = samples_since_start * HZ +
+ dev->sdr_adc_freq / 2;
+ do_div(next_jiffies_since_start, dev->sdr_adc_freq);
+ /* If it is in the past, then just schedule asap */
+ if (next_jiffies_since_start < jiffies_since_start)
+ next_jiffies_since_start = jiffies_since_start;
+
+ wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+ schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+ }
+ dprintk(dev, 1, "SDR Capture Thread End\n");
+ return 0;
+}
+
+static int sdr_cap_queue_setup(struct vb2_queue *vq,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], struct device *alloc_devs[])
+{
+ /* 2 = max 16-bit sample returned */
+ sizes[0] = SDR_CAP_SAMPLES_PER_BUF * 2;
+ *nplanes = 1;
+ return 0;
+}
+
+static int sdr_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned size = SDR_CAP_SAMPLES_PER_BUF * 2;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void sdr_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->sdr_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err = 0;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->sdr_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else if (dev->kthread_sdr_cap == NULL) {
+ dev->kthread_sdr_cap = kthread_run(vivid_thread_sdr_cap, dev,
+ "%s-sdr-cap", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_sdr_cap)) {
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+ err = PTR_ERR(dev->kthread_sdr_cap);
+ dev->kthread_sdr_cap = NULL;
+ }
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void sdr_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ if (dev->kthread_sdr_cap == NULL)
+ return;
+
+ while (!list_empty(&dev->sdr_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->sdr_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ /* shutdown control thread */
+ kthread_stop(dev->kthread_sdr_cap);
+ dev->kthread_sdr_cap = NULL;
+}
+
+const struct vb2_ops vivid_sdr_cap_qops = {
+ .queue_setup = sdr_cap_queue_setup,
+ .buf_prepare = sdr_cap_buf_prepare,
+ .buf_queue = sdr_cap_buf_queue,
+ .start_streaming = sdr_cap_start_streaming,
+ .stop_streaming = sdr_cap_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
+ struct v4l2_frequency_band *band)
+{
+ switch (band->tuner) {
+ case 0:
+ if (band->index >= ARRAY_SIZE(bands_adc))
+ return -EINVAL;
+ *band = bands_adc[band->index];
+ return 0;
+ case 1:
+ if (band->index >= ARRAY_SIZE(bands_fm))
+ return -EINVAL;
+ *band = bands_fm[band->index];
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_g_frequency(struct file *file, void *fh,
+ struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ switch (vf->tuner) {
+ case 0:
+ vf->frequency = dev->sdr_adc_freq;
+ vf->type = V4L2_TUNER_ADC;
+ return 0;
+ case 1:
+ vf->frequency = dev->sdr_fm_freq;
+ vf->type = V4L2_TUNER_RF;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_s_frequency(struct file *file, void *fh,
+ const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned freq = vf->frequency;
+ unsigned band;
+
+ switch (vf->tuner) {
+ case 0:
+ if (vf->type != V4L2_TUNER_ADC)
+ return -EINVAL;
+ if (freq < BAND_ADC_0)
+ band = 0;
+ else if (freq < BAND_ADC_1)
+ band = 1;
+ else
+ band = 2;
+
+ freq = clamp_t(unsigned, freq,
+ bands_adc[band].rangelow,
+ bands_adc[band].rangehigh);
+
+ if (vb2_is_streaming(&dev->vb_sdr_cap_q) &&
+ freq != dev->sdr_adc_freq) {
+ /* resync the thread's timings */
+ dev->sdr_cap_seq_resync = true;
+ }
+ dev->sdr_adc_freq = freq;
+ return 0;
+ case 1:
+ if (vf->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ dev->sdr_fm_freq = clamp_t(unsigned, freq,
+ bands_fm[0].rangelow,
+ bands_fm[0].rangehigh);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ switch (vt->index) {
+ case 0:
+ strlcpy(vt->name, "ADC", sizeof(vt->name));
+ vt->type = V4L2_TUNER_ADC;
+ vt->capability =
+ V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ vt->rangelow = bands_adc[0].rangelow;
+ vt->rangehigh = bands_adc[2].rangehigh;
+ return 0;
+ case 1:
+ strlcpy(vt->name, "RF", sizeof(vt->name));
+ vt->type = V4L2_TUNER_RF;
+ vt->capability =
+ V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ vt->rangelow = bands_fm[0].rangelow;
+ vt->rangehigh = bands_fm[0].rangehigh;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ if (vt->index > 1)
+ return -EINVAL;
+ return 0;
+}
+
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+ f->pixelformat = formats[f->index].pixelformat;
+ return 0;
+}
+
+int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ f->fmt.sdr.pixelformat = dev->sdr_pixelformat;
+ f->fmt.sdr.buffersize = dev->sdr_buffersize;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ return 0;
+}
+
+int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct vb2_queue *q = &dev->vb_sdr_cap_q;
+ int i;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ dev->sdr_pixelformat = formats[i].pixelformat;
+ dev->sdr_buffersize = formats[i].buffersize;
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ return 0;
+ }
+ }
+ dev->sdr_pixelformat = formats[0].pixelformat;
+ dev->sdr_buffersize = formats[0].buffersize;
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ f->fmt.sdr.buffersize = formats[0].buffersize;
+ return 0;
+}
+
+int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ int i;
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ return 0;
+ }
+ }
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ f->fmt.sdr.buffersize = formats[0].buffersize;
+ return 0;
+}
+
+#define FIXP_N (15)
+#define FIXP_FRAC (1 << FIXP_N)
+#define FIXP_2PI ((int)(2 * 3.141592653589 * FIXP_FRAC))
+#define M_100000PI (3.14159 * 100000)
+
+void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ unsigned long i;
+ unsigned long plane_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ s64 s64tmp;
+ s32 src_phase_step;
+ s32 mod_phase_step;
+ s32 fixp_i;
+ s32 fixp_q;
+
+ /* calculate phase step */
+ #define BEEP_FREQ 1000 /* 1kHz beep */
+ src_phase_step = DIV_ROUND_CLOSEST(FIXP_2PI * BEEP_FREQ,
+ dev->sdr_adc_freq);
+
+ for (i = 0; i < plane_size; i += 2) {
+ mod_phase_step = fixp_cos32_rad(dev->sdr_fixp_src_phase,
+ FIXP_2PI) >> (31 - FIXP_N);
+
+ dev->sdr_fixp_src_phase += src_phase_step;
+ s64tmp = (s64) mod_phase_step * dev->sdr_fm_deviation;
+ dev->sdr_fixp_mod_phase += div_s64(s64tmp, M_100000PI);
+
+ /*
+ * Transfer phase angle to [0, 2xPI] in order to avoid variable
+ * overflow and make it suitable for cosine implementation
+ * used, which does not support negative angles.
+ */
+ dev->sdr_fixp_src_phase %= FIXP_2PI;
+ dev->sdr_fixp_mod_phase %= FIXP_2PI;
+
+ if (dev->sdr_fixp_mod_phase < 0)
+ dev->sdr_fixp_mod_phase += FIXP_2PI;
+
+ fixp_i = fixp_cos32_rad(dev->sdr_fixp_mod_phase, FIXP_2PI);
+ fixp_q = fixp_sin32_rad(dev->sdr_fixp_mod_phase, FIXP_2PI);
+
+ /* Normalize fraction values represented with 32 bit precision
+ * to fixed point representation with FIXP_N bits */
+ fixp_i >>= (31 - FIXP_N);
+ fixp_q >>= (31 - FIXP_N);
+
+ switch (dev->sdr_pixelformat) {
+ case V4L2_SDR_FMT_CU8:
+ /* convert 'fixp float' to u8 [0, +255] */
+ /* u8 = X * 127.5 + 127.5; X is float [-1.0, +1.0] */
+ fixp_i = fixp_i * 1275 + FIXP_FRAC * 1275;
+ fixp_q = fixp_q * 1275 + FIXP_FRAC * 1275;
+ *vbuf++ = DIV_ROUND_CLOSEST(fixp_i, FIXP_FRAC * 10);
+ *vbuf++ = DIV_ROUND_CLOSEST(fixp_q, FIXP_FRAC * 10);
+ break;
+ case V4L2_SDR_FMT_CS8:
+ /* convert 'fixp float' to s8 [-128, +127] */
+ /* s8 = X * 127.5 - 0.5; X is float [-1.0, +1.0] */
+ fixp_i = fixp_i * 1275 - FIXP_FRAC * 5;
+ fixp_q = fixp_q * 1275 - FIXP_FRAC * 5;
+ *vbuf++ = DIV_ROUND_CLOSEST(fixp_i, FIXP_FRAC * 10);
+ *vbuf++ = DIV_ROUND_CLOSEST(fixp_q, FIXP_FRAC * 10);
+ break;
+ default:
+ break;
+ }
+ }
+}
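
The two conversions at the end of vivid_sdr_cap_process() are fixed-point forms of the usual IQ quantisation formulas; the factor 1275 and the final division by FIXP_FRAC * 10 keep the 127.5 scale and offset in integer arithmetic. A floating-point equivalent for the CU8 case, purely to illustrate what the integer math computes (not part of the patch):

	/* x is the cosine/sine sample scaled to [-1.0, +1.0] */
	static unsigned char iq_to_cu8(double x)
	{
		return (unsigned char)(x * 127.5 + 127.5 + 0.5);	/* round to nearest */
	}
	/* iq_to_cu8(-1.0) == 0, iq_to_cu8(0.0) == 128, iq_to_cu8(+1.0) == 255 */
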
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.h b/drivers/media/platform/vivid/vivid-sdr-cap.h
new file mode 100644
index 000000000..813c9248e
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-sdr-cap.h - software defined radio support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_SDR_CAP_H_
+#define _VIVID_SDR_CAP_H_
+
+int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
+int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
+int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
+int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
+int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
+int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
+void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+
+extern const struct vb2_ops vivid_sdr_cap_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
new file mode 100644
index 000000000..92a852955
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-vbi-cap.c - vbi capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-gen.h"
+
+static void vivid_sliced_vbi_cap_fill(struct vivid_dev *dev, unsigned seqnr)
+{
+ struct vivid_vbi_gen_data *vbi_gen = &dev->vbi_gen;
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+
+ vivid_vbi_gen_sliced(vbi_gen, is_60hz, seqnr);
+
+ if (!is_60hz) {
+ if (dev->loop_video) {
+ if (dev->vbi_out_have_wss) {
+ vbi_gen->data[12].data[0] = dev->vbi_out_wss[0];
+ vbi_gen->data[12].data[1] = dev->vbi_out_wss[1];
+ } else {
+ vbi_gen->data[12].id = 0;
+ }
+ } else {
+ switch (tpg_g_video_aspect(&dev->tpg)) {
+ case TPG_VIDEO_ASPECT_14X9_CENTRE:
+ vbi_gen->data[12].data[0] = 0x01;
+ break;
+ case TPG_VIDEO_ASPECT_16X9_CENTRE:
+ vbi_gen->data[12].data[0] = 0x0b;
+ break;
+ case TPG_VIDEO_ASPECT_16X9_ANAMORPHIC:
+ vbi_gen->data[12].data[0] = 0x07;
+ break;
+ case TPG_VIDEO_ASPECT_4X3:
+ default:
+ vbi_gen->data[12].data[0] = 0x08;
+ break;
+ }
+ }
+ } else if (dev->loop_video && is_60hz) {
+ if (dev->vbi_out_have_cc[0]) {
+ vbi_gen->data[0].data[0] = dev->vbi_out_cc[0][0];
+ vbi_gen->data[0].data[1] = dev->vbi_out_cc[0][1];
+ } else {
+ vbi_gen->data[0].id = 0;
+ }
+ if (dev->vbi_out_have_cc[1]) {
+ vbi_gen->data[1].data[0] = dev->vbi_out_cc[1][0];
+ vbi_gen->data[1].data[1] = dev->vbi_out_cc[1][1];
+ } else {
+ vbi_gen->data[1].id = 0;
+ }
+ }
+}
+
+static void vivid_g_fmt_vbi_cap(struct vivid_dev *dev, struct v4l2_vbi_format *vbi)
+{
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+
+ vbi->sampling_rate = 27000000;
+ vbi->offset = 24;
+ vbi->samples_per_line = 1440;
+ vbi->sample_format = V4L2_PIX_FMT_GREY;
+ vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5;
+ vbi->start[1] = is_60hz ? V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5;
+ vbi->count[0] = vbi->count[1] = is_60hz ? 12 : 18;
+ vbi->flags = dev->vbi_cap_interlaced ? V4L2_VBI_INTERLACED : 0;
+ vbi->reserved[0] = 0;
+ vbi->reserved[1] = 0;
+}
+
+void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct v4l2_vbi_format vbi;
+ u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
+ vivid_g_fmt_vbi_cap(dev, &vbi);
+ buf->vb.sequence = dev->vbi_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.sequence /= 2;
+
+ vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
+
+ memset(vbuf, 0x10, vb2_plane_size(&buf->vb.vb2_buf, 0));
+
+ if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode))
+ vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf);
+
+ buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset;
+}
+
+
+void vivid_sliced_vbi_cap_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
+{
+ struct v4l2_sliced_vbi_data *vbuf =
+ vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
+ buf->vb.sequence = dev->vbi_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.sequence /= 2;
+
+ vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
+
+ memset(vbuf, 0, vb2_plane_size(&buf->vb.vb2_buf, 0));
+ if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
+ unsigned i;
+
+ for (i = 0; i < 25; i++)
+ vbuf[i] = dev->vbi_gen.data[i];
+ }
+
+ buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset;
+}
+
+static int vbi_cap_queue_setup(struct vb2_queue *vq,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ unsigned size = vq->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -EINVAL;
+
+ sizes[0] = size;
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vbi_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ unsigned size = vb->vb2_queue->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void vbi_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vbi_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->vbi_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev, &dev->vbi_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vbi_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->vbi_cap_streaming);
+}
+
+const struct vb2_ops vivid_vbi_cap_qops = {
+ .queue_setup = vbi_cap_queue_setup,
+ .buf_prepare = vbi_cap_buf_prepare,
+ .buf_queue = vbi_cap_buf_queue,
+ .start_streaming = vbi_cap_start_streaming,
+ .stop_streaming = vbi_cap_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_vbi_format *vbi = &f->fmt.vbi;
+
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_raw_vbi_cap)
+ return -EINVAL;
+
+ vivid_g_fmt_vbi_cap(dev, vbi);
+ return 0;
+}
+
+int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ int ret = vidioc_g_fmt_vbi_cap(file, priv, f);
+
+ if (ret)
+ return ret;
+ if (dev->stream_sliced_vbi_cap && vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+ dev->stream_sliced_vbi_cap = false;
+ dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ return 0;
+}
+
+void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_set)
+{
+ vbi->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
+ vbi->service_set = service_set;
+ memset(vbi->service_lines, 0, sizeof(vbi->service_lines));
+ memset(vbi->reserved, 0, sizeof(vbi->reserved));
+
+ if (vbi->service_set == 0)
+ return;
+
+ if (vbi->service_set & V4L2_SLICED_CAPTION_525) {
+ vbi->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
+ vbi->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
+ }
+ if (vbi->service_set & V4L2_SLICED_WSS_625) {
+ unsigned i;
+
+ for (i = 7; i <= 18; i++)
+ vbi->service_lines[0][i] =
+ vbi->service_lines[1][i] = V4L2_SLICED_TELETEXT_B;
+ vbi->service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+}
+
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap)
+ return -EINVAL;
+
+ vivid_fill_service_lines(vbi, dev->service_set_cap);
+ return 0;
+}
+
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ u32 service_set = vbi->service_set;
+
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap)
+ return -EINVAL;
+
+ service_set &= is_60hz ? V4L2_SLICED_CAPTION_525 :
+ V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ vivid_fill_service_lines(vbi, service_set);
+ return 0;
+}
+
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ int ret = vidioc_try_fmt_sliced_vbi_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ if (!dev->stream_sliced_vbi_cap && vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+ dev->service_set_cap = vbi->service_set;
+ dev->stream_sliced_vbi_cap = true;
+ dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ return 0;
+}
+
+int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+ bool is_60hz;
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ is_60hz = dev->std_cap & V4L2_STD_525_60;
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap ||
+ cap->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ } else {
+ is_60hz = dev->std_out & V4L2_STD_525_60;
+ if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out ||
+ cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
+ return -EINVAL;
+ }
+
+ cap->service_set = is_60hz ? V4L2_SLICED_CAPTION_525 :
+ V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ if (is_60hz) {
+ cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
+ cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
+ } else {
+ unsigned i;
+
+ for (i = 7; i <= 18; i++)
+ cap->service_lines[0][i] =
+ cap->service_lines[1][i] = V4L2_SLICED_TELETEXT_B;
+ cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+ return 0;
+}
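
vidioc_g_sliced_vbi_cap() above serves both the capture and the output node, advertising closed captions on line 21 for 60 Hz standards and WSS plus Teletext B for 50 Hz. A rough userspace sketch of the corresponding query — illustration only; the device path is an assumption, vivid's VBI capture node is normally one of the /dev/vbi* devices:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_sliced_vbi_cap cap;
		int fd = open("/dev/vbi0", O_RDWR);

		if (fd < 0)
			return 1;
		memset(&cap, 0, sizeof(cap));
		cap.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
		if (ioctl(fd, VIDIOC_G_SLICED_VBI_CAP, &cap) == 0)
			printf("service_set: 0x%x\n", cap.service_set);
		close(fd);
		return 0;
	}
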
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.h b/drivers/media/platform/vivid/vivid-vbi-cap.h
new file mode 100644
index 000000000..91d2de013
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-vbi-cap.h - vbi capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_VBI_CAP_H_
+#define _VIVID_VBI_CAP_H_
+
+void vivid_fill_time_of_day_packet(u8 *packet);
+void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap);
+
+void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_set);
+
+extern const struct vb2_ops vivid_vbi_cap_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vbi-gen.c b/drivers/media/platform/vivid/vivid-vbi-gen.c
new file mode 100644
index 000000000..acc98445a
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-gen.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-vbi-gen.c - vbi generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include "vivid-vbi-gen.h"
+
+static void wss_insert(u8 *wss, u32 val, unsigned size)
+{
+ while (size--)
+ *wss++ = (val & (1 << size)) ? 0xc0 : 0x10;
+}
+
+static void vivid_vbi_gen_wss_raw(const struct v4l2_sliced_vbi_data *data,
+ u8 *buf, unsigned sampling_rate)
+{
+ const unsigned rate = 5000000; /* WSS has a 5 MHz transmission rate */
+ u8 wss[29 + 24 + 24 + 24 + 18 + 18] = { 0 };
+ const unsigned zero = 0x07;
+ const unsigned one = 0x38;
+ unsigned bit = 0;
+ u16 wss_data;
+ int i;
+
+ wss_insert(wss + bit, 0x1f1c71c7, 29); bit += 29;
+ wss_insert(wss + bit, 0x1e3c1f, 24); bit += 24;
+
+ wss_data = (data->data[1] << 8) | data->data[0];
+ for (i = 0; i <= 13; i++, bit += 6)
+ wss_insert(wss + bit, (wss_data & (1 << i)) ? one : zero, 6);
+
+ for (i = 0, bit = 0; bit < sizeof(wss); bit++) {
+ unsigned n = ((bit + 1) * sampling_rate) / rate;
+
+ while (i < n)
+ buf[i++] = wss[bit];
+ }
+}
+
+static void vivid_vbi_gen_teletext_raw(const struct v4l2_sliced_vbi_data *data,
+ u8 *buf, unsigned sampling_rate)
+{
+ const unsigned rate = 6937500 / 10; /* Teletext has a 6.9375 MHz transmission rate */
+ u8 teletext[45] = { 0x55, 0x55, 0x27 };
+ unsigned bit = 0;
+ int i;
+
+ memcpy(teletext + 3, data->data, sizeof(teletext) - 3);
+ /* prevents 32 bit overflow */
+ sampling_rate /= 10;
+
+ for (i = 0, bit = 0; bit < sizeof(teletext) * 8; bit++) {
+ unsigned n = ((bit + 1) * sampling_rate) / rate;
+ u8 val = (teletext[bit / 8] & (1 << (bit & 7))) ? 0xc0 : 0x10;
+
+ while (i < n)
+ buf[i++] = val;
+ }
+}
+
+static void cc_insert(u8 *cc, u8 ch)
+{
+ unsigned tot = 0;
+ unsigned i;
+
+ for (i = 0; i < 7; i++) {
+ cc[2 * i] = cc[2 * i + 1] = (ch & (1 << i)) ? 1 : 0;
+ tot += cc[2 * i];
+ }
+ cc[14] = cc[15] = !(tot & 1);
+}
+
+#define CC_PREAMBLE_BITS (14 + 4 + 2)
+
+static void vivid_vbi_gen_cc_raw(const struct v4l2_sliced_vbi_data *data,
+ u8 *buf, unsigned sampling_rate)
+{
+ const unsigned rate = 1000000; /* CC has a 1 MHz transmission rate */
+
+ u8 cc[CC_PREAMBLE_BITS + 2 * 16] = {
+ /* Clock run-in: 7 cycles */
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ /* 2 cycles of 0 */
+ 0, 0, 0, 0,
+ /* Start bit of 1 (each bit is two cycles) */
+ 1, 1
+ };
+ unsigned bit, i;
+
+ cc_insert(cc + CC_PREAMBLE_BITS, data->data[0]);
+ cc_insert(cc + CC_PREAMBLE_BITS + 16, data->data[1]);
+
+ for (i = 0, bit = 0; bit < sizeof(cc); bit++) {
+ unsigned n = ((bit + 1) * sampling_rate) / rate;
+
+ while (i < n)
+ buf[i++] = cc[bit] ? 0xc0 : 0x10;
+ }
+}
+
+void vivid_vbi_gen_raw(const struct vivid_vbi_gen_data *vbi,
+ const struct v4l2_vbi_format *vbi_fmt, u8 *buf)
+{
+ unsigned idx;
+
+ for (idx = 0; idx < 25; idx++) {
+ const struct v4l2_sliced_vbi_data *data = vbi->data + idx;
+ unsigned start_2nd_field;
+ unsigned line = data->line;
+ u8 *linebuf = buf;
+
+ start_2nd_field = (data->id & V4L2_SLICED_VBI_525) ? 263 : 313;
+ if (data->field)
+ line += start_2nd_field;
+ line -= vbi_fmt->start[data->field];
+
+ if (vbi_fmt->flags & V4L2_VBI_INTERLACED)
+ linebuf += (line * 2 + data->field) *
+ vbi_fmt->samples_per_line;
+ else
+ linebuf += (line + data->field * vbi_fmt->count[0]) *
+ vbi_fmt->samples_per_line;
+ if (data->id == V4L2_SLICED_CAPTION_525)
+ vivid_vbi_gen_cc_raw(data, linebuf, vbi_fmt->sampling_rate);
+ else if (data->id == V4L2_SLICED_WSS_625)
+ vivid_vbi_gen_wss_raw(data, linebuf, vbi_fmt->sampling_rate);
+ else if (data->id == V4L2_SLICED_TELETEXT_B)
+ vivid_vbi_gen_teletext_raw(data, linebuf, vbi_fmt->sampling_rate);
+ }
+}
+
+static const u8 vivid_cc_sequence1[30] = {
+ 0x14, 0x20, /* Resume Caption Loading */
+ 'H', 'e',
+ 'l', 'l',
+ 'o', ' ',
+ 'w', 'o',
+ 'r', 'l',
+ 'd', '!',
+ 0x14, 0x2f, /* End of Caption */
+};
+
+static const u8 vivid_cc_sequence2[30] = {
+ 0x14, 0x20, /* Resume Caption Loading */
+ 'C', 'l',
+ 'o', 's',
+ 'e', 'd',
+ ' ', 'c',
+ 'a', 'p',
+ 't', 'i',
+ 'o', 'n',
+ 's', ' ',
+ 't', 'e',
+ 's', 't',
+ 0x14, 0x2f, /* End of Caption */
+};
+
+static u8 calc_parity(u8 val)
+{
+ unsigned i;
+ unsigned tot = 0;
+
+ for (i = 0; i < 7; i++)
+ tot += (val & (1 << i)) ? 1 : 0;
+ return val | ((tot & 1) ? 0 : 0x80);
+}
+
+static void vivid_vbi_gen_set_time_of_day(u8 *packet)
+{
+ struct tm tm;
+ u8 checksum, i;
+
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+ packet[0] = calc_parity(0x07);
+ packet[1] = calc_parity(0x01);
+ packet[2] = calc_parity(0x40 | tm.tm_min);
+ packet[3] = calc_parity(0x40 | tm.tm_hour);
+ packet[4] = calc_parity(0x40 | tm.tm_mday);
+ if (tm.tm_mday == 1 && tm.tm_mon == 2 &&
+ sys_tz.tz_minuteswest > tm.tm_min + tm.tm_hour * 60)
+ packet[4] = calc_parity(0x60 | tm.tm_mday);
+ packet[5] = calc_parity(0x40 | (1 + tm.tm_mon));
+ packet[6] = calc_parity(0x40 | (1 + tm.tm_wday));
+ packet[7] = calc_parity(0x40 | ((tm.tm_year - 90) & 0x3f));
+ packet[8] = calc_parity(0x0f);
+ for (checksum = i = 0; i <= 8; i++)
+ checksum += packet[i] & 0x7f;
+ packet[9] = calc_parity(0x100 - checksum);
+ checksum = 0;
+ packet[10] = calc_parity(0x07);
+ packet[11] = calc_parity(0x04);
+ if (sys_tz.tz_minuteswest >= 0)
+ packet[12] = calc_parity(0x40 | ((sys_tz.tz_minuteswest / 60) & 0x1f));
+ else
+ packet[12] = calc_parity(0x40 | ((24 + sys_tz.tz_minuteswest / 60) & 0x1f));
+ packet[13] = calc_parity(0);
+ packet[14] = calc_parity(0x0f);
+ for (checksum = 0, i = 10; i <= 14; i++)
+ checksum += packet[i] & 0x7f;
+ packet[15] = calc_parity(0x100 - checksum);
+}
+
+static const u8 hamming[16] = {
+ 0x15, 0x02, 0x49, 0x5e, 0x64, 0x73, 0x38, 0x2f,
+ 0xd0, 0xc7, 0x8c, 0x9b, 0xa1, 0xb6, 0xfd, 0xea
+};
+
+static void vivid_vbi_gen_teletext(u8 *packet, unsigned line, unsigned frame)
+{
+ unsigned offset = 2;
+ unsigned i;
+
+ packet[0] = hamming[1 + ((line & 1) << 3)];
+ packet[1] = hamming[line >> 1];
+ memset(packet + 2, 0x20, 40);
+ if (line == 0) {
+ /* subcode */
+ packet[2] = hamming[frame % 10];
+ packet[3] = hamming[frame / 10];
+ packet[4] = hamming[0];
+ packet[5] = hamming[0];
+ packet[6] = hamming[0];
+ packet[7] = hamming[0];
+ packet[8] = hamming[0];
+ packet[9] = hamming[1];
+ offset = 10;
+ }
+ packet += offset;
+ memcpy(packet, "Page: 100 Row: 10", 17);
+ packet[7] = '0' + frame / 10;
+ packet[8] = '0' + frame % 10;
+ packet[15] = '0' + line / 10;
+ packet[16] = '0' + line % 10;
+ for (i = 0; i < 42 - offset; i++)
+ packet[i] = calc_parity(packet[i]);
+}
+
+void vivid_vbi_gen_sliced(struct vivid_vbi_gen_data *vbi,
+ bool is_60hz, unsigned seqnr)
+{
+ struct v4l2_sliced_vbi_data *data0 = vbi->data;
+ struct v4l2_sliced_vbi_data *data1 = vbi->data + 1;
+ unsigned frame = seqnr % 60;
+
+ memset(vbi->data, 0, sizeof(vbi->data));
+
+ if (!is_60hz) {
+ unsigned i;
+
+ for (i = 0; i <= 11; i++) {
+ data0->id = V4L2_SLICED_TELETEXT_B;
+ data0->line = 7 + i;
+ vivid_vbi_gen_teletext(data0->data, i, frame);
+ data0++;
+ }
+ data0->id = V4L2_SLICED_WSS_625;
+ data0->line = 23;
+ /* 4x3 video aspect ratio */
+ data0->data[0] = 0x08;
+ data0++;
+ for (i = 0; i <= 11; i++) {
+ data0->id = V4L2_SLICED_TELETEXT_B;
+ data0->field = 1;
+ data0->line = 7 + i;
+ vivid_vbi_gen_teletext(data0->data, 12 + i, frame);
+ data0++;
+ }
+ return;
+ }
+
+ data0->id = V4L2_SLICED_CAPTION_525;
+ data0->line = 21;
+ data1->id = V4L2_SLICED_CAPTION_525;
+ data1->field = 1;
+ data1->line = 21;
+
+ if (frame < 15) {
+ data0->data[0] = calc_parity(vivid_cc_sequence1[2 * frame]);
+ data0->data[1] = calc_parity(vivid_cc_sequence1[2 * frame + 1]);
+ } else if (frame >= 30 && frame < 45) {
+ frame -= 30;
+ data0->data[0] = calc_parity(vivid_cc_sequence2[2 * frame]);
+ data0->data[1] = calc_parity(vivid_cc_sequence2[2 * frame + 1]);
+ } else {
+ data0->data[0] = calc_parity(0);
+ data0->data[1] = calc_parity(0);
+ }
+
+ frame = seqnr % (30 * 60);
+ switch (frame) {
+ case 0:
+ vivid_vbi_gen_set_time_of_day(vbi->time_of_day_packet);
+ /* fall through */
+ case 1 ... 7:
+ data1->data[0] = vbi->time_of_day_packet[frame * 2];
+ data1->data[1] = vbi->time_of_day_packet[frame * 2 + 1];
+ break;
+ default:
+ data1->data[0] = calc_parity(0);
+ data1->data[1] = calc_parity(0);
+ break;
+ }
+}
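
calc_parity() above applies the odd parity required for EIA-608 closed caption bytes: bit 7 is set only when the seven data bits contain an even number of ones. For example 'H' (0x48, two bits set) is transmitted as 0xc8, while the End of Caption code 0x2f (five bits set) already has odd parity and goes out unchanged.
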
diff --git a/drivers/media/platform/vivid/vivid-vbi-gen.h b/drivers/media/platform/vivid/vivid-vbi-gen.h
new file mode 100644
index 000000000..2657a7f55
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-gen.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-vbi-gen.h - vbi generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_VBI_GEN_H_
+#define _VIVID_VBI_GEN_H_
+
+struct vivid_vbi_gen_data {
+ struct v4l2_sliced_vbi_data data[25];
+ u8 time_of_day_packet[16];
+};
+
+void vivid_vbi_gen_sliced(struct vivid_vbi_gen_data *vbi,
+ bool is_60hz, unsigned seqnr);
+void vivid_vbi_gen_raw(const struct vivid_vbi_gen_data *vbi,
+ const struct v4l2_vbi_format *vbi_fmt, u8 *buf);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
new file mode 100644
index 000000000..69486c130
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-vbi-out.c - vbi output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-out.h"
+#include "vivid-vbi-out.h"
+#include "vivid-vbi-cap.h"
+
+static int vbi_out_queue_setup(struct vb2_queue *vq,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+ unsigned size = vq->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
+
+ if (!vivid_is_svid_out(dev))
+ return -EINVAL;
+
+ sizes[0] = size;
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vbi_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+ unsigned size = vb->vb2_queue->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void vbi_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vbi_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->vbi_out_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev, &dev->vbi_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vbi_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->vbi_out_streaming);
+ dev->vbi_out_have_wss = false;
+ dev->vbi_out_have_cc[0] = false;
+ dev->vbi_out_have_cc[1] = false;
+}
+
+const struct vb2_ops vivid_vbi_out_qops = {
+ .queue_setup = vbi_out_queue_setup,
+ .buf_prepare = vbi_out_buf_prepare,
+ .buf_queue = vbi_out_buf_queue,
+ .start_streaming = vbi_out_start_streaming,
+ .stop_streaming = vbi_out_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_vbi_format *vbi = &f->fmt.vbi;
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+
+ if (!vivid_is_svid_out(dev) || !dev->has_raw_vbi_out)
+ return -EINVAL;
+
+ vbi->sampling_rate = 25000000;
+ vbi->offset = 24;
+ vbi->samples_per_line = 1440;
+ vbi->sample_format = V4L2_PIX_FMT_GREY;
+ vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5;
+ vbi->start[1] = is_60hz ? V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5;
+ vbi->count[0] = vbi->count[1] = is_60hz ? 12 : 18;
+ vbi->flags = dev->vbi_cap_interlaced ? V4L2_VBI_INTERLACED : 0;
+ vbi->reserved[0] = 0;
+ vbi->reserved[1] = 0;
+ return 0;
+}
+
+int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ int ret = vidioc_g_fmt_vbi_out(file, priv, f);
+
+ if (ret)
+ return ret;
+ if (vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+ dev->stream_sliced_vbi_out = false;
+ dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_VBI_OUTPUT;
+ return 0;
+}
+
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+
+ if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out)
+ return -EINVAL;
+
+ vivid_fill_service_lines(vbi, dev->service_set_out);
+ return 0;
+}
+
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+ u32 service_set = vbi->service_set;
+
+ if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out)
+ return -EINVAL;
+
+ service_set &= is_60hz ? V4L2_SLICED_CAPTION_525 :
+ V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ vivid_fill_service_lines(vbi, service_set);
+ return 0;
+}
+
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ int ret = vidioc_try_fmt_sliced_vbi_out(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ if (vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+ dev->service_set_out = vbi->service_set;
+ dev->stream_sliced_vbi_out = true;
+ dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+ return 0;
+}
+
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
+{
+ struct v4l2_sliced_vbi_data *vbi =
+ vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ unsigned elems =
+ vb2_get_plane_payload(&buf->vb.vb2_buf, 0) / sizeof(*vbi);
+
+ dev->vbi_out_have_cc[0] = false;
+ dev->vbi_out_have_cc[1] = false;
+ dev->vbi_out_have_wss = false;
+ while (elems--) {
+ switch (vbi->id) {
+ case V4L2_SLICED_CAPTION_525:
+ if ((dev->std_out & V4L2_STD_525_60) && vbi->line == 21) {
+ dev->vbi_out_have_cc[!!vbi->field] = true;
+ dev->vbi_out_cc[!!vbi->field][0] = vbi->data[0];
+ dev->vbi_out_cc[!!vbi->field][1] = vbi->data[1];
+ }
+ break;
+ case V4L2_SLICED_WSS_625:
+ if ((dev->std_out & V4L2_STD_625_50) &&
+ vbi->field == 0 && vbi->line == 23) {
+ dev->vbi_out_have_wss = true;
+ dev->vbi_out_wss[0] = vbi->data[0];
+ dev->vbi_out_wss[1] = vbi->data[1];
+ }
+ break;
+ }
+ vbi++;
+ }
+}
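
vivid_sliced_vbi_out_process() is what lets the capture side loop back whatever CC or WSS data an application writes to the sliced VBI output node. As a hedged illustration (not part of the patch), a single output element carrying the same 4:3 WSS code the generator uses might be filled in like this:

	/* one sliced VBI output element: WSS on line 23 of the first field */
	struct v4l2_sliced_vbi_data wss = {
		.id    = V4L2_SLICED_WSS_625,
		.field = 0,
		.line  = 23,
		.data  = { 0x08 },	/* 4:3 full format, matching vivid-vbi-gen.c */
	};
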
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.h b/drivers/media/platform/vivid/vivid-vbi-out.h
new file mode 100644
index 000000000..76584940c
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-out.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-vbi-out.h - vbi output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_VBI_OUT_H_
+#define _VIVID_VBI_OUT_H_
+
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+
+extern const struct vb2_ops vivid_vbi_out_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
new file mode 100644
index 000000000..c58ae489f
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -0,0 +1,1865 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-vid-cap.c - video capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-rect.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-vid-cap.h"
+
+/* timeperframe: min/max and default */
+static const struct v4l2_fract
+ tpf_min = {.numerator = 1, .denominator = FPS_MAX},
+ tpf_max = {.numerator = FPS_MAX, .denominator = 1};
+
+static const struct vivid_fmt formats_ovl[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+};
+
+/* The number of discrete webcam framesizes */
+#define VIVID_WEBCAM_SIZES 5
+/* The number of discrete webcam frameintervals */
+#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
+
+/* Sizes must be in increasing order */
+static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
+ { 320, 180 },
+ { 640, 360 },
+ { 1280, 720 },
+ { 1920, 1080 },
+ { 3840, 2160 },
+};
+
+/*
+ * Intervals must be in increasing order and there must be twice as many
+ * elements in this array as there are in webcam_sizes.
+ */
+static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
+ { 1, 1 },
+ { 1, 2 },
+ { 1, 4 },
+ { 1, 5 },
+ { 1, 10 },
+ { 1, 15 },
+ { 1, 25 },
+ { 1, 30 },
+ { 1, 50 },
+ { 1, 60 },
+};
+
+static int vid_cap_queue_setup(struct vb2_queue *vq,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned buffers = tpg_g_buffers(&dev->tpg);
+ unsigned h = dev->fmt_cap_rect.height;
+ unsigned p;
+
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
+ /*
+ * You cannot use read() with FIELD_ALTERNATE since the field
+ * information (TOP/BOTTOM) cannot be passed back to the user.
+ */
+ if (vb2_fileio_is_active(vq))
+ return -EINVAL;
+ }
+
+ if (dev->queue_setup_error) {
+ /*
+ * Error injection: test what happens if queue_setup() returns
+ * an error.
+ */
+ dev->queue_setup_error = false;
+ return -EINVAL;
+ }
+ if (*nplanes) {
+ /*
+ * Check if the number of requested planes match
+ * the number of buffers in the current format. You can't mix that.
+ */
+ if (*nplanes != buffers)
+ return -EINVAL;
+ for (p = 0; p < buffers; p++) {
+ if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
+ dev->fmt_cap->data_offset[p])
+ return -EINVAL;
+ }
+ } else {
+ for (p = 0; p < buffers; p++)
+ sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
+ dev->fmt_cap->data_offset[p];
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = buffers;
+
+ dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
+ for (p = 0; p < buffers; p++)
+ dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
+
+ return 0;
+}
+
+static int vid_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+ unsigned buffers = tpg_g_buffers(&dev->tpg);
+ unsigned p;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (WARN_ON(NULL == dev->fmt_cap))
+ return -EINVAL;
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ for (p = 0; p < buffers; p++) {
+ size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
+ dev->fmt_cap->data_offset[p];
+
+ if (vb2_plane_size(vb, p) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
+ __func__, p, vb2_plane_size(vb, p), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, p, size);
+ vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
+ }
+
+ return 0;
+}
+
+static void vid_cap_buf_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_timecode *tc = &vbuf->timecode;
+ unsigned fps = 25;
+ unsigned seq = vbuf->sequence;
+
+ if (!vivid_is_sdtv_cap(dev))
+ return;
+
+ /*
+ * Set the timecode. Rarely used, so it is interesting to
+ * test this.
+ */
+ vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
+ if (dev->std_cap & V4L2_STD_525_60)
+ fps = 30;
+ tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
+ tc->flags = 0;
+ tc->frames = seq % fps;
+ tc->seconds = (seq / fps) % 60;
+ tc->minutes = (seq / (60 * fps)) % 60;
+ tc->hours = (seq / (60 * 60 * fps)) % 24;
+}
+
+static void vid_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vid_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned i;
+ int err;
+
+ if (vb2_is_streaming(&dev->vb_vid_out_q))
+ dev->can_loop_video = vivid_vid_can_loop(dev);
+
+ dev->vid_cap_seq_count = 0;
+ dprintk(dev, 1, "%s\n", __func__);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vid_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
+ dev->can_loop_video = false;
+}
+
+const struct vb2_ops vivid_vid_cap_qops = {
+ .queue_setup = vid_cap_queue_setup,
+ .buf_prepare = vid_cap_buf_prepare,
+ .buf_finish = vid_cap_buf_finish,
+ .buf_queue = vid_cap_buf_queue,
+ .start_streaming = vid_cap_start_streaming,
+ .stop_streaming = vid_cap_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * Determine the 'picture' quality based on the current TV frequency: either
+ * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
+ * signal or NOISE for no signal.
+ */
+void vivid_update_quality(struct vivid_dev *dev)
+{
+ unsigned freq_modulus;
+
+ if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
+ /*
+ * The 'noise' will only be replaced by the actual video
+ * if the output video matches the input video settings.
+ */
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
+ return;
+ }
+ if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
+ return;
+ }
+ if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
+ return;
+ }
+ if (!vivid_is_tv_cap(dev)) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
+ return;
+ }
+
+ /*
+ * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
+ * From +/- 0.25 MHz around the channel there is color, and from
+ * +/- 1 MHz there is grayscale (chroma is lost).
+ * Everywhere else it is just noise.
+ */
+ freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
+ if (freq_modulus > 2 * 16) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
+ next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
+ return;
+ }
+ if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
+ tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
+ else
+ tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
+}
+
+/*
+ * Get the current picture quality and the associated afc value.
+ */
+static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
+{
+ unsigned freq_modulus;
+
+ if (afc)
+ *afc = 0;
+ if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
+ tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
+ return tpg_g_quality(&dev->tpg);
+
+ /*
+ * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
+ * From +/- 0.25 MHz around the channel there is color, and from
+ * +/- 1 MHz there is grayscale (chroma is lost).
+ * Everywhere else it is just gray.
+ */
+ freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
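+ /*
+ * Illustrative: the afc value below is 0 when tuned exactly on a fake
+ * channel, negative when tuned below it and positive when above it,
+ * expressed in the same units as tv_freq.
+ */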
+ if (afc)
+ *afc = freq_modulus - 1 * 16;
+ return TPG_QUAL_GRAY;
+}
+
+enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
+{
+ if (vivid_is_sdtv_cap(dev))
+ return dev->std_aspect_ratio;
+
+ if (vivid_is_hdmi_cap(dev))
+ return dev->dv_timings_aspect_ratio;
+
+ return TPG_VIDEO_ASPECT_IMAGE;
+}
+
+static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
+{
+ if (vivid_is_sdtv_cap(dev))
+ return (dev->std_cap & V4L2_STD_525_60) ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ if (vivid_is_hdmi_cap(dev) &&
+ dev->src_rect.width == 720 && dev->src_rect.height <= 576)
+ return dev->src_rect.height == 480 ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ return TPG_PIXEL_ASPECT_SQUARE;
+}
+
+/*
+ * Called whenever the format has to be reset, which can occur when
+ * changing inputs, standards, timings, etc.
+ */
+void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
+{
+ struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
+ unsigned size;
+ u64 pixelclock;
+
+ switch (dev->input_type[dev->input]) {
+ case WEBCAM:
+ default:
+ dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
+ dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
+ dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
+ dev->field_cap = V4L2_FIELD_NONE;
+ tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
+ break;
+ case TV:
+ case SVID:
+ dev->field_cap = dev->tv_field_cap;
+ dev->src_rect.width = 720;
+ if (dev->std_cap & V4L2_STD_525_60) {
+ dev->src_rect.height = 480;
+ dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
+ dev->service_set_cap = V4L2_SLICED_CAPTION_525;
+ } else {
+ dev->src_rect.height = 576;
+ dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
+ dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ }
+ tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
+ break;
+ case HDMI:
+ dev->src_rect.width = bt->width;
+ dev->src_rect.height = bt->height;
+ size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
+ if (dev->reduced_fps && can_reduce_fps(bt)) {
+ pixelclock = div_u64(bt->pixelclock * 1000, 1001);
+ bt->flags |= V4L2_DV_FL_REDUCED_FPS;
+ } else {
+ pixelclock = bt->pixelclock;
+ bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
+ }
+ dev->timeperframe_vid_cap = (struct v4l2_fract) {
+ size / 100, (u32)pixelclock / 100
+ };
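+ /*
+ * Illustrative: for a standard 1080p60 timing the total frame is
+ * 2200 * 1125 = 2475000 pixels at a 148.5 MHz pixelclock, so
+ * timeperframe becomes 24750/1485000 = 1/60 s when the reduced-fps
+ * flag is not in effect.
+ */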
+ if (bt->interlaced)
+ dev->field_cap = V4L2_FIELD_ALTERNATE;
+ else
+ dev->field_cap = V4L2_FIELD_NONE;
+
+ /*
+ * We can be called from within s_ctrl, in which case we can't
+ * set/get controls. Luckily we don't need to in that case.
+ */
+ if (keep_controls || !dev->colorspace)
+ break;
+ if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
+ if (bt->width == 720 && bt->height <= 576)
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
+ else
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
+ v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
+ } else {
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
+ v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
+ }
+ tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
+ break;
+ }
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ vivid_update_quality(dev);
+ tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
+ dev->crop_cap = dev->src_rect;
+ dev->crop_bounds_cap = dev->src_rect;
+ dev->compose_cap = dev->crop_cap;
+ if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
+ dev->compose_cap.height /= 2;
+ dev->fmt_cap_rect = dev->compose_cap;
+ tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
+ tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
+ tpg_update_mv_step(&dev->tpg);
+}
+
+/* Map the field to something that is valid for the current input */
+static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
+{
+ if (vivid_is_sdtv_cap(dev)) {
+ switch (field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_ALTERNATE:
+ return field;
+ case V4L2_FIELD_INTERLACED:
+ default:
+ return V4L2_FIELD_INTERLACED;
+ }
+ }
+ if (vivid_is_hdmi_cap(dev))
+ return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
+ V4L2_FIELD_NONE;
+ return V4L2_FIELD_NONE;
+}
+
+static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_colorspace(&dev->tpg);
+ return dev->colorspace_out;
+}
+
+static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_xfer_func(&dev->tpg);
+ return dev->xfer_func_out;
+}
+
+static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_ycbcr_enc(&dev->tpg);
+ return dev->ycbcr_enc_out;
+}
+
+static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_hsv_enc(&dev->tpg);
+ return dev->hsv_enc_out;
+}
+
+static unsigned vivid_quantization_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_quantization(&dev->tpg);
+ return dev->quantization_out;
+}
+
+int vivid_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ unsigned p;
+
+ mp->width = dev->fmt_cap_rect.width;
+ mp->height = dev->fmt_cap_rect.height;
+ mp->field = dev->field_cap;
+ mp->pixelformat = dev->fmt_cap->fourcc;
+ mp->colorspace = vivid_colorspace_cap(dev);
+ mp->xfer_func = vivid_xfer_func_cap(dev);
+ if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
+ mp->hsv_enc = vivid_hsv_enc_cap(dev);
+ else
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ mp->quantization = vivid_quantization_cap(dev);
+ mp->num_planes = dev->fmt_cap->buffers;
+ for (p = 0; p < mp->num_planes; p++) {
+ mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
+ mp->plane_fmt[p].sizeimage =
+ tpg_g_line_width(&dev->tpg, p) * mp->height +
+ dev->fmt_cap->data_offset[p];
+ }
+ return 0;
+}
+
+int vivid_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+ unsigned bytesperline, max_bpl;
+ unsigned factor = 1;
+ unsigned w, h;
+ unsigned p;
+
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ if (!fmt) {
+ dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
+ mp->pixelformat);
+ mp->pixelformat = V4L2_PIX_FMT_YUYV;
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ }
+
+ mp->field = vivid_field_cap(dev, mp->field);
+ if (vivid_is_webcam(dev)) {
+ const struct v4l2_frmsize_discrete *sz =
+ v4l2_find_nearest_size(webcam_sizes,
+ VIVID_WEBCAM_SIZES, width,
+ height, mp->width, mp->height);
+
+ w = sz->width;
+ h = sz->height;
+ } else if (vivid_is_sdtv_cap(dev)) {
+ w = 720;
+ h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
+ } else {
+ w = dev->src_rect.width;
+ h = dev->src_rect.height;
+ }
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+ if (vivid_is_webcam(dev) ||
+ (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
+ mp->width = w;
+ mp->height = h / factor;
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
+
+ v4l2_rect_set_min_size(&r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&r, &vivid_max_rect);
+ if (dev->has_scaler_cap && !dev->has_compose_cap) {
+ struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
+
+ v4l2_rect_set_max_size(&r, &max_r);
+ } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
+ v4l2_rect_set_max_size(&r, &dev->src_rect);
+ } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
+ v4l2_rect_set_min_size(&r, &dev->src_rect);
+ }
+ mp->width = r.width;
+ mp->height = r.height / factor;
+ }
+
+ /* This driver supports custom bytesperline values */
+
+ mp->num_planes = fmt->buffers;
+ for (p = 0; p < fmt->buffers; p++) {
+ /* Calculate the minimum supported bytesperline value */
+ bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
+ /* Calculate the maximum supported bytesperline value */
+ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
+
+ if (pfmt[p].bytesperline > max_bpl)
+ pfmt[p].bytesperline = max_bpl;
+ if (pfmt[p].bytesperline < bytesperline)
+ pfmt[p].bytesperline = bytesperline;
+
+ pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
+ fmt->vdownsampling[p] + fmt->data_offset[p];
+
+ memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
+ }
+ for (p = fmt->buffers; p < fmt->planes; p++)
+ pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
+ (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
+ (fmt->bit_depth[0] / fmt->vdownsampling[0]);
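+ /*
+ * Illustrative: for single-buffer NV12 (bit_depth { 8, 8 },
+ * vdownsampling { 1, 2 }) at 1280x720 with a 1280 byte bytesperline,
+ * plane 0 contributes 921600 bytes and the loop above adds
+ * 1280 * 720 * (8 / 2) / 8 = 460800 for the chroma lines, i.e. the
+ * expected 1.5 bytes per pixel.
+ */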
+
+ mp->colorspace = vivid_colorspace_cap(dev);
+ if (fmt->color_enc == TGP_COLOR_ENC_HSV)
+ mp->hsv_enc = vivid_hsv_enc_cap(dev);
+ else
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ mp->xfer_func = vivid_xfer_func_cap(dev);
+ mp->quantization = vivid_quantization_cap(dev);
+ memset(mp->reserved, 0, sizeof(mp->reserved));
+ return 0;
+}
+
+int vivid_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_cap;
+ struct v4l2_rect *compose = &dev->compose_cap;
+ struct vb2_queue *q = &dev->vb_vid_cap_q;
+ int ret = vivid_try_fmt_vid_cap(file, priv, f);
+ unsigned factor = 1;
+ unsigned p;
+ unsigned i;
+
+ if (ret < 0)
+ return ret;
+
+ if (vb2_is_busy(q)) {
+ dprintk(dev, 1, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
+ dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
+ return -EBUSY;
+ }
+
+ dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+
+ /* Note: the webcam input doesn't support scaling, cropping or composing */
+
+ if (!vivid_is_webcam(dev) &&
+ (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ if (dev->has_scaler_cap) {
+ if (dev->has_compose_cap)
+ v4l2_rect_map_inside(compose, &r);
+ else
+ *compose = r;
+ if (dev->has_crop_cap && !dev->has_compose_cap) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ r.width / MAX_ZOOM,
+ factor * r.height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ r.width * MAX_ZOOM,
+ factor * r.height * MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(crop, &min_r);
+ v4l2_rect_set_max_size(crop, &max_r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
+ } else if (dev->has_crop_cap) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ compose->width / MAX_ZOOM,
+ factor * compose->height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ compose->width * MAX_ZOOM,
+ factor * compose->height * MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(crop, &min_r);
+ v4l2_rect_set_max_size(crop, &max_r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
+ }
+ } else if (dev->has_crop_cap && !dev->has_compose_cap) {
+ r.height *= factor;
+ v4l2_rect_set_size_to(crop, &r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
+ r = *crop;
+ r.height /= factor;
+ v4l2_rect_set_size_to(compose, &r);
+ } else if (!dev->has_crop_cap) {
+ v4l2_rect_map_inside(compose, &r);
+ } else {
+ r.height *= factor;
+ v4l2_rect_set_max_size(crop, &r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
+ compose->top *= factor;
+ compose->height *= factor;
+ v4l2_rect_set_size_to(compose, crop);
+ v4l2_rect_map_inside(compose, &r);
+ compose->top /= factor;
+ compose->height /= factor;
+ }
+ } else if (vivid_is_webcam(dev)) {
+ /* Guaranteed to be a match */
+ for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
+ if (webcam_sizes[i].width == mp->width &&
+ webcam_sizes[i].height == mp->height)
+ break;
+ dev->webcam_size_idx = i;
+ if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
+ dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
+ vivid_update_format_cap(dev, false);
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ v4l2_rect_set_size_to(compose, &r);
+ r.height *= factor;
+ v4l2_rect_set_size_to(crop, &r);
+ }
+
+ dev->fmt_cap_rect.width = mp->width;
+ dev->fmt_cap_rect.height = mp->height;
+ tpg_s_buf_height(&dev->tpg, mp->height);
+ tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
+ for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
+ tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
+ dev->field_cap = mp->field;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
+ else
+ tpg_s_field(&dev->tpg, dev->field_cap, false);
+ tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
+ if (vivid_is_sdtv_cap(dev))
+ dev->tv_field_cap = mp->field;
+ tpg_update_mv_step(&dev->tpg);
+ return 0;
+}
+
+int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_g_fmt_vid_cap(file, priv, f);
+}
+
+int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_try_fmt_vid_cap(file, priv, f);
+}
+
+int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_s_fmt_vid_cap(file, priv, f);
+}
+
+int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
+}
+
+int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
+}
+
+int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
+}
+
+int vivid_vid_cap_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->has_crop_cap && !dev->has_compose_cap)
+ return -ENOTTY;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (vivid_is_webcam(dev))
+ return -ENODATA;
+
+ sel->r.left = sel->r.top = 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_cap)
+ return -EINVAL;
+ sel->r = dev->crop_cap;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (!dev->has_crop_cap)
+ return -EINVAL;
+ sel->r = dev->src_rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ sel->r = vivid_max_rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ sel->r = dev->compose_cap;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ sel->r = dev->fmt_cap_rect;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_cap;
+ struct v4l2_rect *compose = &dev->compose_cap;
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
+ int ret;
+
+ if (!dev->has_crop_cap && !dev->has_compose_cap)
+ return -ENOTTY;
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (vivid_is_webcam(dev))
+ return -ENODATA;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_cap)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->src_rect);
+ v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ if (dev->has_scaler_cap) {
+ struct v4l2_rect fmt = dev->fmt_cap_rect;
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ s->r.height * MAX_ZOOM
+ };
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ s->r.height / MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(&fmt, &min_rect);
+ if (!dev->has_compose_cap)
+ v4l2_rect_set_max_size(&fmt, &max_rect);
+ if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+ if (dev->has_compose_cap) {
+ v4l2_rect_set_min_size(compose, &min_rect);
+ v4l2_rect_set_max_size(compose, &max_rect);
+ }
+ dev->fmt_cap_rect = fmt;
+ tpg_s_buf_height(&dev->tpg, fmt.height);
+ } else if (dev->has_compose_cap) {
+ struct v4l2_rect fmt = dev->fmt_cap_rect;
+
+ v4l2_rect_set_min_size(&fmt, &s->r);
+ if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+ dev->fmt_cap_rect = fmt;
+ tpg_s_buf_height(&dev->tpg, fmt.height);
+ v4l2_rect_set_size_to(compose, &s->r);
+ v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
+ } else {
+ if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
+ vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+ v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
+ v4l2_rect_set_size_to(compose, &s->r);
+ v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
+ tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
+ }
+ s->r.top *= factor;
+ s->r.height *= factor;
+ *crop = s->r;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
+ if (dev->has_scaler_cap) {
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ dev->src_rect.width * MAX_ZOOM,
+ (dev->src_rect.height / factor) * MAX_ZOOM
+ };
+
+ v4l2_rect_set_max_size(&s->r, &max_rect);
+ if (dev->has_crop_cap) {
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ (s->r.height * factor) / MAX_ZOOM
+ };
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ (s->r.height * factor) * MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(crop, &min_rect);
+ v4l2_rect_set_max_size(crop, &max_rect);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
+ }
+ } else if (dev->has_crop_cap) {
+ s->r.top *= factor;
+ s->r.height *= factor;
+ v4l2_rect_set_max_size(&s->r, &dev->src_rect);
+ v4l2_rect_set_size_to(crop, &s->r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ } else {
+ v4l2_rect_set_size_to(&s->r, &dev->src_rect);
+ s->r.height /= factor;
+ }
+ v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
+ if (dev->bitmap_cap && (compose->width != s->r.width ||
+ compose->height != s->r.height)) {
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ }
+ *compose = s->r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tpg_s_crop_compose(&dev->tpg, crop, compose);
+ return 0;
+}
+
+int vivid_vid_cap_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (vivid_get_pixel_aspect(dev)) {
+ case TPG_PIXEL_ASPECT_NTSC:
+ cap->pixelaspect.numerator = 11;
+ cap->pixelaspect.denominator = 10;
+ break;
+ case TPG_PIXEL_ASPECT_PAL:
+ cap->pixelaspect.numerator = 54;
+ cap->pixelaspect.denominator = 59;
+ break;
+ case TPG_PIXEL_ASPECT_SQUARE:
+ cap->pixelaspect.numerator = 1;
+ cap->pixelaspect.denominator = 1;
+ break;
+ }
+ return 0;
+}
+
+int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
+ if (f->index >= ARRAY_SIZE(formats_ovl))
+ return -EINVAL;
+
+ fmt = &formats_ovl[f->index];
+
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_cap;
+ struct v4l2_window *win = &f->fmt.win;
+ unsigned clipcount = win->clipcount;
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
+ win->w.top = dev->overlay_cap_top;
+ win->w.left = dev->overlay_cap_left;
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ win->field = dev->overlay_cap_field;
+ win->clipcount = dev->clipcount_cap;
+ if (clipcount > dev->clipcount_cap)
+ clipcount = dev->clipcount_cap;
+ if (dev->bitmap_cap == NULL)
+ win->bitmap = NULL;
+ else if (win->bitmap) {
+ if (copy_to_user(win->bitmap, dev->bitmap_cap,
+ ((compose->width + 7) / 8) * compose->height))
+ return -EFAULT;
+ }
+ if (clipcount && win->clips) {
+ if (copy_to_user(win->clips, dev->clips_cap,
+ clipcount * sizeof(dev->clips_cap[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_cap;
+ struct v4l2_window *win = &f->fmt.win;
+ int i, j;
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
+ win->w.left = clamp_t(int, win->w.left,
+ -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
+ win->w.top = clamp_t(int, win->w.top,
+ -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
+ win->field = V4L2_FIELD_ANY;
+ win->chromakey = 0;
+ win->global_alpha = 0;
+ if (win->clipcount && !win->clips)
+ win->clipcount = 0;
+ if (win->clipcount > MAX_CLIPS)
+ win->clipcount = MAX_CLIPS;
+ if (win->clipcount) {
+ if (copy_from_user(dev->try_clips_cap, win->clips,
+ win->clipcount * sizeof(dev->clips_cap[0])))
+ return -EFAULT;
+ for (i = 0; i < win->clipcount; i++) {
+ struct v4l2_rect *r = &dev->try_clips_cap[i].c;
+
+ r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
+ r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
+ r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
+ r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
+ }
+ /*
+ * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
+ * number and it's typically a one-time deal.
+ */
+ for (i = 0; i < win->clipcount - 1; i++) {
+ struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
+
+ for (j = i + 1; j < win->clipcount; j++) {
+ struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
+
+ if (v4l2_rect_overlap(r1, r2))
+ return -EINVAL;
+ }
+ }
+ if (copy_to_user(win->clips, dev->try_clips_cap,
+ win->clipcount * sizeof(dev->clips_cap[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_cap;
+ struct v4l2_window *win = &f->fmt.win;
+ int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
+ unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
+ unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
+ void *new_bitmap = NULL;
+
+ if (ret)
+ return ret;
+
+ if (win->bitmap) {
+ new_bitmap = vzalloc(bitmap_size);
+
+ if (new_bitmap == NULL)
+ return -ENOMEM;
+ if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
+ vfree(new_bitmap);
+ return -EFAULT;
+ }
+ }
+
+ dev->overlay_cap_top = win->w.top;
+ dev->overlay_cap_left = win->w.left;
+ dev->overlay_cap_field = win->field;
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = new_bitmap;
+ dev->clipcount_cap = win->clipcount;
+ if (dev->clipcount_cap)
+ memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
+ return 0;
+}
+
+int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
+ if (i && dev->fb_vbase_cap == NULL)
+ return -EINVAL;
+
+ if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
+ dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
+ return -EINVAL;
+ }
+
+ if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
+ return -EBUSY;
+ dev->overlay_cap_owner = i ? fh : NULL;
+ return 0;
+}
+
+int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
+ *a = dev->fb_cap;
+ a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
+ V4L2_FBUF_CAP_LIST_CLIPPING;
+ a->flags = V4L2_FBUF_FLAG_PRIMARY;
+ a->fmt.field = V4L2_FIELD_NONE;
+ a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ a->fmt.priv = 0;
+ return 0;
+}
+
+int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
+ const struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (dev->overlay_cap_owner)
+ return -EBUSY;
+
+ if (a->base == NULL) {
+ dev->fb_cap.base = NULL;
+ dev->fb_vbase_cap = NULL;
+ return 0;
+ }
+
+ if (a->fmt.width < 48 || a->fmt.height < 32)
+ return -EINVAL;
+ fmt = vivid_get_format(dev, a->fmt.pixelformat);
+ if (!fmt || !fmt->can_do_overlay)
+ return -EINVAL;
+ if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
+ return -EINVAL;
+ if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
+ return -EINVAL;
+
+ dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
+ dev->fb_cap = *a;
+ dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
+ -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
+ dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
+ -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
+ return 0;
+}
+
+static const struct v4l2_audio vivid_audio_inputs[] = {
+ { 0, "TV", V4L2_AUDCAP_STEREO },
+ { 1, "Line-In", V4L2_AUDCAP_STEREO },
+};
+
+int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (inp->index >= dev->num_inputs)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ switch (dev->input_type[inp->index]) {
+ case WEBCAM:
+ snprintf(inp->name, sizeof(inp->name), "Webcam %u",
+ dev->input_name_counter[inp->index]);
+ inp->capabilities = 0;
+ break;
+ case TV:
+ snprintf(inp->name, sizeof(inp->name), "TV %u",
+ dev->input_name_counter[inp->index]);
+ inp->type = V4L2_INPUT_TYPE_TUNER;
+ inp->std = V4L2_STD_ALL;
+ if (dev->has_audio_inputs)
+ inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
+ inp->capabilities = V4L2_IN_CAP_STD;
+ break;
+ case SVID:
+ snprintf(inp->name, sizeof(inp->name), "S-Video %u",
+ dev->input_name_counter[inp->index]);
+ inp->std = V4L2_STD_ALL;
+ if (dev->has_audio_inputs)
+ inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
+ inp->capabilities = V4L2_IN_CAP_STD;
+ break;
+ case HDMI:
+ snprintf(inp->name, sizeof(inp->name), "HDMI %u",
+ dev->input_name_counter[inp->index]);
+ inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ if (dev->edid_blocks == 0 ||
+ dev->dv_timings_signal_mode == NO_SIGNAL)
+ inp->status |= V4L2_IN_ST_NO_SIGNAL;
+ else if (dev->dv_timings_signal_mode == NO_LOCK ||
+ dev->dv_timings_signal_mode == OUT_OF_RANGE)
+ inp->status |= V4L2_IN_ST_NO_H_LOCK;
+ break;
+ }
+ if (dev->sensor_hflip)
+ inp->status |= V4L2_IN_ST_HFLIP;
+ if (dev->sensor_vflip)
+ inp->status |= V4L2_IN_ST_VFLIP;
+ if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
+ if (dev->std_signal_mode == NO_SIGNAL) {
+ inp->status |= V4L2_IN_ST_NO_SIGNAL;
+ } else if (dev->std_signal_mode == NO_LOCK) {
+ inp->status |= V4L2_IN_ST_NO_H_LOCK;
+ } else if (vivid_is_tv_cap(dev)) {
+ switch (tpg_g_quality(&dev->tpg)) {
+ case TPG_QUAL_GRAY:
+ inp->status |= V4L2_IN_ST_COLOR_KILL;
+ break;
+ case TPG_QUAL_NOISE:
+ inp->status |= V4L2_IN_ST_NO_H_LOCK;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+int vidioc_g_input(struct file *file, void *priv, unsigned *i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ *i = dev->input;
+ return 0;
+}
+
+int vidioc_s_input(struct file *file, void *priv, unsigned i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
+ unsigned brightness;
+
+ if (i >= dev->num_inputs)
+ return -EINVAL;
+
+ if (i == dev->input)
+ return 0;
+
+ if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+
+ dev->input = i;
+ dev->vid_cap_dev.tvnorms = 0;
+ if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
+ dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
+ dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
+ }
+ dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
+ vivid_update_format_cap(dev, false);
+
+ if (dev->colorspace) {
+ switch (dev->input_type[i]) {
+ case WEBCAM:
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
+ break;
+ case TV:
+ case SVID:
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
+ break;
+ case HDMI:
+ if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
+ if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
+ else
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
+ } else {
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
+ }
+ break;
+ }
+ }
+
+ /*
+ * Modify the brightness range depending on the input.
+ * This makes it easy to use vivid to test whether applications can
+ * handle control range modifications, and it also mirrors real
+ * hardware, where different inputs may be hooked up to different
+ * receivers with different control ranges.
+ */
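+ /*
+ * Illustrative: for input 2 the range below becomes [256..511] with a
+ * default of 384, and the stored per-input brightness offset is
+ * restored on top of the new minimum.
+ */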
+ brightness = 128 * i + dev->input_brightness[i];
+ v4l2_ctrl_modify_range(dev->brightness,
+ 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
+ v4l2_ctrl_s_ctrl(dev->brightness, brightness);
+ return 0;
+}
+
+int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
+{
+ if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
+ return -EINVAL;
+ *vin = vivid_audio_inputs[vin->index];
+ return 0;
+}
+
+int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -EINVAL;
+ *vin = vivid_audio_inputs[dev->tv_audio_input];
+ return 0;
+}
+
+int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -EINVAL;
+ if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
+ return -EINVAL;
+ dev->tv_audio_input = vin->index;
+ return 0;
+}
+
+int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+ vf->frequency = dev->tv_freq;
+ return 0;
+}
+
+int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+ dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
+ if (vivid_is_tv_cap(dev))
+ vivid_update_quality(dev);
+ return 0;
+}
+
+int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vt->index != 0)
+ return -EINVAL;
+ if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
+ return -EINVAL;
+ dev->tv_audmode = vt->audmode;
+ return 0;
+}
+
+int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ enum tpg_quality qual;
+
+ if (vt->index != 0)
+ return -EINVAL;
+
+ vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
+ vt->audmode = dev->tv_audmode;
+ vt->rangelow = MIN_TV_FREQ;
+ vt->rangehigh = MAX_TV_FREQ;
+ qual = vivid_get_quality(dev, &vt->afc);
+ if (qual == TPG_QUAL_COLOR)
+ vt->signal = 0xffff;
+ else if (qual == TPG_QUAL_GRAY)
+ vt->signal = 0x8000;
+ else
+ vt->signal = 0;
+ if (qual == TPG_QUAL_NOISE) {
+ vt->rxsubchans = 0;
+ } else if (qual == TPG_QUAL_GRAY) {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ } else {
+ unsigned channel_nr = dev->tv_freq / (6 * 16);
+ unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;
+
+ switch (channel_nr % options) {
+ case 0:
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ break;
+ case 1:
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ break;
+ case 2:
+ if (dev->std_cap & V4L2_STD_NTSC_M)
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
+ else
+ vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+ break;
+ case 3:
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
+ break;
+ }
+ }
+ strlcpy(vt->name, "TV Tuner", sizeof(vt->name));
+ return 0;
+}
+
+/* Must remain in sync with the vivid_ctrl_standard_strings array */
+const v4l2_std_id vivid_standard[] = {
+ V4L2_STD_NTSC_M,
+ V4L2_STD_NTSC_M_JP,
+ V4L2_STD_NTSC_M_KR,
+ V4L2_STD_NTSC_443,
+ V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
+ V4L2_STD_PAL_I,
+ V4L2_STD_PAL_DK,
+ V4L2_STD_PAL_M,
+ V4L2_STD_PAL_N,
+ V4L2_STD_PAL_Nc,
+ V4L2_STD_PAL_60,
+ V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
+ V4L2_STD_SECAM_DK,
+ V4L2_STD_SECAM_L,
+ V4L2_STD_SECAM_LC,
+ V4L2_STD_UNKNOWN
+};
+
+/* Must remain in sync with the vivid_standard array */
+const char * const vivid_ctrl_standard_strings[] = {
+ "NTSC-M",
+ "NTSC-M-JP",
+ "NTSC-M-KR",
+ "NTSC-443",
+ "PAL-BGH",
+ "PAL-I",
+ "PAL-DK",
+ "PAL-M",
+ "PAL-N",
+ "PAL-Nc",
+ "PAL-60",
+ "SECAM-BGH",
+ "SECAM-DK",
+ "SECAM-L",
+ "SECAM-Lc",
+ NULL,
+};
+
+int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -ENODATA;
+ if (dev->std_signal_mode == NO_SIGNAL ||
+ dev->std_signal_mode == NO_LOCK) {
+ *id = V4L2_STD_UNKNOWN;
+ return 0;
+ }
+ if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
+ *id = V4L2_STD_UNKNOWN;
+ } else if (dev->std_signal_mode == CURRENT_STD) {
+ *id = dev->std_cap;
+ } else if (dev->std_signal_mode == SELECTED_STD) {
+ *id = dev->query_std;
+ } else {
+ *id = vivid_standard[dev->query_std_last];
+ dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
+ }
+
+ return 0;
+}
+
+int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -ENODATA;
+ if (dev->std_cap == id)
+ return 0;
+ if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+ dev->std_cap = id;
+ vivid_update_format_cap(dev, false);
+ return 0;
+}
+
+static void find_aspect_ratio(u32 width, u32 height,
+ u32 *num, u32 *denom)
+{
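+ /* e.g. 1920x1080: 1080 % 9 == 0 and 1080 * 16 / 9 == 1920, so 16:9 */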
+ if (!(height % 3) && ((height * 4 / 3) == width)) {
+ *num = 4;
+ *denom = 3;
+ } else if (!(height % 9) && ((height * 16 / 9) == width)) {
+ *num = 16;
+ *denom = 9;
+ } else if (!(height % 10) && ((height * 16 / 10) == width)) {
+ *num = 16;
+ *denom = 10;
+ } else if (!(height % 4) && ((height * 5 / 4) == width)) {
+ *num = 5;
+ *denom = 4;
+ } else if (!(height % 9) && ((height * 15 / 9) == width)) {
+ *num = 15;
+ *denom = 9;
+ } else { /* default to 16:9 */
+ *num = 16;
+ *denom = 9;
+ }
+}
+
+static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+ u32 total_h_pixel;
+ u32 total_v_lines;
+ u32 h_freq;
+
+ if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
+ NULL, NULL))
+ return false;
+
+ total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
+ total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
+
+ h_freq = (u32)bt->pixelclock / total_h_pixel;
+
+ if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
+ if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
+ bt->polarities, bt->interlaced, timings))
+ return true;
+ }
+
+ if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
+ struct v4l2_fract aspect_ratio;
+
+ find_aspect_ratio(bt->width, bt->height,
+ &aspect_ratio.numerator,
+ &aspect_ratio.denominator);
+ if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
+ bt->polarities, bt->interlaced,
+ aspect_ratio, timings))
+ return true;
+ }
+ return false;
+}
+
+int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
+ 0, NULL, NULL) &&
+ !valid_cvt_gtf_timings(timings))
+ return -EINVAL;
+
+ if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0, false))
+ return 0;
+ if (vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+
+ dev->dv_timings_cap = *timings;
+ vivid_update_format_cap(dev, false);
+ return 0;
+}
+
+int vidioc_query_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ if (dev->dv_timings_signal_mode == NO_SIGNAL ||
+ dev->edid_blocks == 0)
+ return -ENOLINK;
+ if (dev->dv_timings_signal_mode == NO_LOCK)
+ return -ENOLCK;
+ if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
+ timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
+ return -ERANGE;
+ }
+ if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
+ *timings = dev->dv_timings_cap;
+ } else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
+ *timings = v4l2_dv_timings_presets[dev->query_dv_timings];
+ } else {
+ *timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
+ dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
+ dev->query_dv_timings_size;
+ }
+ return 0;
+}
+
+int vidioc_s_edid(struct file *file, void *_fh,
+ struct v4l2_edid *edid)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ u16 phys_addr;
+ unsigned int i;
+ int ret;
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (edid->pad >= dev->num_inputs)
+ return -EINVAL;
+ if (dev->input_type[edid->pad] != HDMI || edid->start_block)
+ return -EINVAL;
+ if (edid->blocks == 0) {
+ dev->edid_blocks = 0;
+ phys_addr = CEC_PHYS_ADDR_INVALID;
+ goto set_phys_addr;
+ }
+ if (edid->blocks > dev->edid_max_blocks) {
+ edid->blocks = dev->edid_max_blocks;
+ return -E2BIG;
+ }
+ phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
+ ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
+ if (ret)
+ return ret;
+
+ if (vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+
+ dev->edid_blocks = edid->blocks;
+ memcpy(dev->edid, edid->edid, edid->blocks * 128);
+
+set_phys_addr:
+ /* TODO: a proper hotplug detect cycle should be emulated here */
+ cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);
+
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
+ cec_s_phys_addr(dev->cec_tx_adap[i],
+ v4l2_phys_addr_for_input(phys_addr, i + 1),
+ false);
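+ /*
+ * Illustrative: if the new EDID reports physical address 1.0.0.0, the
+ * CEC transmitters get the child addresses 1.1.0.0, 1.2.0.0, ...
+ */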
+ return 0;
+}
+
+int vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
+ return -EINVAL;
+ if (vivid_get_format(dev, fsize->pixel_format) == NULL)
+ return -EINVAL;
+ if (vivid_is_webcam(dev)) {
+ if (fsize->index >= ARRAY_SIZE(webcam_sizes))
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete = webcam_sizes[fsize->index];
+ return 0;
+ }
+ if (fsize->index)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = MIN_WIDTH;
+ fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.min_height = MIN_HEIGHT;
+ fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
+ fsize->stepwise.step_height = 2;
+ return 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+ int i;
+
+ fmt = vivid_get_format(dev, fival->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ if (!vivid_is_webcam(dev)) {
+ if (fival->index)
+ return -EINVAL;
+ if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
+ return -EINVAL;
+ if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
+ return -EINVAL;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = dev->timeperframe_vid_cap;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
+ if (fival->width == webcam_sizes[i].width &&
+ fival->height == webcam_sizes[i].height)
+ break;
+ if (i == ARRAY_SIZE(webcam_sizes))
+ return -EINVAL;
+ if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
+ return -EINVAL;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = webcam_intervals[fival->index];
+ return 0;
+}
+
+int vivid_vid_cap_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (parm->type != (dev->multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
+
+#define FRACT_CMP(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)
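+/*
+ * Illustrative: comparing a = 1/30 and b = 1/25 with FRACT_CMP(a, <, b)
+ * evaluates 1 * 25 < 1 * 30, which is true, matching 1/30 s < 1/25 s
+ * without any integer division.
+ */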
+
+int vivid_vid_cap_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
+ struct v4l2_fract tpf;
+ unsigned i;
+
+ if (parm->type != (dev->multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ return -EINVAL;
+ if (!vivid_is_webcam(dev))
+ return vivid_vid_cap_g_parm(file, priv, parm);
+
+ tpf = parm->parm.capture.timeperframe;
+
+ if (tpf.denominator == 0)
+ tpf = webcam_intervals[ival_sz - 1];
+ for (i = 0; i < ival_sz; i++)
+ if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
+ break;
+ if (i == ival_sz)
+ i = ival_sz - 1;
+ dev->webcam_ival_idx = i;
+ tpf = webcam_intervals[dev->webcam_ival_idx];
+ tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
+ tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
+
+ /* resync the thread's timings */
+ dev->cap_seq_resync = true;
+ dev->timeperframe_vid_cap = tpf;
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = tpf;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.h b/drivers/media/platform/vivid/vivid-vid-cap.h
new file mode 100644
index 000000000..47d8b4882
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-cap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-vid-cap.h - video capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_VID_CAP_H_
+#define _VIVID_VID_CAP_H_
+
+void vivid_update_quality(struct vivid_dev *dev);
+void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls);
+enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev);
+
+extern const v4l2_std_id vivid_standard[];
+extern const char * const vivid_ctrl_standard_strings[];
+
+extern const struct vb2_ops vivid_vid_cap_qops;
+
+int vivid_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_cap_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
+int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_cap_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cap);
+int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i);
+int vivid_vid_cap_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a);
+int vivid_vid_cap_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a);
+int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp);
+int vidioc_g_input(struct file *file, void *priv, unsigned *i);
+int vidioc_s_input(struct file *file, void *priv, unsigned i);
+int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin);
+int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin);
+int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin);
+int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
+int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
+int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id);
+int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id);
+int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vidioc_query_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vidioc_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
+int vidioc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize);
+int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fival);
+int vivid_vid_cap_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
+int vivid_vid_cap_s_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
new file mode 100644
index 000000000..e108e9bef
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-vid-common.c - common video support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+
+const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
+};
+
+/* ------------------------------------------------------------------
+ Basic structures
+ ------------------------------------------------------------------*/
+
+struct vivid_fmt vivid_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 1,
+ .buffers = 1,
+ .data_offset = { PLANE0_DATA_OFFSET },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV24,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 16 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV42,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 16 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV555, /* uuuvvvvv ayyyyyuu */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x8000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV565, /* uuuvvvvv yyyyyuuu */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV444, /* uuuuvvvv aaaayyyy */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0xf000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV32, /* ayuv */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x000000ff,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y16_BE,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB332, /* rrrgggbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB444, /* xxxxrrrr ggggbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB444, /* xxxxrrrr ggggbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444, /* aaaarrrr ggggbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x00f0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb xrrrrrgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb xrrrrrgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ .alpha_mask = 0x8000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB555X, /* xrrrrrgg gggbbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB555X, /* xrrrrrgg gggbbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555X, /* arrrrrgg gggbbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x0080,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 24 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .vdownsampling = { 1 },
+ .bit_depth = { 24 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR666, /* bbbbbbgg ggggrrrr rrxxxxxx */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB32, /* xrgb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR32, /* bgrx */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB32, /* xrgb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32, /* bgrx */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32, /* argb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x000000ff,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32, /* bgra */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0xff000000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8, /* Bayer BG/GR */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8, /* Bayer GB/RG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8, /* Bayer GR/BG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8, /* Bayer RG/GB */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10, /* Bayer BG/GR */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10, /* Bayer GB/RG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10, /* Bayer GR/BG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10, /* Bayer RG/GB */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12, /* Bayer BG/GR */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12, /* Bayer GB/RG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12, /* Bayer GR/BG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12, /* Bayer RG/GB */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HSV24, /* HSV 24bits */
+ .color_enc = TGP_COLOR_ENC_HSV,
+ .vdownsampling = { 1 },
+ .bit_depth = { 24 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HSV32, /* HSV 32bits */
+ .color_enc = TGP_COLOR_ENC_HSV,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+
+ /* Multiplanar formats */
+
+ {
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 2,
+ .data_offset = { PLANE0_DATA_OFFSET, 0 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61M,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 2,
+ .data_offset = { 0, PLANE0_DATA_OFFSET },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420M,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 2,
+ .buffers = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU422M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV444M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU444M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 8, 8 },
+ .color_enc = TGP_COLOR_ENC_YCBCR,
+ .planes = 3,
+ .buffers = 3,
+ },
+};
+
+/* There are this many multiplanar formats in the list */
+#define VIVID_MPLANAR_FORMATS 10
+
+const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat)
+{
+ const struct vivid_fmt *fmt;
+ unsigned k;
+
+ for (k = 0; k < ARRAY_SIZE(vivid_formats); k++) {
+ fmt = &vivid_formats[k];
+ if (fmt->fourcc == pixelformat)
+ if (fmt->buffers == 1 || dev->multiplanar)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+bool vivid_vid_can_loop(struct vivid_dev *dev)
+{
+ if (dev->src_rect.width != dev->sink_rect.width ||
+ dev->src_rect.height != dev->sink_rect.height)
+ return false;
+ if (dev->fmt_cap->fourcc != dev->fmt_out->fourcc)
+ return false;
+ if (dev->field_cap != dev->field_out)
+ return false;
+ /*
+ * While this can be supported, it is just too much work
+ * to actually implement.
+ */
+ if (dev->field_cap == V4L2_FIELD_SEQ_TB ||
+ dev->field_cap == V4L2_FIELD_SEQ_BT)
+ return false;
+ if (vivid_is_svid_cap(dev) && vivid_is_svid_out(dev)) {
+ if (!(dev->std_cap & V4L2_STD_525_60) !=
+ !(dev->std_out & V4L2_STD_525_60))
+ return false;
+ return true;
+ }
+ if (vivid_is_hdmi_cap(dev) && vivid_is_hdmi_out(dev))
+ return true;
+ return false;
+}
+
+void vivid_send_source_change(struct vivid_dev *dev, unsigned type)
+{
+ struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ unsigned i;
+
+ for (i = 0; i < dev->num_inputs; i++) {
+ ev.id = i;
+ if (dev->input_type[i] == type) {
+ if (video_is_registered(&dev->vid_cap_dev) && dev->has_vid_cap)
+ v4l2_event_queue(&dev->vid_cap_dev, &ev);
+ if (video_is_registered(&dev->vbi_cap_dev) && dev->has_vbi_cap)
+ v4l2_event_queue(&dev->vbi_cap_dev, &ev);
+ }
+ }
+}
+
+/*
+ * Convert a single-planar format to its single-plane multiplanar
+ * equivalent.
+ */
+void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
+{
+ struct v4l2_pix_format_mplane *mp = &mp_fmt->fmt.pix_mp;
+ struct v4l2_plane_pix_format *ppix = &mp->plane_fmt[0];
+ const struct v4l2_pix_format *pix = &sp_fmt->fmt.pix;
+ bool is_out = sp_fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ memset(mp->reserved, 0, sizeof(mp->reserved));
+ mp_fmt->type = is_out ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ mp->width = pix->width;
+ mp->height = pix->height;
+ mp->pixelformat = pix->pixelformat;
+ mp->field = pix->field;
+ mp->colorspace = pix->colorspace;
+ mp->xfer_func = pix->xfer_func;
+ /* Also copies hsv_enc */
+ mp->ycbcr_enc = pix->ycbcr_enc;
+ mp->quantization = pix->quantization;
+ mp->num_planes = 1;
+ mp->flags = pix->flags;
+ ppix->sizeimage = pix->sizeimage;
+ ppix->bytesperline = pix->bytesperline;
+ memset(ppix->reserved, 0, sizeof(ppix->reserved));
+}
+
+int fmt_sp2mp_func(struct file *file, void *priv,
+ struct v4l2_format *f, fmtfunc func)
+{
+ struct v4l2_format fmt;
+ struct v4l2_pix_format_mplane *mp = &fmt.fmt.pix_mp;
+ struct v4l2_plane_pix_format *ppix = &mp->plane_fmt[0];
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+	/* Converts to an mplane format */
+ fmt_sp2mp(f, &fmt);
+ /* Passes it to the generic mplane format function */
+ ret = func(file, priv, &fmt);
+ /* Copies back the mplane data to the single plane format */
+ pix->width = mp->width;
+ pix->height = mp->height;
+ pix->pixelformat = mp->pixelformat;
+ pix->field = mp->field;
+ pix->colorspace = mp->colorspace;
+ pix->xfer_func = mp->xfer_func;
+ /* Also copies hsv_enc */
+ pix->ycbcr_enc = mp->ycbcr_enc;
+ pix->quantization = mp->quantization;
+ pix->sizeimage = ppix->sizeimage;
+ pix->bytesperline = ppix->bytesperline;
+ pix->flags = mp->flags;
+ return ret;
+}
+
+int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r)
+{
+ unsigned w = r->width;
+ unsigned h = r->height;
+
+ /* sanitize w and h in case someone passes ~0 as the value */
+ w &= 0xffff;
+ h &= 0xffff;
+ if (!(flags & V4L2_SEL_FLAG_LE)) {
+ w++;
+ h++;
+ if (w < 2)
+ w = 2;
+ if (h < 2)
+ h = 2;
+ }
+ if (!(flags & V4L2_SEL_FLAG_GE)) {
+ if (w > MAX_WIDTH)
+ w = MAX_WIDTH;
+ if (h > MAX_HEIGHT)
+ h = MAX_HEIGHT;
+ }
+ w = w & ~1;
+ h = h & ~1;
+ if (w < 2 || h < 2)
+ return -ERANGE;
+ if (w > MAX_WIDTH || h > MAX_HEIGHT)
+ return -ERANGE;
+ if (r->top < 0)
+ r->top = 0;
+ if (r->left < 0)
+ r->left = 0;
+ /* sanitize left and top in case someone passes ~0 as the value */
+ r->left &= 0xfffe;
+ r->top &= 0xfffe;
+ if (r->left + w > MAX_WIDTH)
+ r->left = MAX_WIDTH - w;
+ if (r->top + h > MAX_HEIGHT)
+ r->top = MAX_HEIGHT - h;
+ if ((flags & (V4L2_SEL_FLAG_GE | V4L2_SEL_FLAG_LE)) ==
+ (V4L2_SEL_FLAG_GE | V4L2_SEL_FLAG_LE) &&
+ (r->width != w || r->height != h))
+ return -ERANGE;
+ r->width = w;
+ r->height = h;
+ return 0;
+}
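
The flag handling above follows the V4L2 selection API: with V4L2_SEL_FLAG_GE the adjusted rectangle may only grow, with V4L2_SEL_FLAG_LE it may only shrink, and with both flags set the request must fit exactly or -ERANGE is returned. A minimal userspace sketch exercising this (illustrative only; the device path and sizes are assumptions, not taken from this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel;
	int fd = open("/dev/video1", O_RDWR);	/* assumed vivid output node */

	if (fd < 0)
		return 1;
	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	sel.target = V4L2_SEL_TGT_COMPOSE;
	sel.flags = V4L2_SEL_FLAG_GE;		/* result may only grow */
	sel.r.width = 640;
	sel.r.height = 360;
	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) == 0)
		printf("granted %ux%u\n", sel.r.width, sel.r.height);
	return 0;
}
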
+
+int vivid_enum_fmt_vid(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(vivid_formats) -
+ (dev->multiplanar ? 0 : VIVID_MPLANAR_FORMATS))
+ return -EINVAL;
+
+ fmt = &vivid_formats[f->index];
+
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+int vidioc_enum_fmt_vid_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_enum_fmt_vid(file, priv, f);
+}
+
+int vidioc_enum_fmt_vid(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return vivid_enum_fmt_vid(file, priv, f);
+}
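
Single-planar and multiplanar nodes share one format table; the multiplanar entries sit at the end, so a single-planar vivid instance simply hides the last VIVID_MPLANAR_FORMATS entries. From userspace the list is walked with VIDIOC_ENUM_FMT until -EINVAL, e.g. (illustrative sketch; the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_fmtdesc desc;
	unsigned int i;
	int fd = open("/dev/video0", O_RDWR);	/* assumed vivid capture node */

	if (fd < 0)
		return 1;
	for (i = 0; ; i++) {
		memset(&desc, 0, sizeof(desc));
		desc.index = i;
		desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (ioctl(fd, VIDIOC_ENUM_FMT, &desc))
			break;			/* -EINVAL: end of the list */
		printf("%2u: %.4s\n", i, (char *)&desc.pixelformat);
	}
	return 0;
}
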
+
+int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_sdtv_cap(dev))
+ return -ENODATA;
+ *id = dev->std_cap;
+ } else {
+ if (!vivid_is_svid_out(dev))
+ return -ENODATA;
+ *id = dev->std_out;
+ }
+ return 0;
+}
+
+int vidioc_g_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ *timings = dev->dv_timings_cap;
+ } else {
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ *timings = dev->dv_timings_out;
+ }
+ return 0;
+}
+
+int vidioc_enum_dv_timings(struct file *file, void *_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ } else {
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ }
+ return v4l2_enum_dv_timings_cap(timings, &vivid_dv_timings_cap,
+ NULL, NULL);
+}
+
+int vidioc_dv_timings_cap(struct file *file, void *_fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ } else {
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ }
+ *cap = vivid_dv_timings_cap;
+ return 0;
+}
+
+int vidioc_g_edid(struct file *file, void *_fh,
+ struct v4l2_edid *edid)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+ struct cec_adapter *adap;
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (edid->pad >= dev->num_inputs)
+ return -EINVAL;
+ if (dev->input_type[edid->pad] != HDMI)
+ return -EINVAL;
+ adap = dev->cec_rx_adap;
+ } else {
+ unsigned int bus_idx;
+
+ if (edid->pad >= dev->num_outputs)
+ return -EINVAL;
+ if (dev->output_type[edid->pad] != HDMI)
+ return -EINVAL;
+ bus_idx = dev->cec_output2bus_map[edid->pad];
+ adap = dev->cec_tx_adap[bus_idx];
+ }
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = dev->edid_blocks;
+ return 0;
+ }
+ if (dev->edid_blocks == 0)
+ return -ENODATA;
+ if (edid->start_block >= dev->edid_blocks)
+ return -EINVAL;
+ if (edid->blocks > dev->edid_blocks - edid->start_block)
+ edid->blocks = dev->edid_blocks - edid->start_block;
+ if (adap)
+ v4l2_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
+ memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
+ return 0;
+}
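
vidioc_g_edid() implements the usual two-step VIDIOC_G_EDID protocol: a call with start_block == blocks == 0 only reports how many 128-byte blocks are available, after which the caller allocates a buffer and reads them; the stored EDID additionally gets the CEC physical address patched in before the copy. A hedged userspace sketch (device path and pad index are assumptions, not taken from this patch):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_edid edid;
	int fd = open("/dev/video0", O_RDWR);	/* assumed node with an HDMI input */

	if (fd < 0)
		return 1;
	memset(&edid, 0, sizeof(edid));
	edid.pad = 0;				/* input (or output) index */
	if (ioctl(fd, VIDIOC_G_EDID, &edid))	/* blocks == 0: query the size */
		return 1;
	edid.edid = malloc(edid.blocks * 128);
	if (!edid.edid)
		return 1;
	if (ioctl(fd, VIDIOC_G_EDID, &edid))	/* now read all blocks */
		return 1;
	printf("EDID: %u block(s), first bytes %02x %02x\n",
	       edid.blocks, edid.edid[0], edid.edid[1]);
	free(edid.edid);
	return 0;
}
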
diff --git a/drivers/media/platform/vivid/vivid-vid-common.h b/drivers/media/platform/vivid/vivid-vid-common.h
new file mode 100644
index 000000000..29b6c0b40
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-common.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-vid-common.h - common video support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_VID_COMMON_H_
+#define _VIVID_VID_COMMON_H_
+
+typedef int (*fmtfunc)(struct file *file, void *priv, struct v4l2_format *f);
+
+/*
+ * Convert a single-planar format to its single-plane multiplanar
+ * equivalent.
+ */
+void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt);
+int fmt_sp2mp_func(struct file *file, void *priv,
+ struct v4l2_format *f, fmtfunc func);
+
+extern const struct v4l2_dv_timings_cap vivid_dv_timings_cap;
+
+const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat);
+
+bool vivid_vid_can_loop(struct vivid_dev *dev);
+void vivid_send_source_change(struct vivid_dev *dev, unsigned type);
+
+int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r);
+
+int vivid_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_enum_fmt_vid_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id);
+int vidioc_g_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vidioc_enum_dv_timings(struct file *file, void *_fh, struct v4l2_enum_dv_timings *timings);
+int vidioc_dv_timings_cap(struct file *file, void *_fh, struct v4l2_dv_timings_cap *cap);
+int vidioc_g_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
+int vidioc_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
new file mode 100644
index 000000000..ecd9e36ef
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -0,0 +1,1174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-vid-out.c - video output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-rect.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-kthread-out.h"
+#include "vivid-vid-out.h"
+
+static int vid_out_queue_setup(struct vb2_queue *vq,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ const struct vivid_fmt *vfmt = dev->fmt_out;
+ unsigned planes = vfmt->buffers;
+ unsigned h = dev->fmt_out_rect.height;
+ unsigned size = dev->bytesperline_out[0] * h;
+ unsigned p;
+
+ for (p = vfmt->buffers; p < vfmt->planes; p++)
+ size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+
+ if (dev->field_out == V4L2_FIELD_ALTERNATE) {
+ /*
+ * You cannot use write() with FIELD_ALTERNATE since the field
+ * information (TOP/BOTTOM) cannot be passed to the kernel.
+ */
+ if (vb2_fileio_is_active(vq))
+ return -EINVAL;
+ }
+
+ if (dev->queue_setup_error) {
+ /*
+ * Error injection: test what happens if queue_setup() returns
+ * an error.
+ */
+ dev->queue_setup_error = false;
+ return -EINVAL;
+ }
+
+ if (*nplanes) {
+ /*
+		 * Check that the number of requested planes matches the
+		 * number of planes in the current format; mixing them is not allowed.
+ */
+ if (*nplanes != planes)
+ return -EINVAL;
+ if (sizes[0] < size)
+ return -EINVAL;
+ for (p = 1; p < planes; p++) {
+ if (sizes[p] < dev->bytesperline_out[p] * h)
+ return -EINVAL;
+ }
+ } else {
+ for (p = 0; p < planes; p++)
+ sizes[p] = p ? dev->bytesperline_out[p] * h : size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = planes;
+
+ dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
+ for (p = 0; p < planes; p++)
+ dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
+ return 0;
+}
+
+static int vid_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+ unsigned planes;
+ unsigned p;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (WARN_ON(NULL == dev->fmt_out))
+ return -EINVAL;
+
+ planes = dev->fmt_out->planes;
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+
+ if (dev->field_out != V4L2_FIELD_ALTERNATE)
+ vbuf->field = dev->field_out;
+ else if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
+ return -EINVAL;
+
+ for (p = 0; p < planes; p++) {
+ size = dev->bytesperline_out[p] * dev->fmt_out_rect.height +
+ vb->planes[p].data_offset;
+
+ if (vb2_get_plane_payload(vb, p) < size) {
+ dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n",
+ __func__, p, vb2_get_plane_payload(vb, p), size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void vid_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vid_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ if (vb2_is_streaming(&dev->vb_vid_cap_q))
+ dev->can_loop_video = vivid_vid_can_loop(dev);
+
+ dev->vid_out_seq_count = 0;
+ dprintk(dev, 1, "%s\n", __func__);
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev, &dev->vid_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vid_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->vid_out_streaming);
+ dev->can_loop_video = false;
+}
+
+const struct vb2_ops vivid_vid_out_qops = {
+ .queue_setup = vid_out_queue_setup,
+ .buf_prepare = vid_out_buf_prepare,
+ .buf_queue = vid_out_buf_queue,
+ .start_streaming = vid_out_start_streaming,
+ .stop_streaming = vid_out_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * Called whenever the format has to be reset, which can occur when
+ * changing outputs, standard, timings, etc.
+ */
+void vivid_update_format_out(struct vivid_dev *dev)
+{
+ struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
+ unsigned size, p;
+ u64 pixelclock;
+
+ switch (dev->output_type[dev->output]) {
+ case SVID:
+ default:
+ dev->field_out = dev->tv_field_out;
+ dev->sink_rect.width = 720;
+ if (dev->std_out & V4L2_STD_525_60) {
+ dev->sink_rect.height = 480;
+ dev->timeperframe_vid_out = (struct v4l2_fract) { 1001, 30000 };
+ dev->service_set_out = V4L2_SLICED_CAPTION_525;
+ } else {
+ dev->sink_rect.height = 576;
+ dev->timeperframe_vid_out = (struct v4l2_fract) { 1000, 25000 };
+ dev->service_set_out = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ }
+ dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
+ break;
+ case HDMI:
+ dev->sink_rect.width = bt->width;
+ dev->sink_rect.height = bt->height;
+ size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
+
+ if (can_reduce_fps(bt) && (bt->flags & V4L2_DV_FL_REDUCED_FPS))
+ pixelclock = div_u64(bt->pixelclock * 1000, 1001);
+ else
+ pixelclock = bt->pixelclock;
+
+ dev->timeperframe_vid_out = (struct v4l2_fract) {
+ size / 100, (u32)pixelclock / 100
+ };
+ if (bt->interlaced)
+ dev->field_out = V4L2_FIELD_ALTERNATE;
+ else
+ dev->field_out = V4L2_FIELD_NONE;
+ if (!dev->dvi_d_out && (bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) {
+ if (bt->width == 720 && bt->height <= 576)
+ dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
+ else
+ dev->colorspace_out = V4L2_COLORSPACE_REC709;
+ } else {
+ dev->colorspace_out = V4L2_COLORSPACE_SRGB;
+ }
+ break;
+ }
+ dev->xfer_func_out = V4L2_XFER_FUNC_DEFAULT;
+ dev->ycbcr_enc_out = V4L2_YCBCR_ENC_DEFAULT;
+ dev->hsv_enc_out = V4L2_HSV_ENC_180;
+ dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
+ dev->compose_out = dev->sink_rect;
+ dev->compose_bounds_out = dev->sink_rect;
+ dev->crop_out = dev->compose_out;
+ if (V4L2_FIELD_HAS_T_OR_B(dev->field_out))
+ dev->crop_out.height /= 2;
+ dev->fmt_out_rect = dev->crop_out;
+ for (p = 0; p < dev->fmt_out->planes; p++)
+ dev->bytesperline_out[p] =
+ (dev->sink_rect.width * dev->fmt_out->bit_depth[p]) / 8;
+}
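
In the HDMI branch the frame period is simply the blanking-inclusive frame size divided by the pixel clock, with both terms divided by 100 so the v4l2_fract fits comfortably in 32 bits. As an illustration using the standard CEA-861 1080p60 timing (generic values, not taken from this patch):

	frame size   = 2200 * 1125 = 2475000 pixels   (incl. blanking)
	pixelclock   = 148500000 Hz
	timeperframe = { 2475000 / 100, 148500000 / 100 }
	             = { 24750, 1485000 }  =  1/60 s
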
+
+/* Map the field to something that is valid for the current output */
+static enum v4l2_field vivid_field_out(struct vivid_dev *dev, enum v4l2_field field)
+{
+ if (vivid_is_svid_out(dev)) {
+ switch (field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_ALTERNATE:
+ return field;
+ case V4L2_FIELD_INTERLACED:
+ default:
+ return V4L2_FIELD_INTERLACED;
+ }
+ }
+ if (vivid_is_hdmi_out(dev))
+ return dev->dv_timings_out.bt.interlaced ? V4L2_FIELD_ALTERNATE :
+ V4L2_FIELD_NONE;
+ return V4L2_FIELD_NONE;
+}
+
+static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
+{
+ if (vivid_is_svid_out(dev))
+ return (dev->std_out & V4L2_STD_525_60) ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ if (vivid_is_hdmi_out(dev) &&
+ dev->sink_rect.width == 720 && dev->sink_rect.height <= 576)
+ return dev->sink_rect.height == 480 ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ return TPG_PIXEL_ASPECT_SQUARE;
+}
+
+int vivid_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ const struct vivid_fmt *fmt = dev->fmt_out;
+ unsigned p;
+
+ mp->width = dev->fmt_out_rect.width;
+ mp->height = dev->fmt_out_rect.height;
+ mp->field = dev->field_out;
+ mp->pixelformat = fmt->fourcc;
+ mp->colorspace = dev->colorspace_out;
+ mp->xfer_func = dev->xfer_func_out;
+ mp->ycbcr_enc = dev->ycbcr_enc_out;
+ mp->quantization = dev->quantization_out;
+ mp->num_planes = fmt->buffers;
+ for (p = 0; p < mp->num_planes; p++) {
+ mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
+ mp->plane_fmt[p].sizeimage =
+ mp->plane_fmt[p].bytesperline * mp->height;
+ }
+ for (p = fmt->buffers; p < fmt->planes; p++) {
+ unsigned stride = dev->bytesperline_out[p];
+
+ mp->plane_fmt[0].sizeimage +=
+ (stride * mp->height) / fmt->vdownsampling[p];
+ }
+ return 0;
+}
+
+int vivid_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
+ const struct vivid_fmt *fmt;
+ unsigned bytesperline, max_bpl;
+ unsigned factor = 1;
+ unsigned w, h;
+ unsigned p;
+
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ if (!fmt) {
+ dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
+ mp->pixelformat);
+ mp->pixelformat = V4L2_PIX_FMT_YUYV;
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ }
+
+ mp->field = vivid_field_out(dev, mp->field);
+ if (vivid_is_svid_out(dev)) {
+ w = 720;
+ h = (dev->std_out & V4L2_STD_525_60) ? 480 : 576;
+ } else {
+ w = dev->sink_rect.width;
+ h = dev->sink_rect.height;
+ }
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+ if (!dev->has_scaler_out && !dev->has_crop_out && !dev->has_compose_out) {
+ mp->width = w;
+ mp->height = h / factor;
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
+
+ v4l2_rect_set_min_size(&r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&r, &vivid_max_rect);
+ if (dev->has_scaler_out && !dev->has_crop_out) {
+ struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
+
+ v4l2_rect_set_max_size(&r, &max_r);
+ } else if (!dev->has_scaler_out && dev->has_compose_out && !dev->has_crop_out) {
+ v4l2_rect_set_max_size(&r, &dev->sink_rect);
+ } else if (!dev->has_scaler_out && !dev->has_compose_out) {
+ v4l2_rect_set_min_size(&r, &dev->sink_rect);
+ }
+ mp->width = r.width;
+ mp->height = r.height / factor;
+ }
+
+ /* This driver supports custom bytesperline values */
+
+ mp->num_planes = fmt->buffers;
+ for (p = 0; p < fmt->buffers; p++) {
+ /* Calculate the minimum supported bytesperline value */
+ bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
+ /* Calculate the maximum supported bytesperline value */
+ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
+
+ if (pfmt[p].bytesperline > max_bpl)
+ pfmt[p].bytesperline = max_bpl;
+ if (pfmt[p].bytesperline < bytesperline)
+ pfmt[p].bytesperline = bytesperline;
+
+ pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
+ fmt->vdownsampling[p];
+
+ memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
+ }
+ for (p = fmt->buffers; p < fmt->planes; p++)
+ pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
+ (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
+ (fmt->bit_depth[0] / fmt->vdownsampling[0]);
+
+ mp->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mp->quantization = V4L2_QUANTIZATION_DEFAULT;
+ if (vivid_is_svid_out(dev)) {
+ mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ } else if (dev->dvi_d_out || !(bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) {
+ mp->colorspace = V4L2_COLORSPACE_SRGB;
+ if (dev->dvi_d_out)
+ mp->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ } else if (bt->width == 720 && bt->height <= 576) {
+ mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ } else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M &&
+ mp->colorspace != V4L2_COLORSPACE_REC709 &&
+ mp->colorspace != V4L2_COLORSPACE_OPRGB &&
+ mp->colorspace != V4L2_COLORSPACE_BT2020 &&
+ mp->colorspace != V4L2_COLORSPACE_SRGB) {
+ mp->colorspace = V4L2_COLORSPACE_REC709;
+ }
+ memset(mp->reserved, 0, sizeof(mp->reserved));
+ return 0;
+}
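
For formats that pack several planes into a single buffer (planes > buffers), the loop above folds the extra planes into plane 0's sizeimage, scaled by each plane's bits per pixel over its vertical downsampling, relative to plane 0. A worked example for a hypothetical single-buffer 4:2:0 layout (planes = 3, buffers = 1, bit_depth { 8, 4, 4 }, vdownsampling { 1, 2, 2 }) at 640x480:

	bytesperline[0] = 640 * 8 / 8 = 640
	sizeimage       = 640 * 480 / 1               = 307200   (luma)
	                + 640 * 480 * (4/2) / (8/1)   =  76800   (Cb)
	                + 640 * 480 * (4/2) / (8/1)   =  76800   (Cr)
	                                              = 460800 bytes (1.5 bytes/pixel)
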
+
+int vivid_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_out;
+ struct v4l2_rect *compose = &dev->compose_out;
+ struct vb2_queue *q = &dev->vb_vid_out_q;
+ int ret = vivid_try_fmt_vid_out(file, priv, f);
+ unsigned factor = 1;
+ unsigned p;
+
+ if (ret < 0)
+ return ret;
+
+ if (vb2_is_busy(q) &&
+ (vivid_is_svid_out(dev) ||
+ mp->width != dev->fmt_out_rect.width ||
+ mp->height != dev->fmt_out_rect.height ||
+ mp->pixelformat != dev->fmt_out->fourcc ||
+ mp->field != dev->field_out)) {
+ dprintk(dev, 1, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Allow for changing the colorspace on the fly. Useful for testing
+ * purposes, and it is something that HDMI transmitters are able
+ * to do.
+ */
+ if (vb2_is_busy(q))
+ goto set_colorspace;
+
+ dev->fmt_out = vivid_get_format(dev, mp->pixelformat);
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+
+ if (dev->has_scaler_out || dev->has_crop_out || dev->has_compose_out) {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ if (dev->has_scaler_out) {
+ if (dev->has_crop_out)
+ v4l2_rect_map_inside(crop, &r);
+ else
+ *crop = r;
+ if (dev->has_compose_out && !dev->has_crop_out) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ r.width / MAX_ZOOM,
+ factor * r.height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ r.width * MAX_ZOOM,
+ factor * r.height * MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(compose, &min_r);
+ v4l2_rect_set_max_size(compose, &max_r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
+ } else if (dev->has_compose_out) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ crop->width / MAX_ZOOM,
+ factor * crop->height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ crop->width * MAX_ZOOM,
+ factor * crop->height * MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(compose, &min_r);
+ v4l2_rect_set_max_size(compose, &max_r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
+ }
+ } else if (dev->has_compose_out && !dev->has_crop_out) {
+ v4l2_rect_set_size_to(crop, &r);
+ r.height *= factor;
+ v4l2_rect_set_size_to(compose, &r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
+ } else if (!dev->has_compose_out) {
+ v4l2_rect_map_inside(crop, &r);
+ r.height /= factor;
+ v4l2_rect_set_size_to(compose, &r);
+ } else {
+ r.height *= factor;
+ v4l2_rect_set_max_size(compose, &r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
+ crop->top *= factor;
+ crop->height *= factor;
+ v4l2_rect_set_size_to(crop, compose);
+ v4l2_rect_map_inside(crop, &r);
+ crop->top /= factor;
+ crop->height /= factor;
+ }
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ v4l2_rect_set_size_to(crop, &r);
+ r.height /= factor;
+ v4l2_rect_set_size_to(compose, &r);
+ }
+
+ dev->fmt_out_rect.width = mp->width;
+ dev->fmt_out_rect.height = mp->height;
+ for (p = 0; p < mp->num_planes; p++)
+ dev->bytesperline_out[p] = mp->plane_fmt[p].bytesperline;
+ for (p = dev->fmt_out->buffers; p < dev->fmt_out->planes; p++)
+ dev->bytesperline_out[p] =
+ (dev->bytesperline_out[0] * dev->fmt_out->bit_depth[p]) /
+ dev->fmt_out->bit_depth[0];
+ dev->field_out = mp->field;
+ if (vivid_is_svid_out(dev))
+ dev->tv_field_out = mp->field;
+
+set_colorspace:
+ dev->colorspace_out = mp->colorspace;
+ dev->xfer_func_out = mp->xfer_func;
+ dev->ycbcr_enc_out = mp->ycbcr_enc;
+ dev->quantization_out = mp->quantization;
+ if (dev->loop_video) {
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ }
+ return 0;
+}
+
+int vidioc_g_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_g_fmt_vid_out(file, priv, f);
+}
+
+int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_try_fmt_vid_out(file, priv, f);
+}
+
+int vidioc_s_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_s_fmt_vid_out(file, priv, f);
+}
+
+int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_out);
+}
+
+int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_out);
+}
+
+int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_out);
+}
+
+int vivid_vid_out_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->has_crop_out && !dev->has_compose_out)
+ return -ENOTTY;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ sel->r.left = sel->r.top = 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_out)
+ return -EINVAL;
+ sel->r = dev->crop_out;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (!dev->has_crop_out)
+ return -EINVAL;
+ sel->r = dev->fmt_out_rect;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (!dev->has_crop_out)
+ return -EINVAL;
+ sel->r = vivid_max_rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_out)
+ return -EINVAL;
+ sel->r = dev->compose_out;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (!dev->has_compose_out)
+ return -EINVAL;
+ sel->r = dev->sink_rect;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_out;
+ struct v4l2_rect *compose = &dev->compose_out;
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_out) ? 2 : 1;
+ int ret;
+
+ if (!dev->has_crop_out && !dev->has_compose_out)
+ return -ENOTTY;
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_out)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->fmt_out_rect);
+ if (dev->has_scaler_out) {
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ dev->sink_rect.width * MAX_ZOOM,
+ (dev->sink_rect.height / factor) * MAX_ZOOM
+ };
+
+ v4l2_rect_set_max_size(&s->r, &max_rect);
+ if (dev->has_compose_out) {
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ (s->r.height * factor) / MAX_ZOOM
+ };
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ (s->r.height * factor) * MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(compose, &min_rect);
+ v4l2_rect_set_max_size(compose, &max_rect);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
+ }
+ } else if (dev->has_compose_out) {
+ s->r.top *= factor;
+ s->r.height *= factor;
+ v4l2_rect_set_max_size(&s->r, &dev->sink_rect);
+ v4l2_rect_set_size_to(compose, &s->r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ } else {
+ v4l2_rect_set_size_to(&s->r, &dev->sink_rect);
+ s->r.height /= factor;
+ }
+ v4l2_rect_map_inside(&s->r, &dev->fmt_out_rect);
+ *crop = s->r;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_out)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->sink_rect);
+ v4l2_rect_map_inside(&s->r, &dev->compose_bounds_out);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ if (dev->has_scaler_out) {
+ struct v4l2_rect fmt = dev->fmt_out_rect;
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ s->r.height * MAX_ZOOM
+ };
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ s->r.height / MAX_ZOOM
+ };
+
+ v4l2_rect_set_min_size(&fmt, &min_rect);
+ if (!dev->has_crop_out)
+ v4l2_rect_set_max_size(&fmt, &max_rect);
+ if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ if (dev->has_crop_out) {
+ v4l2_rect_set_min_size(crop, &min_rect);
+ v4l2_rect_set_max_size(crop, &max_rect);
+ }
+ dev->fmt_out_rect = fmt;
+ } else if (dev->has_crop_out) {
+ struct v4l2_rect fmt = dev->fmt_out_rect;
+
+ v4l2_rect_set_min_size(&fmt, &s->r);
+ if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ dev->fmt_out_rect = fmt;
+ v4l2_rect_set_size_to(crop, &s->r);
+ v4l2_rect_map_inside(crop, &dev->fmt_out_rect);
+ } else {
+ if (!v4l2_rect_same_size(&s->r, &dev->fmt_out_rect) &&
+ vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ v4l2_rect_set_size_to(&dev->fmt_out_rect, &s->r);
+ v4l2_rect_set_size_to(crop, &s->r);
+ crop->height /= factor;
+ v4l2_rect_map_inside(crop, &dev->fmt_out_rect);
+ }
+ s->r.top *= factor;
+ s->r.height *= factor;
+ if (dev->bitmap_out && (compose->width != s->r.width ||
+ compose->height != s->r.height)) {
+ kfree(dev->bitmap_out);
+ dev->bitmap_out = NULL;
+ }
+ *compose = s->r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vivid_vid_out_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (cap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (vivid_get_pixel_aspect(dev)) {
+ case TPG_PIXEL_ASPECT_NTSC:
+ cap->pixelaspect.numerator = 11;
+ cap->pixelaspect.denominator = 10;
+ break;
+ case TPG_PIXEL_ASPECT_PAL:
+ cap->pixelaspect.numerator = 54;
+ cap->pixelaspect.denominator = 59;
+ break;
+ case TPG_PIXEL_ASPECT_SQUARE:
+ cap->pixelaspect.numerator = 1;
+ cap->pixelaspect.denominator = 1;
+ break;
+ }
+ return 0;
+}
+
+int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_out;
+ struct v4l2_window *win = &f->fmt.win;
+ unsigned clipcount = win->clipcount;
+
+ if (!dev->has_fb)
+ return -EINVAL;
+ win->w.top = dev->overlay_out_top;
+ win->w.left = dev->overlay_out_left;
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ win->clipcount = dev->clipcount_out;
+ win->field = V4L2_FIELD_ANY;
+ win->chromakey = dev->chromakey_out;
+ win->global_alpha = dev->global_alpha_out;
+ if (clipcount > dev->clipcount_out)
+ clipcount = dev->clipcount_out;
+ if (dev->bitmap_out == NULL)
+ win->bitmap = NULL;
+ else if (win->bitmap) {
+ if (copy_to_user(win->bitmap, dev->bitmap_out,
+ ((dev->compose_out.width + 7) / 8) * dev->compose_out.height))
+ return -EFAULT;
+ }
+ if (clipcount && win->clips) {
+ if (copy_to_user(win->clips, dev->clips_out,
+ clipcount * sizeof(dev->clips_out[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_out;
+ struct v4l2_window *win = &f->fmt.win;
+ int i, j;
+
+ if (!dev->has_fb)
+ return -EINVAL;
+ win->w.left = clamp_t(int, win->w.left,
+ -dev->display_width, dev->display_width);
+ win->w.top = clamp_t(int, win->w.top,
+ -dev->display_height, dev->display_height);
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ /*
+ * It makes no sense for an OSD to overlay only top or bottom fields,
+ * so always set this to ANY.
+ */
+ win->field = V4L2_FIELD_ANY;
+ if (win->clipcount && !win->clips)
+ win->clipcount = 0;
+ if (win->clipcount > MAX_CLIPS)
+ win->clipcount = MAX_CLIPS;
+ if (win->clipcount) {
+ if (copy_from_user(dev->try_clips_out, win->clips,
+ win->clipcount * sizeof(dev->clips_out[0])))
+ return -EFAULT;
+ for (i = 0; i < win->clipcount; i++) {
+ struct v4l2_rect *r = &dev->try_clips_out[i].c;
+
+ r->top = clamp_t(s32, r->top, 0, dev->display_height - 1);
+ r->height = clamp_t(s32, r->height, 1, dev->display_height - r->top);
+ r->left = clamp_t(u32, r->left, 0, dev->display_width - 1);
+ r->width = clamp_t(u32, r->width, 1, dev->display_width - r->left);
+ }
+ /*
+ * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
+ * number and it's typically a one-time deal.
+ */
+ for (i = 0; i < win->clipcount - 1; i++) {
+ struct v4l2_rect *r1 = &dev->try_clips_out[i].c;
+
+ for (j = i + 1; j < win->clipcount; j++) {
+ struct v4l2_rect *r2 = &dev->try_clips_out[j].c;
+
+ if (v4l2_rect_overlap(r1, r2))
+ return -EINVAL;
+ }
+ }
+ if (copy_to_user(win->clips, dev->try_clips_out,
+ win->clipcount * sizeof(dev->clips_out[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_out;
+ struct v4l2_window *win = &f->fmt.win;
+ int ret = vidioc_try_fmt_vid_out_overlay(file, priv, f);
+ unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
+ unsigned clips_size = win->clipcount * sizeof(dev->clips_out[0]);
+ void *new_bitmap = NULL;
+
+ if (ret)
+ return ret;
+
+ if (win->bitmap) {
+ new_bitmap = memdup_user(win->bitmap, bitmap_size);
+
+ if (IS_ERR(new_bitmap))
+ return PTR_ERR(new_bitmap);
+ }
+
+ dev->overlay_out_top = win->w.top;
+ dev->overlay_out_left = win->w.left;
+ kfree(dev->bitmap_out);
+ dev->bitmap_out = new_bitmap;
+ dev->clipcount_out = win->clipcount;
+ if (dev->clipcount_out)
+ memcpy(dev->clips_out, dev->try_clips_out, clips_size);
+ dev->chromakey_out = win->chromakey;
+ dev->global_alpha_out = win->global_alpha;
+ return ret;
+}
+
+int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (i && !dev->fmt_out->can_do_overlay) {
+ dprintk(dev, 1, "unsupported output format for output overlay\n");
+ return -EINVAL;
+ }
+
+ dev->overlay_out_enabled = i;
+ return 0;
+}
+
+int vivid_vid_out_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ a->capability = V4L2_FBUF_CAP_EXTERNOVERLAY |
+ V4L2_FBUF_CAP_BITMAP_CLIPPING |
+ V4L2_FBUF_CAP_LIST_CLIPPING |
+ V4L2_FBUF_CAP_CHROMAKEY |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY |
+ V4L2_FBUF_CAP_GLOBAL_ALPHA |
+ V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_LOCAL_INV_ALPHA;
+ a->flags = V4L2_FBUF_FLAG_OVERLAY | dev->fbuf_out_flags;
+ a->base = (void *)dev->video_pbase;
+ a->fmt.width = dev->display_width;
+ a->fmt.height = dev->display_height;
+ if (dev->fb_defined.green.length == 5)
+ a->fmt.pixelformat = V4L2_PIX_FMT_ARGB555;
+ else
+ a->fmt.pixelformat = V4L2_PIX_FMT_RGB565;
+ a->fmt.bytesperline = dev->display_byte_stride;
+ a->fmt.sizeimage = a->fmt.height * a->fmt.bytesperline;
+ a->fmt.field = V4L2_FIELD_NONE;
+ a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ a->fmt.priv = 0;
+ return 0;
+}
+
+int vivid_vid_out_s_fbuf(struct file *file, void *fh,
+ const struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const unsigned chroma_flags = V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ const unsigned alpha_flags = V4L2_FBUF_FLAG_GLOBAL_ALPHA |
+ V4L2_FBUF_FLAG_LOCAL_ALPHA |
+ V4L2_FBUF_FLAG_LOCAL_INV_ALPHA;
+
+ if ((a->flags & chroma_flags) == chroma_flags)
+ return -EINVAL;
+ switch (a->flags & alpha_flags) {
+ case 0:
+ case V4L2_FBUF_FLAG_GLOBAL_ALPHA:
+ case V4L2_FBUF_FLAG_LOCAL_ALPHA:
+ case V4L2_FBUF_FLAG_LOCAL_INV_ALPHA:
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
+ dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
+ return 0;
+}
+
+static const struct v4l2_audioout vivid_audio_outputs[] = {
+ { 0, "Line-Out 1" },
+ { 1, "Line-Out 2" },
+};
+
+int vidioc_enum_output(struct file *file, void *priv,
+ struct v4l2_output *out)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (out->index >= dev->num_outputs)
+ return -EINVAL;
+
+ out->type = V4L2_OUTPUT_TYPE_ANALOG;
+ switch (dev->output_type[out->index]) {
+ case SVID:
+ snprintf(out->name, sizeof(out->name), "S-Video %u",
+ dev->output_name_counter[out->index]);
+ out->std = V4L2_STD_ALL;
+ if (dev->has_audio_outputs)
+ out->audioset = (1 << ARRAY_SIZE(vivid_audio_outputs)) - 1;
+ out->capabilities = V4L2_OUT_CAP_STD;
+ break;
+ case HDMI:
+ snprintf(out->name, sizeof(out->name), "HDMI %u",
+ dev->output_name_counter[out->index]);
+ out->capabilities = V4L2_OUT_CAP_DV_TIMINGS;
+ break;
+ }
+ return 0;
+}
+
+int vidioc_g_output(struct file *file, void *priv, unsigned *o)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ *o = dev->output;
+ return 0;
+}
+
+int vidioc_s_output(struct file *file, void *priv, unsigned o)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (o >= dev->num_outputs)
+ return -EINVAL;
+
+ if (o == dev->output)
+ return 0;
+
+ if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+
+ dev->output = o;
+ dev->tv_audio_output = 0;
+ if (dev->output_type[o] == SVID)
+ dev->vid_out_dev.tvnorms = V4L2_STD_ALL;
+ else
+ dev->vid_out_dev.tvnorms = 0;
+
+ dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
+ vivid_update_format_out(dev);
+ return 0;
+}
+
+int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout)
+{
+ if (vout->index >= ARRAY_SIZE(vivid_audio_outputs))
+ return -EINVAL;
+ *vout = vivid_audio_outputs[vout->index];
+ return 0;
+}
+
+int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_svid_out(dev))
+ return -EINVAL;
+ *vout = vivid_audio_outputs[dev->tv_audio_output];
+ return 0;
+}
+
+int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_svid_out(dev))
+ return -EINVAL;
+ if (vout->index >= ARRAY_SIZE(vivid_audio_outputs))
+ return -EINVAL;
+ dev->tv_audio_output = vout->index;
+ return 0;
+}
+
+int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_svid_out(dev))
+ return -ENODATA;
+ if (dev->std_out == id)
+ return 0;
+ if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+ dev->std_out = id;
+ vivid_update_format_out(dev);
+ return 0;
+}
+
+static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ if ((bt->standards & (V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF)) &&
+ v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, NULL, NULL))
+ return true;
+
+ return false;
+}
+
+int vivid_vid_out_s_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
+ 0, NULL, NULL) &&
+ !valid_cvt_gtf_timings(timings))
+ return -EINVAL;
+ if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0, true))
+ return 0;
+ if (vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ dev->dv_timings_out = *timings;
+ vivid_update_format_out(dev);
+ return 0;
+}
+
+int vivid_vid_out_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (parm->type != (dev->multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.output.timeperframe = dev->timeperframe_vid_out;
+ parm->parm.output.writebuffers = 1;
+
+ return 0;
+}
+
+int vidioc_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ if (fh->vdev->vfl_dir == VFL_DIR_RX)
+ return v4l2_src_change_event_subscribe(fh, sub);
+ break;
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+ return -EINVAL;
+}
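
Source-change events are only offered on receiver (capture) nodes; every other event type falls through to the control framework. A hedged userspace sketch that waits for the output side to change the standard or timings (the device path is an assumption, not taken from this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	int fd = open("/dev/video0", O_RDWR);	/* assumed vivid capture node */

	if (fd < 0)
		return 1;
	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_SOURCE_CHANGE;
	sub.id = 0;				/* input index */
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub))
		return 1;
	/* Blocks (fd opened without O_NONBLOCK) until the source changes. */
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
	    ev.type == V4L2_EVENT_SOURCE_CHANGE)
		printf("source change on input %u\n", ev.id);
	return 0;
}
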
diff --git a/drivers/media/platform/vivid/vivid-vid-out.h b/drivers/media/platform/vivid/vivid-vid-out.h
new file mode 100644
index 000000000..e87aacf84
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-out.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-vid-out.h - video output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _VIVID_VID_OUT_H_
+#define _VIVID_VID_OUT_H_
+
+extern const struct vb2_ops vivid_vid_out_qops;
+
+void vivid_update_format_out(struct vivid_dev *dev);
+
+int vivid_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_out_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
+int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_out_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cap);
+int vidioc_enum_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i);
+int vivid_vid_out_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a);
+int vivid_vid_out_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a);
+int vidioc_enum_output(struct file *file, void *priv, struct v4l2_output *out);
+int vidioc_g_output(struct file *file, void *priv, unsigned *i);
+int vidioc_s_output(struct file *file, void *priv, unsigned i);
+int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout);
+int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout);
+int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout);
+int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id);
+int vivid_vid_out_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vivid_vid_out_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
+
+#endif
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
new file mode 100644
index 000000000..4bb4dcbef
--- /dev/null
+++ b/drivers/media/platform/vsp1/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_pipe.o
+vsp1-y += vsp1_dl.o vsp1_drm.o vsp1_video.o
+vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
+vsp1-y += vsp1_clu.o vsp1_hsit.o vsp1_lut.o
+vsp1-y += vsp1_brx.o vsp1_sru.o vsp1_uds.o
+vsp1-y += vsp1_hgo.o vsp1_hgt.o vsp1_histo.o
+vsp1-y += vsp1_lif.o vsp1_uif.o
+
+obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
new file mode 100644
index 000000000..56c62122a
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1.h -- R-Car VSP1 Driver
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_H__
+#define __VSP1_H__
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_regs.h"
+
+struct clk;
+struct device;
+struct rcar_fcp_device;
+
+struct vsp1_drm;
+struct vsp1_entity;
+struct vsp1_platform_data;
+struct vsp1_brx;
+struct vsp1_clu;
+struct vsp1_hgo;
+struct vsp1_hgt;
+struct vsp1_hsit;
+struct vsp1_lif;
+struct vsp1_lut;
+struct vsp1_rwpf;
+struct vsp1_sru;
+struct vsp1_uds;
+struct vsp1_uif;
+
+#define VSP1_MAX_LIF 2
+#define VSP1_MAX_RPF 5
+#define VSP1_MAX_UDS 3
+#define VSP1_MAX_UIF 2
+#define VSP1_MAX_WPF 4
+
+#define VSP1_HAS_LUT (1 << 1)
+#define VSP1_HAS_SRU (1 << 2)
+#define VSP1_HAS_BRU (1 << 3)
+#define VSP1_HAS_CLU (1 << 4)
+#define VSP1_HAS_WPF_VFLIP (1 << 5)
+#define VSP1_HAS_WPF_HFLIP (1 << 6)
+#define VSP1_HAS_HGO (1 << 7)
+#define VSP1_HAS_HGT (1 << 8)
+#define VSP1_HAS_BRS (1 << 9)
+#define VSP1_HAS_EXT_DL (1 << 10)
+
+struct vsp1_device_info {
+ u32 version;
+ const char *model;
+ unsigned int gen;
+ unsigned int features;
+ unsigned int lif_count;
+ unsigned int rpf_count;
+ unsigned int uds_count;
+ unsigned int uif_count;
+ unsigned int wpf_count;
+ unsigned int num_bru_inputs;
+ bool uapi;
+};
+
+#define vsp1_feature(vsp1, f) ((vsp1)->info->features & (f))
+
+struct vsp1_device {
+ struct device *dev;
+ const struct vsp1_device_info *info;
+ u32 version;
+
+ void __iomem *mmio;
+ struct rcar_fcp_device *fcp;
+ struct device *bus_master;
+
+ struct vsp1_brx *brs;
+ struct vsp1_brx *bru;
+ struct vsp1_clu *clu;
+ struct vsp1_hgo *hgo;
+ struct vsp1_hgt *hgt;
+ struct vsp1_hsit *hsi;
+ struct vsp1_hsit *hst;
+ struct vsp1_lif *lif[VSP1_MAX_LIF];
+ struct vsp1_lut *lut;
+ struct vsp1_rwpf *rpf[VSP1_MAX_RPF];
+ struct vsp1_sru *sru;
+ struct vsp1_uds *uds[VSP1_MAX_UDS];
+ struct vsp1_uif *uif[VSP1_MAX_UIF];
+ struct vsp1_rwpf *wpf[VSP1_MAX_WPF];
+
+ struct list_head entities;
+ struct list_head videos;
+
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct media_entity_operations media_ops;
+
+ struct vsp1_drm *drm;
+};
+
+int vsp1_device_get(struct vsp1_device *vsp1);
+void vsp1_device_put(struct vsp1_device *vsp1);
+
+int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index);
+
+static inline u32 vsp1_read(struct vsp1_device *vsp1, u32 reg)
+{
+ return ioread32(vsp1->mmio + reg);
+}
+
+static inline void vsp1_write(struct vsp1_device *vsp1, u32 reg, u32 data)
+{
+ iowrite32(data, vsp1->mmio + reg);
+}
+
+#endif /* __VSP1_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/vsp1/vsp1_brx.c
new file mode 100644
index 000000000..359917b5d
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_brx.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_brx.c -- R-Car VSP1 Blend ROP Unit (BRU and BRS)
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define BRX_MIN_SIZE 1U
+#define BRX_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_brx_write(struct vsp1_brx *brx,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, brx->base + reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int brx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_brx *brx =
+ container_of(ctrl->handler, struct vsp1_brx, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BG_COLOR:
+ brx->bgcolor = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops brx_ctrl_ops = {
+ .s_ctrl = brx_s_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/*
+ * The BRx can't perform format conversion: all sink and source formats must be
+ * identical. We pick the format on the first sink pad (pad 0) and propagate it
+ * to all other pads.
+ */
+
+static int brx_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
+}
+
+static int brx_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index)
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fse->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ return -EINVAL;
+
+ fse->min_width = BRX_MIN_SIZE;
+ fse->max_width = BRX_MAX_SIZE;
+ fse->min_height = BRX_MIN_SIZE;
+ fse->max_height = BRX_MAX_SIZE;
+
+ return 0;
+}
+
+static struct v4l2_rect *brx_get_compose(struct vsp1_brx *brx,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
+{
+ return v4l2_subdev_get_try_compose(&brx->entity.subdev, cfg, pad);
+}
+
+static void brx_try_format(struct vsp1_brx *brx,
+ struct v4l2_subdev_pad_config *config,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ switch (pad) {
+ case BRX_PAD_SINK(0):
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+ break;
+
+ default:
+ /* The BRx can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&brx->entity, config,
+ BRX_PAD_SINK(0));
+ fmt->code = format->code;
+ break;
+ }
+
+ fmt->width = clamp(fmt->width, BRX_MIN_SIZE, BRX_MAX_SIZE);
+ fmt->height = clamp(fmt->height, BRX_MIN_SIZE, BRX_MAX_SIZE);
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int brx_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_brx *brx = to_brx(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&brx->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&brx->entity, cfg, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ brx_try_format(brx, config, fmt->pad, &fmt->format);
+
+ format = vsp1_entity_get_pad_format(&brx->entity, config, fmt->pad);
+ *format = fmt->format;
+
+ /* Reset the compose rectangle */
+ if (fmt->pad != brx->entity.source_pad) {
+ struct v4l2_rect *compose;
+
+ compose = brx_get_compose(brx, config, fmt->pad);
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = format->width;
+ compose->height = format->height;
+ }
+
+ /* Propagate the format code to all pads */
+ if (fmt->pad == BRX_PAD_SINK(0)) {
+ unsigned int i;
+
+ for (i = 0; i <= brx->entity.source_pad; ++i) {
+ format = vsp1_entity_get_pad_format(&brx->entity,
+ config, i);
+ format->code = fmt->format.code;
+ }
+ }
+
+done:
+ mutex_unlock(&brx->entity.lock);
+ return ret;
+}
+
+static int brx_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_brx *brx = to_brx(subdev);
+ struct v4l2_subdev_pad_config *config;
+
+ if (sel->pad == brx->entity.source_pad)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = BRX_MAX_SIZE;
+ sel->r.height = BRX_MAX_SIZE;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ config = vsp1_entity_get_pad_config(&brx->entity, cfg,
+ sel->which);
+ if (!config)
+ return -EINVAL;
+
+ mutex_lock(&brx->entity.lock);
+ sel->r = *brx_get_compose(brx, config, sel->pad);
+ mutex_unlock(&brx->entity.lock);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int brx_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_brx *brx = to_brx(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *compose;
+ int ret = 0;
+
+ if (sel->pad == brx->entity.source_pad)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ mutex_lock(&brx->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&brx->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * The compose rectangle top left corner must be inside the output
+ * frame.
+ */
+ format = vsp1_entity_get_pad_format(&brx->entity, config,
+ brx->entity.source_pad);
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+
+ /*
+ * Scaling isn't supported, the compose rectangle size must be identical
+ * to the sink format size.
+ */
+ format = vsp1_entity_get_pad_format(&brx->entity, config, sel->pad);
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+
+ compose = brx_get_compose(brx, config, sel->pad);
+ *compose = sel->r;
+
+done:
+ mutex_unlock(&brx->entity.lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops brx_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = brx_enum_mbus_code,
+ .enum_frame_size = brx_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = brx_set_format,
+ .get_selection = brx_get_selection,
+ .set_selection = brx_set_selection,
+};
+
+static const struct v4l2_subdev_ops brx_ops = {
+ .pad = &brx_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void brx_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_brx *brx = to_brx(&entity->subdev);
+ struct v4l2_mbus_framefmt *format;
+ unsigned int flags;
+ unsigned int i;
+
+ format = vsp1_entity_get_pad_format(&brx->entity, brx->entity.config,
+ brx->entity.source_pad);
+
+ /*
+ * The hardware is extremely flexible but we have no userspace API to
+ * expose all the parameters, nor is it clear whether we would have use
+	 * cases for all the supported modes. Let's just hardcode the parameters
+ * to sane default values for now.
+ */
+
+ /*
+ * Disable dithering and enable color data normalization unless the
+ * format at the pipeline output is premultiplied.
+ */
+ flags = pipe->output ? pipe->output->format.flags : 0;
+ vsp1_brx_write(brx, dlb, VI6_BRU_INCTRL,
+ flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
+ 0 : VI6_BRU_INCTRL_NRM);
+
+ /*
+ * Set the background position to cover the whole output image and
+ * configure its color.
+ */
+ vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_SIZE,
+ (format->width << VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT) |
+ (format->height << VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT));
+ vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_LOC, 0);
+
+ vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_COL, brx->bgcolor |
+ (0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));
+
+ /*
+ * Route BRU input 1 as SRC input to the ROP unit and configure the ROP
+ * unit with a NOP operation to make BRU input 1 available as the
+ * Blend/ROP unit B SRC input. Only needed for BRU, the BRS has no ROP
+ * unit.
+ */
+ if (entity->type == VSP1_ENTITY_BRU)
+ vsp1_brx_write(brx, dlb, VI6_BRU_ROP,
+ VI6_BRU_ROP_DSTSEL_BRUIN(1) |
+ VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
+ VI6_BRU_ROP_AROP(VI6_ROP_NOP));
+
+ for (i = 0; i < brx->entity.source_pad; ++i) {
+ bool premultiplied = false;
+ u32 ctrl = 0;
+
+ /*
+ * Configure all Blend/ROP units corresponding to an enabled BRx
+ * input for alpha blending. Blend/ROP units corresponding to
+ * disabled BRx inputs are used in ROP NOP mode to ignore the
+ * SRC input.
+ */
+ if (brx->inputs[i].rpf) {
+ ctrl |= VI6_BRU_CTRL_RBC;
+
+ premultiplied = brx->inputs[i].rpf->format.flags
+ & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
+ } else {
+ ctrl |= VI6_BRU_CTRL_CROP(VI6_ROP_NOP)
+ | VI6_BRU_CTRL_AROP(VI6_ROP_NOP);
+ }
+
+ /*
+ * Select the virtual RPF as the Blend/ROP unit A DST input to
+ * serve as a background color.
+ */
+ if (i == 0)
+ ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF;
+
+ /*
+ * Route inputs 0 to 3 as SRC inputs to Blend/ROP units A to D
+ * in that order. In the BRU the Blend/ROP unit B SRC is
+ * hardwired to the ROP unit output, the corresponding register
+ * bits must be set to 0. The BRS has no ROP unit and doesn't
+ * need any special processing.
+ */
+ if (!(entity->type == VSP1_ENTITY_BRU && i == 1))
+ ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i);
+
+ vsp1_brx_write(brx, dlb, VI6_BRU_CTRL(i), ctrl);
+
+ /*
+		 * Hardcode the blending formula to
+ *
+ * DSTc = DSTc * (1 - SRCa) + SRCc * SRCa
+ * DSTa = DSTa * (1 - SRCa) + SRCa
+ *
+ * when the SRC input isn't premultiplied, and to
+ *
+ * DSTc = DSTc * (1 - SRCa) + SRCc
+ * DSTa = DSTa * (1 - SRCa) + SRCa
+ *
+ * otherwise.
+ */
+ vsp1_brx_write(brx, dlb, VI6_BRU_BLD(i),
+ VI6_BRU_BLD_CCMDX_255_SRC_A |
+ (premultiplied ? VI6_BRU_BLD_CCMDY_COEFY :
+ VI6_BRU_BLD_CCMDY_SRC_A) |
+ VI6_BRU_BLD_ACMDX_255_SRC_A |
+ VI6_BRU_BLD_ACMDY_COEFY |
+ (0xff << VI6_BRU_BLD_COEFY_SHIFT));
+ }
+}
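+
+/*
+ * Worked example of the hardcoded blending formula above (illustrative only,
+ * values normalised to [0, 1]): compositing a half-transparent,
+ * non-premultiplied source (SRCa = 0.5, SRCc = 0.8) over an opaque
+ * destination (DSTa = 1.0, DSTc = 0.2) gives
+ *
+ *	DSTc' = 0.2 * (1 - 0.5) + 0.8 * 0.5 = 0.5
+ *	DSTa' = 1.0 * (1 - 0.5) + 0.5       = 1.0
+ */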
+
+static const struct vsp1_entity_operations brx_entity_ops = {
+ .configure_stream = brx_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_brx *vsp1_brx_create(struct vsp1_device *vsp1,
+ enum vsp1_entity_type type)
+{
+ struct vsp1_brx *brx;
+ unsigned int num_pads;
+ const char *name;
+ int ret;
+
+ brx = devm_kzalloc(vsp1->dev, sizeof(*brx), GFP_KERNEL);
+ if (brx == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ brx->base = type == VSP1_ENTITY_BRU ? VI6_BRU_BASE : VI6_BRS_BASE;
+ brx->entity.ops = &brx_entity_ops;
+ brx->entity.type = type;
+
+ if (type == VSP1_ENTITY_BRU) {
+ num_pads = vsp1->info->num_bru_inputs + 1;
+ name = "bru";
+ } else {
+ num_pads = 3;
+ name = "brs";
+ }
+
+ ret = vsp1_entity_init(vsp1, &brx->entity, name, num_pads, &brx_ops,
+ MEDIA_ENT_F_PROC_VIDEO_COMPOSER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&brx->ctrls, 1);
+ v4l2_ctrl_new_std(&brx->ctrls, &brx_ctrl_ops, V4L2_CID_BG_COLOR,
+ 0, 0xffffff, 1, 0);
+
+ brx->bgcolor = 0;
+
+ brx->entity.subdev.ctrl_handler = &brx->ctrls;
+
+ if (brx->ctrls.error) {
+ dev_err(vsp1->dev, "%s: failed to initialize controls\n", name);
+ ret = brx->ctrls.error;
+ vsp1_entity_destroy(&brx->entity);
+ return ERR_PTR(ret);
+ }
+
+ return brx;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_brx.h b/drivers/media/platform/vsp1/vsp1_brx.h
new file mode 100644
index 000000000..6abbb8c33
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_brx.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_brx.h -- R-Car VSP1 Blend ROP Unit (BRU and BRS)
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_BRX_H__
+#define __VSP1_BRX_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+struct vsp1_rwpf;
+
+#define BRX_PAD_SINK(n) (n)
+
+struct vsp1_brx {
+ struct vsp1_entity entity;
+ unsigned int base;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ struct {
+ struct vsp1_rwpf *rpf;
+ } inputs[VSP1_MAX_RPF];
+
+ u32 bgcolor;
+};
+
+static inline struct vsp1_brx *to_brx(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_brx, entity.subdev);
+}
+
+struct vsp1_brx *vsp1_brx_create(struct vsp1_device *vsp1,
+ enum vsp1_entity_type type);
+
+#endif /* __VSP1_BRX_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/vsp1/vsp1_clu.c
new file mode 100644
index 000000000..942fc14c1
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_clu.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_clu.c -- R-Car VSP1 Cubic Look-Up Table
+ *
+ * Copyright (C) 2015-2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_clu.h"
+#include "vsp1_dl.h"
+
+#define CLU_MIN_SIZE 4U
+#define CLU_MAX_SIZE 8190U
+
+#define CLU_SIZE (17 * 17 * 17)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_clu_write(struct vsp1_clu *clu,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_CLU_TABLE (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_VSP1_CLU_MODE (V4L2_CID_USER_BASE | 0x1002)
+#define V4L2_CID_VSP1_CLU_MODE_2D 0
+#define V4L2_CID_VSP1_CLU_MODE_3D 1
+
+static int clu_set_table(struct vsp1_clu *clu, struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_dl_body *dlb;
+ unsigned int i;
+
+ dlb = vsp1_dl_body_get(clu->pool);
+ if (!dlb)
+ return -ENOMEM;
+
+ vsp1_dl_body_write(dlb, VI6_CLU_ADDR, 0);
+ for (i = 0; i < CLU_SIZE; ++i)
+ vsp1_dl_body_write(dlb, VI6_CLU_DATA, ctrl->p_new.p_u32[i]);
+
+ spin_lock_irq(&clu->lock);
+ swap(clu->clu, dlb);
+ spin_unlock_irq(&clu->lock);
+
+ vsp1_dl_body_put(dlb);
+ return 0;
+}
+
+static int clu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_clu *clu =
+ container_of(ctrl->handler, struct vsp1_clu, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_CLU_TABLE:
+ clu_set_table(clu, ctrl);
+ break;
+
+ case V4L2_CID_VSP1_CLU_MODE:
+ clu->mode = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops clu_ctrl_ops = {
+ .s_ctrl = clu_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config clu_table_control = {
+ .ops = &clu_ctrl_ops,
+ .id = V4L2_CID_VSP1_CLU_TABLE,
+ .name = "Look-Up Table",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0x00ffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { 17, 17, 17 },
+};
+
+static const char * const clu_mode_menu[] = {
+ "2D",
+ "3D",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config clu_mode_control = {
+ .ops = &clu_ctrl_ops,
+ .id = V4L2_CID_VSP1_CLU_MODE,
+ .name = "Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .def = 1,
+ .qmenu = clu_mode_menu,
+};
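+
+/*
+ * Minimal userspace sketch (an assumption for illustration, not part of this
+ * driver) of how the 17x17x17 table control above could be programmed through
+ * the V4L2 extended control API on the CLU subdevice node. The subdev_fd file
+ * descriptor and the userspace copy of the control ID definition are assumed
+ * to come from the application:
+ *
+ *	u32 table[17 * 17 * 17];
+ *	struct v4l2_ext_control ctrl = {
+ *		.id = V4L2_CID_VSP1_CLU_TABLE,
+ *		.size = sizeof(table),
+ *		.p_u32 = table,
+ *	};
+ *	struct v4l2_ext_controls ctrls = {
+ *		.which = V4L2_CTRL_WHICH_CUR_VAL,
+ *		.count = 1,
+ *		.controls = &ctrl,
+ *	};
+ *
+ *	ioctl(subdev_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ */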
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static const unsigned int clu_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int clu_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, clu_codes,
+ ARRAY_SIZE(clu_codes));
+}
+
+static int clu_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, CLU_MIN_SIZE,
+ CLU_MIN_SIZE, CLU_MAX_SIZE,
+ CLU_MAX_SIZE);
+}
+
+static int clu_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, cfg, fmt, clu_codes,
+ ARRAY_SIZE(clu_codes),
+ CLU_MIN_SIZE, CLU_MIN_SIZE,
+ CLU_MAX_SIZE, CLU_MAX_SIZE);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops clu_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = clu_enum_mbus_code,
+ .enum_frame_size = clu_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = clu_set_format,
+};
+
+static const struct v4l2_subdev_ops clu_ops = {
+ .pad = &clu_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void clu_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_clu *clu = to_clu(&entity->subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /*
+ * The yuv_mode can't be changed during streaming. Cache it internally
+ * for future runtime configuration calls.
+ */
+ format = vsp1_entity_get_pad_format(&clu->entity,
+ clu->entity.config,
+ CLU_PAD_SINK);
+ clu->yuv_mode = format->code == MEDIA_BUS_FMT_AYUV8_1X32;
+}
+
+static void clu_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_clu *clu = to_clu(&entity->subdev);
+ struct vsp1_dl_body *clu_dlb;
+ unsigned long flags;
+ u32 ctrl = VI6_CLU_CTRL_AAI | VI6_CLU_CTRL_MVS | VI6_CLU_CTRL_EN;
+
+ /* 2D mode can only be used with the YCbCr pixel encoding. */
+ if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode)
+ ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D
+ | VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D
+ | VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D;
+
+ vsp1_clu_write(clu, dlb, VI6_CLU_CTRL, ctrl);
+
+ spin_lock_irqsave(&clu->lock, flags);
+ clu_dlb = clu->clu;
+ clu->clu = NULL;
+ spin_unlock_irqrestore(&clu->lock, flags);
+
+ if (clu_dlb) {
+ vsp1_dl_list_add_body(dl, clu_dlb);
+
+ /* Release our local reference. */
+ vsp1_dl_body_put(clu_dlb);
+ }
+}
+
+static void clu_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_clu *clu = to_clu(&entity->subdev);
+
+ vsp1_dl_body_pool_destroy(clu->pool);
+}
+
+static const struct vsp1_entity_operations clu_entity_ops = {
+ .configure_stream = clu_configure_stream,
+ .configure_frame = clu_configure_frame,
+ .destroy = clu_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_clu *vsp1_clu_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_clu *clu;
+ int ret;
+
+ clu = devm_kzalloc(vsp1->dev, sizeof(*clu), GFP_KERNEL);
+ if (clu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&clu->lock);
+
+ clu->entity.ops = &clu_entity_ops;
+ clu->entity.type = VSP1_ENTITY_CLU;
+
+ ret = vsp1_entity_init(vsp1, &clu->entity, "clu", 2, &clu_ops,
+ MEDIA_ENT_F_PROC_VIDEO_LUT);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /*
+ * Pre-allocate a body pool, with 3 bodies allowing a userspace update
+ * before the hardware has committed a previous set of tables, handling
+ * both the queued and pending dl entries. One extra entry is added to
+ * the CLU_SIZE to allow for the VI6_CLU_ADDR header.
+ */
+ clu->pool = vsp1_dl_body_pool_create(clu->entity.vsp1, 3, CLU_SIZE + 1,
+ 0);
+ if (!clu->pool)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&clu->ctrls, 2);
+ v4l2_ctrl_new_custom(&clu->ctrls, &clu_table_control, NULL);
+ v4l2_ctrl_new_custom(&clu->ctrls, &clu_mode_control, NULL);
+
+ clu->entity.subdev.ctrl_handler = &clu->ctrls;
+
+ if (clu->ctrls.error) {
+ dev_err(vsp1->dev, "clu: failed to initialize controls\n");
+ ret = clu->ctrls.error;
+ vsp1_entity_destroy(&clu->entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&clu->ctrls);
+
+ return clu;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_clu.h b/drivers/media/platform/vsp1/vsp1_clu.h
new file mode 100644
index 000000000..cef2f4448
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_clu.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_clu.h -- R-Car VSP1 Cubic Look-Up Table
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_CLU_H__
+#define __VSP1_CLU_H__
+
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+struct vsp1_dl_body;
+
+#define CLU_PAD_SINK 0
+#define CLU_PAD_SOURCE 1
+
+struct vsp1_clu {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ bool yuv_mode;
+ spinlock_t lock;
+ unsigned int mode;
+ struct vsp1_dl_body *clu;
+ struct vsp1_dl_body_pool *pool;
+};
+
+static inline struct vsp1_clu *to_clu(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_clu, entity.subdev);
+}
+
+struct vsp1_clu *vsp1_clu_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_CLU_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
new file mode 100644
index 000000000..a07caf981
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -0,0 +1,1141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_dl.c -- R-Car VSP1 Display List
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+
+#define VSP1_DL_NUM_ENTRIES 256
+
+#define VSP1_DLH_INT_ENABLE (1 << 1)
+#define VSP1_DLH_AUTO_START (1 << 0)
+
+#define VSP1_DLH_EXT_PRE_CMD_EXEC (1 << 9)
+#define VSP1_DLH_EXT_POST_CMD_EXEC (1 << 8)
+
+struct vsp1_dl_header_list {
+ u32 num_bytes;
+ u32 addr;
+} __packed;
+
+struct vsp1_dl_header {
+ u32 num_lists;
+ struct vsp1_dl_header_list lists[8];
+ u32 next_header;
+ u32 flags;
+} __packed;
+
+/**
+ * struct vsp1_dl_ext_header - Extended display list header
+ * @padding: padding zero bytes for alignment
+ * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
+ * @flags: enables or disables execution of the pre and post command
+ * @pre_ext_dl_plist: start address of pre-extended display list bodies
+ * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
+ * @post_ext_dl_plist: start address of post-extended display list bodies
+ */
+struct vsp1_dl_ext_header {
+ u32 padding;
+
+ /*
+ * The datasheet represents flags as stored before pre_ext_dl_num_cmd,
+ * expecting 32-bit accesses. The flags are appropriate to the whole
+ * header, not just the pre_ext command, and thus warrant being
+ * separated out. Due to byte ordering, and representing as 16 bit
+ * values here, the flags must be positioned after the
+ * pre_ext_dl_num_cmd.
+ */
+ u16 pre_ext_dl_num_cmd;
+ u16 flags;
+ u32 pre_ext_dl_plist;
+
+ u32 post_ext_dl_num_cmd;
+ u32 post_ext_dl_plist;
+} __packed;
+
+struct vsp1_dl_header_extended {
+ struct vsp1_dl_header header;
+ struct vsp1_dl_ext_header ext;
+} __packed;
+
+struct vsp1_dl_entry {
+ u32 addr;
+ u32 data;
+} __packed;
+
+/**
+ * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
+ * @opcode: Extended display list command operation code
+ * @flags: Pre-extended command flags. These are specific to each command
+ * @address_set: Source address set pointer. Must have 16-byte alignment
+ * @reserved: Zero bits for alignment.
+ */
+struct vsp1_pre_ext_dl_body {
+ u32 opcode;
+ u32 flags;
+ u32 address_set;
+ u32 reserved;
+} __packed;
+
+/**
+ * struct vsp1_dl_body - Display list body
+ * @list: entry in the display list list of bodies
+ * @free: entry in the pool free body list
+ * @refcnt: reference tracking for the body
+ * @pool: pool to which this body belongs
+ * @entries: array of entries
+ * @dma: DMA address of the entries
+ * @size: size of the DMA memory in bytes
+ * @num_entries: number of stored entries
+ * @max_entries: number of entries available
+ */
+struct vsp1_dl_body {
+ struct list_head list;
+ struct list_head free;
+
+ refcount_t refcnt;
+
+ struct vsp1_dl_body_pool *pool;
+
+ struct vsp1_dl_entry *entries;
+ dma_addr_t dma;
+ size_t size;
+
+ unsigned int num_entries;
+ unsigned int max_entries;
+};
+
+/**
+ * struct vsp1_dl_body_pool - display list body pool
+ * @dma: DMA address of the entries
+ * @size: size of the full DMA memory pool in bytes
+ * @mem: CPU memory pointer for the pool
+ * @bodies: Array of DLB structures for the pool
+ * @free: List of free DLB entries
+ * @lock: Protects the free list
+ * @vsp1: the VSP1 device
+ */
+struct vsp1_dl_body_pool {
+ /* DMA allocation */
+ dma_addr_t dma;
+ size_t size;
+ void *mem;
+
+ /* Body management */
+ struct vsp1_dl_body *bodies;
+ struct list_head free;
+ spinlock_t lock;
+
+ struct vsp1_device *vsp1;
+};
+
+/**
+ * struct vsp1_cmd_pool - Display List commands pool
+ * @dma: DMA address of the entries
+ * @size: size of the full DMA memory pool in bytes
+ * @mem: CPU memory pointer for the pool
+ * @cmds: Array of command structures for the pool
+ * @free: Free pool entries
+ * @lock: Protects the free list
+ * @vsp1: the VSP1 device
+ */
+struct vsp1_dl_cmd_pool {
+ /* DMA allocation */
+ dma_addr_t dma;
+ size_t size;
+ void *mem;
+
+ struct vsp1_dl_ext_cmd *cmds;
+ struct list_head free;
+
+ spinlock_t lock;
+
+ struct vsp1_device *vsp1;
+};
+
+/**
+ * struct vsp1_dl_list - Display list
+ * @list: entry in the display list manager lists
+ * @dlm: the display list manager
+ * @header: display list header
+ * @extension: extended display list header. NULL for normal lists
+ * @dma: DMA address for the header
+ * @body0: first display list body
+ * @bodies: list of extra display list bodies
+ * @pre_cmd: pre command to be issued through extended dl header
+ * @post_cmd: post command to be issued through extended dl header
+ * @has_chain: if true, indicates that there's a partition chain
+ * @chain: entry in the display list partition chain
+ * @internal: whether the display list is used for internal purpose
+ */
+struct vsp1_dl_list {
+ struct list_head list;
+ struct vsp1_dl_manager *dlm;
+
+ struct vsp1_dl_header *header;
+ struct vsp1_dl_ext_header *extension;
+ dma_addr_t dma;
+
+ struct vsp1_dl_body *body0;
+ struct list_head bodies;
+
+ struct vsp1_dl_ext_cmd *pre_cmd;
+ struct vsp1_dl_ext_cmd *post_cmd;
+
+ bool has_chain;
+ struct list_head chain;
+
+ bool internal;
+};
+
+/**
+ * struct vsp1_dl_manager - Display List manager
+ * @index: index of the related WPF
+ * @singleshot: execute the display list in single-shot mode
+ * @vsp1: the VSP1 device
+ * @lock: protects the free, active, queued, and pending lists
+ * @free: array of all free display lists
+ * @active: list currently being processed (loaded) by hardware
+ * @queued: list queued to the hardware (written to the DL registers)
+ * @pending: list waiting to be queued to the hardware
+ * @pool: body pool for the display list bodies
+ * @cmdpool: commands pool for extended display list
+ */
+struct vsp1_dl_manager {
+ unsigned int index;
+ bool singleshot;
+ struct vsp1_device *vsp1;
+
+ spinlock_t lock;
+ struct list_head free;
+ struct vsp1_dl_list *active;
+ struct vsp1_dl_list *queued;
+ struct vsp1_dl_list *pending;
+
+ struct vsp1_dl_body_pool *pool;
+ struct vsp1_dl_cmd_pool *cmdpool;
+};
+
+/* -----------------------------------------------------------------------------
+ * Display List Body Management
+ */
+
+/**
+ * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
+ * @vsp1: The VSP1 device
+ * @num_bodies: The number of bodies to allocate
+ * @num_entries: The maximum number of entries that a body can contain
+ * @extra_size: Extra allocation provided for the bodies
+ *
+ * Allocate a pool of display list bodies each with enough memory to contain the
+ * requested number of entries plus the @extra_size.
+ *
+ * Return a pointer to a pool on success or NULL if memory can't be allocated.
+ */
+struct vsp1_dl_body_pool *
+vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
+ unsigned int num_entries, size_t extra_size)
+{
+ struct vsp1_dl_body_pool *pool;
+ size_t dlb_size;
+ unsigned int i;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->vsp1 = vsp1;
+
+ /*
+ * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate
+ * extra memory for the display list header. We need only one header per
+ * display list, not per display list body, thus this allocation is
+ * extraneous and should be reworked in the future.
+ */
+ dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
+ pool->size = dlb_size * num_bodies;
+
+ pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
+ if (!pool->bodies) {
+ kfree(pool);
+ return NULL;
+ }
+
+ pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
+ GFP_KERNEL);
+ if (!pool->mem) {
+ kfree(pool->bodies);
+ kfree(pool);
+ return NULL;
+ }
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free);
+
+ for (i = 0; i < num_bodies; ++i) {
+ struct vsp1_dl_body *dlb = &pool->bodies[i];
+
+ dlb->pool = pool;
+ dlb->max_entries = num_entries;
+
+ dlb->dma = pool->dma + i * dlb_size;
+ dlb->entries = pool->mem + i * dlb_size;
+
+ list_add_tail(&dlb->free, &pool->free);
+ }
+
+ return pool;
+}
+
+/**
+ * vsp1_dl_body_pool_destroy - Release a body pool
+ * @pool: The body pool
+ *
+ * Release all components of a pool allocation.
+ */
+void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (pool->mem)
+ dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
+ pool->dma);
+
+ kfree(pool->bodies);
+ kfree(pool);
+}
+
+/**
+ * vsp1_dl_body_get - Obtain a body from a pool
+ * @pool: The body pool
+ *
+ * Obtain a body from the pool without blocking.
+ *
+ * Returns a display list body or NULL if there are none available.
+ */
+struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
+{
+ struct vsp1_dl_body *dlb = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!list_empty(&pool->free)) {
+ dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
+ list_del(&dlb->free);
+ refcount_set(&dlb->refcnt, 1);
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return dlb;
+}
+
+/**
+ * vsp1_dl_body_put - Return a body back to its pool
+ * @dlb: The display list body
+ *
+ * Return a body back to the pool, and reset the num_entries to clear the list.
+ */
+void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
+{
+ unsigned long flags;
+
+ if (!dlb)
+ return;
+
+ if (!refcount_dec_and_test(&dlb->refcnt))
+ return;
+
+ dlb->num_entries = 0;
+
+ spin_lock_irqsave(&dlb->pool->lock, flags);
+ list_add_tail(&dlb->free, &dlb->pool->free);
+ spin_unlock_irqrestore(&dlb->pool->lock, flags);
+}
+
+/**
+ * vsp1_dl_body_write - Write a register to a display list body
+ * @dlb: The body
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list body. The maximum
+ * number of entries that can be written in a body is fixed when the body's
+ * pool is created by vsp1_dl_body_pool_create().
+ */
+void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
+ "DLB size exceeded (max %u)", dlb->max_entries))
+ return;
+
+ dlb->entries[dlb->num_entries].addr = reg;
+ dlb->entries[dlb->num_entries].data = data;
+ dlb->num_entries++;
+}
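+
+/*
+ * Hedged sketch of the typical body lifecycle built from the helpers above,
+ * mirroring what clu_set_table() and the entity .configure_frame handlers do.
+ * VI6_SOME_REG, value, pool and dl are placeholders for a real register,
+ * register value, body pool and display list:
+ *
+ *	struct vsp1_dl_body *dlb = vsp1_dl_body_get(pool);
+ *
+ *	if (!dlb)
+ *		return -ENOMEM;
+ *
+ *	vsp1_dl_body_write(dlb, VI6_SOME_REG, value);
+ *
+ *	vsp1_dl_list_add_body(dl, dlb);
+ *	vsp1_dl_body_put(dlb);
+ *
+ * vsp1_dl_list_add_body() takes its own reference, so the local reference is
+ * dropped with vsp1_dl_body_put() once the body has been handed over.
+ */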
+
+/* -----------------------------------------------------------------------------
+ * Display List Extended Command Management
+ */
+
+enum vsp1_extcmd_type {
+ VSP1_EXTCMD_AUTODISP,
+ VSP1_EXTCMD_AUTOFLD,
+};
+
+struct vsp1_extended_command_info {
+ u16 opcode;
+ size_t body_size;
+};
+
+static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
+ [VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
+ [VSP1_EXTCMD_AUTOFLD] = { 0x03, 160 },
+};
+
+/**
+ * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
+ * @vsp1: The VSP1 device
+ * @type: The command pool type
+ * @num_cmds: The number of commands to allocate
+ *
+ * Allocate a pool of commands each with enough memory to contain the private
+ * data of each command. The allocation sizes are dependent upon the command
+ * type.
+ *
+ * Return a pointer to the pool on success or NULL if memory can't be allocated.
+ */
+static struct vsp1_dl_cmd_pool *
+vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
+ unsigned int num_cmds)
+{
+ struct vsp1_dl_cmd_pool *pool;
+ unsigned int i;
+ size_t cmd_size;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->vsp1 = vsp1;
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free);
+
+ pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
+ if (!pool->cmds) {
+ kfree(pool);
+ return NULL;
+ }
+
+ cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
+ vsp1_extended_commands[type].body_size;
+ cmd_size = ALIGN(cmd_size, 16);
+
+ pool->size = cmd_size * num_cmds;
+ pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
+ GFP_KERNEL);
+ if (!pool->mem) {
+ kfree(pool->cmds);
+ kfree(pool);
+ return NULL;
+ }
+
+ for (i = 0; i < num_cmds; ++i) {
+ struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
+ size_t cmd_offset = i * cmd_size;
+ /* data_offset must be 16 byte aligned for DMA. */
+ size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
+ cmd_offset;
+
+ cmd->pool = pool;
+ cmd->opcode = vsp1_extended_commands[type].opcode;
+
+ /*
+ * TODO: Auto-disp can utilise more than one extended body
+ * command per cmd.
+ */
+ cmd->num_cmds = 1;
+ cmd->cmds = pool->mem + cmd_offset;
+ cmd->cmd_dma = pool->dma + cmd_offset;
+
+ cmd->data = pool->mem + data_offset;
+ cmd->data_dma = pool->dma + data_offset;
+
+ list_add_tail(&cmd->free, &pool->free);
+ }
+
+ return pool;
+}
+
+static
+struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
+{
+ struct vsp1_dl_ext_cmd *cmd = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!list_empty(&pool->free)) {
+ cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
+ free);
+ list_del(&cmd->free);
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return cmd;
+}
+
+static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
+{
+ unsigned long flags;
+
+ if (!cmd)
+ return;
+
+ /* Reset flags, these mark data usage. */
+ cmd->flags = 0;
+
+ spin_lock_irqsave(&cmd->pool->lock, flags);
+ list_add_tail(&cmd->free, &cmd->pool->free);
+ spin_unlock_irqrestore(&cmd->pool->lock, flags);
+}
+
+static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (pool->mem)
+ dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
+ pool->dma);
+
+ kfree(pool->cmds);
+ kfree(pool);
+}
+
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ if (dl->pre_cmd)
+ return dl->pre_cmd;
+
+ dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);
+
+ return dl->pre_cmd;
+}
+
+/* ----------------------------------------------------------------------------
+ * Display List Transaction Management
+ */
+
+static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_dl_list *dl;
+ size_t header_offset;
+
+ dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+ if (!dl)
+ return NULL;
+
+ INIT_LIST_HEAD(&dl->bodies);
+ dl->dlm = dlm;
+
+ /* Get a default body for our list. */
+ dl->body0 = vsp1_dl_body_get(dlm->pool);
+ if (!dl->body0) {
+ kfree(dl);
+ return NULL;
+ }
+
+ header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
+
+ dl->header = ((void *)dl->body0->entries) + header_offset;
+ dl->dma = dl->body0->dma + header_offset;
+
+ memset(dl->header, 0, sizeof(*dl->header));
+ dl->header->lists[0].addr = dl->body0->dma;
+
+ return dl;
+}
+
+static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_body *dlb, *tmp;
+
+ list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
+ list_del(&dlb->list);
+ vsp1_dl_body_put(dlb);
+ }
+}
+
+static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
+{
+ vsp1_dl_body_put(dl->body0);
+ vsp1_dl_list_bodies_put(dl);
+
+ kfree(dl);
+}
+
+/**
+ * vsp1_dl_list_get - Get a free display list
+ * @dlm: The display list manager
+ *
+ * Get a display list from the pool of free lists and return it.
+ *
+ * This function must be called without the display list manager lock held.
+ */
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_dl_list *dl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ if (!list_empty(&dlm->free)) {
+ dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
+ list_del(&dl->list);
+
+ /*
+ * The display list chain must be initialised to ensure every
+ * display list can assert list_empty() if it is not in a chain.
+ */
+ INIT_LIST_HEAD(&dl->chain);
+ }
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+
+ return dl;
+}
+
+/* This function must be called with the display list manager lock held. */
+static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_list *dl_next;
+
+ if (!dl)
+ return;
+
+ /*
+ * Release any linked display-lists which were chained for a single
+ * hardware operation.
+ */
+ if (dl->has_chain) {
+ list_for_each_entry(dl_next, &dl->chain, chain)
+ __vsp1_dl_list_put(dl_next);
+ }
+
+ dl->has_chain = false;
+
+ vsp1_dl_list_bodies_put(dl);
+
+ vsp1_dl_ext_cmd_put(dl->pre_cmd);
+ vsp1_dl_ext_cmd_put(dl->post_cmd);
+
+ dl->pre_cmd = NULL;
+ dl->post_cmd = NULL;
+
+ /*
+	 * body0 is reused as an optimisation, as presently every display list
+	 * has at least one body; we thus reinitialise the entries list.
+ */
+ dl->body0->num_entries = 0;
+
+ list_add_tail(&dl->list, &dl->dlm->free);
+}
+
+/**
+ * vsp1_dl_list_put - Release a display list
+ * @dl: The display list
+ *
+ * Release the display list and return it to the pool of free lists.
+ *
+ * Passing a NULL pointer to this function is safe, in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+ unsigned long flags;
+
+ if (!dl)
+ return;
+
+ spin_lock_irqsave(&dl->dlm->lock, flags);
+ __vsp1_dl_list_put(dl);
+ spin_unlock_irqrestore(&dl->dlm->lock, flags);
+}
+
+/**
+ * vsp1_dl_list_get_body0 - Obtain the default body for the display list
+ * @dl: The display list
+ *
+ * Obtain a pointer to the internal display list body allowing this to be passed
+ * directly to configure operations.
+ */
+struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
+{
+ return dl->body0;
+}
+
+/**
+ * vsp1_dl_list_add_body - Add a body to the display list
+ * @dl: The display list
+ * @dlb: The body
+ *
+ * Add a display list body to a display list. Registers contained in bodies are
+ * processed after registers contained in the main display list, in the order in
+ * which bodies are added.
+ *
+ * Adding a body to a display list passes ownership of the body to the list. The
+ * caller retains its reference to the body when adding it to the display
+ * list, but is not allowed to add new entries to the body.
+ *
+ * The reference must be explicitly released by a call to vsp1_dl_body_put()
+ * when the body isn't needed anymore.
+ */
+int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
+{
+ refcount_inc(&dlb->refcnt);
+
+ list_add_tail(&dlb->list, &dl->bodies);
+
+ return 0;
+}
+
+/**
+ * vsp1_dl_list_add_chain - Add a display list to a chain
+ * @head: The head display list
+ * @dl: The new display list
+ *
+ * Add a display list to an existing display list chain. The chained lists
+ * will be automatically processed by the hardware without intervention from
+ * the CPU. A display list end interrupt will only complete after the last
+ * display list in the chain has completed processing.
+ *
+ * Adding a display list to a chain passes ownership of the display list to
+ * the head display list item. The chain is released when the head dl item is
+ * put back with __vsp1_dl_list_put().
+ */
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
+ struct vsp1_dl_list *dl)
+{
+ head->has_chain = true;
+ list_add_tail(&dl->chain, &head->chain);
+ return 0;
+}
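+
+/*
+ * Hedged usage sketch (num_partitions and the body setup are placeholders):
+ * a partitioned frame is built by chaining one display list per partition
+ * onto a head list and committing only the head; the chained lists are then
+ * started automatically by the hardware:
+ *
+ *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
+ *	unsigned int i;
+ *
+ *	for (i = 1; i < num_partitions; ++i) {
+ *		struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
+ *
+ *		... fill vsp1_dl_list_get_body0(dl) ...
+ *		vsp1_dl_list_add_chain(head, dl);
+ *	}
+ *
+ *	vsp1_dl_list_commit(head, false);
+ */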
+
+static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
+{
+ cmd->cmds[0].opcode = cmd->opcode;
+ cmd->cmds[0].flags = cmd->flags;
+ cmd->cmds[0].address_set = cmd->data_dma;
+ cmd->cmds[0].reserved = 0;
+}
+
+static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+ struct vsp1_dl_header_list *hdr = dl->header->lists;
+ struct vsp1_dl_body *dlb;
+ unsigned int num_lists = 0;
+
+ /*
+	 * Fill the header with the display list bodies' addresses and sizes. The
+ * address of the first body has already been filled when the display
+ * list was allocated.
+ */
+
+ hdr->num_bytes = dl->body0->num_entries
+ * sizeof(*dl->header->lists);
+
+ list_for_each_entry(dlb, &dl->bodies, list) {
+ num_lists++;
+ hdr++;
+
+ hdr->addr = dlb->dma;
+ hdr->num_bytes = dlb->num_entries
+ * sizeof(*dl->header->lists);
+ }
+
+ dl->header->num_lists = num_lists;
+
+ if (!list_empty(&dl->chain) && !is_last) {
+ /*
+ * If this display list's chain is not empty, we are on a list,
+ * and the next item is the display list that we must queue for
+ * automatic processing by the hardware.
+ */
+ struct vsp1_dl_list *next = list_next_entry(dl, chain);
+
+ dl->header->next_header = next->dma;
+ dl->header->flags = VSP1_DLH_AUTO_START;
+ } else if (!dlm->singleshot) {
+ /*
+		 * If the display list manager works in continuous mode, the VSP
+ * should loop over the display list continuously until
+ * instructed to do otherwise.
+ */
+ dl->header->next_header = dl->dma;
+ dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
+ } else {
+ /*
+ * Otherwise, in mem-to-mem mode, we work in single-shot mode
+ * and the next display list must not be started automatically.
+ */
+ dl->header->flags = VSP1_DLH_INT_ENABLE;
+ }
+
+ if (!dl->extension)
+ return;
+
+ dl->extension->flags = 0;
+
+ if (dl->pre_cmd) {
+ dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
+ dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
+ }
+
+ if (dl->post_cmd) {
+ dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
+ dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
+ }
+}
+
+static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_device *vsp1 = dlm->vsp1;
+
+ if (!dlm->queued)
+ return false;
+
+ /*
+ * Check whether the VSP1 has taken the update. The hardware indicates
+ * this by clearing the UPDHDR bit in the CMD register.
+ */
+ return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
+}
+
+static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+ struct vsp1_device *vsp1 = dlm->vsp1;
+
+ /*
+ * Program the display list header address. If the hardware is idle
+ * (single-shot mode or first frame in continuous mode) it will then be
+ * started independently. If the hardware is operating, the
+ * VI6_DL_HDR_REF_ADDR register will be updated with the display list
+ * address.
+ */
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
+}
+
+static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ /*
+ * If a previous display list has been queued to the hardware but not
+ * processed yet, the VSP can start processing it at any time. In that
+ * case we can't replace the queued list by the new one, as we could
+ * race with the hardware. We thus mark the update as pending, it will
+ * be queued up to the hardware by the frame end interrupt handler.
+ *
+ * If a display list is already pending we simply drop it as the new
+ * display list is assumed to contain a more recent configuration. It is
+ * an error if the already pending list has the internal flag set, as
+ * there is then a process waiting for that list to complete. This
+ * shouldn't happen as the waiting process should perform proper
+ * locking, but warn just in case.
+ */
+ if (vsp1_dl_list_hw_update_pending(dlm)) {
+ WARN_ON(dlm->pending && dlm->pending->internal);
+ __vsp1_dl_list_put(dlm->pending);
+ dlm->pending = dl;
+ return;
+ }
+
+ /*
+ * Pass the new display list to the hardware and mark it as queued. It
+ * will become active when the hardware starts processing it.
+ */
+ vsp1_dl_list_hw_enqueue(dl);
+
+ __vsp1_dl_list_put(dlm->queued);
+ dlm->queued = dl;
+}
+
+static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ /*
+ * When working in single-shot mode, the caller guarantees that the
+ * hardware is idle at this point. Just commit the head display list
+ * to hardware. Chained lists will be started automatically.
+ */
+ vsp1_dl_list_hw_enqueue(dl);
+
+ dlm->active = dl;
+}
+
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+ struct vsp1_dl_list *dl_next;
+ unsigned long flags;
+
+ /* Fill the header for the head and chained display lists. */
+ vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
+
+ list_for_each_entry(dl_next, &dl->chain, chain) {
+ bool last = list_is_last(&dl_next->chain, &dl->chain);
+
+ vsp1_dl_list_fill_header(dl_next, last);
+ }
+
+ dl->internal = internal;
+
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ if (dlm->singleshot)
+ vsp1_dl_list_commit_singleshot(dl);
+ else
+ vsp1_dl_list_commit_continuous(dl);
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Display List Manager
+ */
+
+/**
+ * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
+ * @dlm: the display list manager
+ *
+ * Return a set of flags that indicates display list completion status.
+ *
+ * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display list
+ * has completed at frame end. If the flag is not returned display list
+ * completion has been delayed by one frame because the display list commit
+ * raced with the frame end interrupt. The function always returns with the flag
+ * set in single-shot mode as display list processing is then not continuous and
+ * races never occur.
+ *
+ * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display list
+ * has completed and had been queued with the internal notification flag.
+ * Internal notification is only supported for continuous mode.
+ */
+unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_device *vsp1 = dlm->vsp1;
+ u32 status = vsp1_read(vsp1, VI6_STATUS);
+ unsigned int flags = 0;
+
+ spin_lock(&dlm->lock);
+
+ /*
+ * The mem-to-mem pipelines work in single-shot mode. No new display
+ * list can be queued, we don't have to do anything.
+ */
+ if (dlm->singleshot) {
+ __vsp1_dl_list_put(dlm->active);
+ dlm->active = NULL;
+ flags |= VSP1_DL_FRAME_END_COMPLETED;
+ goto done;
+ }
+
+ /*
+ * If the commit operation raced with the interrupt and occurred after
+ * the frame end event but before interrupt processing, the hardware
+ * hasn't taken the update into account yet. We have to skip one frame
+ * and retry.
+ */
+ if (vsp1_dl_list_hw_update_pending(dlm))
+ goto done;
+
+ /*
+ * Progressive streams report only TOP fields. If we have a BOTTOM
+ * field, we are interlaced, and expect the frame to complete on the
+ * next frame end interrupt.
+ */
+ if (status & VI6_STATUS_FLD_STD(dlm->index))
+ goto done;
+
+ /*
+ * The device starts processing the queued display list right after the
+ * frame end interrupt. The display list thus becomes active.
+ */
+ if (dlm->queued) {
+ if (dlm->queued->internal)
+ flags |= VSP1_DL_FRAME_END_INTERNAL;
+ dlm->queued->internal = false;
+
+ __vsp1_dl_list_put(dlm->active);
+ dlm->active = dlm->queued;
+ dlm->queued = NULL;
+ flags |= VSP1_DL_FRAME_END_COMPLETED;
+ }
+
+ /*
+ * Now that the VSP has started processing the queued display list, we
+ * can queue the pending display list to the hardware if one has been
+ * prepared.
+ */
+ if (dlm->pending) {
+ vsp1_dl_list_hw_enqueue(dlm->pending);
+ dlm->queued = dlm->pending;
+ dlm->pending = NULL;
+ }
+
+done:
+ spin_unlock(&dlm->lock);
+
+ return flags;
+}
+
+/* Hardware Setup */
+void vsp1_dlm_setup(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+ u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
+ | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
+ | VI6_DL_CTRL_DLE;
+ u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
+ | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;
+
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ for (i = 0; i < vsp1->info->wpf_count; ++i)
+ vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
+ }
+
+ vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
+ vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
+}
+
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ __vsp1_dl_list_put(dlm->active);
+ __vsp1_dl_list_put(dlm->queued);
+ __vsp1_dl_list_put(dlm->pending);
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+
+ dlm->active = NULL;
+ dlm->queued = NULL;
+ dlm->pending = NULL;
+}
+
+struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
+{
+ return vsp1_dl_body_get(dlm->pool);
+}
+
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+ unsigned int index,
+ unsigned int prealloc)
+{
+ struct vsp1_dl_manager *dlm;
+ size_t header_size;
+ unsigned int i;
+
+ dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
+ if (!dlm)
+ return NULL;
+
+ dlm->index = index;
+ dlm->singleshot = vsp1->info->uapi;
+ dlm->vsp1 = vsp1;
+
+ spin_lock_init(&dlm->lock);
+ INIT_LIST_HEAD(&dlm->free);
+
+ /*
+ * Initialize the display list body and allocate DMA memory for the body
+ * and the header. Both are allocated together to avoid memory
+ * fragmentation, with the header located right after the body in
+ * memory. An extra body is allocated on top of the prealloc to account
+ * for the cached body used by the vsp1_pipeline object.
+ */
+ header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
+ sizeof(struct vsp1_dl_header_extended) :
+ sizeof(struct vsp1_dl_header);
+
+ header_size = ALIGN(header_size, 8);
+
+ dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
+ VSP1_DL_NUM_ENTRIES, header_size);
+ if (!dlm->pool)
+ return NULL;
+
+ for (i = 0; i < prealloc; ++i) {
+ struct vsp1_dl_list *dl;
+
+ dl = vsp1_dl_list_alloc(dlm);
+ if (!dl) {
+ vsp1_dlm_destroy(dlm);
+ return NULL;
+ }
+
+ /* The extended header immediately follows the header. */
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
+ dl->extension = (void *)dl->header
+ + sizeof(*dl->header);
+
+ list_add_tail(&dl->list, &dlm->free);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
+ VSP1_EXTCMD_AUTOFLD, prealloc);
+ if (!dlm->cmdpool) {
+ vsp1_dlm_destroy(dlm);
+ return NULL;
+ }
+ }
+
+ return dlm;
+}
+
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_dl_list *dl, *next;
+
+ if (!dlm)
+ return;
+
+ list_for_each_entry_safe(dl, next, &dlm->free, list) {
+ list_del(&dl->list);
+ vsp1_dl_list_free(dl);
+ }
+
+ vsp1_dl_body_pool_destroy(dlm->pool);
+ vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
+}
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
new file mode 100644
index 000000000..125750dc8
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_dl.h -- R-Car VSP1 Display List
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_DL_H__
+#define __VSP1_DL_H__
+
+#include <linux/types.h>
+
+struct vsp1_device;
+struct vsp1_dl_body;
+struct vsp1_dl_body_pool;
+struct vsp1_dl_list;
+struct vsp1_dl_manager;
+
+#define VSP1_DL_FRAME_END_COMPLETED BIT(0)
+#define VSP1_DL_FRAME_END_INTERNAL BIT(1)
+
+/**
+ * struct vsp1_dl_ext_cmd - Extended Display command
+ * @pool: pool to which this command belongs
+ * @free: entry in the pool of free commands list
+ * @opcode: command type opcode
+ * @flags: flags used by the command
+ * @cmds: array of command bodies for this extended cmd
+ * @num_cmds: quantity of commands in @cmds array
+ * @cmd_dma: DMA address of the command body
+ * @data: memory allocation for command-specific data
+ * @data_dma: DMA address for command-specific data
+ */
+struct vsp1_dl_ext_cmd {
+ struct vsp1_dl_cmd_pool *pool;
+ struct list_head free;
+
+ u8 opcode;
+ u32 flags;
+
+ struct vsp1_pre_ext_dl_body *cmds;
+ unsigned int num_cmds;
+ dma_addr_t cmd_dma;
+
+ void *data;
+ dma_addr_t data_dma;
+};
+
+void vsp1_dlm_setup(struct vsp1_device *vsp1);
+
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+ unsigned int index,
+ unsigned int prealloc);
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm);
+unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm);
+struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm);
+
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
+void vsp1_dl_list_put(struct vsp1_dl_list *dl);
+struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl);
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl);
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal);
+
+struct vsp1_dl_body_pool *
+vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
+ unsigned int num_entries, size_t extra_size);
+void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool);
+struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool);
+void vsp1_dl_body_put(struct vsp1_dl_body *dlb);
+
+void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data);
+int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb);
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl);
+
+#endif /* __VSP1_DL_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
new file mode 100644
index 000000000..8824c4ce6
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_drm.c -- R-Car VSP1 DRM/KMS Interface
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+#include <media/vsp1.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_drm.h"
+#include "vsp1_lif.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uif.h"
+
+#define BRX_NAME(e) (e)->type == VSP1_ENTITY_BRU ? "BRU" : "BRS"
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
+static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe,
+ unsigned int completion)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ bool complete = completion == VSP1_DL_FRAME_END_COMPLETED;
+
+ if (drm_pipe->du_complete) {
+ struct vsp1_entity *uif = drm_pipe->uif;
+ u32 crc;
+
+ crc = uif ? vsp1_uif_get_crc(to_uif(&uif->subdev)) : 0;
+ drm_pipe->du_complete(drm_pipe->du_private, complete, crc);
+ }
+
+ if (completion & VSP1_DL_FRAME_END_INTERNAL) {
+ drm_pipe->force_brx_release = false;
+ wake_up(&drm_pipe->wait_queue);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Configuration
+ */
+
+/*
+ * Insert the UIF in the pipeline between the prev and next entities. If no UIF
+ * is available, connect the two entities directly.
+ */
+static int vsp1_du_insert_uif(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_entity *uif,
+ struct vsp1_entity *prev, unsigned int prev_pad,
+ struct vsp1_entity *next, unsigned int next_pad)
+{
+ struct v4l2_subdev_format format;
+ int ret;
+
+ if (!uif) {
+ /*
+ * If there's no UIF to be inserted, connect the previous and
+ * next entities directly.
+ */
+ prev->sink = next;
+ prev->sink_pad = next_pad;
+ return 0;
+ }
+
+ prev->sink = uif;
+ prev->sink_pad = UIF_PAD_SINK;
+
+ memset(&format, 0, sizeof(format));
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.pad = prev_pad;
+
+ ret = v4l2_subdev_call(&prev->subdev, pad, get_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ format.pad = UIF_PAD_SINK;
+
+ ret = v4l2_subdev_call(&uif->subdev, pad, set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on UIF sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code);
+
+ /*
+ * The UIF doesn't mangle the format between its sink and source pads,
+ * so there is no need to retrieve the format on its source pad.
+ */
+
+ uif->sink = next;
+ uif->sink_pad = next_pad;
+
+ return 0;
+}
+
+/* Setup one RPF and the connected BRx sink pad. */
+static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *rpf,
+ struct vsp1_entity *uif,
+ unsigned int brx_input)
+{
+ struct v4l2_subdev_selection sel;
+ struct v4l2_subdev_format format;
+ const struct v4l2_rect *crop;
+ int ret;
+
+ /*
+ * Configure the format on the RPF sink pad and propagate it up to the
+ * BRx sink pad.
+ */
+ crop = &vsp1->drm->inputs[rpf->entity.index].crop;
+
+ memset(&format, 0, sizeof(format));
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.pad = RWPF_PAD_SINK;
+ format.format.width = crop->width + crop->left;
+ format.format.height = crop->height + crop->top;
+ format.format.code = rpf->fmtinfo->mbus;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: set format %ux%u (%x) on RPF%u sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, rpf->entity.index);
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.pad = RWPF_PAD_SINK;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = *crop;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL,
+ &sel);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: set selection (%u,%u)/%ux%u on RPF%u sink\n",
+ __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
+ rpf->entity.index);
+
+ /*
+ * RPF source, hardcode the format to ARGB8888 to turn on format
+ * conversion if needed.
+ */
+ format.pad = RWPF_PAD_SOURCE;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, get_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: got format %ux%u (%x) on RPF%u source\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, rpf->entity.index);
+
+ format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ /* Insert and configure the UIF if available. */
+ ret = vsp1_du_insert_uif(vsp1, pipe, uif, &rpf->entity, RWPF_PAD_SOURCE,
+ pipe->brx, brx_input);
+ if (ret < 0)
+ return ret;
+
+ /* BRx sink, propagate the format from the RPF source. */
+ format.pad = brx_input;
+
+ ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, BRX_NAME(pipe->brx), format.pad);
+
+ sel.pad = brx_input;
+ sel.target = V4L2_SEL_TGT_COMPOSE;
+ sel.r = vsp1->drm->inputs[rpf->entity.index].compose;
+
+ ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_selection, NULL,
+ &sel);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set selection (%u,%u)/%ux%u on %s pad %u\n",
+ __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
+ BRX_NAME(pipe->brx), sel.pad);
+
+ return 0;
+}
+
+static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe);
+static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe);
+
+/* Setup the BRx source pad. */
+static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct vsp1_entity *brx;
+ int ret;
+
+ /*
+ * Pick a BRx:
+ * - If we need more than two inputs, use the BRU.
+ * - Otherwise, if we are not forced to release our BRx, keep it.
+ * - Else, use any free BRx (randomly starting with the BRU).
+ */
+ if (pipe->num_inputs > 2)
+ brx = &vsp1->bru->entity;
+ else if (pipe->brx && !drm_pipe->force_brx_release)
+ brx = pipe->brx;
+ else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe)
+ brx = &vsp1->bru->entity;
+ else
+ brx = &vsp1->brs->entity;
+
+ /* Switch BRx if needed. */
+ if (brx != pipe->brx) {
+ struct vsp1_entity *released_brx = NULL;
+
+ /* Release our BRx if we have one. */
+ if (pipe->brx) {
+ dev_dbg(vsp1->dev, "%s: pipe %u: releasing %s\n",
+ __func__, pipe->lif->index,
+ BRX_NAME(pipe->brx));
+
+ /*
+ * The BRx might be acquired by the other pipeline in
+ * the next step. We must thus remove it from the list
+ * of entities for this pipeline. The other pipeline's
+ * hardware configuration will reconfigure the BRx
+ * routing.
+ *
+ * However, if the other pipeline doesn't acquire our
+ * BRx, we need to keep it in the list, otherwise the
+ * hardware configuration step won't disconnect it from
+ * the pipeline. To solve this, store the released BRx
+ * pointer to add it back to the list of entities later
+ * if it isn't acquired by the other pipeline.
+ */
+ released_brx = pipe->brx;
+
+ list_del(&pipe->brx->list_pipe);
+ pipe->brx->sink = NULL;
+ pipe->brx->pipe = NULL;
+ pipe->brx = NULL;
+ }
+
+ /*
+ * If the BRx we need is in use, force the owner pipeline to
+ * switch to the other BRx and wait until the switch completes.
+ */
+ if (brx->pipe) {
+ struct vsp1_drm_pipeline *owner_pipe;
+
+ dev_dbg(vsp1->dev, "%s: pipe %u: waiting for %s\n",
+ __func__, pipe->lif->index, BRX_NAME(brx));
+
+ owner_pipe = to_vsp1_drm_pipeline(brx->pipe);
+ owner_pipe->force_brx_release = true;
+
+ vsp1_du_pipeline_setup_inputs(vsp1, &owner_pipe->pipe);
+ vsp1_du_pipeline_configure(&owner_pipe->pipe);
+
+ ret = wait_event_timeout(owner_pipe->wait_queue,
+ !owner_pipe->force_brx_release,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_warn(vsp1->dev,
+ "DRM pipeline %u reconfiguration timeout\n",
+ owner_pipe->pipe.lif->index);
+ }
+
+ /*
+ * If the BRx we have released previously hasn't been acquired
+ * by the other pipeline, add it back to the entities list (with
+ * the pipe pointer NULL) to let vsp1_du_pipeline_configure()
+ * disconnect it from the hardware pipeline.
+ */
+ if (released_brx && !released_brx->pipe)
+ list_add_tail(&released_brx->list_pipe,
+ &pipe->entities);
+
+ /* Add the BRx to the pipeline. */
+ dev_dbg(vsp1->dev, "%s: pipe %u: acquired %s\n",
+ __func__, pipe->lif->index, BRX_NAME(brx));
+
+ pipe->brx = brx;
+ pipe->brx->pipe = pipe;
+ pipe->brx->sink = &pipe->output->entity;
+ pipe->brx->sink_pad = 0;
+
+ list_add_tail(&pipe->brx->list_pipe, &pipe->entities);
+ }
+
+ /*
+ * Configure the format on the BRx source and verify that it matches the
+ * requested format. We don't set the media bus code as it is configured
+ * on the BRx sink pad 0 and propagated inside the entity, not on the
+ * source pad.
+ */
+ format.pad = pipe->brx->source_pad;
+ format.format.width = drm_pipe->width;
+ format.format.height = drm_pipe->height;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, BRX_NAME(pipe->brx), pipe->brx->source_pad);
+
+ if (format.format.width != drm_pipe->width ||
+ format.format.height != drm_pipe->height) {
+ dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static unsigned int rpf_zpos(struct vsp1_device *vsp1, struct vsp1_rwpf *rpf)
+{
+ return vsp1->drm->inputs[rpf->entity.index].zpos;
+}
+
+/* Setup the input side of the pipeline (RPFs and BRx). */
+static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, };
+ struct vsp1_entity *uif;
+ bool use_uif = false;
+ struct vsp1_brx *brx;
+ unsigned int i;
+ int ret;
+
+ /* Count the number of enabled inputs and sort them by Z-order. */
+ pipe->num_inputs = 0;
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
+ unsigned int j;
+
+ if (!pipe->inputs[i])
+ continue;
+
+ /* Insert the RPF in the sorted RPFs array. */
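+		/*
+		 * The array is kept sorted by ascending Z-position, so
+		 * inputs[0] ends up holding the RPF with the lowest zpos.
+		 */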
+ for (j = pipe->num_inputs++; j > 0; --j) {
+ if (rpf_zpos(vsp1, inputs[j-1]) <= rpf_zpos(vsp1, rpf))
+ break;
+ inputs[j] = inputs[j-1];
+ }
+
+ inputs[j] = rpf;
+ }
+
+ /*
+ * Setup the BRx. This must be done before setting up the RPF input
+ * pipelines as the BRx sink compose rectangles depend on the BRx source
+ * format.
+ */
+ ret = vsp1_du_pipeline_setup_brx(vsp1, pipe);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "%s: failed to setup %s source\n", __func__,
+ BRX_NAME(pipe->brx));
+ return ret;
+ }
+
+ brx = to_brx(&pipe->brx->subdev);
+
+ /* Setup the RPF input pipeline for every enabled input. */
+ for (i = 0; i < pipe->brx->source_pad; ++i) {
+ struct vsp1_rwpf *rpf = inputs[i];
+
+ if (!rpf) {
+ brx->inputs[i].rpf = NULL;
+ continue;
+ }
+
+ if (!rpf->entity.pipe) {
+ rpf->entity.pipe = pipe;
+ list_add_tail(&rpf->entity.list_pipe, &pipe->entities);
+ }
+
+ brx->inputs[i].rpf = rpf;
+ rpf->brx_input = i;
+ rpf->entity.sink = pipe->brx;
+ rpf->entity.sink_pad = i;
+
+ dev_dbg(vsp1->dev, "%s: connecting RPF.%u to %s:%u\n",
+ __func__, rpf->entity.index, BRX_NAME(pipe->brx), i);
+
+ uif = drm_pipe->crc.source == VSP1_DU_CRC_PLANE &&
+ drm_pipe->crc.index == i ? drm_pipe->uif : NULL;
+ if (uif)
+ use_uif = true;
+ ret = vsp1_du_pipeline_setup_rpf(vsp1, pipe, rpf, uif, i);
+ if (ret < 0) {
+ dev_err(vsp1->dev,
+ "%s: failed to setup RPF.%u\n",
+ __func__, rpf->entity.index);
+ return ret;
+ }
+ }
+
+ /* Insert and configure the UIF at the BRx output if available. */
+ uif = drm_pipe->crc.source == VSP1_DU_CRC_OUTPUT ? drm_pipe->uif : NULL;
+ if (uif)
+ use_uif = true;
+ ret = vsp1_du_insert_uif(vsp1, pipe, uif,
+ pipe->brx, pipe->brx->source_pad,
+ &pipe->output->entity, 0);
+ if (ret < 0)
+ dev_err(vsp1->dev, "%s: failed to setup UIF after %s\n",
+ __func__, BRX_NAME(pipe->brx));
+
+ /*
+ * If the UIF is not in use, schedule it for removal by setting its pipe
+ * pointer to NULL; vsp1_du_pipeline_configure() will then remove it from the
+ * hardware pipeline and from the pipeline's list of entities. Otherwise
+ * make sure it is present in the pipeline's list of entities if it
+ * wasn't already.
+ */
+ if (drm_pipe->uif && !use_uif) {
+ drm_pipe->uif->pipe = NULL;
+ } else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
+ drm_pipe->uif->pipe = pipe;
+ list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
+ }
+
+ return 0;
+}
+
+/* Setup the output side of the pipeline (WPF and LIF). */
+static int vsp1_du_pipeline_setup_output(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct v4l2_subdev_format format = { 0, };
+ int ret;
+
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.pad = RWPF_PAD_SINK;
+ format.format.width = drm_pipe->width;
+ format.format.height = drm_pipe->height;
+ format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF%u sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, pipe->output->entity.index);
+
+ format.pad = RWPF_PAD_SOURCE;
+ ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, get_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF%u source\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, pipe->output->entity.index);
+
+ format.pad = LIF_PAD_SINK;
+ ret = v4l2_subdev_call(&pipe->lif->subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF%u sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, pipe->lif->index);
+
+ /*
+ * Verify that the format at the output of the pipeline matches the
+ * requested frame size and media bus code.
+ */
+ if (format.format.width != drm_pipe->width ||
+ format.format.height != drm_pipe->height ||
+ format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
+ dev_dbg(vsp1->dev, "%s: format mismatch on LIF%u\n", __func__,
+ pipe->lif->index);
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+/* Configure all entities in the pipeline. */
+static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct vsp1_entity *entity;
+ struct vsp1_entity *next;
+ struct vsp1_dl_list *dl;
+ struct vsp1_dl_body *dlb;
+
+ dl = vsp1_dl_list_get(pipe->output->dlm);
+ dlb = vsp1_dl_list_get_body0(dl);
+
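+	/*
+	 * Accumulate the register writes for all entities in the display list
+	 * body and commit them atomically once the loop completes.
+	 */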
+ list_for_each_entry_safe(entity, next, &pipe->entities, list_pipe) {
+ /* Disconnect unused entities from the pipeline. */
+ if (!entity->pipe) {
+ vsp1_dl_body_write(dlb, entity->route->reg,
+ VI6_DPR_NODE_UNUSED);
+
+ entity->sink = NULL;
+ list_del(&entity->list_pipe);
+
+ continue;
+ }
+
+ vsp1_entity_route_setup(entity, pipe, dlb);
+ vsp1_entity_configure_stream(entity, pipe, dlb);
+ vsp1_entity_configure_frame(entity, pipe, dl, dlb);
+ vsp1_entity_configure_partition(entity, pipe, dl, dlb);
+ }
+
+ vsp1_dl_list_commit(dl, drm_pipe->force_brx_release);
+}
+
+/* -----------------------------------------------------------------------------
+ * DU Driver API
+ */
+
+int vsp1_du_init(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
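+	/* The VSP driver hasn't finished probing yet, ask the caller to retry later. */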
+ if (!vsp1)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_init);
+
+/**
+ * vsp1_du_setup_lif - Setup the output part of the VSP pipeline
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ * @cfg: the LIF configuration
+ *
+ * Configure the output part of the VSP DRM pipeline for the given frame @cfg.width
+ * and @cfg.height. This sets up formats on the BRx source pad, the WPF sink and
+ * source pads, and the LIF sink pad.
+ *
+ * The @pipe_index argument selects which DRM pipeline to set up. The number of
+ * available pipelines depends on the VSP instance.
+ *
+ * As the media bus code on the blend unit source pad is conditioned by the
+ * configuration of its sink 0 pad, we also set up the formats on all blend unit
+ * sinks, even if the configuration will be overwritten later when the planes
+ * are configured through vsp1_du_atomic_update() and committed with
+ * vsp1_du_atomic_flush(). This ensures that the blend unit configuration is
+ * set to a well defined state.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
+ const struct vsp1_du_lif_config *cfg)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_drm_pipeline *drm_pipe;
+ struct vsp1_pipeline *pipe;
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ if (pipe_index >= vsp1->info->lif_count)
+ return -EINVAL;
+
+ drm_pipe = &vsp1->drm->pipe[pipe_index];
+ pipe = &drm_pipe->pipe;
+
+ if (!cfg) {
+ struct vsp1_brx *brx;
+
+ mutex_lock(&vsp1->drm->lock);
+
+ brx = to_brx(&pipe->brx->subdev);
+
+ /*
+ * NULL configuration means the CRTC is being disabled, stop
+ * the pipeline and turn the light off.
+ */
+ ret = vsp1_pipeline_stop(pipe);
+ if (ret == -ETIMEDOUT)
+ dev_err(vsp1->dev, "DRM pipeline stop timeout\n");
+
+ for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) {
+ struct vsp1_rwpf *rpf = pipe->inputs[i];
+
+ if (!rpf)
+ continue;
+
+ /*
+ * Remove the RPF from the pipe and the list of BRx
+ * inputs.
+ */
+ WARN_ON(!rpf->entity.pipe);
+ rpf->entity.pipe = NULL;
+ list_del(&rpf->entity.list_pipe);
+ pipe->inputs[i] = NULL;
+
+ brx->inputs[rpf->brx_input].rpf = NULL;
+ }
+
+ drm_pipe->du_complete = NULL;
+ pipe->num_inputs = 0;
+
+ dev_dbg(vsp1->dev, "%s: pipe %u: releasing %s\n",
+ __func__, pipe->lif->index,
+ BRX_NAME(pipe->brx));
+
+ list_del(&pipe->brx->list_pipe);
+ pipe->brx->pipe = NULL;
+ pipe->brx = NULL;
+
+ mutex_unlock(&vsp1->drm->lock);
+
+ vsp1_dlm_reset(pipe->output->dlm);
+ vsp1_device_put(vsp1);
+
+ dev_dbg(vsp1->dev, "%s: pipeline disabled\n", __func__);
+
+ return 0;
+ }
+
+ drm_pipe->width = cfg->width;
+ drm_pipe->height = cfg->height;
+ pipe->interlaced = cfg->interlaced;
+
+ dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u%s\n",
+ __func__, pipe_index, cfg->width, cfg->height,
+ pipe->interlaced ? "i" : "");
+
+ mutex_lock(&vsp1->drm->lock);
+
+ /* Setup formats through the pipeline. */
+ ret = vsp1_du_pipeline_setup_inputs(vsp1, pipe);
+ if (ret < 0)
+ goto unlock;
+
+ ret = vsp1_du_pipeline_setup_output(vsp1, pipe);
+ if (ret < 0)
+ goto unlock;
+
+ /* Enable the VSP1. */
+ ret = vsp1_device_get(vsp1);
+ if (ret < 0)
+ goto unlock;
+
+ /*
+ * Register a callback to allow us to notify the DRM driver of frame
+ * completion events.
+ */
+ drm_pipe->du_complete = cfg->callback;
+ drm_pipe->du_private = cfg->callback_data;
+
+ /* Disable the display interrupts. */
+ vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
+ vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
+
+ /* Configure all entities in the pipeline. */
+ vsp1_du_pipeline_configure(pipe);
+
+unlock:
+ mutex_unlock(&vsp1->drm->lock);
+
+ if (ret < 0)
+ return ret;
+
+ /* Start the pipeline. */
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ vsp1_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
+
+/**
+ * vsp1_du_atomic_begin - Prepare for an atomic update
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ */
+void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
+{
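+	/*
+	 * Nothing to prepare here: atomic updates are accumulated by
+	 * vsp1_du_atomic_update() and committed to the hardware in
+	 * vsp1_du_atomic_flush().
+	 */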
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
+
+/**
+ * vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ * @rpf_index: index of the RPF to setup (0-based)
+ * @cfg: the RPF configuration
+ *
+ * Configure the VSP to perform image composition through RPF @rpf_index as
+ * described by the @cfg configuration. The image to compose is referenced by
+ * @cfg.mem and composed using the @cfg.src crop rectangle and the @cfg.dst
+ * composition rectangle. The Z-order is configurable with higher @zpos values
+ * displayed on top.
+ *
+ * If the @cfg configuration is NULL, the RPF will be disabled. Calling the
+ * function on a disabled RPF is allowed.
+ *
+ * Image format as stored in memory is expressed as a V4L2 @cfg.pixelformat
+ * value. The memory pitch is configurable to allow for padding at the end of lines,
+ * or simply for images that extend beyond the crop rectangle boundaries. The
+ * @cfg.pitch value is expressed in bytes and applies to all planes for
+ * multiplanar formats.
+ *
+ * The source memory buffer is referenced by the DMA address of its planes in
+ * the @cfg.mem array. Up to two planes are supported. The second plane DMA
+ * address is ignored for formats using a single plane.
+ *
+ * This function isn't reentrant; the caller needs to serialize calls.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
+ unsigned int rpf_index,
+ const struct vsp1_du_atomic_config *cfg)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
+ const struct vsp1_format_info *fmtinfo;
+ unsigned int chroma_hsub;
+ struct vsp1_rwpf *rpf;
+
+ if (rpf_index >= vsp1->info->rpf_count)
+ return -EINVAL;
+
+ rpf = vsp1->rpf[rpf_index];
+
+ if (!cfg) {
+ dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__,
+ rpf_index);
+
+ /*
+ * Remove the RPF from the pipeline's inputs. Keep it in the
+ * pipeline's entity list to let vsp1_du_pipeline_configure()
+ * remove it from the hardware pipeline.
+ */
+ rpf->entity.pipe = NULL;
+ drm_pipe->pipe.inputs[rpf_index] = NULL;
+ return 0;
+ }
+
+ dev_dbg(vsp1->dev,
+ "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
+ __func__, rpf_index,
+ cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height,
+ cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height,
+ cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
+ &cfg->mem[2], cfg->zpos);
+
+ /*
+ * Store the format, stride, memory buffer address, crop and compose
+ * rectangles, and Z-order position for the input.
+ */
+ fmtinfo = vsp1_get_format_info(vsp1, cfg->pixelformat);
+ if (!fmtinfo) {
+ dev_dbg(vsp1->dev, "Unsupported pixel format %08x for RPF\n",
+ cfg->pixelformat);
+ return -EINVAL;
+ }
+
+ /*
+ * Only formats with three planes can affect the chroma planes pitch.
+ * All formats with two planes have a horizontal subsampling value of 2,
+ * but combine U and V in a single chroma plane, which thus results in
+ * the luma plane and chroma plane having the same pitch.
+ */
+ chroma_hsub = (fmtinfo->planes == 3) ? fmtinfo->hsub : 1;
+
+ rpf->fmtinfo = fmtinfo;
+ rpf->format.num_planes = fmtinfo->planes;
+ rpf->format.plane_fmt[0].bytesperline = cfg->pitch;
+ rpf->format.plane_fmt[1].bytesperline = cfg->pitch / chroma_hsub;
+ rpf->alpha = cfg->alpha;
+
+ rpf->mem.addr[0] = cfg->mem[0];
+ rpf->mem.addr[1] = cfg->mem[1];
+ rpf->mem.addr[2] = cfg->mem[2];
+
+ vsp1->drm->inputs[rpf_index].crop = cfg->src;
+ vsp1->drm->inputs[rpf_index].compose = cfg->dst;
+ vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
+
+ drm_pipe->pipe.inputs[rpf_index] = rpf;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_update);
+
+/**
+ * vsp1_du_atomic_flush - Commit an atomic update
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ * @cfg: atomic pipe configuration
+ */
+void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
+ const struct vsp1_du_atomic_pipe_config *cfg)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
+ struct vsp1_pipeline *pipe = &drm_pipe->pipe;
+
+ drm_pipe->crc = cfg->crc;
+
+ mutex_lock(&vsp1->drm->lock);
+ vsp1_du_pipeline_setup_inputs(vsp1, pipe);
+ vsp1_du_pipeline_configure(pipe);
+ mutex_unlock(&vsp1->drm->lock);
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush);
+
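+/*
+ * Illustrative call sequence from the DU driver side (a sketch, not code that
+ * belongs to this driver): the output is configured once with
+ * vsp1_du_setup_lif(), then every frame is composed through the atomic API:
+ *
+ *	vsp1_du_atomic_begin(dev, pipe_index);
+ *	vsp1_du_atomic_update(dev, pipe_index, rpf_index, &cfg);   (per plane)
+ *	vsp1_du_atomic_flush(dev, pipe_index, &pipe_cfg);
+ *
+ * Passing a NULL configuration to vsp1_du_atomic_update() disables the
+ * corresponding RPF, and passing a NULL configuration to vsp1_du_setup_lif()
+ * tears the whole pipeline down.
+ */
+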
+int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ /*
+ * As all the buffers allocated by the DU driver are coherent, we can
+ * skip cache sync. This will need to be revisited when support for
+ * non-coherent buffers is added to the DU driver.
+ */
+ return dma_map_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+}
+EXPORT_SYMBOL_GPL(vsp1_du_map_sg);
+
+void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ dma_unmap_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+}
+EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+int vsp1_drm_init(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+
+ vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL);
+ if (!vsp1->drm)
+ return -ENOMEM;
+
+ mutex_init(&vsp1->drm->lock);
+
+ /* Create one DRM pipeline per LIF. */
+ for (i = 0; i < vsp1->info->lif_count; ++i) {
+ struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[i];
+ struct vsp1_pipeline *pipe = &drm_pipe->pipe;
+
+ init_waitqueue_head(&drm_pipe->wait_queue);
+
+ vsp1_pipeline_init(pipe);
+
+ pipe->frame_end = vsp1_du_pipeline_frame_end;
+
+ /*
+ * The output side of the DRM pipeline is static, add the
+ * corresponding entities manually.
+ */
+ pipe->output = vsp1->wpf[i];
+ pipe->lif = &vsp1->lif[i]->entity;
+
+ pipe->output->entity.pipe = pipe;
+ pipe->output->entity.sink = pipe->lif;
+ pipe->output->entity.sink_pad = 0;
+ list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities);
+
+ pipe->lif->pipe = pipe;
+ list_add_tail(&pipe->lif->list_pipe, &pipe->entities);
+
+ /*
+ * CRC computation is initially disabled, don't add the UIF to
+ * the pipeline.
+ */
+ if (i < vsp1->info->uif_count)
+ drm_pipe->uif = &vsp1->uif[i]->entity;
+ }
+
+ /* Disable all RPFs initially. */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *input = vsp1->rpf[i];
+
+ INIT_LIST_HEAD(&input->entity.list_pipe);
+ }
+
+ return 0;
+}
+
+void vsp1_drm_cleanup(struct vsp1_device *vsp1)
+{
+ mutex_destroy(&vsp1->drm->lock);
+}
diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h
new file mode 100644
index 000000000..8dfd274a5
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_drm.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_drm.h -- R-Car VSP1 DRM/KMS Interface
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_DRM_H__
+#define __VSP1_DRM_H__
+
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+
+#include <media/vsp1.h>
+
+#include "vsp1_pipe.h"
+
+/**
+ * struct vsp1_drm_pipeline - State for the API exposed to the DRM driver
+ * @pipe: the VSP1 pipeline used for display
+ * @width: output display width
+ * @height: output display height
+ * @force_brx_release: when set, release the BRx during the next reconfiguration
+ * @wait_queue: wait queue to wait for BRx release completion
+ * @uif: UIF entity if available for the pipeline
+ * @crc: CRC computation configuration
+ * @du_complete: frame completion callback for the DU driver (optional)
+ * @du_private: data to be passed to the du_complete callback
+ */
+struct vsp1_drm_pipeline {
+ struct vsp1_pipeline pipe;
+
+ unsigned int width;
+ unsigned int height;
+
+ bool force_brx_release;
+ wait_queue_head_t wait_queue;
+
+ struct vsp1_entity *uif;
+ struct vsp1_du_crc_config crc;
+
+ /* Frame synchronisation */
+ void (*du_complete)(void *data, bool completed, u32 crc);
+ void *du_private;
+};
+
+/**
+ * struct vsp1_drm - State for the API exposed to the DRM driver
+ * @pipe: the VSP1 DRM pipelines used for display, one per LIF
+ * @lock: protects the BRU and BRS allocation
+ * @inputs: source crop rectangle, destination compose rectangle and z-order
+ * position for every input (indexed by RPF index)
+ */
+struct vsp1_drm {
+ struct vsp1_drm_pipeline pipe[VSP1_MAX_LIF];
+ struct mutex lock;
+
+ struct {
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ unsigned int zpos;
+ } inputs[VSP1_MAX_RPF];
+};
+
+static inline struct vsp1_drm_pipeline *
+to_vsp1_drm_pipeline(struct vsp1_pipeline *pipe)
+{
+ return container_of(pipe, struct vsp1_drm_pipeline, pipe);
+}
+
+int vsp1_drm_init(struct vsp1_device *vsp1);
+void vsp1_drm_cleanup(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_DRM_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
new file mode 100644
index 000000000..022f84569
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_drv.c -- R-Car VSP1 Driver
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+
+#include <media/rcar-fcp.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_clu.h"
+#include "vsp1_dl.h"
+#include "vsp1_drm.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
+#include "vsp1_hsit.h"
+#include "vsp1_lif.h"
+#include "vsp1_lut.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_sru.h"
+#include "vsp1_uds.h"
+#include "vsp1_uif.h"
+#include "vsp1_video.h"
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
+static irqreturn_t vsp1_irq_handler(int irq, void *data)
+{
+ u32 mask = VI6_WFP_IRQ_STA_DFE | VI6_WFP_IRQ_STA_FRE;
+ struct vsp1_device *vsp1 = data;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i;
+ u32 status;
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+
+ if (wpf == NULL)
+ continue;
+
+ status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
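+		/* Acknowledge the interrupt sources read above. */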
+ vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
+
+ if (status & VI6_WFP_IRQ_STA_DFE) {
+ vsp1_pipeline_frame_end(wpf->entity.pipe);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Entities
+ */
+
+/*
+ * vsp1_create_sink_links - Create links from all sources to the given sink
+ *
+ * This function creates media links from all valid sources to the given sink
+ * pad. Links that would be invalid according to the VSP1 hardware capabilities
+ * are skipped. Those include all links
+ *
+ * - from a UDS to a UDS (UDS entities can't be chained)
+ * - from an entity to itself (no loops are allowed)
+ *
+ * Furthermore, the BRS can't be connected to histogram generators, but no
+ * special check is currently needed as all VSP instances that include a BRS
+ * have no histogram generator.
+ */
+static int vsp1_create_sink_links(struct vsp1_device *vsp1,
+ struct vsp1_entity *sink)
+{
+ struct media_entity *entity = &sink->subdev.entity;
+ struct vsp1_entity *source;
+ unsigned int pad;
+ int ret;
+
+ list_for_each_entry(source, &vsp1->entities, list_dev) {
+ u32 flags;
+
+ if (source->type == sink->type)
+ continue;
+
+ if (source->type == VSP1_ENTITY_HGO ||
+ source->type == VSP1_ENTITY_HGT ||
+ source->type == VSP1_ENTITY_LIF ||
+ source->type == VSP1_ENTITY_WPF)
+ continue;
+
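+		/*
+		 * Enable the connection between RPF.n and WPF.n by default, so
+		 * that each WPF has a usable input without any routing setup
+		 * from userspace.
+		 */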
+ flags = source->type == VSP1_ENTITY_RPF &&
+ sink->type == VSP1_ENTITY_WPF &&
+ source->index == sink->index
+ ? MEDIA_LNK_FL_ENABLED : 0;
+
+ for (pad = 0; pad < entity->num_pads; ++pad) {
+ if (!(entity->pads[pad].flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ ret = media_create_pad_link(&source->subdev.entity,
+ source->source_pad,
+ entity, pad, flags);
+ if (ret < 0)
+ return ret;
+
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ source->sink = sink;
+ }
+ }
+
+ return 0;
+}
+
+static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
+{
+ struct vsp1_entity *entity;
+ unsigned int i;
+ int ret;
+
+ list_for_each_entry(entity, &vsp1->entities, list_dev) {
+ if (entity->type == VSP1_ENTITY_LIF ||
+ entity->type == VSP1_ENTITY_RPF)
+ continue;
+
+ ret = vsp1_create_sink_links(vsp1, entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vsp1->hgo) {
+ ret = media_create_pad_link(&vsp1->hgo->histo.entity.subdev.entity,
+ HISTO_PAD_SOURCE,
+ &vsp1->hgo->histo.video.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vsp1->hgt) {
+ ret = media_create_pad_link(&vsp1->hgt->histo.entity.subdev.entity,
+ HISTO_PAD_SOURCE,
+ &vsp1->hgt->histo.video.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->lif_count; ++i) {
+ if (!vsp1->lif[i])
+ continue;
+
+ ret = media_create_pad_link(&vsp1->wpf[i]->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &vsp1->lif[i]->entity.subdev.entity,
+ LIF_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
+
+ ret = media_create_pad_link(&rpf->video->video.entity, 0,
+ &rpf->entity.subdev.entity,
+ RWPF_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ /*
+ * Connect the video device to the WPF. All connections are
+ * immutable.
+ */
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+
+ ret = media_create_pad_link(&wpf->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &wpf->video->video.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vsp1_destroy_entities(struct vsp1_device *vsp1)
+{
+ struct vsp1_entity *entity, *_entity;
+ struct vsp1_video *video, *_video;
+
+ list_for_each_entry_safe(entity, _entity, &vsp1->entities, list_dev) {
+ list_del(&entity->list_dev);
+ vsp1_entity_destroy(entity);
+ }
+
+ list_for_each_entry_safe(video, _video, &vsp1->videos, list) {
+ list_del(&video->list);
+ vsp1_video_cleanup(video);
+ }
+
+ v4l2_device_unregister(&vsp1->v4l2_dev);
+ if (vsp1->info->uapi)
+ media_device_unregister(&vsp1->media_dev);
+ media_device_cleanup(&vsp1->media_dev);
+
+ if (!vsp1->info->uapi)
+ vsp1_drm_cleanup(vsp1);
+}
+
+static int vsp1_create_entities(struct vsp1_device *vsp1)
+{
+ struct media_device *mdev = &vsp1->media_dev;
+ struct v4l2_device *vdev = &vsp1->v4l2_dev;
+ struct vsp1_entity *entity;
+ unsigned int i;
+ int ret;
+
+ mdev->dev = vsp1->dev;
+ mdev->hw_revision = vsp1->version;
+ strlcpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
+ snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
+ dev_name(mdev->dev));
+ media_device_init(mdev);
+
+ vsp1->media_ops.link_setup = vsp1_entity_link_setup;
+ /*
+ * Don't perform link validation when the userspace API is disabled as
+ * the pipeline is configured internally by the driver in that case, and
+ * its configuration can thus be trusted.
+ */
+ if (vsp1->info->uapi)
+ vsp1->media_ops.link_validate = v4l2_subdev_link_validate;
+
+ vdev->mdev = mdev;
+ ret = v4l2_device_register(vsp1->dev, vdev);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ goto done;
+ }
+
+ /* Instantiate all the entities. */
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS)) {
+ vsp1->brs = vsp1_brx_create(vsp1, VSP1_ENTITY_BRS);
+ if (IS_ERR(vsp1->brs)) {
+ ret = PTR_ERR(vsp1->brs);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_BRU)) {
+ vsp1->bru = vsp1_brx_create(vsp1, VSP1_ENTITY_BRU);
+ if (IS_ERR(vsp1->bru)) {
+ ret = PTR_ERR(vsp1->bru);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_CLU)) {
+ vsp1->clu = vsp1_clu_create(vsp1);
+ if (IS_ERR(vsp1->clu)) {
+ ret = PTR_ERR(vsp1->clu);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
+ }
+
+ vsp1->hsi = vsp1_hsit_create(vsp1, true);
+ if (IS_ERR(vsp1->hsi)) {
+ ret = PTR_ERR(vsp1->hsi);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
+
+ vsp1->hst = vsp1_hsit_create(vsp1, false);
+ if (IS_ERR(vsp1->hst)) {
+ ret = PTR_ERR(vsp1->hst);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
+ vsp1->hgo = vsp1_hgo_create(vsp1);
+ if (IS_ERR(vsp1->hgo)) {
+ ret = PTR_ERR(vsp1->hgo);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hgo->histo.entity.list_dev,
+ &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HGT) && vsp1->info->uapi) {
+ vsp1->hgt = vsp1_hgt_create(vsp1);
+ if (IS_ERR(vsp1->hgt)) {
+ ret = PTR_ERR(vsp1->hgt);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hgt->histo.entity.list_dev,
+ &vsp1->entities);
+ }
+
+ /*
+ * The LIFs are only supported when used in conjunction with the DU, in
+ * which case the userspace API is disabled. If the userspace API is
+ * enabled, skip the LIFs, even when present.
+ */
+ if (!vsp1->info->uapi) {
+ for (i = 0; i < vsp1->info->lif_count; ++i) {
+ struct vsp1_lif *lif;
+
+ lif = vsp1_lif_create(vsp1, i);
+ if (IS_ERR(lif)) {
+ ret = PTR_ERR(lif);
+ goto done;
+ }
+
+ vsp1->lif[i] = lif;
+ list_add_tail(&lif->entity.list_dev, &vsp1->entities);
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_LUT)) {
+ vsp1->lut = vsp1_lut_create(vsp1);
+ if (IS_ERR(vsp1->lut)) {
+ ret = PTR_ERR(vsp1->lut);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf;
+
+ rpf = vsp1_rpf_create(vsp1, i);
+ if (IS_ERR(rpf)) {
+ ret = PTR_ERR(rpf);
+ goto done;
+ }
+
+ vsp1->rpf[i] = rpf;
+ list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
+
+ if (vsp1->info->uapi) {
+ struct vsp1_video *video = vsp1_video_create(vsp1, rpf);
+
+ if (IS_ERR(video)) {
+ ret = PTR_ERR(video);
+ goto done;
+ }
+
+ list_add_tail(&video->list, &vsp1->videos);
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_SRU)) {
+ vsp1->sru = vsp1_sru_create(vsp1);
+ if (IS_ERR(vsp1->sru)) {
+ ret = PTR_ERR(vsp1->sru);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->uds_count; ++i) {
+ struct vsp1_uds *uds;
+
+ uds = vsp1_uds_create(vsp1, i);
+ if (IS_ERR(uds)) {
+ ret = PTR_ERR(uds);
+ goto done;
+ }
+
+ vsp1->uds[i] = uds;
+ list_add_tail(&uds->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->uif_count; ++i) {
+ struct vsp1_uif *uif;
+
+ uif = vsp1_uif_create(vsp1, i);
+ if (IS_ERR(uif)) {
+ ret = PTR_ERR(uif);
+ goto done;
+ }
+
+ vsp1->uif[i] = uif;
+ list_add_tail(&uif->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf;
+
+ wpf = vsp1_wpf_create(vsp1, i);
+ if (IS_ERR(wpf)) {
+ ret = PTR_ERR(wpf);
+ goto done;
+ }
+
+ vsp1->wpf[i] = wpf;
+ list_add_tail(&wpf->entity.list_dev, &vsp1->entities);
+
+ if (vsp1->info->uapi) {
+ struct vsp1_video *video = vsp1_video_create(vsp1, wpf);
+
+ if (IS_ERR(video)) {
+ ret = PTR_ERR(video);
+ goto done;
+ }
+
+ list_add_tail(&video->list, &vsp1->videos);
+ }
+ }
+
+ /* Register all subdevs. */
+ list_for_each_entry(entity, &vsp1->entities, list_dev) {
+ ret = v4l2_device_register_subdev(&vsp1->v4l2_dev,
+ &entity->subdev);
+ if (ret < 0)
+ goto done;
+ }
+
+ /*
+ * Create links and register subdev nodes if the userspace API is
+ * enabled or initialize the DRM pipeline otherwise.
+ */
+ if (vsp1->info->uapi) {
+ ret = vsp1_uapi_create_links(vsp1);
+ if (ret < 0)
+ goto done;
+
+ ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = media_device_register(mdev);
+ } else {
+ ret = vsp1_drm_init(vsp1);
+ }
+
+done:
+ if (ret < 0)
+ vsp1_destroy_entities(vsp1);
+
+ return ret;
+}
+
+int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
+{
+ unsigned int timeout;
+ u32 status;
+
+ status = vsp1_read(vsp1, VI6_STATUS);
+ if (!(status & VI6_STATUS_SYS_ACT(index)))
+ return 0;
+
+ vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(index));
+ for (timeout = 10; timeout > 0; --timeout) {
+ status = vsp1_read(vsp1, VI6_STATUS);
+ if (!(status & VI6_STATUS_SYS_ACT(index)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (!timeout) {
+ dev_err(vsp1->dev, "failed to reset wpf.%u\n", index);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int vsp1_device_init(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+ int ret;
+
+ /* Reset any channel that might be running. */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ ret = vsp1_reset_wpf(vsp1, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
+ (8 << VI6_CLK_DCSWT_CSTRW_SHIFT));
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ for (i = 0; i < vsp1->info->uds_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ for (i = 0; i < vsp1->info->uif_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_UIF_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_LUT_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_CLU_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_HST_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);
+
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS))
+ vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED);
+
+ vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+ vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ vsp1_dlm_setup(vsp1);
+
+ return 0;
+}
+
+/*
+ * vsp1_device_get - Acquire the VSP1 device
+ *
+ * Make sure the device is not suspended and initialize it if needed.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int vsp1_device_get(struct vsp1_device *vsp1)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(vsp1->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(vsp1->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * vsp1_device_put - Release the VSP1 device
+ *
+ * Decrement the VSP1 reference count and clean up the device if the last
+ * reference is released.
+ */
+void vsp1_device_put(struct vsp1_device *vsp1)
+{
+ pm_runtime_put_sync(vsp1->dev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static int __maybe_unused vsp1_pm_suspend(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ /*
+ * When used as part of a display pipeline, the VSP is stopped and
+ * restarted explicitly by the DU.
+ */
+ if (!vsp1->drm)
+ vsp1_video_suspend(vsp1);
+
+ pm_runtime_force_suspend(vsp1->dev);
+
+ return 0;
+}
+
+static int __maybe_unused vsp1_pm_resume(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ pm_runtime_force_resume(vsp1->dev);
+
+ /*
+ * When used as part of a display pipeline, the VSP is stopped and
+ * restarted explicitly by the DU.
+ */
+ if (!vsp1->drm)
+ vsp1_video_resume(vsp1);
+
+ return 0;
+}
+
+static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ rcar_fcp_disable(vsp1->fcp);
+
+ return 0;
+}
+
+static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ int ret;
+
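+	/*
+	 * vsp1->info is still NULL during the initial runtime resume performed
+	 * at probe time to read the version register; the device is only
+	 * initialized once the model has been identified.
+	 */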
+ if (vsp1->info) {
+ ret = vsp1_device_init(vsp1);
+ if (ret < 0)
+ return ret;
+ }
+
+ return rcar_fcp_enable(vsp1->fcp);
+}
+
+static const struct dev_pm_ops vsp1_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
+ SET_RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Driver
+ */
+
+static const struct vsp1_device_info vsp1_device_infos[] = {
+ {
+ .version = VI6_IP_VERSION_MODEL_VSPS_H2,
+ .model = "VSP1-S",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .uds_count = 3,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPR_H2,
+ .model = "VSP1-R",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .uds_count = 3,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
+ .model = "VSP1-D",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT,
+ .lif_count = 1,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPS_M2,
+ .model = "VSP1-S",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .uds_count = 1,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPS_V2H,
+ .model = "VSP1V-S",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V2H,
+ .model = "VSP1V-D",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT,
+ .lif_count = 1,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
+ .model = "VSP2-I",
+ .gen = 3,
+ .features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT
+ | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP
+ | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 1,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
+ .model = "VSP2-BD",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
+ .model = "VSP2-BC",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_LUT | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBS_GEN3,
+ .model = "VSP2-BS",
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 2,
+ .wpf_count = 1,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
+ .model = "VSP2-D",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 1,
+ .wpf_count = 2,
+ .num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3,
+ .model = "VSP2-D",
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
+ .model = "VSP2-DL",
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
+ .lif_count = 2,
+ .rpf_count = 5,
+ .uif_count = 2,
+ .wpf_count = 2,
+ .num_bru_inputs = 5,
+ },
+};
+
+static int vsp1_probe(struct platform_device *pdev)
+{
+ struct vsp1_device *vsp1;
+ struct device_node *fcp_node;
+ struct resource *irq;
+ struct resource *io;
+ unsigned int i;
+ int ret;
+
+ vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
+ if (vsp1 == NULL)
+ return -ENOMEM;
+
+ vsp1->dev = &pdev->dev;
+ INIT_LIST_HEAD(&vsp1->entities);
+ INIT_LIST_HEAD(&vsp1->videos);
+
+ platform_set_drvdata(pdev, vsp1);
+
+ /* I/O and IRQ resources (clock managed by the clock PM domain) */
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vsp1->mmio = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(vsp1->mmio))
+ return PTR_ERR(vsp1->mmio);
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "missing IRQ\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq->start, vsp1_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), vsp1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ /* FCP (optional) */
+ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+ if (fcp_node) {
+ vsp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(vsp1->fcp)) {
+ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(vsp1->fcp));
+ return PTR_ERR(vsp1->fcp);
+ }
+
+ /*
+ * When the FCP is present, it handles all bus master accesses
+ * for the VSP and must thus be used in place of the VSP device
+ * to map DMA buffers.
+ */
+ vsp1->bus_master = rcar_fcp_get_device(vsp1->fcp);
+ } else {
+ vsp1->bus_master = vsp1->dev;
+ }
+
+ /* Configure device parameters based on the version register. */
+ pm_runtime_enable(&pdev->dev);
+
+ ret = vsp1_device_get(vsp1);
+ if (ret < 0)
+ goto done;
+
+ vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
+ vsp1_device_put(vsp1);
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
+ if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
+ vsp1_device_infos[i].version) {
+ vsp1->info = &vsp1_device_infos[i];
+ break;
+ }
+ }
+
+ if (!vsp1->info) {
+ dev_err(&pdev->dev, "unsupported IP version 0x%08x\n",
+ vsp1->version);
+ ret = -ENXIO;
+ goto done;
+ }
+
+ dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version);
+
+	/* Instantiate entities. */
+ ret = vsp1_create_entities(vsp1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create entities\n");
+ goto done;
+ }
+
+done:
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(vsp1->fcp);
+ }
+
+ return ret;
+}
+
+static int vsp1_remove(struct platform_device *pdev)
+{
+ struct vsp1_device *vsp1 = platform_get_drvdata(pdev);
+
+ vsp1_destroy_entities(vsp1);
+ rcar_fcp_put(vsp1->fcp);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id vsp1_of_match[] = {
+ { .compatible = "renesas,vsp1" },
+ { .compatible = "renesas,vsp2" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsp1_of_match);
+
+static struct platform_driver vsp1_platform_driver = {
+ .probe = vsp1_probe,
+ .remove = vsp1_remove,
+ .driver = {
+ .name = "vsp1",
+ .pm = &vsp1_pm_ops,
+ .of_match_table = vsp1_of_match,
+ },
+};
+
+module_platform_driver(vsp1_platform_driver);
+
+MODULE_ALIAS("vsp1");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas VSP1 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
new file mode 100644
index 000000000..36a29e131
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_entity.c -- R-Car VSP1 Base Entity
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+
+void vsp1_entity_route_setup(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_entity *source;
+ u32 route;
+
+ if (entity->type == VSP1_ENTITY_HGO) {
+ u32 smppt;
+
+ /*
+ * The HGO is a special case, its routing is configured on the
+ * sink pad.
+ */
+ source = entity->sources[0];
+ smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
+ | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
+
+ vsp1_dl_body_write(dlb, VI6_DPR_HGO_SMPPT, smppt);
+ return;
+ } else if (entity->type == VSP1_ENTITY_HGT) {
+ u32 smppt;
+
+ /*
+ * The HGT is a special case, its routing is configured on the
+ * sink pad.
+ */
+ source = entity->sources[0];
+ smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
+ | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
+
+ vsp1_dl_body_write(dlb, VI6_DPR_HGT_SMPPT, smppt);
+ return;
+ }
+
+ source = entity;
+ if (source->route->reg == 0)
+ return;
+
+ route = source->sink->route->inputs[source->sink_pad];
+ /*
+ * The ILV and BRS share the same data path route. The extra BRSSEL bit
+ * selects between the ILV and BRS.
+ */
+ if (source->type == VSP1_ENTITY_BRS)
+ route |= VI6_DPR_ROUTE_BRSSEL;
+ vsp1_dl_body_write(dlb, source->route->reg, route);
+}
+
+void vsp1_entity_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ if (entity->ops->configure_stream)
+ entity->ops->configure_stream(entity, pipe, dlb);
+}
+
+void vsp1_entity_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ if (entity->ops->configure_frame)
+ entity->ops->configure_frame(entity, pipe, dl, dlb);
+}
+
+void vsp1_entity_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ if (entity->ops->configure_partition)
+ entity->ops->configure_partition(entity, pipe, dl, dlb);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/**
+ * vsp1_entity_get_pad_config - Get the pad configuration for an entity
+ * @entity: the entity
+ * @cfg: the TRY pad configuration
+ * @which: configuration selector (ACTIVE or TRY)
+ *
+ * When called with which set to V4L2_SUBDEV_FORMAT_ACTIVE the caller must hold
+ * the entity lock to access the returned configuration.
+ *
+ * Return the pad configuration requested by the which argument. The TRY
+ * configuration is passed explicitly to the function through the cfg argument
+ * and simply returned when requested. The ACTIVE configuration comes from the
+ * entity structure.
+ */
+struct v4l2_subdev_pad_config *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return entity->config;
+ case V4L2_SUBDEV_FORMAT_TRY:
+ default:
+ return cfg;
+ }
+}
+
+/**
+ * vsp1_entity_get_pad_format - Get a pad format from storage for an entity
+ * @entity: the entity
+ * @cfg: the configuration storage
+ * @pad: the pad number
+ *
+ * Return the format stored in the given configuration for an entity's pad. The
+ * configuration can be an ACTIVE or TRY configuration.
+ */
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
+{
+ return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
+}
+
+/**
+ * vsp1_entity_get_pad_selection - Get a pad selection from storage for entity
+ * @entity: the entity
+ * @cfg: the configuration storage
+ * @pad: the pad number
+ * @target: the selection target
+ *
+ * Return the selection rectangle stored in the given configuration for an
+ * entity's pad. The configuration can be an ACTIVE or TRY configuration. The
+ * selection target can be COMPOSE or CROP.
+ */
+struct v4l2_rect *
+vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, unsigned int target)
+{
+ switch (target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ return v4l2_subdev_get_try_compose(&entity->subdev, cfg, pad);
+ case V4L2_SEL_TGT_CROP:
+ return v4l2_subdev_get_try_crop(&entity->subdev, cfg, pad);
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * vsp1_entity_init_cfg - Initialize formats on all pads
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ *
+ * Initialize all pad formats with default values in the given pad config. This
+ * function can be used as a handler for the subdev pad::init_cfg operation.
+ */
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format format;
+ unsigned int pad;
+
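+	/*
+	 * Only the non-source pads are initialized explicitly; the set_fmt
+	 * handlers are expected to propagate the sink format to the source
+	 * pad (the entity's last pad).
+	 */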
+ for (pad = 0; pad < subdev->entity.num_pads - 1; ++pad) {
+ memset(&format, 0, sizeof(format));
+
+ format.pad = pad;
+ format.which = cfg ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ v4l2_subdev_call(subdev, pad, set_fmt, cfg, &format);
+ }
+
+ return 0;
+}
+
+/*
+ * vsp1_subdev_get_pad_format - Subdev pad get_fmt handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: V4L2 subdev format
+ *
+ * This function implements the subdev get_fmt pad operation. It can be used as
+ * a direct drop-in for the operation handler.
+ */
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_pad_config *config;
+
+ config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ mutex_lock(&entity->lock);
+ fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad);
+ mutex_unlock(&entity->lock);
+
+ return 0;
+}
+
+/*
+ * vsp1_subdev_enum_mbus_code - Subdev pad enum_mbus_code handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: Media bus code enumeration
+ * @codes: Array of supported media bus codes
+ * @ncodes: Number of supported media bus codes
+ *
+ * This function implements the subdev enum_mbus_code pad operation for entities
+ * that do not support format conversion. It enumerates the given supported
+ * media bus codes on the sink pad and reports a source pad format identical to
+ * the sink pad.
+ */
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code,
+ const unsigned int *codes, unsigned int ncodes)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+
+ if (code->pad == 0) {
+ if (code->index >= ncodes)
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+
+ /*
+ * The entity can't perform format conversion, the sink format
+ * is always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ config = vsp1_entity_get_pad_config(entity, cfg, code->which);
+ if (!config)
+ return -EINVAL;
+
+ mutex_lock(&entity->lock);
+ format = vsp1_entity_get_pad_format(entity, config, 0);
+ code->code = format->code;
+ mutex_unlock(&entity->lock);
+ }
+
+ return 0;
+}
+
+/*
+ * vsp1_subdev_enum_frame_size - Subdev pad enum_frame_size handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: Frame size enumeration
+ * @min_width: Minimum image width
+ * @min_height: Minimum image height
+ * @max_width: Maximum image width
+ * @max_height: Maximum image height
+ *
+ * This function implements the subdev enum_frame_size pad operation for
+ * entities that do not support scaling or cropping. It reports the given
+ * minimum and maximum frame width and height on the sink pad, and a fixed
+ * source pad size identical to the sink pad.
+ */
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse,
+ unsigned int min_width, unsigned int min_height,
+ unsigned int max_width, unsigned int max_height)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ config = vsp1_entity_get_pad_config(entity, cfg, fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(entity, config, fse->pad);
+
+ mutex_lock(&entity->lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (fse->pad == 0) {
+ fse->min_width = min_width;
+ fse->max_width = max_width;
+ fse->min_height = min_height;
+ fse->max_height = max_height;
+ } else {
+ /*
+ * The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+done:
+ mutex_unlock(&entity->lock);
+ return ret;
+}
+
+/*
+ * vsp1_subdev_set_pad_format - Subdev pad set_fmt handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: V4L2 subdev format
+ * @codes: Array of supported media bus codes
+ * @ncodes: Number of supported media bus codes
+ * @min_width: Minimum image width
+ * @min_height: Minimum image height
+ * @max_width: Maximum image width
+ * @max_height: Maximum image height
+ *
+ * This function implements the subdev set_fmt pad operation for entities that
+ * do not support scaling or cropping. It defaults to the first supplied media
+ * bus code if the requested code isn't supported, clamps the size to the
+ * supplied minimum and maximum, and propagates the sink pad format to the
+ * source pad.
+ */
+int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt,
+ const unsigned int *codes, unsigned int ncodes,
+ unsigned int min_width, unsigned int min_height,
+ unsigned int max_width, unsigned int max_height)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&entity->lock);
+
+ config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ format = vsp1_entity_get_pad_format(entity, config, fmt->pad);
+
+ if (fmt->pad == entity->source_pad) {
+ /* The output format can't be modified. */
+ fmt->format = *format;
+ goto done;
+ }
+
+ /*
+ * Default to the first media bus code if the requested format is not
+ * supported.
+ */
+ for (i = 0; i < ncodes; ++i) {
+ if (fmt->format.code == codes[i])
+ break;
+ }
+
+ format->code = i < ncodes ? codes[i] : codes[0];
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ min_width, max_width);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ min_height, max_height);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(entity, config, entity->source_pad);
+ *format = fmt->format;
+
+ /* Reset the crop and compose rectangles */
+ selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad,
+ V4L2_SEL_TGT_CROP);
+ selection->left = 0;
+ selection->top = 0;
+ selection->width = format->width;
+ selection->height = format->height;
+
+ selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ selection->left = 0;
+ selection->top = 0;
+ selection->width = format->width;
+ selection->height = format->height;
+
+done:
+ mutex_unlock(&entity->lock);
+ return ret;
+}
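+
+/*
+ * Behavioural example (illustrative numbers only): for an entity whose
+ * supported codes are ARGB8888 and AYUV8 with a 2x2 to 8190x8190 size range,
+ * requesting MEDIA_BUS_FMT_AHSV8888_1X32 at 10000x1 on the sink pad is
+ * adjusted to MEDIA_BUS_FMT_ARGB8888_1X32 at 8190x2. The adjusted format is
+ * returned to the caller, stored on the source pad, and the sink crop and
+ * compose rectangles are reset to cover the full frame.
+ */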
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static inline struct vsp1_entity *
+media_entity_to_vsp1_entity(struct media_entity *entity)
+{
+ return container_of(entity, struct vsp1_entity, subdev.entity);
+}
+
+static int vsp1_entity_link_setup_source(const struct media_pad *source_pad,
+ const struct media_pad *sink_pad,
+ u32 flags)
+{
+ struct vsp1_entity *source;
+
+ source = media_entity_to_vsp1_entity(source_pad->entity);
+
+ if (!source->route)
+ return 0;
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ struct vsp1_entity *sink
+ = media_entity_to_vsp1_entity(sink_pad->entity);
+
+ /*
+ * Fan-out is limited to one for the normal data path plus
+ * optional HGO and HGT. We ignore the HGO and HGT here.
+ */
+ if (sink->type != VSP1_ENTITY_HGO &&
+ sink->type != VSP1_ENTITY_HGT) {
+ if (source->sink)
+ return -EBUSY;
+ source->sink = sink;
+ source->sink_pad = sink_pad->index;
+ }
+ } else {
+ source->sink = NULL;
+ source->sink_pad = 0;
+ }
+
+ return 0;
+}
+
+static int vsp1_entity_link_setup_sink(const struct media_pad *source_pad,
+ const struct media_pad *sink_pad,
+ u32 flags)
+{
+ struct vsp1_entity *sink;
+ struct vsp1_entity *source;
+
+ sink = media_entity_to_vsp1_entity(sink_pad->entity);
+ source = media_entity_to_vsp1_entity(source_pad->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ /* Fan-in is limited to one. */
+ if (sink->sources[sink_pad->index])
+ return -EBUSY;
+
+ sink->sources[sink_pad->index] = source;
+ } else {
+ sink->sources[sink_pad->index] = NULL;
+ }
+
+ return 0;
+}
+
+int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ if (local->flags & MEDIA_PAD_FL_SOURCE)
+ return vsp1_entity_link_setup_source(local, remote, flags);
+ else
+ return vsp1_entity_link_setup_sink(remote, local, flags);
+}
+
+/**
+ * vsp1_entity_remote_pad - Find the pad at the remote end of a link
+ * @pad: Pad at the local end of the link
+ *
+ * Search for a remote pad connected to the given pad by iterating over all
+ * links originating or terminating at that pad until an enabled link is found.
+ *
+ * Our link setup implementation guarantees that the output fan-out will not be
+ * higher than one for the data pipelines, except for the links to the HGO and
+ * HGT that can be enabled in addition to a regular data link. When traversing
+ * outgoing links this function ignores HGO and HGT entities and should thus be
+ * used in place of the generic media_entity_remote_pad() function to traverse
+ * data pipelines.
+ *
+ * Return a pointer to the pad at the remote end of the first found enabled
+ * link, or NULL if no enabled link has been found.
+ */
+struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad)
+{
+ struct media_link *link;
+
+ list_for_each_entry(link, &pad->entity->links, list) {
+ struct vsp1_entity *entity;
+
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+		/* If we're the sink, the source will never be an HGO or HGT. */
+ if (link->sink == pad)
+ return link->source;
+
+ if (link->source != pad)
+ continue;
+
+ /* If the sink isn't a subdevice it can't be an HGO or HGT. */
+ if (!is_media_entity_v4l2_subdev(link->sink->entity))
+ return link->sink;
+
+ entity = media_entity_to_vsp1_entity(link->sink->entity);
+ if (entity->type != VSP1_ENTITY_HGO &&
+ entity->type != VSP1_ENTITY_HGT)
+ return link->sink;
+ }
+
+	return NULL;
+}
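+
+/*
+ * Illustrative call pattern (a sketch, not driver code): pipeline construction
+ * can walk a chain of entities by alternating between this helper and each
+ * entity's source pad.
+ *
+ *	struct media_pad *pad = &entity->pads[entity->source_pad];
+ *
+ *	while ((pad = vsp1_entity_remote_pad(pad)) != NULL) {
+ *		struct vsp1_entity *next;
+ *
+ *		if (!is_media_entity_v4l2_subdev(pad->entity))
+ *			break;
+ *
+ *		next = media_entity_to_vsp1_entity(pad->entity);
+ *		pad = &next->pads[next->source_pad];
+ *	}
+ */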
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+#define VSP1_ENTITY_ROUTE(ent) \
+ { VSP1_ENTITY_##ent, 0, VI6_DPR_##ent##_ROUTE, \
+ { VI6_DPR_NODE_##ent }, VI6_DPR_NODE_##ent }
+
+#define VSP1_ENTITY_ROUTE_RPF(idx) \
+ { VSP1_ENTITY_RPF, idx, VI6_DPR_RPF_ROUTE(idx), \
+ { 0, }, VI6_DPR_NODE_RPF(idx) }
+
+#define VSP1_ENTITY_ROUTE_UDS(idx) \
+ { VSP1_ENTITY_UDS, idx, VI6_DPR_UDS_ROUTE(idx), \
+ { VI6_DPR_NODE_UDS(idx) }, VI6_DPR_NODE_UDS(idx) }
+
+#define VSP1_ENTITY_ROUTE_UIF(idx) \
+ { VSP1_ENTITY_UIF, idx, VI6_DPR_UIF_ROUTE(idx), \
+ { VI6_DPR_NODE_UIF(idx) }, VI6_DPR_NODE_UIF(idx) }
+
+#define VSP1_ENTITY_ROUTE_WPF(idx) \
+ { VSP1_ENTITY_WPF, idx, 0, \
+ { VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) }
+
+static const struct vsp1_route vsp1_routes[] = {
+ { VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE,
+ { VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 },
+ { VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE,
+ { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1),
+ VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3),
+ VI6_DPR_NODE_BRU_IN(4) }, VI6_DPR_NODE_BRU_OUT },
+ VSP1_ENTITY_ROUTE(CLU),
+ { VSP1_ENTITY_HGO, 0, 0, { 0, }, 0 },
+ { VSP1_ENTITY_HGT, 0, 0, { 0, }, 0 },
+ VSP1_ENTITY_ROUTE(HSI),
+ VSP1_ENTITY_ROUTE(HST),
+ { VSP1_ENTITY_LIF, 0, 0, { 0, }, 0 },
+ { VSP1_ENTITY_LIF, 1, 0, { 0, }, 0 },
+ VSP1_ENTITY_ROUTE(LUT),
+ VSP1_ENTITY_ROUTE_RPF(0),
+ VSP1_ENTITY_ROUTE_RPF(1),
+ VSP1_ENTITY_ROUTE_RPF(2),
+ VSP1_ENTITY_ROUTE_RPF(3),
+ VSP1_ENTITY_ROUTE_RPF(4),
+ VSP1_ENTITY_ROUTE(SRU),
+ VSP1_ENTITY_ROUTE_UDS(0),
+ VSP1_ENTITY_ROUTE_UDS(1),
+ VSP1_ENTITY_ROUTE_UDS(2),
+ VSP1_ENTITY_ROUTE_UIF(0), /* Named UIF4 in the documentation */
+ VSP1_ENTITY_ROUTE_UIF(1), /* Named UIF5 in the documentation */
+ VSP1_ENTITY_ROUTE_WPF(0),
+ VSP1_ENTITY_ROUTE_WPF(1),
+ VSP1_ENTITY_ROUTE_WPF(2),
+ VSP1_ENTITY_ROUTE_WPF(3),
+};
+
+int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
+ const char *name, unsigned int num_pads,
+ const struct v4l2_subdev_ops *ops, u32 function)
+{
+ struct v4l2_subdev *subdev;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_routes); ++i) {
+ if (vsp1_routes[i].type == entity->type &&
+ vsp1_routes[i].index == entity->index) {
+ entity->route = &vsp1_routes[i];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vsp1_routes))
+ return -EINVAL;
+
+ mutex_init(&entity->lock);
+
+ entity->vsp1 = vsp1;
+ entity->source_pad = num_pads - 1;
+
+ /* Allocate and initialize pads. */
+ entity->pads = devm_kcalloc(vsp1->dev,
+ num_pads, sizeof(*entity->pads),
+ GFP_KERNEL);
+ if (entity->pads == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pads - 1; ++i)
+ entity->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ entity->sources = devm_kcalloc(vsp1->dev, max(num_pads - 1, 1U),
+ sizeof(*entity->sources), GFP_KERNEL);
+ if (entity->sources == NULL)
+ return -ENOMEM;
+
+ /* Single-pad entities only have a sink. */
+ entity->pads[num_pads - 1].flags = num_pads > 1 ? MEDIA_PAD_FL_SOURCE
+ : MEDIA_PAD_FL_SINK;
+
+ /* Initialize the media entity. */
+ ret = media_entity_pads_init(&entity->subdev.entity, num_pads,
+ entity->pads);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &entity->subdev;
+ v4l2_subdev_init(subdev, ops);
+
+ subdev->entity.function = function;
+ subdev->entity.ops = &vsp1->media_ops;
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ snprintf(subdev->name, sizeof(subdev->name), "%s %s",
+ dev_name(vsp1->dev), name);
+
+ vsp1_entity_init_cfg(subdev, NULL);
+
+ /*
+ * Allocate the pad configuration to store formats and selection
+ * rectangles.
+ */
+ entity->config = v4l2_subdev_alloc_pad_config(&entity->subdev);
+ if (entity->config == NULL) {
+ media_entity_cleanup(&entity->subdev.entity);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void vsp1_entity_destroy(struct vsp1_entity *entity)
+{
+ if (entity->ops && entity->ops->destroy)
+ entity->ops->destroy(entity);
+ if (entity->subdev.ctrl_handler)
+ v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
+ v4l2_subdev_free_pad_config(entity->config);
+ media_entity_cleanup(&entity->subdev.entity);
+}
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
new file mode 100644
index 000000000..97acb7795
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_entity.h -- R-Car VSP1 Base Entity
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_ENTITY_H__
+#define __VSP1_ENTITY_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <media/v4l2-subdev.h>
+
+struct vsp1_device;
+struct vsp1_dl_body;
+struct vsp1_dl_list;
+struct vsp1_pipeline;
+struct vsp1_partition;
+struct vsp1_partition_window;
+
+enum vsp1_entity_type {
+ VSP1_ENTITY_BRS,
+ VSP1_ENTITY_BRU,
+ VSP1_ENTITY_CLU,
+ VSP1_ENTITY_HGO,
+ VSP1_ENTITY_HGT,
+ VSP1_ENTITY_HSI,
+ VSP1_ENTITY_HST,
+ VSP1_ENTITY_LIF,
+ VSP1_ENTITY_LUT,
+ VSP1_ENTITY_RPF,
+ VSP1_ENTITY_SRU,
+ VSP1_ENTITY_UDS,
+ VSP1_ENTITY_UIF,
+ VSP1_ENTITY_WPF,
+};
+
+#define VSP1_ENTITY_MAX_INPUTS 5 /* For the BRU */
+
+/*
+ * struct vsp1_route - Entity routing configuration
+ * @type: Entity type this routing entry is associated with
+ * @index: Entity index this routing entry is associated with
+ * @reg: Output routing configuration register
+ * @inputs: Target node value for each input
+ * @output: Target node value for entity output
+ *
+ * Each &vsp1_route entry describes the routing configuration for the entity
+ * specified by the entry's @type and @index. @reg indicates the register that
+ * holds the output routing configuration for the entity, and the @inputs array
+ * stores the target node value for each input of the entity. The @output field
+ * stores the target node value of the entity output when used as a source for
+ * histogram generation.
+ */
+struct vsp1_route {
+ enum vsp1_entity_type type;
+ unsigned int index;
+ unsigned int reg;
+ unsigned int inputs[VSP1_ENTITY_MAX_INPUTS];
+ unsigned int output;
+};
+
+/**
+ * struct vsp1_entity_operations - Entity operations
+ * @destroy: Destroy the entity.
+ * @configure_stream: Setup the hardware parameters for the stream which do
+ * not vary between frames (pipeline, formats).
+ * @configure_frame: Configure the runtime parameters for each frame.
+ * @configure_partition: Configure partition specific parameters.
+ * @max_width: Return the max supported width of data that the entity can
+ * process in a single operation.
+ * @partition: Process the partition construction based on this entity's
+ * configuration.
+ */
+struct vsp1_entity_operations {
+ void (*destroy)(struct vsp1_entity *);
+ void (*configure_stream)(struct vsp1_entity *, struct vsp1_pipeline *,
+ struct vsp1_dl_body *);
+ void (*configure_frame)(struct vsp1_entity *, struct vsp1_pipeline *,
+ struct vsp1_dl_list *, struct vsp1_dl_body *);
+ void (*configure_partition)(struct vsp1_entity *,
+ struct vsp1_pipeline *,
+ struct vsp1_dl_list *,
+ struct vsp1_dl_body *);
+ unsigned int (*max_width)(struct vsp1_entity *, struct vsp1_pipeline *);
+ void (*partition)(struct vsp1_entity *, struct vsp1_pipeline *,
+ struct vsp1_partition *, unsigned int,
+ struct vsp1_partition_window *);
+};
+
+struct vsp1_entity {
+ struct vsp1_device *vsp1;
+
+ const struct vsp1_entity_operations *ops;
+
+ enum vsp1_entity_type type;
+ unsigned int index;
+ const struct vsp1_route *route;
+
+ struct vsp1_pipeline *pipe;
+
+ struct list_head list_dev;
+ struct list_head list_pipe;
+
+ struct media_pad *pads;
+ unsigned int source_pad;
+
+ struct vsp1_entity **sources;
+ struct vsp1_entity *sink;
+ unsigned int sink_pad;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_subdev_pad_config *config;
+
+ struct mutex lock; /* Protects the pad config */
+};
+
+static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_entity, subdev);
+}
+
+int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
+ const char *name, unsigned int num_pads,
+ const struct v4l2_subdev_ops *ops, u32 function);
+void vsp1_entity_destroy(struct vsp1_entity *entity);
+
+extern const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops;
+
+int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags);
+
+struct v4l2_subdev_pad_config *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which);
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad);
+struct v4l2_rect *
+vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, unsigned int target);
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg);
+
+void vsp1_entity_route_setup(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb);
+
+void vsp1_entity_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb);
+
+void vsp1_entity_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb);
+
+void vsp1_entity_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb);
+
+struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad);
+
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt);
+int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt,
+ const unsigned int *codes, unsigned int ncodes,
+ unsigned int min_width, unsigned int min_height,
+ unsigned int max_width, unsigned int max_height);
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code,
+ const unsigned int *codes, unsigned int ncodes);
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse,
+ unsigned int min_w, unsigned int min_h,
+ unsigned int max_w, unsigned int max_h);
+
+#endif /* __VSP1_ENTITY_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_hgo.c b/drivers/media/platform/vsp1/vsp1_hgo.c
new file mode 100644
index 000000000..827373c25
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgo.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_hgo.c -- R-Car VSP1 Histogram Generator 1D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hgo.h"
+
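+/*
+ * Size of the largest histogram payload produced by vsp1_hgo_frame_end():
+ * 2 words (max/min and sum) followed by 256 bins when 256-bin mode is
+ * selected. The 64-bin layouts (2 + 64 and 6 + 3 * 64 words) are smaller.
+ */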
+#define HGO_DATA_SIZE ((2 + 256) * 4)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hgo_read(struct vsp1_hgo *hgo, u32 reg)
+{
+ return vsp1_read(hgo->histo.entity.vsp1, reg);
+}
+
+static inline void vsp1_hgo_write(struct vsp1_hgo *hgo,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame End Handler
+ */
+
+void vsp1_hgo_frame_end(struct vsp1_entity *entity)
+{
+ struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
+ struct vsp1_histogram_buffer *buf;
+ unsigned int i;
+ size_t size;
+ u32 *data;
+
+ buf = vsp1_histogram_buffer_get(&hgo->histo);
+ if (!buf)
+ return;
+
+ data = buf->addr;
+
+ if (hgo->num_bins == 256) {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+
+ for (i = 0; i < 256; ++i) {
+ vsp1_write(hgo->histo.entity.vsp1,
+ VI6_HGO_EXT_HIST_ADDR, i);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_EXT_HIST_DATA);
+ }
+
+ size = (2 + 256) * sizeof(u32);
+ } else if (hgo->max_rgb) {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+
+ for (i = 0; i < 64; ++i)
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i));
+
+ size = (2 + 64) * sizeof(u32);
+ } else {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_MAXMIN);
+
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_SUM);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_SUM);
+
+ for (i = 0; i < 64; ++i) {
+ data[i] = vsp1_hgo_read(hgo, VI6_HGO_R_HISTO(i));
+ data[i+64] = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i));
+ data[i+128] = vsp1_hgo_read(hgo, VI6_HGO_B_HISTO(i));
+ }
+
+ size = (6 + 64 * 3) * sizeof(u32);
+ }
+
+ vsp1_histogram_buffer_complete(&hgo->histo, buf, size);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_HGO_MAX_RGB (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_VSP1_HGO_NUM_BINS (V4L2_CID_USER_BASE | 0x1002)
+
+static const struct v4l2_ctrl_config hgo_max_rgb_control = {
+ .id = V4L2_CID_VSP1_HGO_MAX_RGB,
+ .name = "Maximum RGB Mode",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT,
+};
+
+static const s64 hgo_num_bins[] = {
+ 64, 256,
+};
+
+static const struct v4l2_ctrl_config hgo_num_bins_control = {
+ .id = V4L2_CID_VSP1_HGO_NUM_BINS,
+ .name = "Number of Bins",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .qmenu_int = hgo_num_bins,
+ .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hgo_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int hratio;
+ unsigned int vratio;
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
+ compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_COMPOSE);
+
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_REGRST, VI6_HGO_REGRST_RCLEA);
+
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_OFFSET,
+ (crop->left << VI6_HGO_OFFSET_HOFFSET_SHIFT) |
+ (crop->top << VI6_HGO_OFFSET_VOFFSET_SHIFT));
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_SIZE,
+ (crop->width << VI6_HGO_SIZE_HSIZE_SHIFT) |
+ (crop->height << VI6_HGO_SIZE_VSIZE_SHIFT));
+
+ mutex_lock(hgo->ctrls.handler.lock);
+ hgo->max_rgb = hgo->ctrls.max_rgb->cur.val;
+ if (hgo->ctrls.num_bins)
+ hgo->num_bins = hgo_num_bins[hgo->ctrls.num_bins->cur.val];
+ mutex_unlock(hgo->ctrls.handler.lock);
+
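+	/*
+	 * The crop / compose width and height ratios are restricted to 1, 2
+	 * and 4 by the compose rectangle handling, so the expressions below
+	 * map them to the 0, 1 and 2 values programmed in the HRATIO and
+	 * VRATIO fields (for instance a ratio of 2 gives 2 * 2 / 3 = 1).
+	 */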
+ hratio = crop->width * 2 / compose->width / 3;
+ vratio = crop->height * 2 / compose->height / 3;
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_MODE,
+ (hgo->num_bins == 256 ? VI6_HGO_MODE_STEP : 0) |
+ (hgo->max_rgb ? VI6_HGO_MODE_MAXRGB : 0) |
+ (hratio << VI6_HGO_MODE_HRATIO_SHIFT) |
+ (vratio << VI6_HGO_MODE_VRATIO_SHIFT));
+}
+
+static const struct vsp1_entity_operations hgo_entity_ops = {
+ .configure_stream = hgo_configure_stream,
+ .destroy = vsp1_histogram_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const unsigned int hgo_mbus_formats[] = {
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+};
+
+struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_hgo *hgo;
+ int ret;
+
+ hgo = devm_kzalloc(vsp1->dev, sizeof(*hgo), GFP_KERNEL);
+ if (hgo == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&hgo->ctrls.handler,
+ vsp1->info->gen == 3 ? 2 : 1);
+ hgo->ctrls.max_rgb = v4l2_ctrl_new_custom(&hgo->ctrls.handler,
+ &hgo_max_rgb_control, NULL);
+ if (vsp1->info->gen == 3)
+ hgo->ctrls.num_bins =
+ v4l2_ctrl_new_custom(&hgo->ctrls.handler,
+ &hgo_num_bins_control, NULL);
+
+ hgo->max_rgb = false;
+ hgo->num_bins = 64;
+
+ hgo->histo.entity.subdev.ctrl_handler = &hgo->ctrls.handler;
+
+ /* Initialize the video device and queue for statistics data. */
+ ret = vsp1_histogram_init(vsp1, &hgo->histo, VSP1_ENTITY_HGO, "hgo",
+ &hgo_entity_ops, hgo_mbus_formats,
+ ARRAY_SIZE(hgo_mbus_formats),
+ HGO_DATA_SIZE, V4L2_META_FMT_VSP1_HGO);
+ if (ret < 0) {
+ vsp1_entity_destroy(&hgo->histo.entity);
+ return ERR_PTR(ret);
+ }
+
+ return hgo;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_hgo.h b/drivers/media/platform/vsp1/vsp1_hgo.h
new file mode 100644
index 000000000..6b0c8580e
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgo.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_hgo.h -- R-Car VSP1 Histogram Generator 1D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_HGO_H__
+#define __VSP1_HGO_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_histo.h"
+
+struct vsp1_device;
+
+struct vsp1_hgo {
+ struct vsp1_histogram histo;
+
+ struct {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *max_rgb;
+ struct v4l2_ctrl *num_bins;
+ } ctrls;
+
+ bool max_rgb;
+ unsigned int num_bins;
+};
+
+static inline struct vsp1_hgo *to_hgo(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hgo, histo.entity.subdev);
+}
+
+struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1);
+void vsp1_hgo_frame_end(struct vsp1_entity *hgo);
+
+#endif /* __VSP1_HGO_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_hgt.c b/drivers/media/platform/vsp1/vsp1_hgt.c
new file mode 100644
index 000000000..bb6ce6fdd
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgt.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_hgt.c -- R-Car VSP1 Histogram Generator 2D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Niklas Söderlund (niklas.soderlund@ragnatech.se)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hgt.h"
+
+#define HGT_DATA_SIZE ((2 + 6 * 32) * 4)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hgt_read(struct vsp1_hgt *hgt, u32 reg)
+{
+ return vsp1_read(hgt->histo.entity.vsp1, reg);
+}
+
+static inline void vsp1_hgt_write(struct vsp1_hgt *hgt,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame End Handler
+ */
+
+void vsp1_hgt_frame_end(struct vsp1_entity *entity)
+{
+ struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
+ struct vsp1_histogram_buffer *buf;
+ unsigned int m;
+ unsigned int n;
+ u32 *data;
+
+ buf = vsp1_histogram_buffer_get(&hgt->histo);
+ if (!buf)
+ return;
+
+ data = buf->addr;
+
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_MAXMIN);
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_SUM);
+
+ for (m = 0; m < 6; ++m)
+ for (n = 0; n < 32; ++n)
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_HISTO(m, n));
+
+ vsp1_histogram_buffer_complete(&hgt->histo, buf, HGT_DATA_SIZE);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_HGT_HUE_AREAS (V4L2_CID_USER_BASE | 0x1001)
+
+static int hgt_hue_areas_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ const u8 *values = ctrl->p_new.p_u8;
+ unsigned int i;
+
+ /*
+ * The hardware has constraints on the hue area boundaries beyond the
+ * control min, max and step. The values must match one of the following
+ * expressions.
+ *
+ * 0L <= 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U
+ * 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U <= 0L
+ *
+ * Start by verifying the common part...
+ */
+ for (i = 1; i < (HGT_NUM_HUE_AREAS * 2) - 1; ++i) {
+ if (values[i] > values[i+1])
+ return -EINVAL;
+ }
+
+ /* ... and handle 0L separately. */
+ if (values[0] > values[1] && values[11] > values[0])
+ return -EINVAL;
+
+ return 0;
+}
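+
+/*
+ * Example (illustrative values only): the boundaries { 240, 20, 40, 60, 80,
+ * 100, 120, 140, 160, 180, 200, 220 } are accepted by the check above: the
+ * values 0U through 5U are non-decreasing, and 0L = 240 lies above 5U = 220
+ * so hue area 0 wraps around the top of the range, matching the second
+ * expression (0U <= 1L <= ... <= 5U <= 0L).
+ */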
+
+static int hgt_hue_areas_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_hgt *hgt = container_of(ctrl->handler, struct vsp1_hgt,
+ ctrls);
+
+ memcpy(hgt->hue_areas, ctrl->p_new.p_u8, sizeof(hgt->hue_areas));
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops hgt_hue_areas_ctrl_ops = {
+ .try_ctrl = hgt_hue_areas_try_ctrl,
+ .s_ctrl = hgt_hue_areas_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config hgt_hue_areas = {
+ .ops = &hgt_hue_areas_ctrl_ops,
+ .id = V4L2_CID_VSP1_HGT_HUE_AREAS,
+ .name = "Boundary Values for Hue Area",
+ .type = V4L2_CTRL_TYPE_U8,
+ .min = 0,
+ .max = 255,
+ .def = 0,
+ .step = 1,
+ .dims = { 12 },
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hgt_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int hratio;
+ unsigned int vratio;
+ u8 lower;
+ u8 upper;
+ unsigned int i;
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
+ compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_COMPOSE);
+
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_REGRST, VI6_HGT_REGRST_RCLEA);
+
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_OFFSET,
+ (crop->left << VI6_HGT_OFFSET_HOFFSET_SHIFT) |
+ (crop->top << VI6_HGT_OFFSET_VOFFSET_SHIFT));
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_SIZE,
+ (crop->width << VI6_HGT_SIZE_HSIZE_SHIFT) |
+ (crop->height << VI6_HGT_SIZE_VSIZE_SHIFT));
+
+ mutex_lock(hgt->ctrls.lock);
+ for (i = 0; i < HGT_NUM_HUE_AREAS; ++i) {
+ lower = hgt->hue_areas[i*2 + 0];
+ upper = hgt->hue_areas[i*2 + 1];
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_HUE_AREA(i),
+ (lower << VI6_HGT_HUE_AREA_LOWER_SHIFT) |
+ (upper << VI6_HGT_HUE_AREA_UPPER_SHIFT));
+ }
+ mutex_unlock(hgt->ctrls.lock);
+
+ hratio = crop->width * 2 / compose->width / 3;
+ vratio = crop->height * 2 / compose->height / 3;
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_MODE,
+ (hratio << VI6_HGT_MODE_HRATIO_SHIFT) |
+ (vratio << VI6_HGT_MODE_VRATIO_SHIFT));
+}
+
+static const struct vsp1_entity_operations hgt_entity_ops = {
+ .configure_stream = hgt_configure_stream,
+ .destroy = vsp1_histogram_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const unsigned int hgt_mbus_formats[] = {
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+};
+
+struct vsp1_hgt *vsp1_hgt_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_hgt *hgt;
+ int ret;
+
+ hgt = devm_kzalloc(vsp1->dev, sizeof(*hgt), GFP_KERNEL);
+ if (hgt == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&hgt->ctrls, 1);
+ v4l2_ctrl_new_custom(&hgt->ctrls, &hgt_hue_areas, NULL);
+
+ hgt->histo.entity.subdev.ctrl_handler = &hgt->ctrls;
+
+ /* Initialize the video device and queue for statistics data. */
+ ret = vsp1_histogram_init(vsp1, &hgt->histo, VSP1_ENTITY_HGT, "hgt",
+ &hgt_entity_ops, hgt_mbus_formats,
+ ARRAY_SIZE(hgt_mbus_formats),
+ HGT_DATA_SIZE, V4L2_META_FMT_VSP1_HGT);
+ if (ret < 0) {
+ vsp1_entity_destroy(&hgt->histo.entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&hgt->ctrls);
+
+ return hgt;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_hgt.h b/drivers/media/platform/vsp1/vsp1_hgt.h
new file mode 100644
index 000000000..38ec237bd
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgt.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_hgt.h -- R-Car VSP1 Histogram Generator 2D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Niklas Söderlund (niklas.soderlund@ragnatech.se)
+ */
+#ifndef __VSP1_HGT_H__
+#define __VSP1_HGT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_histo.h"
+
+struct vsp1_device;
+
+#define HGT_NUM_HUE_AREAS 6
+
+struct vsp1_hgt {
+ struct vsp1_histogram histo;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ u8 hue_areas[HGT_NUM_HUE_AREAS * 2];
+};
+
+static inline struct vsp1_hgt *to_hgt(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hgt, histo.entity.subdev);
+}
+
+struct vsp1_hgt *vsp1_hgt_create(struct vsp1_device *vsp1);
+void vsp1_hgt_frame_end(struct vsp1_entity *hgt);
+
+#endif /* __VSP1_HGT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/vsp1/vsp1_histo.c
new file mode 100644
index 000000000..5e15c8ff8
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_histo.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_histo.c -- R-Car VSP1 Histogram API
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_histo.h"
+#include "vsp1_pipe.h"
+
+#define HISTO_MIN_SIZE 4U
+#define HISTO_MAX_SIZE 8192U
+
+/* -----------------------------------------------------------------------------
+ * Buffer Operations
+ */
+
+static inline struct vsp1_histogram_buffer *
+to_vsp1_histogram_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vsp1_histogram_buffer, buf);
+}
+
+struct vsp1_histogram_buffer *
+vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
+{
+ struct vsp1_histogram_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+
+ if (list_empty(&histo->irqqueue))
+ goto done;
+
+ buf = list_first_entry(&histo->irqqueue, struct vsp1_histogram_buffer,
+ queue);
+ list_del(&buf->queue);
+ histo->readout = true;
+
+done:
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+ return buf;
+}
+
+void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ struct vsp1_histogram_buffer *buf,
+ size_t size)
+{
+ struct vsp1_pipeline *pipe = histo->entity.pipe;
+ unsigned long flags;
+
+ /*
+ * The pipeline pointer is guaranteed to be valid as this function is
+ * called from the frame completion interrupt handler, which can only
+ * occur when video streaming is active.
+ */
+ buf->buf.sequence = pipe->sequence;
+ buf->buf.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+ histo->readout = false;
+ wake_up(&histo->wait_queue);
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 Queue Operations
+ */
+
+static int histo_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+
+ if (*nplanes) {
+ if (*nplanes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < histo->data_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = 1;
+ sizes[0] = histo->data_size;
+
+ return 0;
+}
+
+static int histo_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+
+ if (vb->num_planes != 1)
+ return -EINVAL;
+
+ if (vb2_plane_size(vb, 0) < histo->data_size)
+ return -EINVAL;
+
+ buf->addr = vb2_plane_vaddr(vb, 0);
+
+ return 0;
+}
+
+static void histo_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+ list_add_tail(&buf->queue, &histo->irqqueue);
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+static int histo_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ return 0;
+}
+
+static void histo_stop_streaming(struct vb2_queue *vq)
+{
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+ struct vsp1_histogram_buffer *buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+
+ /* Remove all buffers from the IRQ queue. */
+ list_for_each_entry(buffer, &histo->irqqueue, queue)
+ vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&histo->irqqueue);
+
+ /* Wait for the buffer being read out (if any) to complete. */
+ wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock);
+
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+static const struct vb2_ops histo_video_queue_qops = {
+ .queue_setup = histo_queue_setup,
+ .buf_prepare = histo_buffer_prepare,
+ .buf_queue = histo_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = histo_start_streaming,
+ .stop_streaming = histo_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+
+ if (code->pad == HISTO_PAD_SOURCE) {
+ code->code = MEDIA_BUS_FMT_FIXED;
+ return 0;
+ }
+
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, histo->formats,
+ histo->num_formats);
+}
+
+static int histo_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HISTO_MIN_SIZE,
+ HISTO_MIN_SIZE, HISTO_MAX_SIZE,
+ HISTO_MAX_SIZE);
+}
+
+static int histo_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (sel->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&histo->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ crop = vsp1_entity_get_pad_selection(&histo->entity, config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_CROP);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ format = vsp1_entity_get_pad_format(&histo->entity, config,
+ HISTO_PAD_SINK);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad, sel->target);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ mutex_unlock(&histo->entity.lock);
+ return ret;
+}
+
+static int histo_set_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+
+ /* The crop rectangle must be inside the input frame. */
+ format = vsp1_entity_get_pad_format(&histo->entity, config,
+ HISTO_PAD_SINK);
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+ sel->r.width = clamp_t(unsigned int, sel->r.width, HISTO_MIN_SIZE,
+ format->width - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, sel->r.height, HISTO_MIN_SIZE,
+ format->height - sel->r.top);
+
+ /* Set the crop rectangle and reset the compose rectangle. */
+ selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad, V4L2_SEL_TGT_CROP);
+ *selection = sel->r;
+
+ selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ *selection = sel->r;
+
+ return 0;
+}
+
+static int histo_set_compose(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int ratio;
+
+ /*
+	 * The compose rectangle is used to configure downscaling: the top left
+	 * corner is fixed to (0,0) and the size is restricted to 1/1, 1/2 or
+	 * 1/4 of the crop rectangle.
+ */
+ sel->r.left = 0;
+ sel->r.top = 0;
+
+ crop = vsp1_entity_get_pad_selection(&histo->entity, config, sel->pad,
+ V4L2_SEL_TGT_CROP);
+
+ /*
+ * Clamp the width and height to acceptable values first and then
+ * compute the closest rounded dividing ratio.
+ *
+ * Ratio Rounded ratio
+ * --------------------------
+ * [1.0 1.5[ 1
+ * [1.5 3.0[ 2
+ * [3.0 4.0] 4
+ *
+ * The rounded ratio can be computed using
+ *
+ * 1 << (ceil(ratio * 2) / 3)
+ */
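+	/*
+	 * Worked example (illustrative numbers only): with a 1024 pixel wide
+	 * crop, a requested compose width of 400 gives 1024 * 2 / 400 / 3 = 1,
+	 * i.e. a rounded ratio of 2, so the compose width is rounded to 512.
+	 */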
+ sel->r.width = clamp(sel->r.width, crop->width / 4, crop->width);
+ ratio = 1 << (crop->width * 2 / sel->r.width / 3);
+ sel->r.width = crop->width / ratio;
+
+ sel->r.height = clamp(sel->r.height, crop->height / 4, crop->height);
+ ratio = 1 << (crop->height * 2 / sel->r.height / 3);
+ sel->r.height = crop->height / ratio;
+
+ compose = vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ *compose = sel->r;
+
+ return 0;
+}
+
+static int histo_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_subdev_pad_config *config;
+ int ret;
+
+ if (sel->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&histo->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (sel->target == V4L2_SEL_TGT_CROP)
+ ret = histo_set_crop(subdev, config, sel);
+ else if (sel->target == V4L2_SEL_TGT_COMPOSE)
+ ret = histo_set_compose(subdev, config, sel);
+ else
+ ret = -EINVAL;
+
+done:
+ mutex_unlock(&histo->entity.lock);
+ return ret;
+}
+
+static int histo_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ if (fmt->pad == HISTO_PAD_SOURCE) {
+ fmt->format.code = MEDIA_BUS_FMT_FIXED;
+ fmt->format.width = 0;
+ fmt->format.height = 0;
+ fmt->format.field = V4L2_FIELD_NONE;
+ fmt->format.colorspace = V4L2_COLORSPACE_RAW;
+ return 0;
+ }
+
+ return vsp1_subdev_get_pad_format(subdev, cfg, fmt);
+}
+
+static int histo_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+
+ if (fmt->pad != HISTO_PAD_SINK)
+ return histo_get_format(subdev, cfg, fmt);
+
+ return vsp1_subdev_set_pad_format(subdev, cfg, fmt,
+ histo->formats, histo->num_formats,
+ HISTO_MIN_SIZE, HISTO_MIN_SIZE,
+ HISTO_MAX_SIZE, HISTO_MAX_SIZE);
+}
+
+static const struct v4l2_subdev_pad_ops histo_pad_ops = {
+ .enum_mbus_code = histo_enum_mbus_code,
+ .enum_frame_size = histo_enum_frame_size,
+ .get_fmt = histo_get_format,
+ .set_fmt = histo_set_format,
+ .get_selection = histo_get_selection,
+ .set_selection = histo_set_selection,
+};
+
+static const struct v4l2_subdev_ops histo_ops = {
+ .pad = &histo_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int histo_v4l2_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_VIDEO_OUTPUT_MPLANE
+ | V4L2_CAP_META_CAPTURE;
+ cap->device_caps = V4L2_CAP_META_CAPTURE
+ | V4L2_CAP_STREAMING;
+
+ strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
+ strlcpy(cap->card, histo->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(histo->entity.vsp1->dev));
+
+ return 0;
+}
+
+static int histo_v4l2_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+
+ if (f->index > 0 || f->type != histo->queue.type)
+ return -EINVAL;
+
+ f->pixelformat = histo->meta_format;
+
+ return 0;
+}
+
+static int histo_v4l2_get_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+ struct v4l2_meta_format *meta = &format->fmt.meta;
+
+ if (format->type != histo->queue.type)
+ return -EINVAL;
+
+ memset(meta, 0, sizeof(*meta));
+
+ meta->dataformat = histo->meta_format;
+ meta->buffersize = histo->data_size;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops histo_v4l2_ioctl_ops = {
+ .vidioc_querycap = histo_v4l2_querycap,
+ .vidioc_enum_fmt_meta_cap = histo_v4l2_enum_format,
+ .vidioc_g_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_s_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_try_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 File Operations
+ */
+
+static const struct v4l2_file_operations histo_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static void vsp1_histogram_cleanup(struct vsp1_histogram *histo)
+{
+ if (video_is_registered(&histo->video))
+ video_unregister_device(&histo->video);
+
+ media_entity_cleanup(&histo->video.entity);
+}
+
+void vsp1_histogram_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(&entity->subdev);
+
+ vsp1_histogram_cleanup(histo);
+}
+
+int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
+ enum vsp1_entity_type type, const char *name,
+ const struct vsp1_entity_operations *ops,
+ const unsigned int *formats, unsigned int num_formats,
+ size_t data_size, u32 meta_format)
+{
+ int ret;
+
+ histo->formats = formats;
+ histo->num_formats = num_formats;
+ histo->data_size = data_size;
+ histo->meta_format = meta_format;
+
+ histo->pad.flags = MEDIA_PAD_FL_SINK;
+ histo->video.vfl_dir = VFL_DIR_RX;
+
+ mutex_init(&histo->lock);
+ spin_lock_init(&histo->irqlock);
+ INIT_LIST_HEAD(&histo->irqqueue);
+ init_waitqueue_head(&histo->wait_queue);
+
+ /* Initialize the VSP entity... */
+ histo->entity.ops = ops;
+ histo->entity.type = type;
+
+ ret = vsp1_entity_init(vsp1, &histo->entity, name, 2, &histo_ops,
+ MEDIA_ENT_F_PROC_VIDEO_STATISTICS);
+ if (ret < 0)
+ return ret;
+
+ /* ... and the media entity... */
+ ret = media_entity_pads_init(&histo->video.entity, 1, &histo->pad);
+ if (ret < 0)
+ return ret;
+
+ /* ... and the video node... */
+ histo->video.v4l2_dev = &vsp1->v4l2_dev;
+ histo->video.fops = &histo_v4l2_fops;
+ snprintf(histo->video.name, sizeof(histo->video.name),
+ "%s histo", histo->entity.subdev.name);
+ histo->video.vfl_type = VFL_TYPE_GRABBER;
+ histo->video.release = video_device_release_empty;
+ histo->video.ioctl_ops = &histo_v4l2_ioctl_ops;
+
+ video_set_drvdata(&histo->video, histo);
+
+ /* ... and the buffers queue... */
+ histo->queue.type = V4L2_BUF_TYPE_META_CAPTURE;
+ histo->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ histo->queue.lock = &histo->lock;
+ histo->queue.drv_priv = histo;
+ histo->queue.buf_struct_size = sizeof(struct vsp1_histogram_buffer);
+ histo->queue.ops = &histo_video_queue_qops;
+ histo->queue.mem_ops = &vb2_vmalloc_memops;
+ histo->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ histo->queue.dev = vsp1->dev;
+ ret = vb2_queue_init(&histo->queue);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "failed to initialize vb2 queue\n");
+ goto error;
+ }
+
+ /* ... and register the video device. */
+ histo->video.queue = &histo->queue;
+ ret = video_register_device(&histo->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ vsp1_histogram_cleanup(histo);
+ return ret;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_histo.h b/drivers/media/platform/vsp1/vsp1_histo.h
new file mode 100644
index 000000000..06f029846
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_histo.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_histo.h -- R-Car VSP1 Histogram API
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_HISTO_H__
+#define __VSP1_HISTO_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define HISTO_PAD_SINK 0
+#define HISTO_PAD_SOURCE 1
+
+struct vsp1_histogram_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ void *addr;
+};
+
+struct vsp1_histogram {
+ struct vsp1_entity entity;
+ struct video_device video;
+ struct media_pad pad;
+
+ const u32 *formats;
+ unsigned int num_formats;
+ size_t data_size;
+ u32 meta_format;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+
+ spinlock_t irqlock;
+ struct list_head irqqueue;
+
+ wait_queue_head_t wait_queue;
+ bool readout;
+};
+
+static inline struct vsp1_histogram *vdev_to_histo(struct video_device *vdev)
+{
+ return container_of(vdev, struct vsp1_histogram, video);
+}
+
+static inline struct vsp1_histogram *subdev_to_histo(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_histogram, entity.subdev);
+}
+
+int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
+ enum vsp1_entity_type type, const char *name,
+ const struct vsp1_entity_operations *ops,
+ const unsigned int *formats, unsigned int num_formats,
+ size_t data_size, u32 meta_format);
+void vsp1_histogram_destroy(struct vsp1_entity *entity);
+
+struct vsp1_histogram_buffer *
+vsp1_histogram_buffer_get(struct vsp1_histogram *histo);
+void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ struct vsp1_histogram_buffer *buf,
+ size_t size);
+
+#endif /* __VSP1_HISTO_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
new file mode 100644
index 000000000..39ab2e0c7
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_hsit.c -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hsit.h"
+
+#define HSIT_MIN_SIZE 4U
+#define HSIT_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_hsit_write(struct vsp1_hsit *hsit,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+	if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) ||
+ (code->pad == HSIT_PAD_SOURCE && hsit->inverse))
+ code->code = MEDIA_BUS_FMT_ARGB8888_1X32;
+ else
+ code->code = MEDIA_BUS_FMT_AHSV8888_1X32;
+
+ return 0;
+}
+
+static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HSIT_MIN_SIZE,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE,
+ HSIT_MAX_SIZE);
+}
+
+static int hsit_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&hsit->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&hsit->entity, cfg, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
+
+ if (fmt->pad == HSIT_PAD_SOURCE) {
+ /*
+ * The HST and HSI output format code and resolution can't be
+ * modified.
+ */
+ fmt->format = *format;
+ goto done;
+ }
+
+ format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
+ : MEDIA_BUS_FMT_ARGB8888_1X32;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&hsit->entity, config,
+ HSIT_PAD_SOURCE);
+ *format = fmt->format;
+ format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
+ : MEDIA_BUS_FMT_AHSV8888_1X32;
+
+done:
+ mutex_unlock(&hsit->entity.lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops hsit_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = hsit_enum_mbus_code,
+ .enum_frame_size = hsit_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = hsit_set_format,
+};
+
+static const struct v4l2_subdev_ops hsit_ops = {
+ .pad = &hsit_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hsit_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
+
+ if (hsit->inverse)
+ vsp1_hsit_write(hsit, dlb, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
+ else
+ vsp1_hsit_write(hsit, dlb, VI6_HST_CTRL, VI6_HST_CTRL_EN);
+}
+
+static const struct vsp1_entity_operations hsit_entity_ops = {
+ .configure_stream = hsit_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
+{
+ struct vsp1_hsit *hsit;
+ int ret;
+
+ hsit = devm_kzalloc(vsp1->dev, sizeof(*hsit), GFP_KERNEL);
+ if (hsit == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ hsit->inverse = inverse;
+
+ hsit->entity.ops = &hsit_entity_ops;
+
+ if (inverse)
+ hsit->entity.type = VSP1_ENTITY_HSI;
+ else
+ hsit->entity.type = VSP1_ENTITY_HST;
+
+ ret = vsp1_entity_init(vsp1, &hsit->entity, inverse ? "hsi" : "hst",
+ 2, &hsit_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return hsit;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.h b/drivers/media/platform/vsp1/vsp1_hsit.h
new file mode 100644
index 000000000..a658b1aa4
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_hsit.h -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_HSIT_H__
+#define __VSP1_HSIT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define HSIT_PAD_SINK 0
+#define HSIT_PAD_SOURCE 1
+
+struct vsp1_hsit {
+ struct vsp1_entity entity;
+ bool inverse;
+};
+
+static inline struct vsp1_hsit *to_hsit(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hsit, entity.subdev);
+}
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse);
+
+#endif /* __VSP1_HSIT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
new file mode 100644
index 000000000..0cb63244b
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_lif.c -- R-Car VSP1 LCD Controller Interface
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_lif.h"
+
+#define LIF_MIN_SIZE 2U
+#define LIF_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_lif_write(struct vsp1_lif *lif,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + lif->entity.index * VI6_LIF_OFFSET,
+ data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const unsigned int lif_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, lif_codes,
+ ARRAY_SIZE(lif_codes));
+}
+
+static int lif_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LIF_MIN_SIZE,
+ LIF_MIN_SIZE, LIF_MAX_SIZE,
+ LIF_MAX_SIZE);
+}
+
+static int lif_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, cfg, fmt, lif_codes,
+ ARRAY_SIZE(lif_codes),
+ LIF_MIN_SIZE, LIF_MIN_SIZE,
+ LIF_MAX_SIZE, LIF_MAX_SIZE);
+}
+
+static const struct v4l2_subdev_pad_ops lif_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = lif_enum_mbus_code,
+ .enum_frame_size = lif_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = lif_set_format,
+};
+
+static const struct v4l2_subdev_ops lif_ops = {
+ .pad = &lif_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lif_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_lif *lif = to_lif(&entity->subdev);
+ unsigned int hbth = 1300;
+ unsigned int obth = 400;
+ unsigned int lbth = 200;
+
+ format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
+ LIF_PAD_SOURCE);
+
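+	/*
+	 * Cap the output buffer threshold at the frame size expressed in
+	 * 2-pixel units (width rounded up), minus a small margin.
+	 */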
+ obth = min(obth, (format->width + 1) / 2 * format->height - 4);
+
+ vsp1_lif_write(lif, dlb, VI6_LIF_CSBTH,
+ (hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
+ (lbth << VI6_LIF_CSBTH_LBTH_SHIFT));
+
+ vsp1_lif_write(lif, dlb, VI6_LIF_CTRL,
+ (obth << VI6_LIF_CTRL_OBTH_SHIFT) |
+ (format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
+ VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
+
+ /*
+ * On R-Car V3M the LIF0 buffer attribute register has to be set to a
+ * non-default value to guarantee proper operation (otherwise artifacts
+ * may appear on the output). The value required by the manual is not
+ * explained but is likely a buffer size or threshold.
+ */
+ if ((entity->vsp1->version & VI6_IP_VERSION_MASK) ==
+ (VI6_IP_VERSION_MODEL_VSPD_V3 | VI6_IP_VERSION_SOC_V3M))
+ vsp1_lif_write(lif, dlb, VI6_LIF_LBA,
+ VI6_LIF_LBA_LBA0 |
+ (1536 << VI6_LIF_LBA_LBA1_SHIFT));
+}
+
+static const struct vsp1_entity_operations lif_entity_ops = {
+ .configure_stream = lif_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_lif *lif;
+ int ret;
+
+ lif = devm_kzalloc(vsp1->dev, sizeof(*lif), GFP_KERNEL);
+ if (lif == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lif->entity.ops = &lif_entity_ops;
+ lif->entity.type = VSP1_ENTITY_LIF;
+ lif->entity.index = index;
+
+ /*
+ * The LIF is never exposed to userspace, but media entity registration
+ * requires a function to be set. Use PROC_VIDEO_PIXEL_FORMATTER just to
+ * avoid triggering a WARN_ON(); the value won't be seen anywhere.
+ */
+ ret = vsp1_entity_init(vsp1, &lif->entity, "lif", 2, &lif_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return lif;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_lif.h b/drivers/media/platform/vsp1/vsp1_lif.h
new file mode 100644
index 000000000..71a4eda9c
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lif.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_lif.h -- R-Car VSP1 LCD Controller Interface
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_LIF_H__
+#define __VSP1_LIF_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LIF_PAD_SINK 0
+#define LIF_PAD_SOURCE 1
+
+struct vsp1_lif {
+ struct vsp1_entity entity;
+};
+
+static inline struct vsp1_lif *to_lif(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_lif, entity.subdev);
+}
+
+struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index);
+
+#endif /* __VSP1_LIF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
new file mode 100644
index 000000000..64c48d945
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_lut.c -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_lut.h"
+
+#define LUT_MIN_SIZE 4U
+#define LUT_MAX_SIZE 8190U
+
+#define LUT_SIZE 256
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_lut_write(struct vsp1_lut *lut,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_LUT_TABLE (V4L2_CID_USER_BASE | 0x1001)
+
+static int lut_set_table(struct vsp1_lut *lut, struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_dl_body *dlb;
+ unsigned int i;
+
+ dlb = vsp1_dl_body_get(lut->pool);
+ if (!dlb)
+ return -ENOMEM;
+
+ for (i = 0; i < LUT_SIZE; ++i)
+ vsp1_dl_body_write(dlb, VI6_LUT_TABLE + 4 * i,
+ ctrl->p_new.p_u32[i]);
+
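+	/*
+	 * Publish the new table body under the lock; lut_configure_frame()
+	 * will pick it up and attach it to the next display list.
+	 */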
+ spin_lock_irq(&lut->lock);
+ swap(lut->lut, dlb);
+ spin_unlock_irq(&lut->lock);
+
+ vsp1_dl_body_put(dlb);
+ return 0;
+}
+
+static int lut_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_lut *lut =
+ container_of(ctrl->handler, struct vsp1_lut, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_LUT_TABLE:
+ lut_set_table(lut, ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops lut_ctrl_ops = {
+ .s_ctrl = lut_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config lut_table_control = {
+ .ops = &lut_ctrl_ops,
+ .id = V4L2_CID_VSP1_LUT_TABLE,
+ .name = "Look-Up Table",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0x00ffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { LUT_SIZE },
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static const unsigned int lut_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, lut_codes,
+ ARRAY_SIZE(lut_codes));
+}
+
+static int lut_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LUT_MIN_SIZE,
+ LUT_MIN_SIZE, LUT_MAX_SIZE,
+ LUT_MAX_SIZE);
+}
+
+static int lut_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, cfg, fmt, lut_codes,
+ ARRAY_SIZE(lut_codes),
+ LUT_MIN_SIZE, LUT_MIN_SIZE,
+ LUT_MAX_SIZE, LUT_MAX_SIZE);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops lut_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = lut_enum_mbus_code,
+ .enum_frame_size = lut_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = lut_set_format,
+};
+
+static const struct v4l2_subdev_ops lut_ops = {
+ .pad = &lut_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lut_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_lut *lut = to_lut(&entity->subdev);
+
+ vsp1_lut_write(lut, dlb, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+}
+
+static void lut_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_lut *lut = to_lut(&entity->subdev);
+ struct vsp1_dl_body *lut_dlb;
+ unsigned long flags;
+
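+	/* Atomically take ownership of any table update queued by lut_set_table(). */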
+ spin_lock_irqsave(&lut->lock, flags);
+ lut_dlb = lut->lut;
+ lut->lut = NULL;
+ spin_unlock_irqrestore(&lut->lock, flags);
+
+ if (lut_dlb) {
+ vsp1_dl_list_add_body(dl, lut_dlb);
+
+ /* Release our local reference. */
+ vsp1_dl_body_put(lut_dlb);
+ }
+}
+
+static void lut_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_lut *lut = to_lut(&entity->subdev);
+
+ vsp1_dl_body_pool_destroy(lut->pool);
+}
+
+static const struct vsp1_entity_operations lut_entity_ops = {
+ .configure_stream = lut_configure_stream,
+ .configure_frame = lut_configure_frame,
+ .destroy = lut_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_lut *lut;
+ int ret;
+
+ lut = devm_kzalloc(vsp1->dev, sizeof(*lut), GFP_KERNEL);
+ if (lut == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&lut->lock);
+
+ lut->entity.ops = &lut_entity_ops;
+ lut->entity.type = VSP1_ENTITY_LUT;
+
+ ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops,
+ MEDIA_ENT_F_PROC_VIDEO_LUT);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /*
+ * Pre-allocate a body pool with 3 bodies, allowing a userspace update
+ * before the hardware has committed a previous set of tables, and
+ * handling both the queued and pending display list entries.
+ */
+ lut->pool = vsp1_dl_body_pool_create(vsp1, 3, LUT_SIZE, 0);
+ if (!lut->pool)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&lut->ctrls, 1);
+ v4l2_ctrl_new_custom(&lut->ctrls, &lut_table_control, NULL);
+
+ lut->entity.subdev.ctrl_handler = &lut->ctrls;
+
+ if (lut->ctrls.error) {
+ dev_err(vsp1->dev, "lut: failed to initialize controls\n");
+ ret = lut->ctrls.error;
+ vsp1_entity_destroy(&lut->entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&lut->ctrls);
+
+ return lut;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/vsp1/vsp1_lut.h
new file mode 100644
index 000000000..8cb0df1b7
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_lut.h -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_LUT_H__
+#define __VSP1_LUT_H__
+
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LUT_PAD_SINK 0
+#define LUT_PAD_SOURCE 1
+
+struct vsp1_lut {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ spinlock_t lock;
+ struct vsp1_dl_body *lut;
+ struct vsp1_dl_body_pool *pool;
+};
+
+static inline struct vsp1_lut *to_lut(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_lut, entity.subdev);
+}
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_LUT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
new file mode 100644
index 000000000..54ff539ff
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_pipe.c -- R-Car VSP1 Pipeline
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uds.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper Functions
+ */
+
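+/*
+ * Each entry lists, in struct vsp1_format_info field order: fourcc, media
+ * bus code, hardware format, data swap flags, plane count, bits per pixel,
+ * swap_yc, swap_uv, hsub, vsub and the alpha flag.
+ */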
+static const struct vsp1_format_info vsp1_video_formats[] = {
+ { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 8, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, false, 2, 1, false },
+ { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, true, 2, 1, false },
+ { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 2, false },
+ { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 2, false },
+ { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 2, 2, false },
+ { V4L2_PIX_FMT_YVU420M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 2, 2, false },
+ { V4L2_PIX_FMT_YUV422M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_YVU422M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUV444M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_YVU444M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 1, 1, false },
+};
+
+/**
+ * vsp1_get_format_info - Retrieve format information for a 4CC
+ * @vsp1: the VSP1 device
+ * @fourcc: the format 4CC
+ *
+ * Return a pointer to the format information structure corresponding to the
+ * given V4L2 format 4CC, or NULL if no corresponding format can be found.
+ */
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+ u32 fourcc)
+{
+ unsigned int i;
+
+ /* Special case, the VYUY and HSV formats are supported on Gen2 only. */
+ if (vsp1->info->gen != 2) {
+ switch (fourcc) {
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_HSV24:
+ case V4L2_PIX_FMT_HSV32:
+ return NULL;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
+ const struct vsp1_format_info *info = &vsp1_video_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Management
+ */
+
+void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_entity *entity;
+ unsigned int i;
+
+ if (pipe->brx) {
+ struct vsp1_brx *brx = to_brx(&pipe->brx->subdev);
+
+ for (i = 0; i < ARRAY_SIZE(brx->inputs); ++i)
+ brx->inputs[i].rpf = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i)
+ pipe->inputs[i] = NULL;
+
+ pipe->output = NULL;
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe)
+ entity->pipe = NULL;
+
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ pipe->buffers_ready = 0;
+ pipe->num_inputs = 0;
+ pipe->brx = NULL;
+ pipe->hgo = NULL;
+ pipe->hgt = NULL;
+ pipe->lif = NULL;
+ pipe->uds = NULL;
+}
+
+void vsp1_pipeline_init(struct vsp1_pipeline *pipe)
+{
+ mutex_init(&pipe->lock);
+ spin_lock_init(&pipe->irqlock);
+ init_waitqueue_head(&pipe->wq);
+ kref_init(&pipe->kref);
+
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+}
+
+/* Must be called with the pipe irqlock held. */
+void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+
+ if (pipe->state == VSP1_PIPELINE_STOPPED) {
+ vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index),
+ VI6_CMD_STRCMD);
+ pipe->state = VSP1_PIPELINE_RUNNING;
+ }
+
+ pipe->buffers_ready = 0;
+}
+
+bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
+{
+ unsigned long flags;
+ bool stopped;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ stopped = pipe->state == VSP1_PIPELINE_STOPPED;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return stopped;
+}
+
+int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ struct vsp1_entity *entity;
+ unsigned long flags;
+ int ret;
+
+ if (pipe->lif) {
+ /*
+ * When using display lists in continuous frame mode the only
+ * way to stop the pipeline is to reset the hardware.
+ */
+ ret = vsp1_reset_wpf(vsp1, pipe->output->entity.index);
+ if (ret == 0) {
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+ } else {
+ /* Otherwise just request a stop and wait. */
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->state == VSP1_PIPELINE_RUNNING)
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+ msecs_to_jiffies(500));
+ ret = ret == 0 ? -ETIMEDOUT : 0;
+ }
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ if (entity->route && entity->route->reg)
+ vsp1_write(vsp1, entity->route->reg,
+ VI6_DPR_NODE_UNUSED);
+ }
+
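+	/*
+	 * Detach the histogram units from their sampling points by selecting
+	 * the unused DPR node.
+	 */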
+ if (pipe->hgo)
+ vsp1_write(vsp1, VI6_DPR_HGO_SMPPT,
+ (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ if (pipe->hgt)
+ vsp1_write(vsp1, VI6_DPR_HGT_SMPPT,
+ (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
+
+ return ret;
+}
+
+bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
+{
+ unsigned int mask;
+
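+	/*
+	 * RPF inputs occupy bits 1..num_inputs; bit 0 is the output (WPF)
+	 * buffer, which is not needed when the pipeline feeds a LIF.
+	 */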
+ mask = ((1 << pipe->num_inputs) - 1) << 1;
+ if (!pipe->lif)
+ mask |= 1 << 0;
+
+ return pipe->buffers_ready == mask;
+}
+
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
+{
+ unsigned int flags;
+
+ if (pipe == NULL)
+ return;
+
+ /*
+ * If the DL commit raced with the frame end interrupt, the commit ends
+ * up being postponed by one frame. The returned flags tell whether the
+ * active frame was finished or postponed.
+ */
+ flags = vsp1_dlm_irq_frame_end(pipe->output->dlm);
+
+ if (pipe->hgo)
+ vsp1_hgo_frame_end(pipe->hgo);
+
+ if (pipe->hgt)
+ vsp1_hgt_frame_end(pipe->hgt);
+
+ /*
+ * Regardless of frame completion we still need to notify the pipe
+ * frame_end to account for vblank events.
+ */
+ if (pipe->frame_end)
+ pipe->frame_end(pipe, flags);
+
+ pipe->sequence++;
+}
+
+/*
+ * Propagate the alpha value through the pipeline.
+ *
+ * As the UDS has restricted scaling capabilities when the alpha component needs
+ * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
+ * value. The UDS then outputs a fixed alpha value which needs to be programmed
+ * from the input RPF alpha.
+ */
+void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb, unsigned int alpha)
+{
+ if (!pipe->uds)
+ return;
+
+ /*
+ * The BRU and BRS background color has a fixed alpha value set to 255,
+ * the output alpha value is thus always equal to 255.
+ */
+ if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
+ pipe->uds_input->type == VSP1_ENTITY_BRS)
+ alpha = 255;
+
+ vsp1_uds_set_alpha(pipe->uds, dlb, alpha);
+}
+
+/*
+ * Propagate the partition calculations through the pipeline
+ *
+ * Work backwards through the pipe, allowing each entity to update the partition
+ * parameters based on its configuration, and the entity connected to its
+ * source. Each entity must produce the partition required for the previous
+ * entity in the pipeline.
+ */
+void vsp1_pipeline_propagate_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int index,
+ struct vsp1_partition_window *window)
+{
+ struct vsp1_entity *entity;
+
+ list_for_each_entry_reverse(entity, &pipe->entities, list_pipe) {
+ if (entity->ops->partition)
+ entity->ops->partition(entity, pipe, partition, index,
+ window);
+ }
+}
+
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
new file mode 100644
index 000000000..ae646c9ef
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_pipe.h -- R-Car VSP1 Pipeline
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_PIPE_H__
+#define __VSP1_PIPE_H__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+
+struct vsp1_dl_list;
+struct vsp1_rwpf;
+
+/*
+ * struct vsp1_format_info - VSP1 video format description
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @mbus: media bus format code
+ * @hwfmt: VSP1 hardware format
+ * @swap: swap register control
+ * @planes: number of planes
+ * @bpp: bits per pixel
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @alpha: has an alpha channel
+ */
+struct vsp1_format_info {
+ u32 fourcc;
+ unsigned int mbus;
+ unsigned int hwfmt;
+ unsigned int swap;
+ unsigned int planes;
+ unsigned int bpp[3];
+ bool swap_yc;
+ bool swap_uv;
+ unsigned int hsub;
+ unsigned int vsub;
+ bool alpha;
+};
+
+enum vsp1_pipeline_state {
+ VSP1_PIPELINE_STOPPED,
+ VSP1_PIPELINE_RUNNING,
+ VSP1_PIPELINE_STOPPING,
+};
+
+/*
+ * struct vsp1_partition_window - Partition window coordinates
+ * @left: horizontal coordinate of the partition start in pixels relative to the
+ * left edge of the image
+ * @width: partition width in pixels
+ */
+struct vsp1_partition_window {
+ unsigned int left;
+ unsigned int width;
+};
+
+/*
+ * struct vsp1_partition - A description of a slice for the partition algorithm
+ * @rpf: The RPF partition window configuration
+ * @uds_sink: The UDS input partition window configuration
+ * @uds_source: The UDS output partition window configuration
+ * @sru: The SRU partition window configuration
+ * @wpf: The WPF partition window configuration
+ */
+struct vsp1_partition {
+ struct vsp1_partition_window rpf;
+ struct vsp1_partition_window uds_sink;
+ struct vsp1_partition_window uds_source;
+ struct vsp1_partition_window sru;
+ struct vsp1_partition_window wpf;
+};
+
+/*
+ * struct vsp1_pipeline - A VSP1 hardware pipeline
+ * @pipe: the media pipeline
+ * @irqlock: protects the pipeline state
+ * @state: current state
+ * @wq: wait queue to wait for state change completion
+ * @frame_end: frame end interrupt handler
+ * @lock: protects the pipeline use count and stream count
+ * @kref: pipeline reference count
+ * @stream_count: number of streaming video nodes
+ * @buffers_ready: bitmask of RPFs and WPFs with at least one buffer available
+ * @sequence: frame sequence number
+ * @num_inputs: number of RPFs
+ * @inputs: array of RPFs in the pipeline (indexed by RPF index)
+ * @output: WPF at the output of the pipeline
+ * @brx: BRx entity, if present
+ * @hgo: HGO entity, if present
+ * @hgt: HGT entity, if present
+ * @lif: LIF entity, if present
+ * @uds: UDS entity, if present
+ * @uds_input: entity at the input of the UDS, if the UDS is present
+ * @entities: list of entities in the pipeline
+ * @stream_config: cached stream configuration for video pipelines
+ * @configured: when false the @stream_config shall be written to the hardware
+ * @interlaced: True when the pipeline is configured in interlaced mode
+ * @partitions: The number of partitions used to process one frame
+ * @partition: The current partition for configuration to process
+ * @part_table: The pre-calculated partitions used by the pipeline
+ */
+struct vsp1_pipeline {
+ struct media_pipeline pipe;
+
+ spinlock_t irqlock;
+ enum vsp1_pipeline_state state;
+ wait_queue_head_t wq;
+
+ void (*frame_end)(struct vsp1_pipeline *pipe, unsigned int completion);
+
+ struct mutex lock;
+ struct kref kref;
+ unsigned int stream_count;
+ unsigned int buffers_ready;
+ unsigned int sequence;
+
+ unsigned int num_inputs;
+ struct vsp1_rwpf *inputs[VSP1_MAX_RPF];
+ struct vsp1_rwpf *output;
+ struct vsp1_entity *brx;
+ struct vsp1_entity *hgo;
+ struct vsp1_entity *hgt;
+ struct vsp1_entity *lif;
+ struct vsp1_entity *uds;
+ struct vsp1_entity *uds_input;
+
+ /*
+ * The order of this list must be identical to the order of the entities
+ * in the pipeline, as it is assumed by the partition algorithm that we
+ * can walk this list in sequence.
+ */
+ struct list_head entities;
+
+ struct vsp1_dl_body *stream_config;
+ bool configured;
+ bool interlaced;
+
+ unsigned int partitions;
+ struct vsp1_partition *partition;
+ struct vsp1_partition *part_table;
+};
+
+void vsp1_pipeline_reset(struct vsp1_pipeline *pipe);
+void vsp1_pipeline_init(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_run(struct vsp1_pipeline *pipe);
+bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe);
+int vsp1_pipeline_stop(struct vsp1_pipeline *pipe);
+bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb,
+ unsigned int alpha);
+
+void vsp1_pipeline_propagate_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int index,
+ struct vsp1_partition_window *window);
+
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+ u32 fourcc);
+
+#endif /* __VSP1_PIPE_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
new file mode 100644
index 000000000..f6e415709
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -0,0 +1,851 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_regs.h -- R-Car VSP1 Registers Definitions
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#ifndef __VSP1_REGS_H__
+#define __VSP1_REGS_H__
+
+/* -----------------------------------------------------------------------------
+ * General Control Registers
+ */
+
+#define VI6_CMD(n) (0x0000 + (n) * 4)
+#define VI6_CMD_UPDHDR (1 << 4)
+#define VI6_CMD_STRCMD (1 << 0)
+
+#define VI6_CLK_DCSWT 0x0018
+#define VI6_CLK_DCSWT_CSTPW_MASK (0xff << 8)
+#define VI6_CLK_DCSWT_CSTPW_SHIFT 8
+#define VI6_CLK_DCSWT_CSTRW_MASK (0xff << 0)
+#define VI6_CLK_DCSWT_CSTRW_SHIFT 0
+
+#define VI6_SRESET 0x0028
+#define VI6_SRESET_SRTS(n) (1 << (n))
+
+#define VI6_STATUS 0x0038
+#define VI6_STATUS_FLD_STD(n) (1 << ((n) + 28))
+#define VI6_STATUS_SYS_ACT(n) (1 << ((n) + 8))
+
+#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
+#define VI6_WFP_IRQ_ENB_DFEE (1 << 1)
+#define VI6_WFP_IRQ_ENB_FREE (1 << 0)
+
+#define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12)
+#define VI6_WFP_IRQ_STA_DFE (1 << 1)
+#define VI6_WFP_IRQ_STA_FRE (1 << 0)
+
+#define VI6_DISP_IRQ_ENB 0x0078
+#define VI6_DISP_IRQ_ENB_DSTE (1 << 8)
+#define VI6_DISP_IRQ_ENB_MAEE (1 << 5)
+#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n))
+
+#define VI6_DISP_IRQ_STA 0x007c
+#define VI6_DISP_IRQ_STA_DST (1 << 8)
+#define VI6_DISP_IRQ_STA_MAE (1 << 5)
+#define VI6_DISP_IRQ_STA_LNE(n) (1 << (n))
+
+#define VI6_WPF_LINE_COUNT(n) (0x0084 + (n) * 4)
+#define VI6_WPF_LINE_COUNT_MASK (0x1fffff << 0)
+
+/* -----------------------------------------------------------------------------
+ * Display List Control Registers
+ */
+
+#define VI6_DL_CTRL 0x0100
+#define VI6_DL_CTRL_AR_WAIT_MASK (0xffff << 16)
+#define VI6_DL_CTRL_AR_WAIT_SHIFT 16
+#define VI6_DL_CTRL_DC2 (1 << 12)
+#define VI6_DL_CTRL_DC1 (1 << 8)
+#define VI6_DL_CTRL_DC0 (1 << 4)
+#define VI6_DL_CTRL_CFM0 (1 << 2)
+#define VI6_DL_CTRL_NH0 (1 << 1)
+#define VI6_DL_CTRL_DLE (1 << 0)
+
+#define VI6_DL_HDR_ADDR(n) (0x0104 + (n) * 4)
+
+#define VI6_DL_SWAP 0x0114
+#define VI6_DL_SWAP_LWS (1 << 2)
+#define VI6_DL_SWAP_WDS (1 << 1)
+#define VI6_DL_SWAP_BTS (1 << 0)
+
+#define VI6_DL_EXT_CTRL(n) (0x011c + (n) * 36)
+#define VI6_DL_EXT_CTRL_NWE (1 << 16)
+#define VI6_DL_EXT_CTRL_POLINT_MASK (0x3f << 8)
+#define VI6_DL_EXT_CTRL_POLINT_SHIFT 8
+#define VI6_DL_EXT_CTRL_DLPRI (1 << 5)
+#define VI6_DL_EXT_CTRL_EXPRI (1 << 4)
+#define VI6_DL_EXT_CTRL_EXT (1 << 0)
+
+#define VI6_DL_EXT_AUTOFLD_INT BIT(0)
+
+#define VI6_DL_BODY_SIZE 0x0120
+#define VI6_DL_BODY_SIZE_UPD (1 << 24)
+#define VI6_DL_BODY_SIZE_BS_MASK (0x1ffff << 0)
+#define VI6_DL_BODY_SIZE_BS_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * RPF Control Registers
+ */
+
+#define VI6_RPF_OFFSET 0x100
+
+#define VI6_RPF_SRC_BSIZE 0x0300
+#define VI6_RPF_SRC_BSIZE_BHSIZE_MASK (0x1fff << 16)
+#define VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT 16
+#define VI6_RPF_SRC_BSIZE_BVSIZE_MASK (0x1fff << 0)
+#define VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT 0
+
+#define VI6_RPF_SRC_ESIZE 0x0304
+#define VI6_RPF_SRC_ESIZE_EHSIZE_MASK (0x1fff << 16)
+#define VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT 16
+#define VI6_RPF_SRC_ESIZE_EVSIZE_MASK (0x1fff << 0)
+#define VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT 0
+
+#define VI6_RPF_INFMT 0x0308
+#define VI6_RPF_INFMT_VIR (1 << 28)
+#define VI6_RPF_INFMT_CIPM (1 << 16)
+#define VI6_RPF_INFMT_SPYCS (1 << 15)
+#define VI6_RPF_INFMT_SPUVS (1 << 14)
+#define VI6_RPF_INFMT_CEXT_ZERO (0 << 12)
+#define VI6_RPF_INFMT_CEXT_EXT (1 << 12)
+#define VI6_RPF_INFMT_CEXT_ONE (2 << 12)
+#define VI6_RPF_INFMT_CEXT_MASK (3 << 12)
+#define VI6_RPF_INFMT_RDTM_BT601 (0 << 9)
+#define VI6_RPF_INFMT_RDTM_BT601_EXT (1 << 9)
+#define VI6_RPF_INFMT_RDTM_BT709 (2 << 9)
+#define VI6_RPF_INFMT_RDTM_BT709_EXT (3 << 9)
+#define VI6_RPF_INFMT_RDTM_MASK (7 << 9)
+#define VI6_RPF_INFMT_CSC (1 << 8)
+#define VI6_RPF_INFMT_RDFMT_MASK (0x7f << 0)
+#define VI6_RPF_INFMT_RDFMT_SHIFT 0
+
+#define VI6_RPF_DSWAP 0x030c
+#define VI6_RPF_DSWAP_A_LLS (1 << 11)
+#define VI6_RPF_DSWAP_A_LWS (1 << 10)
+#define VI6_RPF_DSWAP_A_WDS (1 << 9)
+#define VI6_RPF_DSWAP_A_BTS (1 << 8)
+#define VI6_RPF_DSWAP_P_LLS (1 << 3)
+#define VI6_RPF_DSWAP_P_LWS (1 << 2)
+#define VI6_RPF_DSWAP_P_WDS (1 << 1)
+#define VI6_RPF_DSWAP_P_BTS (1 << 0)
+
+#define VI6_RPF_LOC 0x0310
+#define VI6_RPF_LOC_HCOORD_MASK (0x1fff << 16)
+#define VI6_RPF_LOC_HCOORD_SHIFT 16
+#define VI6_RPF_LOC_VCOORD_MASK (0x1fff << 0)
+#define VI6_RPF_LOC_VCOORD_SHIFT 0
+
+#define VI6_RPF_ALPH_SEL 0x0314
+#define VI6_RPF_ALPH_SEL_ASEL_PACKED (0 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_8B_PLANE (1 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_SELECT (2 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_1B_PLANE (3 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_FIXED (4 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_MASK (7 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_SHIFT 28
+#define VI6_RPF_ALPH_SEL_IROP_MASK (0xf << 24)
+#define VI6_RPF_ALPH_SEL_IROP_SHIFT 24
+#define VI6_RPF_ALPH_SEL_BSEL (1 << 23)
+#define VI6_RPF_ALPH_SEL_AEXT_ZERO (0 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_EXT (1 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_ONE (2 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_MASK (3 << 18)
+#define VI6_RPF_ALPH_SEL_ALPHA1_MASK (0xff << 8)
+#define VI6_RPF_ALPH_SEL_ALPHA1_SHIFT 8
+#define VI6_RPF_ALPH_SEL_ALPHA0_MASK (0xff << 0)
+#define VI6_RPF_ALPH_SEL_ALPHA0_SHIFT 0
+
+#define VI6_RPF_VRTCOL_SET 0x0318
+#define VI6_RPF_VRTCOL_SET_LAYA_MASK (0xff << 24)
+#define VI6_RPF_VRTCOL_SET_LAYA_SHIFT 24
+#define VI6_RPF_VRTCOL_SET_LAYR_MASK (0xff << 16)
+#define VI6_RPF_VRTCOL_SET_LAYR_SHIFT 16
+#define VI6_RPF_VRTCOL_SET_LAYG_MASK (0xff << 8)
+#define VI6_RPF_VRTCOL_SET_LAYG_SHIFT 8
+#define VI6_RPF_VRTCOL_SET_LAYB_MASK (0xff << 0)
+#define VI6_RPF_VRTCOL_SET_LAYB_SHIFT 0
+
+#define VI6_RPF_MSK_CTRL 0x031c
+#define VI6_RPF_MSK_CTRL_MSK_EN (1 << 24)
+#define VI6_RPF_MSK_CTRL_MGR_MASK (0xff << 16)
+#define VI6_RPF_MSK_CTRL_MGR_SHIFT 16
+#define VI6_RPF_MSK_CTRL_MGG_MASK (0xff << 8)
+#define VI6_RPF_MSK_CTRL_MGG_SHIFT 8
+#define VI6_RPF_MSK_CTRL_MGB_MASK (0xff << 0)
+#define VI6_RPF_MSK_CTRL_MGB_SHIFT 0
+
+#define VI6_RPF_MSK_SET0 0x0320
+#define VI6_RPF_MSK_SET1 0x0324
+#define VI6_RPF_MSK_SET_MSA_MASK (0xff << 24)
+#define VI6_RPF_MSK_SET_MSA_SHIFT 24
+#define VI6_RPF_MSK_SET_MSR_MASK (0xff << 16)
+#define VI6_RPF_MSK_SET_MSR_SHIFT 16
+#define VI6_RPF_MSK_SET_MSG_MASK (0xff << 8)
+#define VI6_RPF_MSK_SET_MSG_SHIFT 8
+#define VI6_RPF_MSK_SET_MSB_MASK (0xff << 0)
+#define VI6_RPF_MSK_SET_MSB_SHIFT 0
+
+#define VI6_RPF_CKEY_CTRL 0x0328
+#define VI6_RPF_CKEY_CTRL_CV (1 << 4)
+#define VI6_RPF_CKEY_CTRL_SAPE1 (1 << 1)
+#define VI6_RPF_CKEY_CTRL_SAPE0 (1 << 0)
+
+#define VI6_RPF_CKEY_SET0 0x032c
+#define VI6_RPF_CKEY_SET1 0x0330
+#define VI6_RPF_CKEY_SET_AP_MASK (0xff << 24)
+#define VI6_RPF_CKEY_SET_AP_SHIFT 24
+#define VI6_RPF_CKEY_SET_R_MASK (0xff << 16)
+#define VI6_RPF_CKEY_SET_R_SHIFT 16
+#define VI6_RPF_CKEY_SET_GY_MASK (0xff << 8)
+#define VI6_RPF_CKEY_SET_GY_SHIFT 8
+#define VI6_RPF_CKEY_SET_B_MASK (0xff << 0)
+#define VI6_RPF_CKEY_SET_B_SHIFT 0
+
+#define VI6_RPF_SRCM_PSTRIDE 0x0334
+#define VI6_RPF_SRCM_PSTRIDE_Y_SHIFT 16
+#define VI6_RPF_SRCM_PSTRIDE_C_SHIFT 0
+
+#define VI6_RPF_SRCM_ASTRIDE 0x0338
+#define VI6_RPF_SRCM_PSTRIDE_A_SHIFT 0
+
+#define VI6_RPF_SRCM_ADDR_Y 0x033c
+#define VI6_RPF_SRCM_ADDR_C0 0x0340
+#define VI6_RPF_SRCM_ADDR_C1 0x0344
+#define VI6_RPF_SRCM_ADDR_AI 0x0348
+
+#define VI6_RPF_MULT_ALPHA 0x036c
+#define VI6_RPF_MULT_ALPHA_A_MMD_NONE (0 << 12)
+#define VI6_RPF_MULT_ALPHA_A_MMD_RATIO (1 << 12)
+#define VI6_RPF_MULT_ALPHA_P_MMD_NONE (0 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_RATIO (1 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_IMAGE (2 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_BOTH (3 << 8)
+#define VI6_RPF_MULT_ALPHA_RATIO_MASK (0xff << 0)
+#define VI6_RPF_MULT_ALPHA_RATIO_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * WPF Control Registers
+ */
+
+#define VI6_WPF_OFFSET 0x100
+
+#define VI6_WPF_SRCRPF 0x1000
+#define VI6_WPF_SRCRPF_VIRACT_DIS (0 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_SUB (1 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_MST (2 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_MASK (3 << 28)
+#define VI6_WPF_SRCRPF_VIRACT2_DIS (0 << 24)
+#define VI6_WPF_SRCRPF_VIRACT2_SUB (1 << 24)
+#define VI6_WPF_SRCRPF_VIRACT2_MST (2 << 24)
+#define VI6_WPF_SRCRPF_VIRACT2_MASK (3 << 24)
+#define VI6_WPF_SRCRPF_RPF_ACT_DIS(n) (0 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_SUB(n) (1 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_MST(n) (2 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_MASK(n) (3 << ((n) * 2))
+
+#define VI6_WPF_HSZCLIP 0x1004
+#define VI6_WPF_VSZCLIP 0x1008
+#define VI6_WPF_SZCLIP_EN (1 << 28)
+#define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
+#define VI6_WPF_SZCLIP_OFST_SHIFT 16
+#define VI6_WPF_SZCLIP_SIZE_MASK (0xfff << 0)
+#define VI6_WPF_SZCLIP_SIZE_SHIFT 0
+
+#define VI6_WPF_OUTFMT 0x100c
+#define VI6_WPF_OUTFMT_PDV_MASK (0xff << 24)
+#define VI6_WPF_OUTFMT_PDV_SHIFT 24
+#define VI6_WPF_OUTFMT_PXA (1 << 23)
+#define VI6_WPF_OUTFMT_ROT (1 << 18)
+#define VI6_WPF_OUTFMT_HFLP (1 << 17)
+#define VI6_WPF_OUTFMT_FLP (1 << 16)
+#define VI6_WPF_OUTFMT_SPYCS (1 << 15)
+#define VI6_WPF_OUTFMT_SPUVS (1 << 14)
+#define VI6_WPF_OUTFMT_DITH_DIS (0 << 12)
+#define VI6_WPF_OUTFMT_DITH_EN (3 << 12)
+#define VI6_WPF_OUTFMT_DITH_MASK (3 << 12)
+#define VI6_WPF_OUTFMT_WRTM_BT601 (0 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT601_EXT (1 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT709 (2 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT709_EXT (3 << 9)
+#define VI6_WPF_OUTFMT_WRTM_MASK (7 << 9)
+#define VI6_WPF_OUTFMT_CSC (1 << 8)
+#define VI6_WPF_OUTFMT_WRFMT_MASK (0x7f << 0)
+#define VI6_WPF_OUTFMT_WRFMT_SHIFT 0
+
+#define VI6_WPF_DSWAP 0x1010
+#define VI6_WPF_DSWAP_P_LLS (1 << 3)
+#define VI6_WPF_DSWAP_P_LWS (1 << 2)
+#define VI6_WPF_DSWAP_P_WDS (1 << 1)
+#define VI6_WPF_DSWAP_P_BTS (1 << 0)
+
+#define VI6_WPF_RNDCTRL 0x1014
+#define VI6_WPF_RNDCTRL_CBRM (1 << 28)
+#define VI6_WPF_RNDCTRL_ABRM_TRUNC (0 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_ROUND (1 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_THRESH (2 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_MASK (3 << 24)
+#define VI6_WPF_RNDCTRL_ATHRESH_MASK (0xff << 16)
+#define VI6_WPF_RNDCTRL_ATHRESH_SHIFT 16
+#define VI6_WPF_RNDCTRL_CLMD_FULL (0 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_CLIP (1 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_EXT (2 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_MASK (3 << 12)
+
+#define VI6_WPF_ROT_CTRL 0x1018
+#define VI6_WPF_ROT_CTRL_LN16 (1 << 17)
+#define VI6_WPF_ROT_CTRL_LMEM_WD_MASK (0x1fff << 0)
+#define VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT 0
+
+#define VI6_WPF_DSTM_STRIDE_Y 0x101c
+#define VI6_WPF_DSTM_STRIDE_C 0x1020
+#define VI6_WPF_DSTM_ADDR_Y 0x1024
+#define VI6_WPF_DSTM_ADDR_C0 0x1028
+#define VI6_WPF_DSTM_ADDR_C1 0x102c
+
+#define VI6_WPF_WRBCK_CTRL 0x1034
+#define VI6_WPF_WRBCK_CTRL_WBMD (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * UIF Control Registers
+ */
+
+#define VI6_UIF_OFFSET 0x100
+
+#define VI6_UIF_DISCOM_DOCMCR 0x1c00
+#define VI6_UIF_DISCOM_DOCMCR_CMPRU (1 << 16)
+#define VI6_UIF_DISCOM_DOCMCR_CMPR (1 << 0)
+
+#define VI6_UIF_DISCOM_DOCMSTR 0x1c04
+#define VI6_UIF_DISCOM_DOCMSTR_CMPPRE (1 << 1)
+#define VI6_UIF_DISCOM_DOCMSTR_CMPST (1 << 0)
+
+#define VI6_UIF_DISCOM_DOCMCLSTR 0x1c08
+#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLPRE (1 << 1)
+#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLST (1 << 0)
+
+#define VI6_UIF_DISCOM_DOCMIENR 0x1c0c
+#define VI6_UIF_DISCOM_DOCMIENR_CMPPREIEN (1 << 1)
+#define VI6_UIF_DISCOM_DOCMIENR_CMPIEN (1 << 0)
+
+#define VI6_UIF_DISCOM_DOCMMDR 0x1c10
+#define VI6_UIF_DISCOM_DOCMMDR_INTHRH(n) ((n) << 16)
+
+#define VI6_UIF_DISCOM_DOCMPMR 0x1c14
+#define VI6_UIF_DISCOM_DOCMPMR_CMPDFF(n) ((n) << 17)
+#define VI6_UIF_DISCOM_DOCMPMR_CMPDFA(n) ((n) << 8)
+#define VI6_UIF_DISCOM_DOCMPMR_CMPDAUF (1 << 7)
+#define VI6_UIF_DISCOM_DOCMPMR_SEL(n) ((n) << 0)
+
+#define VI6_UIF_DISCOM_DOCMECRCR 0x1c18
+#define VI6_UIF_DISCOM_DOCMCCRCR 0x1c1c
+#define VI6_UIF_DISCOM_DOCMSPXR 0x1c20
+#define VI6_UIF_DISCOM_DOCMSPYR 0x1c24
+#define VI6_UIF_DISCOM_DOCMSZXR 0x1c28
+#define VI6_UIF_DISCOM_DOCMSZYR 0x1c2c
+
+/* -----------------------------------------------------------------------------
+ * DPR Control Registers
+ */
+
+#define VI6_DPR_RPF_ROUTE(n) (0x2000 + (n) * 4)
+
+#define VI6_DPR_WPF_FPORCH(n) (0x2014 + (n) * 4)
+#define VI6_DPR_WPF_FPORCH_FP_WPFN (5 << 8)
+
+#define VI6_DPR_SRU_ROUTE 0x2024
+#define VI6_DPR_UDS_ROUTE(n) (0x2028 + (n) * 4)
+#define VI6_DPR_LUT_ROUTE 0x203c
+#define VI6_DPR_CLU_ROUTE 0x2040
+#define VI6_DPR_HST_ROUTE 0x2044
+#define VI6_DPR_HSI_ROUTE 0x2048
+#define VI6_DPR_BRU_ROUTE 0x204c
+#define VI6_DPR_ILV_BRS_ROUTE 0x2050
+#define VI6_DPR_ROUTE_BRSSEL (1 << 28)
+#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
+#define VI6_DPR_ROUTE_FXA_SHIFT 16
+#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
+#define VI6_DPR_ROUTE_FP_SHIFT 8
+#define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
+#define VI6_DPR_ROUTE_RT_SHIFT 0
+
+#define VI6_DPR_HGO_SMPPT 0x2054
+#define VI6_DPR_HGT_SMPPT 0x2058
+#define VI6_DPR_SMPPT_TGW_MASK (7 << 8)
+#define VI6_DPR_SMPPT_TGW_SHIFT 8
+#define VI6_DPR_SMPPT_PT_MASK (0x3f << 0)
+#define VI6_DPR_SMPPT_PT_SHIFT 0
+
+#define VI6_DPR_UIF_ROUTE(n) (0x2074 + (n) * 4)
+
+#define VI6_DPR_NODE_RPF(n) (n)
+#define VI6_DPR_NODE_UIF(n) (12 + (n))
+#define VI6_DPR_NODE_SRU 16
+#define VI6_DPR_NODE_UDS(n) (17 + (n))
+#define VI6_DPR_NODE_LUT 22
+#define VI6_DPR_NODE_BRU_IN(n) (((n) <= 3) ? 23 + (n) : 49)
+#define VI6_DPR_NODE_BRU_OUT 27
+#define VI6_DPR_NODE_CLU 29
+#define VI6_DPR_NODE_HST 30
+#define VI6_DPR_NODE_HSI 31
+#define VI6_DPR_NODE_BRS_IN(n) (38 + (n))
+#define VI6_DPR_NODE_LIF 55 /* Gen2 only */
+#define VI6_DPR_NODE_WPF(n) (56 + (n))
+#define VI6_DPR_NODE_UNUSED 63
+
+/* -----------------------------------------------------------------------------
+ * SRU Control Registers
+ */
+
+#define VI6_SRU_CTRL0 0x2200
+#define VI6_SRU_CTRL0_PARAM0_MASK (0x1ff << 16)
+#define VI6_SRU_CTRL0_PARAM0_SHIFT 16
+#define VI6_SRU_CTRL0_PARAM1_MASK (0x1f << 8)
+#define VI6_SRU_CTRL0_PARAM1_SHIFT 8
+#define VI6_SRU_CTRL0_MODE_UPSCALE (4 << 4)
+#define VI6_SRU_CTRL0_PARAM2 (1 << 3)
+#define VI6_SRU_CTRL0_PARAM3 (1 << 2)
+#define VI6_SRU_CTRL0_PARAM4 (1 << 1)
+#define VI6_SRU_CTRL0_EN (1 << 0)
+
+#define VI6_SRU_CTRL1 0x2204
+#define VI6_SRU_CTRL1_PARAM5 0x7ff
+
+#define VI6_SRU_CTRL2 0x2208
+#define VI6_SRU_CTRL2_PARAM6_SHIFT 16
+#define VI6_SRU_CTRL2_PARAM7_SHIFT 8
+#define VI6_SRU_CTRL2_PARAM8_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * UDS Control Registers
+ */
+
+#define VI6_UDS_OFFSET 0x100
+
+#define VI6_UDS_CTRL 0x2300
+#define VI6_UDS_CTRL_AMD (1 << 30)
+#define VI6_UDS_CTRL_FMD (1 << 29)
+#define VI6_UDS_CTRL_BLADV (1 << 28)
+#define VI6_UDS_CTRL_AON (1 << 25)
+#define VI6_UDS_CTRL_ATHON (1 << 24)
+#define VI6_UDS_CTRL_BC (1 << 20)
+#define VI6_UDS_CTRL_NE_A (1 << 19)
+#define VI6_UDS_CTRL_NE_RCR (1 << 18)
+#define VI6_UDS_CTRL_NE_GY (1 << 17)
+#define VI6_UDS_CTRL_NE_BCB (1 << 16)
+#define VI6_UDS_CTRL_AMDSLH (1 << 2)
+#define VI6_UDS_CTRL_TDIPC (1 << 1)
+
+#define VI6_UDS_SCALE 0x2304
+#define VI6_UDS_SCALE_HMANT_MASK (0xf << 28)
+#define VI6_UDS_SCALE_HMANT_SHIFT 28
+#define VI6_UDS_SCALE_HFRAC_MASK (0xfff << 16)
+#define VI6_UDS_SCALE_HFRAC_SHIFT 16
+#define VI6_UDS_SCALE_VMANT_MASK (0xf << 12)
+#define VI6_UDS_SCALE_VMANT_SHIFT 12
+#define VI6_UDS_SCALE_VFRAC_MASK (0xfff << 0)
+#define VI6_UDS_SCALE_VFRAC_SHIFT 0
+
+#define VI6_UDS_ALPTH 0x2308
+#define VI6_UDS_ALPTH_TH1_MASK (0xff << 8)
+#define VI6_UDS_ALPTH_TH1_SHIFT 8
+#define VI6_UDS_ALPTH_TH0_MASK (0xff << 0)
+#define VI6_UDS_ALPTH_TH0_SHIFT 0
+
+#define VI6_UDS_ALPVAL 0x230c
+#define VI6_UDS_ALPVAL_VAL2_MASK (0xff << 16)
+#define VI6_UDS_ALPVAL_VAL2_SHIFT 16
+#define VI6_UDS_ALPVAL_VAL1_MASK (0xff << 8)
+#define VI6_UDS_ALPVAL_VAL1_SHIFT 8
+#define VI6_UDS_ALPVAL_VAL0_MASK (0xff << 0)
+#define VI6_UDS_ALPVAL_VAL0_SHIFT 0
+
+#define VI6_UDS_PASS_BWIDTH 0x2310
+#define VI6_UDS_PASS_BWIDTH_H_MASK (0x7f << 16)
+#define VI6_UDS_PASS_BWIDTH_H_SHIFT 16
+#define VI6_UDS_PASS_BWIDTH_V_MASK (0x7f << 0)
+#define VI6_UDS_PASS_BWIDTH_V_SHIFT 0
+
+#define VI6_UDS_HPHASE 0x2314
+#define VI6_UDS_HPHASE_HSTP_MASK (0xfff << 16)
+#define VI6_UDS_HPHASE_HSTP_SHIFT 16
+#define VI6_UDS_HPHASE_HEDP_MASK (0xfff << 0)
+#define VI6_UDS_HPHASE_HEDP_SHIFT 0
+
+#define VI6_UDS_IPC 0x2318
+#define VI6_UDS_IPC_FIELD (1 << 27)
+#define VI6_UDS_IPC_VEDP_MASK (0xfff << 0)
+#define VI6_UDS_IPC_VEDP_SHIFT 0
+
+#define VI6_UDS_HSZCLIP 0x231c
+#define VI6_UDS_HSZCLIP_HCEN (1 << 28)
+#define VI6_UDS_HSZCLIP_HCL_OFST_MASK (0xff << 16)
+#define VI6_UDS_HSZCLIP_HCL_OFST_SHIFT 16
+#define VI6_UDS_HSZCLIP_HCL_SIZE_MASK (0x1fff << 0)
+#define VI6_UDS_HSZCLIP_HCL_SIZE_SHIFT 0
+
+#define VI6_UDS_CLIP_SIZE 0x2324
+#define VI6_UDS_CLIP_SIZE_HSIZE_MASK (0x1fff << 16)
+#define VI6_UDS_CLIP_SIZE_HSIZE_SHIFT 16
+#define VI6_UDS_CLIP_SIZE_VSIZE_MASK (0x1fff << 0)
+#define VI6_UDS_CLIP_SIZE_VSIZE_SHIFT 0
+
+#define VI6_UDS_FILL_COLOR 0x2328
+#define VI6_UDS_FILL_COLOR_RFILC_MASK (0xff << 16)
+#define VI6_UDS_FILL_COLOR_RFILC_SHIFT 16
+#define VI6_UDS_FILL_COLOR_GFILC_MASK (0xff << 8)
+#define VI6_UDS_FILL_COLOR_GFILC_SHIFT 8
+#define VI6_UDS_FILL_COLOR_BFILC_MASK (0xff << 0)
+#define VI6_UDS_FILL_COLOR_BFILC_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * LUT Control Registers
+ */
+
+#define VI6_LUT_CTRL 0x2800
+#define VI6_LUT_CTRL_EN (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * CLU Control Registers
+ */
+
+#define VI6_CLU_CTRL 0x2900
+#define VI6_CLU_CTRL_AAI (1 << 28)
+#define VI6_CLU_CTRL_MVS (1 << 24)
+#define VI6_CLU_CTRL_AX1I_2D (3 << 14)
+#define VI6_CLU_CTRL_AX2I_2D (1 << 12)
+#define VI6_CLU_CTRL_OS0_2D (3 << 8)
+#define VI6_CLU_CTRL_OS1_2D (1 << 6)
+#define VI6_CLU_CTRL_OS2_2D (3 << 4)
+#define VI6_CLU_CTRL_M2D (1 << 1)
+#define VI6_CLU_CTRL_EN (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * HST Control Registers
+ */
+
+#define VI6_HST_CTRL 0x2a00
+#define VI6_HST_CTRL_EN (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * HSI Control Registers
+ */
+
+#define VI6_HSI_CTRL 0x2b00
+#define VI6_HSI_CTRL_EN (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * BRS and BRU Control Registers
+ */
+
+#define VI6_ROP_NOP 0
+#define VI6_ROP_AND 1
+#define VI6_ROP_AND_REV 2
+#define VI6_ROP_COPY 3
+#define VI6_ROP_AND_INV 4
+#define VI6_ROP_CLEAR 5
+#define VI6_ROP_XOR 6
+#define VI6_ROP_OR 7
+#define VI6_ROP_NOR 8
+#define VI6_ROP_EQUIV 9
+#define VI6_ROP_INVERT 10
+#define VI6_ROP_OR_REV 11
+#define VI6_ROP_COPY_INV 12
+#define VI6_ROP_OR_INV 13
+#define VI6_ROP_NAND 14
+#define VI6_ROP_SET 15
+
+#define VI6_BRU_BASE 0x2c00
+#define VI6_BRS_BASE 0x3900
+
+#define VI6_BRU_INCTRL 0x0000
+#define VI6_BRU_INCTRL_NRM (1 << 28)
+#define VI6_BRU_INCTRL_DnON (1 << (16 + (n)))
+#define VI6_BRU_INCTRL_DITHn_OFF (0 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_18BPP (1 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_16BPP (2 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_15BPP (3 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_12BPP (4 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_8BPP (5 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_MASK (7 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_SHIFT ((n) * 4)
+
+#define VI6_BRU_VIRRPF_SIZE 0x0004
+#define VI6_BRU_VIRRPF_SIZE_HSIZE_MASK (0x1fff << 16)
+#define VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT 16
+#define VI6_BRU_VIRRPF_SIZE_VSIZE_MASK (0x1fff << 0)
+#define VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT 0
+
+#define VI6_BRU_VIRRPF_LOC 0x0008
+#define VI6_BRU_VIRRPF_LOC_HCOORD_MASK (0x1fff << 16)
+#define VI6_BRU_VIRRPF_LOC_HCOORD_SHIFT 16
+#define VI6_BRU_VIRRPF_LOC_VCOORD_MASK (0x1fff << 0)
+#define VI6_BRU_VIRRPF_LOC_VCOORD_SHIFT 0
+
+#define VI6_BRU_VIRRPF_COL 0x000c
+#define VI6_BRU_VIRRPF_COL_A_MASK (0xff << 24)
+#define VI6_BRU_VIRRPF_COL_A_SHIFT 24
+#define VI6_BRU_VIRRPF_COL_RCR_MASK (0xff << 16)
+#define VI6_BRU_VIRRPF_COL_RCR_SHIFT 16
+#define VI6_BRU_VIRRPF_COL_GY_MASK (0xff << 8)
+#define VI6_BRU_VIRRPF_COL_GY_SHIFT 8
+#define VI6_BRU_VIRRPF_COL_BCB_MASK (0xff << 0)
+#define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0
+
+#define VI6_BRU_CTRL(n) (0x0010 + (n) * 8 + ((n) <= 3 ? 0 : 4))
+#define VI6_BRU_CTRL_RBC (1 << 31)
+#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
+#define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20)
+#define VI6_BRU_CTRL_DSTSEL_MASK (7 << 20)
+#define VI6_BRU_CTRL_SRCSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 16)
+#define VI6_BRU_CTRL_SRCSEL_VRPF (4 << 16)
+#define VI6_BRU_CTRL_SRCSEL_MASK (7 << 16)
+#define VI6_BRU_CTRL_CROP(rop) ((rop) << 4)
+#define VI6_BRU_CTRL_CROP_MASK (0xf << 4)
+#define VI6_BRU_CTRL_AROP(rop) ((rop) << 0)
+#define VI6_BRU_CTRL_AROP_MASK (0xf << 0)
+
+#define VI6_BRU_BLD(n) (0x0014 + (n) * 8 + ((n) <= 3 ? 0 : 4))
+#define VI6_BRU_BLD_CBES (1 << 31)
+#define VI6_BRU_BLD_CCMDX_DST_A (0 << 28)
+#define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28)
+#define VI6_BRU_BLD_CCMDX_SRC_A (2 << 28)
+#define VI6_BRU_BLD_CCMDX_255_SRC_A (3 << 28)
+#define VI6_BRU_BLD_CCMDX_COEFX (4 << 28)
+#define VI6_BRU_BLD_CCMDX_MASK (7 << 28)
+#define VI6_BRU_BLD_CCMDY_DST_A (0 << 24)
+#define VI6_BRU_BLD_CCMDY_255_DST_A (1 << 24)
+#define VI6_BRU_BLD_CCMDY_SRC_A (2 << 24)
+#define VI6_BRU_BLD_CCMDY_255_SRC_A (3 << 24)
+#define VI6_BRU_BLD_CCMDY_COEFY (4 << 24)
+#define VI6_BRU_BLD_CCMDY_MASK (7 << 24)
+#define VI6_BRU_BLD_CCMDY_SHIFT 24
+#define VI6_BRU_BLD_ABES (1 << 23)
+#define VI6_BRU_BLD_ACMDX_DST_A (0 << 20)
+#define VI6_BRU_BLD_ACMDX_255_DST_A (1 << 20)
+#define VI6_BRU_BLD_ACMDX_SRC_A (2 << 20)
+#define VI6_BRU_BLD_ACMDX_255_SRC_A (3 << 20)
+#define VI6_BRU_BLD_ACMDX_COEFX (4 << 20)
+#define VI6_BRU_BLD_ACMDX_MASK (7 << 20)
+#define VI6_BRU_BLD_ACMDY_DST_A (0 << 16)
+#define VI6_BRU_BLD_ACMDY_255_DST_A (1 << 16)
+#define VI6_BRU_BLD_ACMDY_SRC_A (2 << 16)
+#define VI6_BRU_BLD_ACMDY_255_SRC_A (3 << 16)
+#define VI6_BRU_BLD_ACMDY_COEFY (4 << 16)
+#define VI6_BRU_BLD_ACMDY_MASK (7 << 16)
+#define VI6_BRU_BLD_COEFX_MASK (0xff << 8)
+#define VI6_BRU_BLD_COEFX_SHIFT 8
+#define VI6_BRU_BLD_COEFY_MASK (0xff << 0)
+#define VI6_BRU_BLD_COEFY_SHIFT 0
+
+#define VI6_BRU_ROP 0x0030 /* Only available on BRU */
+#define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
+#define VI6_BRU_ROP_DSTSEL_VRPF (4 << 20)
+#define VI6_BRU_ROP_DSTSEL_MASK (7 << 20)
+#define VI6_BRU_ROP_CROP(rop) ((rop) << 4)
+#define VI6_BRU_ROP_CROP_MASK (0xf << 4)
+#define VI6_BRU_ROP_AROP(rop) ((rop) << 0)
+#define VI6_BRU_ROP_AROP_MASK (0xf << 0)
+
+/* -----------------------------------------------------------------------------
+ * HGO Control Registers
+ */
+
+#define VI6_HGO_OFFSET 0x3000
+#define VI6_HGO_OFFSET_HOFFSET_SHIFT 16
+#define VI6_HGO_OFFSET_VOFFSET_SHIFT 0
+#define VI6_HGO_SIZE 0x3004
+#define VI6_HGO_SIZE_HSIZE_SHIFT 16
+#define VI6_HGO_SIZE_VSIZE_SHIFT 0
+#define VI6_HGO_MODE 0x3008
+#define VI6_HGO_MODE_STEP (1 << 10)
+#define VI6_HGO_MODE_MAXRGB (1 << 7)
+#define VI6_HGO_MODE_OFSB_R (1 << 6)
+#define VI6_HGO_MODE_OFSB_G (1 << 5)
+#define VI6_HGO_MODE_OFSB_B (1 << 4)
+#define VI6_HGO_MODE_HRATIO_SHIFT 2
+#define VI6_HGO_MODE_VRATIO_SHIFT 0
+#define VI6_HGO_LB_TH 0x300c
+#define VI6_HGO_LBn_H(n) (0x3010 + (n) * 8)
+#define VI6_HGO_LBn_V(n) (0x3014 + (n) * 8)
+#define VI6_HGO_R_HISTO(n) (0x3030 + (n) * 4)
+#define VI6_HGO_R_MAXMIN 0x3130
+#define VI6_HGO_R_SUM 0x3134
+#define VI6_HGO_R_LB_DET 0x3138
+#define VI6_HGO_G_HISTO(n) (0x3140 + (n) * 4)
+#define VI6_HGO_G_MAXMIN 0x3240
+#define VI6_HGO_G_SUM 0x3244
+#define VI6_HGO_G_LB_DET 0x3248
+#define VI6_HGO_B_HISTO(n) (0x3250 + (n) * 4)
+#define VI6_HGO_B_MAXMIN 0x3350
+#define VI6_HGO_B_SUM 0x3354
+#define VI6_HGO_B_LB_DET 0x3358
+#define VI6_HGO_EXT_HIST_ADDR 0x335c
+#define VI6_HGO_EXT_HIST_DATA 0x3360
+#define VI6_HGO_REGRST 0x33fc
+#define VI6_HGO_REGRST_RCLEA (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * HGT Control Registers
+ */
+
+#define VI6_HGT_OFFSET 0x3400
+#define VI6_HGT_OFFSET_HOFFSET_SHIFT 16
+#define VI6_HGT_OFFSET_VOFFSET_SHIFT 0
+#define VI6_HGT_SIZE 0x3404
+#define VI6_HGT_SIZE_HSIZE_SHIFT 16
+#define VI6_HGT_SIZE_VSIZE_SHIFT 0
+#define VI6_HGT_MODE 0x3408
+#define VI6_HGT_MODE_HRATIO_SHIFT 2
+#define VI6_HGT_MODE_VRATIO_SHIFT 0
+#define VI6_HGT_HUE_AREA(n) (0x340c + (n) * 4)
+#define VI6_HGT_HUE_AREA_LOWER_SHIFT 16
+#define VI6_HGT_HUE_AREA_UPPER_SHIFT 0
+#define VI6_HGT_LB_TH 0x3424
+#define VI6_HGT_LBn_H(n) (0x3438 + (n) * 8)
+#define VI6_HGT_LBn_V(n) (0x342c + (n) * 8)
+#define VI6_HGT_HISTO(m, n) (0x3450 + (m) * 128 + (n) * 4)
+#define VI6_HGT_MAXMIN 0x3750
+#define VI6_HGT_SUM 0x3754
+#define VI6_HGT_LB_DET 0x3758
+#define VI6_HGT_REGRST 0x37fc
+#define VI6_HGT_REGRST_RCLEA (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * LIF Control Registers
+ */
+
+#define VI6_LIF_OFFSET (-0x100)
+
+#define VI6_LIF_CTRL 0x3b00
+#define VI6_LIF_CTRL_OBTH_MASK (0x7ff << 16)
+#define VI6_LIF_CTRL_OBTH_SHIFT 16
+#define VI6_LIF_CTRL_CFMT (1 << 4)
+#define VI6_LIF_CTRL_REQSEL (1 << 1)
+#define VI6_LIF_CTRL_LIF_EN (1 << 0)
+
+#define VI6_LIF_CSBTH 0x3b04
+#define VI6_LIF_CSBTH_HBTH_MASK (0x7ff << 16)
+#define VI6_LIF_CSBTH_HBTH_SHIFT 16
+#define VI6_LIF_CSBTH_LBTH_MASK (0x7ff << 0)
+#define VI6_LIF_CSBTH_LBTH_SHIFT 0
+
+#define VI6_LIF_LBA 0x3b0c
+#define VI6_LIF_LBA_LBA0 (1 << 31)
+#define VI6_LIF_LBA_LBA1_MASK (0xfff << 16)
+#define VI6_LIF_LBA_LBA1_SHIFT 16
+
+/* -----------------------------------------------------------------------------
+ * Security Control Registers
+ */
+
+#define VI6_SECURITY_CTRL0 0x3d00
+#define VI6_SECURITY_CTRL1 0x3d04
+
+/* -----------------------------------------------------------------------------
+ * IP Version Registers
+ */
+
+#define VI6_IP_VERSION 0x3f00
+#define VI6_IP_VERSION_MASK (0xffff << 0)
+#define VI6_IP_VERSION_MODEL_MASK (0xff << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_H2 (0x09 << 8)
+#define VI6_IP_VERSION_MODEL_VSPR_H2 (0x0a << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN2 (0x0b << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_M2 (0x0c << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_V2H (0x12 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V2H (0x13 << 8)
+#define VI6_IP_VERSION_MODEL_VSPI_GEN3 (0x14 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN3 (0x17 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8)
+#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
+#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
+#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
+#define VI6_IP_VERSION_SOC_V2H (0x01 << 0)
+#define VI6_IP_VERSION_SOC_V3M (0x01 << 0)
+#define VI6_IP_VERSION_SOC_M2 (0x02 << 0)
+#define VI6_IP_VERSION_SOC_M3W (0x02 << 0)
+#define VI6_IP_VERSION_SOC_V3H (0x02 << 0)
+#define VI6_IP_VERSION_SOC_H3 (0x03 << 0)
+#define VI6_IP_VERSION_SOC_D3 (0x04 << 0)
+#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
+#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
+
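An illustrative sketch (not part of the patch) of how the VI6_IP_VERSION fields defined above can be decoded. The raw value below is hypothetical; note that the SoC field alone is ambiguous (0x02, for instance, is shared by M2, M3-W and V3H) and is interpreted together with the model field.

#include <stdint.h>
#include <stdio.h>

#define IP_VERSION_MODEL_MASK	(0xff << 8)	/* mirrors VI6_IP_VERSION_MODEL_MASK */
#define IP_VERSION_SOC_MASK	(0xff << 0)	/* mirrors VI6_IP_VERSION_SOC_MASK */

int main(void)
{
	uint32_t version = 0x1702;	/* hypothetical raw read of VI6_IP_VERSION */
	unsigned int model = (version & IP_VERSION_MODEL_MASK) >> 8;	/* 0x17: VSPD Gen3 */
	unsigned int soc = version & IP_VERSION_SOC_MASK;		/* 0x02: M2/M3-W/V3H code */

	printf("model field 0x%02x, soc field 0x%02x\n", model, soc);
	return 0;
}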
+/* -----------------------------------------------------------------------------
+ * RPF CLUT Registers
+ */
+
+#define VI6_CLUT_TABLE 0x4000
+
+/* -----------------------------------------------------------------------------
+ * 1D LUT Registers
+ */
+
+#define VI6_LUT_TABLE 0x7000
+
+/* -----------------------------------------------------------------------------
+ * 3D LUT Registers
+ */
+
+#define VI6_CLU_ADDR 0x7400
+#define VI6_CLU_DATA 0x7404
+
+/* -----------------------------------------------------------------------------
+ * Formats
+ */
+
+#define VI6_FMT_RGB_332 0x00
+#define VI6_FMT_XRGB_4444 0x01
+#define VI6_FMT_RGBX_4444 0x02
+#define VI6_FMT_XRGB_1555 0x04
+#define VI6_FMT_RGBX_5551 0x05
+#define VI6_FMT_RGB_565 0x06
+#define VI6_FMT_AXRGB_86666 0x07
+#define VI6_FMT_RGBXA_66668 0x08
+#define VI6_FMT_XRGBA_66668 0x09
+#define VI6_FMT_ARGBX_86666 0x0a
+#define VI6_FMT_AXRXGXB_8262626 0x0b
+#define VI6_FMT_XRXGXBA_2626268 0x0c
+#define VI6_FMT_ARXGXBX_8626262 0x0d
+#define VI6_FMT_RXGXBXA_6262628 0x0e
+#define VI6_FMT_XRGB_6666 0x0f
+#define VI6_FMT_RGBX_6666 0x10
+#define VI6_FMT_XRXGXB_262626 0x11
+#define VI6_FMT_RXGXBX_626262 0x12
+#define VI6_FMT_ARGB_8888 0x13
+#define VI6_FMT_RGBA_8888 0x14
+#define VI6_FMT_RGB_888 0x15
+#define VI6_FMT_XRGXGB_763763 0x16
+#define VI6_FMT_XXRGB_86666 0x17
+#define VI6_FMT_BGR_888 0x18
+#define VI6_FMT_ARGB_4444 0x19
+#define VI6_FMT_RGBA_4444 0x1a
+#define VI6_FMT_ARGB_1555 0x1b
+#define VI6_FMT_RGBA_5551 0x1c
+#define VI6_FMT_ABGR_4444 0x1d
+#define VI6_FMT_BGRA_4444 0x1e
+#define VI6_FMT_ABGR_1555 0x1f
+#define VI6_FMT_BGRA_5551 0x20
+#define VI6_FMT_XBXGXR_262626 0x21
+#define VI6_FMT_ABGR_8888 0x22
+#define VI6_FMT_XXRGB_88565 0x23
+
+#define VI6_FMT_Y_UV_444 0x40
+#define VI6_FMT_Y_UV_422 0x41
+#define VI6_FMT_Y_UV_420 0x42
+#define VI6_FMT_YUV_444 0x46
+#define VI6_FMT_YUYV_422 0x47
+#define VI6_FMT_YYUV_422 0x48
+#define VI6_FMT_YUV_420 0x49
+#define VI6_FMT_Y_U_V_444 0x4a
+#define VI6_FMT_Y_U_V_422 0x4b
+#define VI6_FMT_Y_U_V_420 0x4c
+
+#endif /* __VSP1_REGS_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
new file mode 100644
index 000000000..abaf4dde3
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_rpf.c -- R-Car VSP1 Read Pixel Formatter
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define RPF_MAX_WIDTH 8190
+#define RPF_MAX_HEIGHT 8190
+
+/* Pre extended display list command data structure. */
+struct vsp1_extcmd_auto_fld_body {
+ u32 top_y0;
+ u32 bottom_y0;
+ u32 top_c0;
+ u32 bottom_c0;
+ u32 top_c1;
+ u32 bottom_c1;
+ u32 reserved0;
+ u32 reserved1;
+} __packed;
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + rpf->entity.index * VI6_RPF_OFFSET,
+ data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_ops rpf_ops = {
+ .pad = &vsp1_rwpf_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void rpf_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
+ const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ const struct v4l2_mbus_framefmt *source_format;
+ const struct v4l2_mbus_framefmt *sink_format;
+ unsigned int left = 0;
+ unsigned int top = 0;
+ u32 pstride;
+ u32 infmt;
+
+ /* Stride */
+ pstride = format->plane_fmt[0].bytesperline
+ << VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
+ if (format->num_planes > 1)
+ pstride |= format->plane_fmt[1].bytesperline
+ << VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+
+ /*
+ * pstride packs both STRIDE_Y and STRIDE_C, but multiplying the
+ * whole of pstride by 2 is safe here as it doubles both values at
+ * once.
+ */
+ if (pipe->interlaced)
+ pstride *= 2;
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_PSTRIDE, pstride);
+
+ /* Format */
+ sink_format = vsp1_entity_get_pad_format(&rpf->entity,
+ rpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&rpf->entity,
+ rpf->entity.config,
+ RWPF_PAD_SOURCE);
+
+ infmt = VI6_RPF_INFMT_CIPM
+ | (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
+
+ if (fmtinfo->swap_yc)
+ infmt |= VI6_RPF_INFMT_SPYCS;
+ if (fmtinfo->swap_uv)
+ infmt |= VI6_RPF_INFMT_SPUVS;
+
+ if (sink_format->code != source_format->code)
+ infmt |= VI6_RPF_INFMT_CSC;
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_INFMT, infmt);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_DSWAP, fmtinfo->swap);
+
+ /* Output location */
+ if (pipe->brx) {
+ const struct v4l2_rect *compose;
+
+ compose = vsp1_entity_get_pad_selection(pipe->brx,
+ pipe->brx->config,
+ rpf->brx_input,
+ V4L2_SEL_TGT_COMPOSE);
+ left = compose->left;
+ top = compose->top;
+ }
+
+ if (pipe->interlaced)
+ top /= 2;
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_LOC,
+ (left << VI6_RPF_LOC_HCOORD_SHIFT) |
+ (top << VI6_RPF_LOC_VCOORD_SHIFT));
+
+ /*
+ * On Gen2 use the alpha channel (extended to 8 bits) when available or
+ * a fixed alpha value set through the V4L2_CID_ALPHA_COMPONENT control
+ * otherwise.
+ *
+ * The Gen3 RPF has extended alpha capability and can both multiply the
+ * alpha channel by a fixed global alpha value, and multiply the pixel
+ * components to convert the input to premultiplied alpha.
+ *
+ * As alpha premultiplication is available in the BRx for both Gen2 and
+ * Gen3 we handle it there and use the Gen3 alpha multiplier for global
+ * alpha multiplication only. This however prevents conversion to
+ * premultiplied alpha if no BRx is present in the pipeline. If that use
+ * case turns out to be useful we will revisit the implementation (for
+ * Gen3 only).
+ *
+ * We enable alpha multiplication on Gen3 using the fixed alpha value
+ * set through the V4L2_CID_ALPHA_COMPONENT control when the input
+ * contains an alpha channel. On Gen2 the global alpha is ignored in
+ * that case.
+ *
+ * In all cases, disable color keying.
+ */
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
+ (fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
+ : VI6_RPF_ALPH_SEL_ASEL_FIXED));
+
+ if (entity->vsp1->info->gen == 3) {
+ u32 mult;
+
+ if (fmtinfo->alpha) {
+ /*
+ * When the input contains an alpha channel enable the
+ * alpha multiplier. If the input is premultiplied we
+ * need to multiply both the alpha channel and the pixel
+ * components by the global alpha value to keep them
+ * premultiplied. Otherwise multiply the alpha channel
+ * only.
+ */
+ bool premultiplied = format->flags
+ & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
+
+ mult = VI6_RPF_MULT_ALPHA_A_MMD_RATIO
+ | (premultiplied ?
+ VI6_RPF_MULT_ALPHA_P_MMD_RATIO :
+ VI6_RPF_MULT_ALPHA_P_MMD_NONE);
+ } else {
+ /*
+ * When the input doesn't contain an alpha channel the
+ * global alpha value is applied in the unpacking unit,
+ * the alpha multiplier isn't needed and must be
+ * disabled.
+ */
+ mult = VI6_RPF_MULT_ALPHA_A_MMD_NONE
+ | VI6_RPF_MULT_ALPHA_P_MMD_NONE;
+ }
+
+ rpf->mult_alpha = mult;
+ }
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_MSK_CTRL, 0);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_CKEY_CTRL, 0);
+
+}
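An illustrative sketch (not part of the patch) of the Gen3 alpha multiplier selection described in the comment above. The enum values are stand-ins for the VI6_RPF_MULT_ALPHA_{A,P}_MMD_* register settings.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the VI6_RPF_MULT_ALPHA_{A,P}_MMD_* register values. */
enum mmd { MMD_NONE, MMD_RATIO };

static void rpf_alpha_mult(bool has_alpha, bool premultiplied,
			   enum mmd *alpha_mmd, enum mmd *pixel_mmd)
{
	if (has_alpha) {
		/*
		 * Multiply the alpha channel, and the pixel components too
		 * when the input is already premultiplied.
		 */
		*alpha_mmd = MMD_RATIO;
		*pixel_mmd = premultiplied ? MMD_RATIO : MMD_NONE;
	} else {
		/* No alpha channel: the multiplier stays disabled. */
		*alpha_mmd = MMD_NONE;
		*pixel_mmd = MMD_NONE;
	}
}

int main(void)
{
	enum mmd a, p;

	rpf_alpha_mult(true, true, &a, &p);
	printf("premultiplied ARGB input: alpha %s, pixels %s\n",
	       a == MMD_RATIO ? "ratio" : "none",
	       p == MMD_RATIO ? "ratio" : "none");
	return 0;
}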
+
+static void vsp1_rpf_configure_autofld(struct vsp1_rwpf *rpf,
+ struct vsp1_dl_list *dl)
+{
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ struct vsp1_dl_ext_cmd *cmd;
+ struct vsp1_extcmd_auto_fld_body *auto_fld;
+ u32 offset_y, offset_c;
+
+ cmd = vsp1_dl_get_pre_cmd(dl);
+ if (WARN_ONCE(!cmd, "Failed to obtain an autofld cmd"))
+ return;
+
+ /* Re-index our auto_fld to match the current RPF. */
+ auto_fld = cmd->data;
+ auto_fld = &auto_fld[rpf->entity.index];
+
+ auto_fld->top_y0 = rpf->mem.addr[0];
+ auto_fld->top_c0 = rpf->mem.addr[1];
+ auto_fld->top_c1 = rpf->mem.addr[2];
+
+ offset_y = format->plane_fmt[0].bytesperline;
+ offset_c = format->plane_fmt[1].bytesperline;
+
+ auto_fld->bottom_y0 = rpf->mem.addr[0] + offset_y;
+ auto_fld->bottom_c0 = rpf->mem.addr[1] + offset_c;
+ auto_fld->bottom_c1 = rpf->mem.addr[2] + offset_c;
+
+ cmd->flags |= VI6_DL_EXT_AUTOFLD_INT | BIT(16 + rpf->entity.index);
+}
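An illustrative sketch (not part of the patch): the bottom-field addresses programmed by vsp1_rpf_configure_autofld() above are the top-field addresses advanced by one line stride per plane. The buffer addresses and strides below are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical NV12-like buffer: luma plane then chroma plane. */
	uint64_t top_y0 = 0x48000000, top_c0 = 0x48200000;
	unsigned int bpl_y = 1920, bpl_c = 1920;	/* bytesperline per plane */

	printf("bottom_y0 = 0x%llx\n", (unsigned long long)(top_y0 + bpl_y));
	printf("bottom_c0 = 0x%llx\n", (unsigned long long)(top_c0 + bpl_c));
	return 0;
}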
+
+static void rpf_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_VRTCOL_SET,
+ rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_MULT_ALPHA, rpf->mult_alpha |
+ (rpf->alpha << VI6_RPF_MULT_ALPHA_RATIO_SHIFT));
+
+ vsp1_pipeline_propagate_alpha(pipe, dlb, rpf->alpha);
+}
+
+static void rpf_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
+ struct vsp1_rwpf_memory mem = rpf->mem;
+ struct vsp1_device *vsp1 = rpf->entity.vsp1;
+ const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ struct v4l2_rect crop;
+
+ /*
+ * Source size and crop offsets.
+ *
+ * The crop offsets correspond to the location of the crop
+ * rectangle top left corner in the plane buffer. Only two
+ * offsets are needed, as planes 2 and 3 always have identical
+ * strides.
+ */
+ crop = *vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+
+ /*
+ * Partition Algorithm Control
+ *
+ * The partition algorithm can split this frame into multiple
+ * slices. We must scale our partition window based on the pipe
+ * configuration to match the destination partition window.
+ * To achieve this, we adjust our crop to provide a 'sub-crop'
+ * matching the expected partition window. Only 'left' and
+ * 'width' need to be adjusted.
+ */
+ if (pipe->partitions > 1) {
+ crop.width = pipe->partition->rpf.width;
+ crop.left += pipe->partition->rpf.left;
+ }
+
+ if (pipe->interlaced) {
+ crop.height = round_down(crop.height / 2, fmtinfo->vsub);
+ crop.top = round_down(crop.top / 2, fmtinfo->vsub);
+ }
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_BSIZE,
+ (crop.width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+ (crop.height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_ESIZE,
+ (crop.width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+ (crop.height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+
+ mem.addr[0] += crop.top * format->plane_fmt[0].bytesperline
+ + crop.left * fmtinfo->bpp[0] / 8;
+
+ if (format->num_planes > 1) {
+ unsigned int bpl = format->plane_fmt[1].bytesperline;
+ unsigned int offset;
+
+ offset = crop.top / fmtinfo->vsub * bpl
+ + crop.left / fmtinfo->hsub * fmtinfo->bpp[1] / 8;
+ mem.addr[1] += offset;
+ mem.addr[2] += offset;
+ }
+
+ /*
+ * On Gen3 hardware the SPUVS bit has no effect on 3-planar
+ * formats. Swap the U and V planes manually in that case.
+ */
+ if (vsp1->info->gen == 3 && format->num_planes == 3 &&
+ fmtinfo->swap_uv)
+ swap(mem.addr[1], mem.addr[2]);
+
+ /*
+ * Interlaced pipelines will use the extended pre-cmd to process
+ * SRCM_ADDR_{Y,C0,C1}.
+ */
+ if (pipe->interlaced) {
+ vsp1_rpf_configure_autofld(rpf, dl);
+ } else {
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
+ }
+}
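An illustrative sketch (not part of the patch) of the plane offset arithmetic used above when applying the crop rectangle, for a hypothetical NV12-like layout (hsub = vsub = 2, 8 bits per luma sample, 16 bits per chroma sample pair).

#include <stdio.h>

int main(void)
{
	unsigned int bpl[2] = { 1920, 1920 };	/* bytes per line, Y and CbCr */
	unsigned int bpp[2] = { 8, 16 };	/* bits per pixel, Y and CbCr */
	unsigned int hsub = 2, vsub = 2;	/* chroma subsampling factors */
	unsigned int left = 64, top = 32;	/* crop offsets in pixels */

	unsigned int off_y = top * bpl[0] + left * bpp[0] / 8;
	unsigned int off_c = top / vsub * bpl[1] + left / hsub * bpp[1] / 8;

	printf("luma plane offset %u, chroma plane offset %u\n", off_y, off_c);
	return 0;
}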
+
+static void rpf_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ partition->rpf = *window;
+}
+
+static const struct vsp1_entity_operations rpf_entity_ops = {
+ .configure_stream = rpf_configure_stream,
+ .configure_frame = rpf_configure_frame,
+ .configure_partition = rpf_configure_partition,
+ .partition = rpf_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_rwpf *rpf;
+ char name[6];
+ int ret;
+
+ rpf = devm_kzalloc(vsp1->dev, sizeof(*rpf), GFP_KERNEL);
+ if (rpf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rpf->max_width = RPF_MAX_WIDTH;
+ rpf->max_height = RPF_MAX_HEIGHT;
+
+ rpf->entity.ops = &rpf_entity_ops;
+ rpf->entity.type = VSP1_ENTITY_RPF;
+ rpf->entity.index = index;
+
+ sprintf(name, "rpf.%u", index);
+ ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the control handler. */
+ ret = vsp1_rwpf_init_ctrls(rpf, 0);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "rpf%u: failed to initialize controls\n",
+ index);
+ goto error;
+ }
+
+ v4l2_ctrl_handler_setup(&rpf->ctrls);
+
+ return rpf;
+
+error:
+ vsp1_entity_destroy(&rpf->entity);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
new file mode 100644
index 000000000..049bdd958
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_rwpf.c -- R-Car VSP1 Read and Write Pixel Formatters
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define RWPF_MIN_WIDTH 1
+#define RWPF_MIN_HEIGHT 1
+
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ struct v4l2_subdev_pad_config *config)
+{
+ return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, config,
+ RWPF_PAD_SINK);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+
+ return 0;
+}
+
+static int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, RWPF_MIN_WIDTH,
+ RWPF_MIN_HEIGHT, rwpf->max_width,
+ rwpf->max_height);
+}
+
+static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&rwpf->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config, fmt->pad);
+
+ if (fmt->pad == RWPF_PAD_SOURCE) {
+ /*
+ * The RWPF performs format conversion but can't scale; only the
+ * format code can be changed on the source pad.
+ */
+ format->code = fmt->format.code;
+ fmt->format = *format;
+ goto done;
+ }
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ RWPF_MIN_WIDTH, rwpf->max_width);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ RWPF_MIN_HEIGHT, rwpf->max_height);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ if (rwpf->entity.type == VSP1_ENTITY_RPF) {
+ struct v4l2_rect *crop;
+
+ /* Update the sink crop rectangle. */
+ crop = vsp1_rwpf_get_crop(rwpf, config);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ }
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SOURCE);
+ *format = fmt->format;
+
+ if (rwpf->flip.rotate) {
+ format->width = fmt->format.height;
+ format->height = fmt->format.width;
+ }
+
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
+}
+
+static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ /*
+ * Cropping is only supported on the RPF and is implemented on the sink
+ * pad.
+ */
+ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&rwpf->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_rwpf_get_crop(rwpf, config);
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SINK);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
+}
+
+static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ /*
+ * Cropping is only supported on the RPF and is implemented on the sink
+ * pad.
+ */
+ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ mutex_lock(&rwpf->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Make sure the crop rectangle is entirely contained in the image. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SINK);
+
+ /*
+ * Restrict the crop rectangle coordinates to multiples of 2 to avoid
+ * shifting the color plane.
+ */
+ if (format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+ sel->r.left = ALIGN(sel->r.left, 2);
+ sel->r.top = ALIGN(sel->r.top, 2);
+ sel->r.width = round_down(sel->r.width, 2);
+ sel->r.height = round_down(sel->r.height, 2);
+ }
+
+ sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
+ sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
+ sel->r.width = min_t(unsigned int, sel->r.width,
+ format->width - sel->r.left);
+ sel->r.height = min_t(unsigned int, sel->r.height,
+ format->height - sel->r.top);
+
+ crop = vsp1_rwpf_get_crop(rwpf, config);
+ *crop = sel->r;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SOURCE);
+ format->width = crop->width;
+ format->height = crop->height;
+
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
+}
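An illustrative sketch (not part of the patch) of the crop rectangle adjustment performed above: for the YUV media bus code the left/top offsets are aligned up to multiples of 2 and the size rounded down, then the rectangle is clamped inside the sink format. The format size and requested rectangle are hypothetical.

#include <stdio.h>

#define ALIGN2(x)	(((x) + 1U) & ~1U)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int fmt_w = 1920, fmt_h = 1080;	/* sink format */
	unsigned int left = 3, top = 5, width = 1919, height = 1071;

	left = ALIGN2(left);		/* 4 */
	top = ALIGN2(top);		/* 6 */
	width &= ~1U;			/* 1918 */
	height &= ~1U;			/* 1070 */

	left = MIN(left, fmt_w - 2);
	top = MIN(top, fmt_h - 2);
	width = MIN(width, fmt_w - left);
	height = MIN(height, fmt_h - top);

	printf("crop (%u,%u)/%ux%u\n", left, top, width, height);
	return 0;
}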
+
+const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
+ .enum_frame_size = vsp1_rwpf_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = vsp1_rwpf_set_format,
+ .get_selection = vsp1_rwpf_get_selection,
+ .set_selection = vsp1_rwpf_set_selection,
+};
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int vsp1_rwpf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_rwpf *rwpf =
+ container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ rwpf->alpha = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vsp1_rwpf_ctrl_ops = {
+ .s_ctrl = vsp1_rwpf_s_ctrl,
+};
+
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols)
+{
+ v4l2_ctrl_handler_init(&rwpf->ctrls, ncontrols + 1);
+ v4l2_ctrl_new_std(&rwpf->ctrls, &vsp1_rwpf_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ rwpf->entity.subdev.ctrl_handler = &rwpf->ctrls;
+
+ return rwpf->ctrls.error;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
new file mode 100644
index 000000000..70742ecf7
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_rwpf.h -- R-Car VSP1 Read and Write Pixel Formatters
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_RWPF_H__
+#define __VSP1_RWPF_H__
+
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_entity.h"
+
+#define RWPF_PAD_SINK 0
+#define RWPF_PAD_SOURCE 1
+
+struct v4l2_ctrl;
+struct vsp1_dl_manager;
+struct vsp1_rwpf;
+struct vsp1_video;
+
+struct vsp1_rwpf_memory {
+ dma_addr_t addr[3];
+};
+
+struct vsp1_rwpf {
+ struct vsp1_entity entity;
+ struct v4l2_ctrl_handler ctrls;
+
+ struct vsp1_video *video;
+
+ unsigned int max_width;
+ unsigned int max_height;
+
+ struct v4l2_pix_format_mplane format;
+ const struct vsp1_format_info *fmtinfo;
+ unsigned int brx_input;
+
+ unsigned int alpha;
+
+ u32 mult_alpha;
+ u32 outfmt;
+
+ struct {
+ spinlock_t lock;
+ struct {
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *rotate;
+ } ctrls;
+ unsigned int pending;
+ unsigned int active;
+ bool rotate;
+ } flip;
+
+ struct vsp1_rwpf_memory mem;
+
+ struct vsp1_dl_manager *dlm;
+};
+
+static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_rwpf, entity.subdev);
+}
+
+static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
+{
+ return container_of(entity, struct vsp1_rwpf, entity);
+}
+
+struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
+struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
+
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
+
+extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
+
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ struct v4l2_subdev_pad_config *config);
+
+#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
new file mode 100644
index 000000000..04e4e05af
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_sru.c -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_sru.h"
+
+#define SRU_MIN_SIZE 4U
+#define SRU_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_sru_write(struct vsp1_sru *sru,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_SRU_INTENSITY (V4L2_CID_USER_BASE | 0x1001)
+
+struct vsp1_sru_param {
+ u32 ctrl0;
+ u32 ctrl2;
+};
+
+#define VI6_SRU_CTRL0_PARAMS(p0, p1) \
+ (((p0) << VI6_SRU_CTRL0_PARAM0_SHIFT) | \
+ ((p1) << VI6_SRU_CTRL0_PARAM1_SHIFT))
+
+#define VI6_SRU_CTRL2_PARAMS(p6, p7, p8) \
+ (((p6) << VI6_SRU_CTRL2_PARAM6_SHIFT) | \
+ ((p7) << VI6_SRU_CTRL2_PARAM7_SHIFT) | \
+ ((p8) << VI6_SRU_CTRL2_PARAM8_SHIFT))
+
+static const struct vsp1_sru_param vsp1_sru_params[] = {
+ {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(24, 40, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(8, 16, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(36, 60, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(12, 27, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(48, 80, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(16, 36, 255),
+ },
+};
+
+static int sru_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_sru *sru =
+ container_of(ctrl->handler, struct vsp1_sru, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_SRU_INTENSITY:
+ sru->intensity = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops sru_ctrl_ops = {
+ .s_ctrl = sru_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config sru_intensity_control = {
+ .ops = &sru_ctrl_ops,
+ .id = V4L2_CID_VSP1_SRU_INTENSITY,
+ .name = "Intensity",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 6,
+ .def = 1,
+ .step = 1,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
+}
+
+static int sru_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ config = vsp1_entity_get_pad_config(&sru->entity, cfg, fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(&sru->entity, config, SRU_PAD_SINK);
+
+ mutex_lock(&sru->entity.lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (fse->pad == SRU_PAD_SINK) {
+ fse->min_width = SRU_MIN_SIZE;
+ fse->max_width = SRU_MAX_SIZE;
+ fse->min_height = SRU_MIN_SIZE;
+ fse->max_height = SRU_MAX_SIZE;
+ } else {
+ fse->min_width = format->width;
+ fse->min_height = format->height;
+ if (format->width <= SRU_MAX_SIZE / 2 &&
+ format->height <= SRU_MAX_SIZE / 2) {
+ fse->max_width = format->width * 2;
+ fse->max_height = format->height * 2;
+ } else {
+ fse->max_width = format->width;
+ fse->max_height = format->height;
+ }
+ }
+
+done:
+ mutex_unlock(&sru->entity.lock);
+ return ret;
+}
+
+static void sru_try_format(struct vsp1_sru *sru,
+ struct v4l2_subdev_pad_config *config,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int input_area;
+ unsigned int output_area;
+
+ switch (pad) {
+ case SRU_PAD_SINK:
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ break;
+
+ case SRU_PAD_SOURCE:
+ /* The SRU can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&sru->entity, config,
+ SRU_PAD_SINK);
+ fmt->code = format->code;
+
+ /*
+ * We can upscale by 2 in both directions, but not independently.
+ * Compare the input and output rectangle areas (avoiding
+ * integer overflows on the output): if the requested output
+ * area is larger than 1.5^2 times the input area, upscale by
+ * two; otherwise don't scale.
+ */
+ input_area = format->width * format->height;
+ output_area = min(fmt->width, SRU_MAX_SIZE)
+ * min(fmt->height, SRU_MAX_SIZE);
+
+ if (fmt->width <= SRU_MAX_SIZE / 2 &&
+ fmt->height <= SRU_MAX_SIZE / 2 &&
+ output_area > input_area * 9 / 4) {
+ fmt->width = format->width * 2;
+ fmt->height = format->height * 2;
+ } else {
+ fmt->width = format->width;
+ fmt->height = format->height;
+ }
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
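An illustrative sketch (not part of the patch) of the upscale decision made in sru_try_format() above: x2 is selected only when the requested output area exceeds 1.5^2 (i.e. 9/4) times the input area and the requested size still allows doubling within SRU_MAX_SIZE.

#include <stdbool.h>
#include <stdio.h>

#define SRU_MAX_SIZE	8190U

static bool sru_selects_x2(unsigned int in_w, unsigned int in_h,
			   unsigned int req_w, unsigned int req_h)
{
	unsigned int input_area = in_w * in_h;
	unsigned int output_area = (req_w < SRU_MAX_SIZE ? req_w : SRU_MAX_SIZE)
				 * (req_h < SRU_MAX_SIZE ? req_h : SRU_MAX_SIZE);

	return req_w <= SRU_MAX_SIZE / 2 && req_h <= SRU_MAX_SIZE / 2 &&
	       output_area > input_area * 9 / 4;
}

int main(void)
{
	printf("640x480 -> 1280x960 request: %s\n",
	       sru_selects_x2(640, 480, 1280, 960) ? "x2" : "x1");
	printf("640x480 -> 800x600 request:  %s\n",
	       sru_selects_x2(640, 480, 800, 600) ? "x2" : "x1");
	return 0;
}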
+
+static int sru_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&sru->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&sru->entity, cfg, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ sru_try_format(sru, config, fmt->pad, &fmt->format);
+
+ format = vsp1_entity_get_pad_format(&sru->entity, config, fmt->pad);
+ *format = fmt->format;
+
+ if (fmt->pad == SRU_PAD_SINK) {
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&sru->entity, config,
+ SRU_PAD_SOURCE);
+ *format = fmt->format;
+
+ sru_try_format(sru, config, SRU_PAD_SOURCE, format);
+ }
+
+done:
+ mutex_unlock(&sru->entity.lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops sru_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = sru_enum_mbus_code,
+ .enum_frame_size = sru_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = sru_set_format,
+};
+
+static const struct v4l2_subdev_ops sru_ops = {
+ .pad = &sru_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void sru_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ const struct vsp1_sru_param *param;
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+ u32 ctrl0;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
+ ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
+ | VI6_SRU_CTRL0_PARAM4;
+ else
+ ctrl0 = VI6_SRU_CTRL0_PARAM3;
+
+ if (input->width != output->width)
+ ctrl0 |= VI6_SRU_CTRL0_MODE_UPSCALE;
+
+ param = &vsp1_sru_params[sru->intensity - 1];
+
+ ctrl0 |= param->ctrl0;
+
+ vsp1_sru_write(sru, dlb, VI6_SRU_CTRL0, ctrl0);
+ vsp1_sru_write(sru, dlb, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+ vsp1_sru_write(sru, dlb, VI6_SRU_CTRL2, param->ctrl2);
+}
+
+static unsigned int sru_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ if (input->width != output->width)
+ return 512;
+ else
+ return 256;
+}
+
+static void sru_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ /* Adapt if SRUx2 is enabled */
+ if (input->width != output->width) {
+ window->width /= 2;
+ window->left /= 2;
+ }
+
+ partition->sru = *window;
+}
+
+static const struct vsp1_entity_operations sru_entity_ops = {
+ .configure_stream = sru_configure_stream,
+ .max_width = sru_max_width,
+ .partition = sru_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_sru *sru;
+ int ret;
+
+ sru = devm_kzalloc(vsp1->dev, sizeof(*sru), GFP_KERNEL);
+ if (sru == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ sru->entity.ops = &sru_entity_ops;
+ sru->entity.type = VSP1_ENTITY_SRU;
+
+ ret = vsp1_entity_init(vsp1, &sru->entity, "sru", 2, &sru_ops,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&sru->ctrls, 1);
+ v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);
+
+ sru->intensity = 1;
+
+ sru->entity.subdev.ctrl_handler = &sru->ctrls;
+
+ if (sru->ctrls.error) {
+ dev_err(vsp1->dev, "sru: failed to initialize controls\n");
+ ret = sru->ctrls.error;
+ vsp1_entity_destroy(&sru->entity);
+ return ERR_PTR(ret);
+ }
+
+ return sru;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_sru.h b/drivers/media/platform/vsp1/vsp1_sru.h
new file mode 100644
index 000000000..ddb00eadd
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_sru.h -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_SRU_H__
+#define __VSP1_SRU_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define SRU_PAD_SINK 0
+#define SRU_PAD_SOURCE 1
+
+struct vsp1_sru {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ unsigned int intensity;
+};
+
+static inline struct vsp1_sru *to_sru(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_sru, entity.subdev);
+}
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_SRU_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
new file mode 100644
index 000000000..c20c84b54
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_uds.c -- R-Car VSP1 Up and Down Scaler
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_uds.h"
+
+#define UDS_MIN_SIZE 4U
+#define UDS_MAX_SIZE 8190U
+
+#define UDS_MIN_FACTOR 0x0100
+#define UDS_MAX_FACTOR 0xffff
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_uds_write(struct vsp1_uds *uds,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + uds->entity.index * VI6_UDS_OFFSET, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Scaling Computation
+ */
+
+void vsp1_uds_set_alpha(struct vsp1_entity *entity, struct vsp1_dl_body *dlb,
+ unsigned int alpha)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+
+ vsp1_uds_write(uds, dlb, VI6_UDS_ALPVAL,
+ alpha << VI6_UDS_ALPVAL_VAL0_SHIFT);
+}
+
+/*
+ * uds_output_size - Return the output size for an input size and scaling ratio
+ * @input: input size in pixels
+ * @ratio: scaling ratio in U4.12 fixed-point format
+ */
+static unsigned int uds_output_size(unsigned int input, unsigned int ratio)
+{
+ if (ratio > 4096) {
+ /* Down-scaling */
+ unsigned int mp;
+
+ mp = ratio / 4096;
+ mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4);
+
+ return (input - 1) / mp * mp * 4096 / ratio + 1;
+ } else {
+ /* Up-scaling */
+ return (input - 1) * 4096 / ratio + 1;
+ }
+}
+
+/*
+ * uds_output_limits - Return the min and max output sizes for an input size
+ * @input: input size in pixels
+ * @minimum: minimum output size (returned)
+ * @maximum: maximum output size (returned)
+ */
+static void uds_output_limits(unsigned int input,
+ unsigned int *minimum, unsigned int *maximum)
+{
+ *minimum = max(uds_output_size(input, UDS_MAX_FACTOR), UDS_MIN_SIZE);
+ *maximum = min(uds_output_size(input, UDS_MIN_FACTOR), UDS_MAX_SIZE);
+}
+
+/*
+ * uds_passband_width - Return the passband filter width for a scaling ratio
+ * @ratio: scaling ratio in U4.12 fixed-point format
+ */
+static unsigned int uds_passband_width(unsigned int ratio)
+{
+ if (ratio >= 4096) {
+ /* Down-scaling */
+ unsigned int mp;
+
+ mp = ratio / 4096;
+ mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4);
+
+ return 64 * 4096 * mp / ratio;
+ } else {
+ /* Up-scaling */
+ return 64;
+ }
+}
+
+static unsigned int uds_compute_ratio(unsigned int input, unsigned int output)
+{
+ /* TODO: This is an approximation that will need to be refined. */
+ return (input - 1) * 4096 / (output - 1);
+}
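An illustrative sketch (not part of the patch) of the U4.12 fixed-point scaling ratio used by the UDS: 4096 (0x1000) is 1:1, larger values scale down, smaller values scale up. The up-scaling branch of uds_output_size() is reproduced to show that the computed ratio maps the input size back to the requested output size.

#include <stdio.h>

static unsigned int compute_ratio(unsigned int input, unsigned int output)
{
	return (input - 1) * 4096 / (output - 1);
}

static unsigned int output_size_upscale(unsigned int input, unsigned int ratio)
{
	return (input - 1) * 4096 / ratio + 1;
}

int main(void)
{
	unsigned int ratio = compute_ratio(640, 1280);	/* x2 up-scaling */

	printf("ratio = 0x%04x (%.3f)\n", ratio, ratio / 4096.0);
	printf("output size for a 640 pixel input: %u\n",
	       output_size_upscale(640, ratio));
	return 0;
}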
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
+}
+
+static int uds_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vsp1_uds *uds = to_uds(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ config = vsp1_entity_get_pad_config(&uds->entity, cfg, fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SINK);
+
+ mutex_lock(&uds->entity.lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (fse->pad == UDS_PAD_SINK) {
+ fse->min_width = UDS_MIN_SIZE;
+ fse->max_width = UDS_MAX_SIZE;
+ fse->min_height = UDS_MIN_SIZE;
+ fse->max_height = UDS_MAX_SIZE;
+ } else {
+ uds_output_limits(format->width, &fse->min_width,
+ &fse->max_width);
+ uds_output_limits(format->height, &fse->min_height,
+ &fse->max_height);
+ }
+
+done:
+ mutex_unlock(&uds->entity.lock);
+ return ret;
+}
+
+static void uds_try_format(struct vsp1_uds *uds,
+ struct v4l2_subdev_pad_config *config,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int minimum;
+ unsigned int maximum;
+
+ switch (pad) {
+ case UDS_PAD_SINK:
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE);
+ fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE);
+ break;
+
+ case UDS_PAD_SOURCE:
+ /* The UDS scales but can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SINK);
+ fmt->code = format->code;
+
+ uds_output_limits(format->width, &minimum, &maximum);
+ fmt->width = clamp(fmt->width, minimum, maximum);
+ uds_output_limits(format->height, &minimum, &maximum);
+ fmt->height = clamp(fmt->height, minimum, maximum);
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int uds_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_uds *uds = to_uds(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&uds->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&uds->entity, cfg, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ uds_try_format(uds, config, fmt->pad, &fmt->format);
+
+ format = vsp1_entity_get_pad_format(&uds->entity, config, fmt->pad);
+ *format = fmt->format;
+
+ if (fmt->pad == UDS_PAD_SINK) {
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SOURCE);
+ *format = fmt->format;
+
+ uds_try_format(uds, config, UDS_PAD_SOURCE, format);
+ }
+
+done:
+ mutex_unlock(&uds->entity.lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops uds_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = uds_enum_mbus_code,
+ .enum_frame_size = uds_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = uds_set_format,
+};
+
+static const struct v4l2_subdev_ops uds_ops = {
+ .pad = &uds_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void uds_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+ unsigned int hscale;
+ unsigned int vscale;
+ bool multitap;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+
+ hscale = uds_compute_ratio(input->width, output->width);
+ vscale = uds_compute_ratio(input->height, output->height);
+
+ dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale);
+
+ /*
+ * Multi-tap scaling can't be enabled along with alpha scaling when
+ * scaling down with a factor lower than or equal to 1/2 in either
+ * direction.
+ */
+ if (uds->scale_alpha && (hscale >= 8192 || vscale >= 8192))
+ multitap = false;
+ else
+ multitap = true;
+
+ vsp1_uds_write(uds, dlb, VI6_UDS_CTRL,
+ (uds->scale_alpha ? VI6_UDS_CTRL_AON : 0) |
+ (multitap ? VI6_UDS_CTRL_BC : 0));
+
+ vsp1_uds_write(uds, dlb, VI6_UDS_PASS_BWIDTH,
+ (uds_passband_width(hscale)
+ << VI6_UDS_PASS_BWIDTH_H_SHIFT) |
+ (uds_passband_width(vscale)
+ << VI6_UDS_PASS_BWIDTH_V_SHIFT));
+
+ /* Set the scaling ratios. */
+ vsp1_uds_write(uds, dlb, VI6_UDS_SCALE,
+ (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
+ (vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
+}
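An illustrative sketch (not part of the patch) of the multi-tap (BC) decision in uds_configure_stream() above: a U4.12 ratio of 8192 corresponds to down-scaling by 1/2, and at or beyond that point multi-tap filtering is dropped when alpha scaling is in use.

#include <stdbool.h>
#include <stdio.h>

static bool uds_multitap(bool scale_alpha, unsigned int hscale,
			 unsigned int vscale)
{
	return !(scale_alpha && (hscale >= 8192 || vscale >= 8192));
}

int main(void)
{
	printf("alpha scaling, 1/2 downscale: %s\n",
	       uds_multitap(true, 8192, 4096) ? "multi-tap" : "single-tap");
	printf("alpha scaling, 3/4 downscale: %s\n",
	       uds_multitap(true, 5461, 5461) ? "multi-tap" : "single-tap");
	return 0;
}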
+
+static void uds_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ struct vsp1_partition *partition = pipe->partition;
+ const struct v4l2_mbus_framefmt *output;
+
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+
+ /* Input size clipping */
+ vsp1_uds_write(uds, dlb, VI6_UDS_HSZCLIP, VI6_UDS_HSZCLIP_HCEN |
+ (0 << VI6_UDS_HSZCLIP_HCL_OFST_SHIFT) |
+ (partition->uds_sink.width
+ << VI6_UDS_HSZCLIP_HCL_SIZE_SHIFT));
+
+ /* Output size clipping */
+ vsp1_uds_write(uds, dlb, VI6_UDS_CLIP_SIZE,
+ (partition->uds_source.width
+ << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
+ (output->height
+ << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+}
+
+static unsigned int uds_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+ unsigned int hscale;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+ hscale = output->width / input->width;
+
+ if (hscale <= 2)
+ return 256;
+ else if (hscale <= 4)
+ return 512;
+ else if (hscale <= 8)
+ return 1024;
+ else
+ return 2048;
+}
+
+/* -----------------------------------------------------------------------------
+ * Partition Algorithm Support
+ */
+
+static void uds_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+
+ /* Initialise the partition state */
+ partition->uds_sink = *window;
+ partition->uds_source = *window;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+
+ partition->uds_sink.width = window->width * input->width
+ / output->width;
+ partition->uds_sink.left = window->left * input->width
+ / output->width;
+
+ *window = partition->uds_sink;
+}
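An illustrative sketch (not part of the patch) of the window mapping done in uds_partition() above: the source-side partition window is converted to sink coordinates by scaling its left edge and width by the input/output width ratio. The sizes below are hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int input_w = 1024, output_w = 2048;	/* x2 up-scaling */
	unsigned int win_left = 512, win_width = 256;	/* source-side window */

	unsigned int sink_left = win_left * input_w / output_w;
	unsigned int sink_width = win_width * input_w / output_w;

	printf("sink window: left %u, width %u\n", sink_left, sink_width);
	return 0;
}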
+
+static const struct vsp1_entity_operations uds_entity_ops = {
+ .configure_stream = uds_configure_stream,
+ .configure_partition = uds_configure_partition,
+ .max_width = uds_max_width,
+ .partition = uds_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_uds *uds;
+ char name[6];
+ int ret;
+
+ uds = devm_kzalloc(vsp1->dev, sizeof(*uds), GFP_KERNEL);
+ if (uds == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ uds->entity.ops = &uds_entity_ops;
+ uds->entity.type = VSP1_ENTITY_UDS;
+ uds->entity.index = index;
+
+ sprintf(name, "uds.%u", index);
+ ret = vsp1_entity_init(vsp1, &uds->entity, name, 2, &uds_ops,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return uds;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_uds.h b/drivers/media/platform/vsp1/vsp1_uds.h
new file mode 100644
index 000000000..c34f95a66
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_uds.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_uds.h -- R-Car VSP1 Up and Down Scaler
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_UDS_H__
+#define __VSP1_UDS_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define UDS_PAD_SINK 0
+#define UDS_PAD_SOURCE 1
+
+struct vsp1_uds {
+ struct vsp1_entity entity;
+ bool scale_alpha;
+};
+
+static inline struct vsp1_uds *to_uds(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_uds, entity.subdev);
+}
+
+struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index);
+
+void vsp1_uds_set_alpha(struct vsp1_entity *uds, struct vsp1_dl_body *dlb,
+ unsigned int alpha);
+
+#endif /* __VSP1_UDS_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_uif.c b/drivers/media/platform/vsp1/vsp1_uif.c
new file mode 100644
index 000000000..4b58d51df
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_uif.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_uif.c -- R-Car VSP1 User Logic Interface
+ *
+ * Copyright (C) 2017-2018 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/sys_soc.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_uif.h"
+
+#define UIF_MIN_SIZE 4U
+#define UIF_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_uif_read(struct vsp1_uif *uif, u32 reg)
+{
+ return vsp1_read(uif->entity.vsp1,
+ uif->entity.index * VI6_UIF_OFFSET + reg);
+}
+
+static inline void vsp1_uif_write(struct vsp1_uif *uif,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + uif->entity.index * VI6_UIF_OFFSET, data);
+}
+
+u32 vsp1_uif_get_crc(struct vsp1_uif *uif)
+{
+ return vsp1_uif_read(uif, VI6_UIF_DISCOM_DOCMCCRCR);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static const unsigned int uif_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int uif_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, uif_codes,
+ ARRAY_SIZE(uif_codes));
+}
+
+static int uif_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, UIF_MIN_SIZE,
+ UIF_MIN_SIZE, UIF_MAX_SIZE,
+ UIF_MAX_SIZE);
+}
+
+static int uif_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, cfg, fmt, uif_codes,
+ ARRAY_SIZE(uif_codes),
+ UIF_MIN_SIZE, UIF_MIN_SIZE,
+ UIF_MAX_SIZE, UIF_MAX_SIZE);
+}
+
+static int uif_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_uif *uif = to_uif(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ if (sel->pad != UIF_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&uif->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&uif->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ format = vsp1_entity_get_pad_format(&uif->entity, config,
+ UIF_PAD_SINK);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_entity_get_pad_selection(&uif->entity, config,
+ sel->pad, sel->target);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ mutex_unlock(&uif->entity.lock);
+ return ret;
+}
+
+static int uif_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_uif *uif = to_uif(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+ int ret = 0;
+
+ if (sel->pad != UIF_PAD_SINK ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ mutex_lock(&uif->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&uif->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* The crop rectangle must be inside the input frame. */
+ format = vsp1_entity_get_pad_format(&uif->entity, config, UIF_PAD_SINK);
+
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+ sel->r.width = clamp_t(unsigned int, sel->r.width, UIF_MIN_SIZE,
+ format->width - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, sel->r.height, UIF_MIN_SIZE,
+ format->height - sel->r.top);
+
+ /* Store the crop rectangle. */
+ selection = vsp1_entity_get_pad_selection(&uif->entity, config,
+ sel->pad, V4L2_SEL_TGT_CROP);
+ *selection = sel->r;
+
+done:
+ mutex_unlock(&uif->entity.lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops uif_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = uif_enum_mbus_code,
+ .enum_frame_size = uif_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = uif_set_format,
+ .get_selection = uif_get_selection,
+ .set_selection = uif_set_selection,
+};
+
+static const struct v4l2_subdev_ops uif_ops = {
+ .pad = &uif_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void uif_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_uif *uif = to_uif(&entity->subdev);
+ const struct v4l2_rect *crop;
+ unsigned int left;
+ unsigned int width;
+
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMPMR,
+ VI6_UIF_DISCOM_DOCMPMR_SEL(9));
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ UIF_PAD_SINK, V4L2_SEL_TGT_CROP);
+
+ left = crop->left;
+ width = crop->width;
+
+ /* On M3-W the horizontal coordinates are twice the register value. */
+ if (uif->m3w_quirk) {
+ left /= 2;
+ width /= 2;
+ }
+
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSPXR, left);
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSPYR, crop->top);
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSZXR, width);
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSZYR, crop->height);
+
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMCR,
+ VI6_UIF_DISCOM_DOCMCR_CMPR);
+}
+
+static const struct vsp1_entity_operations uif_entity_ops = {
+ .configure_stream = uif_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const struct soc_device_attribute vsp1_r8a7796[] = {
+ { .soc_id = "r8a7796" },
+ { /* sentinel */ }
+};
+
+struct vsp1_uif *vsp1_uif_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_uif *uif;
+ char name[6];
+ int ret;
+
+ uif = devm_kzalloc(vsp1->dev, sizeof(*uif), GFP_KERNEL);
+ if (!uif)
+ return ERR_PTR(-ENOMEM);
+
+ if (soc_device_match(vsp1_r8a7796))
+ uif->m3w_quirk = true;
+
+ uif->entity.ops = &uif_entity_ops;
+ uif->entity.type = VSP1_ENTITY_UIF;
+ uif->entity.index = index;
+
+ /* The datasheet names the two UIF instances UIF4 and UIF5. */
+ sprintf(name, "uif.%u", index + 4);
+ ret = vsp1_entity_init(vsp1, &uif->entity, name, 2, &uif_ops,
+ MEDIA_ENT_F_PROC_VIDEO_STATISTICS);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return uif;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_uif.h b/drivers/media/platform/vsp1/vsp1_uif.h
new file mode 100644
index 000000000..c71ab5f6a
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_uif.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_uif.h -- R-Car VSP1 User Logic Interface
+ *
+ * Copyright (C) 2017-2018 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_UIF_H__
+#define __VSP1_UIF_H__
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define UIF_PAD_SINK 0
+#define UIF_PAD_SOURCE 1
+
+struct vsp1_uif {
+ struct vsp1_entity entity;
+ bool m3w_quirk;
+};
+
+static inline struct vsp1_uif *to_uif(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_uif, entity.subdev);
+}
+
+struct vsp1_uif *vsp1_uif_create(struct vsp1_device *vsp1, unsigned int index);
+u32 vsp1_uif_get_crc(struct vsp1_uif *uif);
+
+#endif /* __VSP1_UIF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
new file mode 100644
index 000000000..81d47a09d
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -0,0 +1,1353 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_video.c -- R-Car VSP1 Video Node
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uds.h"
+#include "vsp1_video.h"
+
+#define VSP1_VIDEO_DEF_FORMAT V4L2_PIX_FMT_YUYV
+#define VSP1_VIDEO_DEF_WIDTH 1024
+#define VSP1_VIDEO_DEF_HEIGHT 768
+
+#define VSP1_VIDEO_MIN_WIDTH 2U
+#define VSP1_VIDEO_MAX_WIDTH 8190U
+#define VSP1_VIDEO_MIN_HEIGHT 2U
+#define VSP1_VIDEO_MAX_HEIGHT 8190U
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static struct v4l2_subdev *
+vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(local);
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int vsp1_video_verify_format(struct vsp1_video *video)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
+ video->rwpf->format.height != fmt.format.height ||
+ video->rwpf->format.width != fmt.format.width)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __vsp1_video_try_format(struct vsp1_video *video,
+ struct v4l2_pix_format_mplane *pix,
+ const struct vsp1_format_info **fmtinfo)
+{
+ static const u32 xrgb_formats[][2] = {
+ { V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
+ { V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
+ { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
+ { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
+ };
+
+ const struct vsp1_format_info *info;
+ unsigned int width = pix->width;
+ unsigned int height = pix->height;
+ unsigned int i;
+
+ /*
+ * Backward compatibility: replace deprecated RGB formats by their XRGB
+ * equivalent. This selects the format older userspace applications want
+ * while still exposing the new format.
+ */
+ for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
+ if (xrgb_formats[i][0] == pix->pixelformat) {
+ pix->pixelformat = xrgb_formats[i][1];
+ break;
+ }
+ }
+
+ /*
+ * Retrieve format information and select the default format if the
+ * requested format isn't supported.
+ */
+ info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
+ if (info == NULL)
+ info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
+
+ pix->pixelformat = info->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->field = V4L2_FIELD_NONE;
+
+ if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
+ info->fourcc == V4L2_PIX_FMT_HSV32)
+ pix->hsv_enc = V4L2_HSV_ENC_256;
+
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+
+ /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
+ width = round_down(width, info->hsub);
+ height = round_down(height, info->vsub);
+
+ /* Clamp the width and height. */
+ pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
+ pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
+ VSP1_VIDEO_MAX_HEIGHT);
+
+ /*
+ * Compute and clamp the stride and image size. While not documented in
+ * the datasheet, strides not aligned to a multiple of 128 bytes result
+ * in image corruption.
+ */
+ for (i = 0; i < min(info->planes, 2U); ++i) {
+ unsigned int hsub = i > 0 ? info->hsub : 1;
+ unsigned int vsub = i > 0 ? info->vsub : 1;
+ unsigned int align = 128;
+ unsigned int bpl;
+
+ bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+ pix->width / hsub * info->bpp[i] / 8,
+ round_down(65535U, align));
+
+ pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+ pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+ * pix->height / vsub;
+ }
+
+ if (info->planes == 3) {
+ /* The second and third planes must have the same stride. */
+ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+ pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+ }
+
+ pix->num_planes = info->planes;
+
+ if (fmtinfo)
+ *fmtinfo = info;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Partition Algorithm support
+ */
+
+/**
+ * vsp1_video_calculate_partition - Calculate the active partition output window
+ *
+ * @pipe: the pipeline
+ * @partition: partition that will hold the calculated values
+ * @div_size: pre-determined maximum partition division size
+ * @index: partition index
+ */
+static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int div_size,
+ unsigned int index)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_partition_window window;
+ unsigned int modulus;
+
+ /*
+ * Partitions are computed on the size before rotation, use the format
+ * at the WPF sink.
+ */
+ format = vsp1_entity_get_pad_format(&pipe->output->entity,
+ pipe->output->entity.config,
+ RWPF_PAD_SINK);
+
+ /* A single partition simply processes the output size in full. */
+ if (pipe->partitions <= 1) {
+ window.left = 0;
+ window.width = format->width;
+
+ vsp1_pipeline_propagate_partition(pipe, partition, index,
+ &window);
+ return;
+ }
+
+ /* Initialise the partition with sane starting conditions. */
+ window.left = index * div_size;
+ window.width = div_size;
+
+ modulus = format->width % div_size;
+
+ /*
+ * We need to prevent the last partition from being smaller than the
+ * *minimum* width of the hardware capabilities.
+ *
+ * If the modulus is less than half of the partition size,
+ * the penultimate partition is reduced to half, which is added
+ * to the final partition: |1234|1234|1234|12|341|
+ * to prevent this: |1234|1234|1234|1234|1|.
+ */
+ if (modulus) {
+ /*
+ * pipe->partitions is 1-based, whilst index is 0-based.
+ * Normalise this locally.
+ */
+ unsigned int partitions = pipe->partitions - 1;
+
+ if (modulus < div_size / 2) {
+ if (index == partitions - 1) {
+ /* Halve the penultimate partition. */
+ window.width = div_size / 2;
+ } else if (index == partitions) {
+ /* Increase the final partition. */
+ window.width = (div_size / 2) + modulus;
+ window.left -= div_size / 2;
+ }
+ } else if (index == partitions) {
+ window.width = modulus;
+ }
+ }
+
+ vsp1_pipeline_propagate_partition(pipe, partition, index, &window);
+}
+
+static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_entity *entity;
+ unsigned int div_size;
+ unsigned int i;
+
+ /*
+ * Partitions are computed on the size before rotation, use the format
+ * at the WPF sink.
+ */
+ format = vsp1_entity_get_pad_format(&pipe->output->entity,
+ pipe->output->entity.config,
+ RWPF_PAD_SINK);
+ div_size = format->width;
+
+ /*
+ * Only Gen3 hardware requires image partitioning; Gen2 will operate
+ * with a single partition that covers the whole output.
+ */
+ if (vsp1->info->gen == 3) {
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ unsigned int entity_max;
+
+ if (!entity->ops->max_width)
+ continue;
+
+ entity_max = entity->ops->max_width(entity, pipe);
+ if (entity_max)
+ div_size = min(div_size, entity_max);
+ }
+ }
+
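+ /*
+ * Allocate the partition table, with one entry for each div_size-wide
+ * slice of the output image.
+ */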
+ pipe->partitions = DIV_ROUND_UP(format->width, div_size);
+ pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table),
+ GFP_KERNEL);
+ if (!pipe->part_table)
+ return -ENOMEM;
+
+ for (i = 0; i < pipe->partitions; ++i)
+ vsp1_video_calculate_partition(pipe, &pipe->part_table[i],
+ div_size, i);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Management
+ */
+
+/*
+ * vsp1_video_complete_buffer - Complete the current buffer
+ * @video: the video node
+ *
+ * This function completes the current buffer by filling its sequence number,
+ * time stamp and payload size, and hands it back to the videobuf core.
+ *
+ * When operating in DU output mode (deep pipeline to the DU through the LIF),
+ * the VSP1 needs to constantly supply frames to the display. In that case, if
+ * no other buffer is queued, reuse the one that has just been processed instead
+ * of handing it back to the videobuf core.
+ *
+ * Return the next queued buffer or NULL if the queue is empty.
+ */
+static struct vsp1_vb2_buffer *
+vsp1_video_complete_buffer(struct vsp1_video *video)
+{
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ struct vsp1_vb2_buffer *next = NULL;
+ struct vsp1_vb2_buffer *done;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+
+ if (list_empty(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return NULL;
+ }
+
+ done = list_first_entry(&video->irqqueue,
+ struct vsp1_vb2_buffer, queue);
+
+ /* In DU output mode reuse the buffer if the list is singular. */
+ if (pipe->lif && list_is_singular(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return done;
+ }
+
+ list_del(&done->queue);
+
+ if (!list_empty(&video->irqqueue))
+ next = list_first_entry(&video->irqqueue,
+ struct vsp1_vb2_buffer, queue);
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ done->buf.sequence = pipe->sequence;
+ done->buf.vb2_buf.timestamp = ktime_get_ns();
+ for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
+ vb2_set_plane_payload(&done->buf.vb2_buf, i,
+ vb2_plane_size(&done->buf.vb2_buf, i));
+ vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
+
+ return next;
+}
+
+static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *rwpf)
+{
+ struct vsp1_video *video = rwpf->video;
+ struct vsp1_vb2_buffer *buf;
+
+ buf = vsp1_video_complete_buffer(video);
+ if (buf == NULL)
+ return;
+
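+ /* Hand the next buffer's memory to the hardware and mark this node ready. */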
+ video->rwpf->mem = buf->mem;
+ pipe->buffers_ready |= 1 << video->pipe_index;
+}
+
+static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ unsigned int partition)
+{
+ struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
+ struct vsp1_entity *entity;
+
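+ /* Select the partition and let each entity configure it in the display list. */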
+ pipe->partition = &pipe->part_table[partition];
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe)
+ vsp1_entity_configure_partition(entity, pipe, dl, dlb);
+}
+
+static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ struct vsp1_entity *entity;
+ struct vsp1_dl_body *dlb;
+ struct vsp1_dl_list *dl;
+ unsigned int partition;
+
+ dl = vsp1_dl_list_get(pipe->output->dlm);
+
+ /*
+ * If the VSP hardware isn't configured yet (which occurs either when
+ * processing the first frame or after a system suspend/resume), add the
+ * cached stream configuration to the display list to perform a full
+ * initialisation.
+ */
+ if (!pipe->configured)
+ vsp1_dl_list_add_body(dl, pipe->stream_config);
+
+ dlb = vsp1_dl_list_get_body0(dl);
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe)
+ vsp1_entity_configure_frame(entity, pipe, dl, dlb);
+
+ /* Run the first partition. */
+ vsp1_video_pipeline_run_partition(pipe, dl, 0);
+
+ /* Process consecutive partitions as necessary. */
+ for (partition = 1; partition < pipe->partitions; ++partition) {
+ struct vsp1_dl_list *dl_next;
+
+ dl_next = vsp1_dl_list_get(pipe->output->dlm);
+
+ /*
+ * An incomplete chain will still function, but output only
+ * the partitions that had a dl available. The frame end
+ * interrupt will be marked on the last dl in the chain.
+ */
+ if (!dl_next) {
+ dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
+ break;
+ }
+
+ vsp1_video_pipeline_run_partition(pipe, dl_next, partition);
+ vsp1_dl_list_add_chain(dl, dl_next);
+ }
+
+ /* Complete, and commit the head display list. */
+ vsp1_dl_list_commit(dl, false);
+ pipe->configured = true;
+
+ vsp1_pipeline_run(pipe);
+}
+
+static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
+ unsigned int completion)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ enum vsp1_pipeline_state state;
+ unsigned long flags;
+ unsigned int i;
+
+ /* M2M Pipelines should never call here with an incomplete frame. */
+ WARN_ON_ONCE(!(completion & VSP1_DL_FRAME_END_COMPLETED));
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ /* Complete buffers on all video nodes. */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ if (!pipe->inputs[i])
+ continue;
+
+ vsp1_video_frame_end(pipe, pipe->inputs[i]);
+ }
+
+ vsp1_video_frame_end(pipe, pipe->output);
+
+ state = pipe->state;
+ pipe->state = VSP1_PIPELINE_STOPPED;
+
+ /*
+ * If a stop has been requested, mark the pipeline as stopped and
+ * return. Otherwise restart the pipeline if ready.
+ */
+ if (state == VSP1_PIPELINE_STOPPING)
+ wake_up(&pipe->wq);
+ else if (vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *input,
+ struct vsp1_rwpf *output)
+{
+ struct media_entity_enum ent_enum;
+ struct vsp1_entity *entity;
+ struct media_pad *pad;
+ struct vsp1_brx *brx = NULL;
+ int ret;
+
+ ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The main data path doesn't include the HGO or HGT, use
+ * vsp1_entity_remote_pad() to traverse the graph.
+ */
+
+ pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
+
+ while (1) {
+ if (pad == NULL) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /* We've reached a video node, that shouldn't have happened. */
+ if (!is_media_entity_v4l2_subdev(pad->entity)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ entity = to_vsp1_entity(
+ media_entity_to_v4l2_subdev(pad->entity));
+
+ /*
+ * If a BRU or BRS is present in the pipeline, store its input pad
+ * number in the input RPF for use when configuring the RPF.
+ */
+ if (entity->type == VSP1_ENTITY_BRU ||
+ entity->type == VSP1_ENTITY_BRS) {
+ /* BRU and BRS can't be chained. */
+ if (brx) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ brx = to_brx(&entity->subdev);
+ brx->inputs[pad->index].rpf = input;
+ input->brx_input = pad->index;
+ }
+
+ /* We've reached the WPF, we're done. */
+ if (entity->type == VSP1_ENTITY_WPF)
+ break;
+
+ /* Ensure the branch has no loop. */
+ if (media_entity_enum_test_and_set(&ent_enum,
+ &entity->subdev.entity)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /* UDS can't be chained. */
+ if (entity->type == VSP1_ENTITY_UDS) {
+ if (pipe->uds) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ pipe->uds = entity;
+ pipe->uds_input = brx ? &brx->entity : &input->entity;
+ }
+
+ /* Follow the source link, ignoring any HGO or HGT. */
+ pad = &entity->pads[entity->source_pad];
+ pad = vsp1_entity_remote_pad(pad);
+ }
+
+ /* The last entity must be the output WPF. */
+ if (entity != &output->entity)
+ ret = -EPIPE;
+
+out:
+ media_entity_enum_cleanup(&ent_enum);
+
+ return ret;
+}
+
+static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &video->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ unsigned int i;
+ int ret;
+
+ /* Walk the graph to locate the entities and video nodes. */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret)
+ return ret;
+
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct v4l2_subdev *subdev;
+ struct vsp1_rwpf *rwpf;
+ struct vsp1_entity *e;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ subdev = media_entity_to_v4l2_subdev(entity);
+ e = to_vsp1_entity(subdev);
+ list_add_tail(&e->list_pipe, &pipe->entities);
+ e->pipe = pipe;
+
+ switch (e->type) {
+ case VSP1_ENTITY_RPF:
+ rwpf = to_rwpf(subdev);
+ pipe->inputs[rwpf->entity.index] = rwpf;
+ rwpf->video->pipe_index = ++pipe->num_inputs;
+ break;
+
+ case VSP1_ENTITY_WPF:
+ rwpf = to_rwpf(subdev);
+ pipe->output = rwpf;
+ rwpf->video->pipe_index = 0;
+ break;
+
+ case VSP1_ENTITY_LIF:
+ pipe->lif = e;
+ break;
+
+ case VSP1_ENTITY_BRU:
+ case VSP1_ENTITY_BRS:
+ pipe->brx = e;
+ break;
+
+ case VSP1_ENTITY_HGO:
+ pipe->hgo = e;
+ break;
+
+ case VSP1_ENTITY_HGT:
+ pipe->hgt = e;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ media_graph_walk_cleanup(&graph);
+
+ /* We need one output and at least one input. */
+ if (pipe->num_inputs == 0 || !pipe->output)
+ return -EPIPE;
+
+ /*
+ * Follow links downstream for each input and make sure the graph
+ * contains no loop and that all branches end at the output WPF.
+ */
+ for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
+ if (!pipe->inputs[i])
+ continue;
+
+ ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
+ pipe->output);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
+{
+ vsp1_pipeline_init(pipe);
+
+ pipe->frame_end = vsp1_video_pipeline_frame_end;
+
+ return vsp1_video_pipeline_build(pipe, video);
+}
+
+static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
+{
+ struct vsp1_pipeline *pipe;
+ int ret;
+
+ /*
+ * Get a pipeline object for the video node. If a pipeline has already
+ * been allocated, just increment its reference count and return it.
+ * Otherwise allocate a new pipeline and initialize it; it will be freed
+ * when the last reference is released.
+ */
+ if (!video->rwpf->entity.pipe) {
+ pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+ if (!pipe)
+ return ERR_PTR(-ENOMEM);
+
+ ret = vsp1_video_pipeline_init(pipe, video);
+ if (ret < 0) {
+ vsp1_pipeline_reset(pipe);
+ kfree(pipe);
+ return ERR_PTR(ret);
+ }
+ } else {
+ pipe = video->rwpf->entity.pipe;
+ kref_get(&pipe->kref);
+ }
+
+ return pipe;
+}
+
+static void vsp1_video_pipeline_release(struct kref *kref)
+{
+ struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);
+
+ vsp1_pipeline_reset(pipe);
+ kfree(pipe);
+}
+
+static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
+{
+ struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;
+
+ mutex_lock(&mdev->graph_mutex);
+ kref_put(&pipe->kref, vsp1_video_pipeline_release);
+ mutex_unlock(&mdev->graph_mutex);
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 Queue Operations
+ */
+
+static int
+vsp1_video_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
+ unsigned int i;
+
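+ /*
+ * A non-zero *nplanes means the request comes from VIDIOC_CREATE_BUFS;
+ * validate the provided plane count and sizes against the current format.
+ */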
+ if (*nplanes) {
+ if (*nplanes != format->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++)
+ if (sizes[i] < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *nplanes = format->num_planes;
+
+ for (i = 0; i < format->num_planes; ++i)
+ sizes[i] = format->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
+ const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
+ unsigned int i;
+
+ if (vb->num_planes < format->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < vb->num_planes; ++i) {
+ buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+
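+ /* Clear the unused plane addresses for formats with fewer than three planes. */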
+ for ( ; i < 3; ++i)
+ buf->mem.addr[i] = 0;
+
+ return 0;
+}
+
+static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+ empty = list_empty(&video->irqqueue);
+ list_add_tail(&buf->queue, &video->irqqueue);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
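+ /*
+ * If the IRQ queue wasn't empty the buffer will be picked up at the next
+ * frame end; there is nothing more to do.
+ */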
+ if (!empty)
+ return;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ video->rwpf->mem = buf->mem;
+ pipe->buffers_ready |= 1 << video->pipe_index;
+
+ if (vb2_is_streaming(&video->queue) &&
+ vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_entity *entity;
+ int ret;
+
+ /* Determine this pipeline's sizes for image partitioning support. */
+ ret = vsp1_video_pipeline_setup_partitions(pipe);
+ if (ret < 0)
+ return ret;
+
+ if (pipe->uds) {
+ struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
+
+ /*
+ * If a BRU or BRS is present in the pipeline before the UDS,
+ * the alpha component doesn't need to be scaled as the BRU and
+ * BRS output alpha value is fixed to 255. Otherwise we need to
+ * scale the alpha component only when available at the input
+ * RPF.
+ */
+ if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
+ pipe->uds_input->type == VSP1_ENTITY_BRS) {
+ uds->scale_alpha = false;
+ } else {
+ struct vsp1_rwpf *rpf =
+ to_rwpf(&pipe->uds_input->subdev);
+
+ uds->scale_alpha = rpf->fmtinfo->alpha;
+ }
+ }
+
+ /*
+ * Compute and cache the stream configuration into a body. The cached
+ * body will be added to the display list by vsp1_video_pipeline_run()
+ * whenever the pipeline needs to be fully reconfigured.
+ */
+ pipe->stream_config = vsp1_dlm_dl_body_get(pipe->output->dlm);
+ if (!pipe->stream_config)
+ return -ENOMEM;
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ vsp1_entity_route_setup(entity, pipe, pipe->stream_config);
+ vsp1_entity_configure_stream(entity, pipe, pipe->stream_config);
+ }
+
+ return 0;
+}
+
+static void vsp1_video_release_buffers(struct vsp1_video *video)
+{
+ struct vsp1_vb2_buffer *buffer;
+ unsigned long flags;
+
+ /* Remove all buffers from the IRQ queue. */
+ spin_lock_irqsave(&video->irqlock, flags);
+ list_for_each_entry(buffer, &video->irqqueue, queue)
+ vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&video->irqqueue);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+}
+
+static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
+{
+ lockdep_assert_held(&pipe->lock);
+
+ /* Release any cached configuration from our output video. */
+ vsp1_dl_body_put(pipe->stream_config);
+ pipe->stream_config = NULL;
+ pipe->configured = false;
+
+ /* Release our partition table allocation */
+ kfree(pipe->part_table);
+ pipe->part_table = NULL;
+}
+
+static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ bool start_pipeline = false;
+ unsigned long flags;
+ int ret;
+
+ mutex_lock(&pipe->lock);
+ if (pipe->stream_count == pipe->num_inputs) {
+ ret = vsp1_video_setup_pipeline(pipe);
+ if (ret < 0) {
+ vsp1_video_release_buffers(video);
+ vsp1_video_cleanup_pipeline(pipe);
+ mutex_unlock(&pipe->lock);
+ return ret;
+ }
+
+ start_pipeline = true;
+ }
+
+ pipe->stream_count++;
+ mutex_unlock(&pipe->lock);
+
+ /*
+ * vsp1_pipeline_ready() is not sufficient to establish that all streams
+ * are prepared and the pipeline is configured, as multiple streams
+ * can race through streamon with buffers already queued. Therefore we
+ * don't even attempt to start the pipeline until the last stream has
+ * called through here.
+ */
+ if (!start_pipeline)
+ return 0;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return 0;
+}
+
+static void vsp1_video_stop_streaming(struct vb2_queue *vq)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Clear the buffers ready flag to make sure the device won't be started
+ * by a QBUF on the video node on the other side of the pipeline.
+ */
+ spin_lock_irqsave(&video->irqlock, flags);
+ pipe->buffers_ready &= ~(1 << video->pipe_index);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ mutex_lock(&pipe->lock);
+ if (--pipe->stream_count == pipe->num_inputs) {
+ /* Stop the pipeline. */
+ ret = vsp1_pipeline_stop(pipe);
+ if (ret == -ETIMEDOUT)
+ dev_err(video->vsp1->dev, "pipeline stop timeout\n");
+
+ vsp1_video_cleanup_pipeline(pipe);
+ }
+ mutex_unlock(&pipe->lock);
+
+ media_pipeline_stop(&video->video.entity);
+ vsp1_video_release_buffers(video);
+ vsp1_video_pipeline_put(pipe);
+}
+
+static const struct vb2_ops vsp1_video_queue_qops = {
+ .queue_setup = vsp1_video_queue_setup,
+ .buf_prepare = vsp1_video_buffer_prepare,
+ .buf_queue = vsp1_video_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vsp1_video_start_streaming,
+ .stop_streaming = vsp1_video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_STREAMING;
+ else
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
+ | V4L2_CAP_STREAMING;
+
+ strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
+ strlcpy(cap->card, video->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(video->vsp1->dev));
+
+ return 0;
+}
+
+static int
+vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ mutex_lock(&video->lock);
+ format->fmt.pix_mp = video->rwpf->format;
+ mutex_unlock(&video->lock);
+
+ return 0;
+}
+
+static int
+vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
+}
+
+static int
+vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ const struct vsp1_format_info *info;
+ int ret;
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&video->lock);
+
+ if (vb2_is_busy(&video->queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ video->rwpf->format = format->fmt.pix_mp;
+ video->rwpf->fmtinfo = info;
+
+done:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+static int
+vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ struct media_device *mdev = &video->vsp1->media_dev;
+ struct vsp1_pipeline *pipe;
+ int ret;
+
+ if (video->queue.owner && video->queue.owner != file->private_data)
+ return -EBUSY;
+
+ /*
+ * Get a pipeline for the video node and start streaming on it. No link
+ * touching an entity in the pipeline can be activated or deactivated
+ * once streaming is started.
+ */
+ mutex_lock(&mdev->graph_mutex);
+
+ pipe = vsp1_video_pipeline_get(video);
+ if (IS_ERR(pipe)) {
+ mutex_unlock(&mdev->graph_mutex);
+ return PTR_ERR(pipe);
+ }
+
+ ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0) {
+ mutex_unlock(&mdev->graph_mutex);
+ goto err_pipe;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ /*
+ * Verify that the configured format matches the output of the connected
+ * subdev.
+ */
+ ret = vsp1_video_verify_format(video);
+ if (ret < 0)
+ goto err_stop;
+
+ /* Start the queue. */
+ ret = vb2_streamon(&video->queue, type);
+ if (ret < 0)
+ goto err_stop;
+
+ return 0;
+
+err_stop:
+ media_pipeline_stop(&video->video.entity);
+err_pipe:
+ vsp1_video_pipeline_put(pipe);
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
+ .vidioc_querycap = vsp1_video_querycap,
+ .vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
+ .vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
+ .vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
+ .vidioc_g_fmt_vid_out_mplane = vsp1_video_get_format,
+ .vidioc_s_fmt_vid_out_mplane = vsp1_video_set_format,
+ .vidioc_try_fmt_vid_out_mplane = vsp1_video_try_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vsp1_video_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 File Operations
+ */
+
+static int vsp1_video_open(struct file *file)
+{
+ struct vsp1_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh;
+ int ret = 0;
+
+ vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
+ if (vfh == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(vfh, &video->video);
+ v4l2_fh_add(vfh);
+
+ file->private_data = vfh;
+
+ ret = vsp1_device_get(video->vsp1);
+ if (ret < 0) {
+ v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
+ kfree(vfh);
+ }
+
+ return ret;
+}
+
+static int vsp1_video_release(struct file *file)
+{
+ struct vsp1_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+
+ mutex_lock(&video->lock);
+ if (video->queue.owner == vfh) {
+ vb2_queue_release(&video->queue);
+ video->queue.owner = NULL;
+ }
+ mutex_unlock(&video->lock);
+
+ vsp1_device_put(video->vsp1);
+
+ v4l2_fh_release(file);
+
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vsp1_video_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = vsp1_video_open,
+ .release = vsp1_video_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Suspend and Resume
+ */
+
+void vsp1_video_suspend(struct vsp1_device *vsp1)
+{
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ /*
+ * To avoid increasing the system suspend time needlessly, loop over the
+ * pipelines twice, first to set them all to the stopping state, and
+ * then to wait for the stop to complete.
+ */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = wpf->entity.pipe;
+ if (pipe == NULL)
+ continue;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->state == VSP1_PIPELINE_RUNNING)
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = wpf->entity.pipe;
+ if (pipe == NULL)
+ continue;
+
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
+ wpf->entity.index);
+ }
+}
+
+void vsp1_video_resume(struct vsp1_device *vsp1)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ /* Resume all running pipelines. */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = wpf->entity.pipe;
+ if (pipe == NULL)
+ continue;
+
+ /*
+ * The hardware may have been reset during a suspend and will
+ * need a full reconfiguration.
+ */
+ pipe->configured = false;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf)
+{
+ struct vsp1_video *video;
+ const char *direction;
+ int ret;
+
+ video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
+ if (!video)
+ return ERR_PTR(-ENOMEM);
+
+ rwpf->video = video;
+
+ video->vsp1 = vsp1;
+ video->rwpf = rwpf;
+
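+ /*
+ * RPFs read from memory and are thus exposed as V4L2 output video nodes,
+ * while WPFs write to memory and are exposed as capture video nodes.
+ */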
+ if (rwpf->entity.type == VSP1_ENTITY_RPF) {
+ direction = "input";
+ video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->video.vfl_dir = VFL_DIR_TX;
+ } else {
+ direction = "output";
+ video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->video.vfl_dir = VFL_DIR_RX;
+ }
+
+ mutex_init(&video->lock);
+ spin_lock_init(&video->irqlock);
+ INIT_LIST_HEAD(&video->irqqueue);
+
+ /* Initialize the media entity... */
+ ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* ... and the format ... */
+ rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
+ rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
+ rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
+ __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);
+
+ /* ... and the video node... */
+ video->video.v4l2_dev = &video->vsp1->v4l2_dev;
+ video->video.fops = &vsp1_video_fops;
+ snprintf(video->video.name, sizeof(video->video.name), "%s %s",
+ rwpf->entity.subdev.name, direction);
+ video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.release = video_device_release_empty;
+ video->video.ioctl_ops = &vsp1_video_ioctl_ops;
+
+ video_set_drvdata(&video->video, video);
+
+ video->queue.type = video->type;
+ video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ video->queue.lock = &video->lock;
+ video->queue.drv_priv = video;
+ video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
+ video->queue.ops = &vsp1_video_queue_qops;
+ video->queue.mem_ops = &vb2_dma_contig_memops;
+ video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ video->queue.dev = video->vsp1->bus_master;
+ ret = vb2_queue_init(&video->queue);
+ if (ret < 0) {
+ dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
+ goto error;
+ }
+
+ /* ... and register the video device. */
+ video->video.queue = &video->queue;
+ ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(video->vsp1->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return video;
+
+error:
+ vsp1_video_cleanup(video);
+ return ERR_PTR(ret);
+}
+
+void vsp1_video_cleanup(struct vsp1_video *video)
+{
+ if (video_is_registered(&video->video))
+ video_unregister_device(&video->video);
+
+ media_entity_cleanup(&video->video.entity);
+}
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
new file mode 100644
index 000000000..f3cf5e2fd
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_video.h -- R-Car VSP1 Video Node
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_VIDEO_H__
+#define __VSP1_VIDEO_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "vsp1_rwpf.h"
+
+struct vsp1_vb2_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ struct vsp1_rwpf_memory mem;
+};
+
+static inline struct vsp1_vb2_buffer *
+to_vsp1_vb2_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vsp1_vb2_buffer, buf);
+}
+
+struct vsp1_video {
+ struct list_head list;
+ struct vsp1_device *vsp1;
+ struct vsp1_rwpf *rwpf;
+
+ struct video_device video;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+
+ struct mutex lock;
+
+ unsigned int pipe_index;
+
+ struct vb2_queue queue;
+ spinlock_t irqlock;
+ struct list_head irqqueue;
+};
+
+static inline struct vsp1_video *to_vsp1_video(struct video_device *vdev)
+{
+ return container_of(vdev, struct vsp1_video, video);
+}
+
+void vsp1_video_suspend(struct vsp1_device *vsp1);
+void vsp1_video_resume(struct vsp1_device *vsp1);
+
+struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf);
+void vsp1_video_cleanup(struct vsp1_video *video);
+
+#endif /* __VSP1_VIDEO_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
new file mode 100644
index 000000000..c2a1a7f97
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_wpf.c -- R-Car VSP1 Write Pixel Formatter
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define WPF_GEN2_MAX_WIDTH 2048U
+#define WPF_GEN2_MAX_HEIGHT 2048U
+#define WPF_GEN3_MAX_WIDTH 8190U
+#define WPF_GEN3_MAX_HEIGHT 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
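+ /* Write a WPF register, adding the per-instance register block offset. */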
+static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + wpf->entity.index * VI6_WPF_OFFSET, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+enum wpf_flip_ctrl {
+ WPF_CTRL_VFLIP = 0,
+ WPF_CTRL_HFLIP = 1,
+};
+
+static int vsp1_wpf_set_rotation(struct vsp1_rwpf *wpf, unsigned int rotation)
+{
+ struct vsp1_video *video = wpf->video;
+ struct v4l2_mbus_framefmt *sink_format;
+ struct v4l2_mbus_framefmt *source_format;
+ bool rotate;
+ int ret = 0;
+
+ /*
+ * Only consider the 0°/180° from/to 90°/270° modifications; the rest
+ * is taken care of by the flipping configuration.
+ */
+ rotate = rotation == 90 || rotation == 270;
+ if (rotate == wpf->flip.rotate)
+ return 0;
+
+ /* Changing rotation isn't allowed when buffers are allocated. */
+ mutex_lock(&video->lock);
+
+ if (vb2_is_busy(&video->queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SOURCE);
+
+ mutex_lock(&wpf->entity.lock);
+
+ if (rotate) {
+ source_format->width = sink_format->height;
+ source_format->height = sink_format->width;
+ } else {
+ source_format->width = sink_format->width;
+ source_format->height = sink_format->height;
+ }
+
+ wpf->flip.rotate = rotate;
+
+ mutex_unlock(&wpf->entity.lock);
+
+done:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+static int vsp1_wpf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_rwpf *wpf =
+ container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
+ unsigned int rotation;
+ u32 flip = 0;
+ int ret;
+
+ /* Update the rotation. */
+ rotation = wpf->flip.ctrls.rotate ? wpf->flip.ctrls.rotate->val : 0;
+ ret = vsp1_wpf_set_rotation(wpf, rotation);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Compute the flip value resulting from all three controls, with
+ * rotation by 180° flipping the image in both directions. Store the
+ * result in the pending flip field for the next frame that will be
+ * processed.
+ */
+ if (wpf->flip.ctrls.vflip->val)
+ flip |= BIT(WPF_CTRL_VFLIP);
+
+ if (wpf->flip.ctrls.hflip && wpf->flip.ctrls.hflip->val)
+ flip |= BIT(WPF_CTRL_HFLIP);
+
+ if (rotation == 180 || rotation == 270)
+ flip ^= BIT(WPF_CTRL_VFLIP) | BIT(WPF_CTRL_HFLIP);
+
+ spin_lock_irq(&wpf->flip.lock);
+ wpf->flip.pending = flip;
+ spin_unlock_irq(&wpf->flip.lock);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vsp1_wpf_ctrl_ops = {
+ .s_ctrl = vsp1_wpf_s_ctrl,
+};
+
+static int wpf_init_controls(struct vsp1_rwpf *wpf)
+{
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ unsigned int num_flip_ctrls;
+
+ spin_lock_init(&wpf->flip.lock);
+
+ if (wpf->entity.index != 0) {
+ /* Only WPF0 supports flipping. */
+ num_flip_ctrls = 0;
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP)) {
+ /*
+ * When horizontal flip is supported the WPF implements three
+ * controls (horizontal flip, vertical flip and rotation).
+ */
+ num_flip_ctrls = 3;
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_VFLIP)) {
+ /*
+ * When only vertical flip is supported the WPF implements a
+ * single control (vertical flip).
+ */
+ num_flip_ctrls = 1;
+ } else {
+ /* Otherwise flipping is not supported. */
+ num_flip_ctrls = 0;
+ }
+
+ vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
+
+ if (num_flip_ctrls >= 1) {
+ wpf->flip.ctrls.vflip =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
+
+ if (num_flip_ctrls == 3) {
+ wpf->flip.ctrls.hflip =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ wpf->flip.ctrls.rotate =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ v4l2_ctrl_cluster(3, &wpf->flip.ctrls.vflip);
+ }
+
+ if (wpf->ctrls.error) {
+ dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
+ wpf->entity.index);
+ return wpf->ctrls.error;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+
+ if (enable)
+ return 0;
+
+ /*
+ * Write to registers directly when stopping the stream as there will be
+ * no pipeline run to apply the display list.
+ */
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
+ vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
+ VI6_WPF_SRCRPF, 0);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_video_ops wpf_video_ops = {
+ .s_stream = wpf_s_stream,
+};
+
+static const struct v4l2_subdev_ops wpf_ops = {
+ .video = &wpf_video_ops,
+ .pad = &vsp1_rwpf_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void vsp1_wpf_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
+
+ vsp1_dlm_destroy(wpf->dlm);
+}
+
+static void wpf_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ const struct v4l2_mbus_framefmt *source_format;
+ const struct v4l2_mbus_framefmt *sink_format;
+ unsigned int i;
+ u32 outfmt = 0;
+ u32 srcrpf = 0;
+
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SOURCE);
+ /* Format */
+ if (!pipe->lif) {
+ const struct v4l2_pix_format_mplane *format = &wpf->format;
+ const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
+
+ outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
+
+ if (wpf->flip.rotate)
+ outfmt |= VI6_WPF_OUTFMT_ROT;
+
+ if (fmtinfo->alpha)
+ outfmt |= VI6_WPF_OUTFMT_PXA;
+ if (fmtinfo->swap_yc)
+ outfmt |= VI6_WPF_OUTFMT_SPYCS;
+ if (fmtinfo->swap_uv)
+ outfmt |= VI6_WPF_OUTFMT_SPUVS;
+
+ /* Destination stride and byte swapping. */
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_STRIDE_Y,
+ format->plane_fmt[0].bytesperline);
+ if (format->num_planes > 1)
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_STRIDE_C,
+ format->plane_fmt[1].bytesperline);
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSWAP, fmtinfo->swap);
+
+ if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) &&
+ wpf->entity.index == 0)
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_ROT_CTRL,
+ VI6_WPF_ROT_CTRL_LN16 |
+ (256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
+ }
+
+ if (sink_format->code != source_format->code)
+ outfmt |= VI6_WPF_OUTFMT_CSC;
+
+ wpf->outfmt = outfmt;
+
+ vsp1_dl_body_write(dlb, VI6_DPR_WPF_FPORCH(wpf->entity.index),
+ VI6_DPR_WPF_FPORCH_FP_WPFN);
+
+ vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL, 0);
+
+ /*
+ * Sources. If the pipeline has a single input and BRx is not used,
+ * configure it as the master layer. Otherwise configure all
+ * inputs as sub-layers and select the virtual RPF as the master
+ * layer.
+ */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *input = pipe->inputs[i];
+
+ if (!input)
+ continue;
+
+ srcrpf |= (!pipe->brx && pipe->num_inputs == 1)
+ ? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
+ : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
+ }
+
+ if (pipe->brx)
+ srcrpf |= pipe->brx->type == VSP1_ENTITY_BRU
+ ? VI6_WPF_SRCRPF_VIRACT_MST
+ : VI6_WPF_SRCRPF_VIRACT2_MST;
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_SRCRPF, srcrpf);
+
+ /* Enable interrupts */
+ vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
+ vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(wpf->entity.index),
+ VI6_WFP_IRQ_ENB_DFEE);
+}
+
+static void wpf_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ const unsigned int mask = BIT(WPF_CTRL_VFLIP)
+ | BIT(WPF_CTRL_HFLIP);
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+ unsigned long flags;
+ u32 outfmt;
+
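+ /* Latch the flip value computed by the control handler for this frame. */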
+ spin_lock_irqsave(&wpf->flip.lock, flags);
+ wpf->flip.active = (wpf->flip.active & ~mask)
+ | (wpf->flip.pending & mask);
+ spin_unlock_irqrestore(&wpf->flip.lock, flags);
+
+ outfmt = (wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT) | wpf->outfmt;
+
+ if (wpf->flip.active & BIT(WPF_CTRL_VFLIP))
+ outfmt |= VI6_WPF_OUTFMT_FLP;
+ if (wpf->flip.active & BIT(WPF_CTRL_HFLIP))
+ outfmt |= VI6_WPF_OUTFMT_HFLP;
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_OUTFMT, outfmt);
+}
+
+static void wpf_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ struct vsp1_rwpf_memory mem = wpf->mem;
+ const struct v4l2_mbus_framefmt *sink_format;
+ const struct v4l2_pix_format_mplane *format = &wpf->format;
+ const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
+ unsigned int width;
+ unsigned int height;
+ unsigned int offset;
+ unsigned int flip;
+ unsigned int i;
+
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ width = sink_format->width;
+ height = sink_format->height;
+
+ /*
+ * Cropping. The partition algorithm can split the image into
+ * multiple slices.
+ */
+ if (pipe->partitions > 1)
+ width = pipe->partition->wpf.width;
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+ (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+ (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (height << VI6_WPF_SZCLIP_SIZE_SHIFT));
+
+ if (pipe->lif)
+ return;
+
+ /*
+ * Update the memory offsets based on flipping configuration.
+ * The destination addresses point to the locations where the
+ * VSP starts writing to memory, which can be any corner of the
+ * image depending on the combination of flipping and rotation.
+ */
+
+ /*
+ * First take the partition left coordinate into account.
+ * Compute the offset to order the partitions correctly on the
+ * output based on whether flipping is enabled. Consider
+ * horizontal flipping when rotation is disabled but vertical
+ * flipping when rotation is enabled, as rotating the image
+ * switches the horizontal and vertical directions. The offset
+ * is applied horizontally or vertically accordingly.
+ */
+ flip = wpf->flip.active;
+
+ if (flip & BIT(WPF_CTRL_HFLIP) && !wpf->flip.rotate)
+ offset = format->width - pipe->partition->wpf.left
+ - pipe->partition->wpf.width;
+ else if (flip & BIT(WPF_CTRL_VFLIP) && wpf->flip.rotate)
+ offset = format->height - pipe->partition->wpf.left
+ - pipe->partition->wpf.width;
+ else
+ offset = pipe->partition->wpf.left;
+
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
+ unsigned int vsub = i > 0 ? fmtinfo->vsub : 1;
+
+ if (wpf->flip.rotate)
+ mem.addr[i] += offset / vsub
+ * format->plane_fmt[i].bytesperline;
+ else
+ mem.addr[i] += offset / hsub
+ * fmtinfo->bpp[i] / 8;
+ }
+
+ if (flip & BIT(WPF_CTRL_VFLIP)) {
+ /*
+ * When rotating, the output (after rotation) image
+ * height is equal to the partition width (before
+ * rotation). Otherwise it is equal to the output
+ * image height.
+ */
+ if (wpf->flip.rotate)
+ height = pipe->partition->wpf.width;
+ else
+ height = format->height;
+
+ mem.addr[0] += (height - 1)
+ * format->plane_fmt[0].bytesperline;
+
+ if (format->num_planes > 1) {
+ offset = (height / fmtinfo->vsub - 1)
+ * format->plane_fmt[1].bytesperline;
+ mem.addr[1] += offset;
+ mem.addr[2] += offset;
+ }
+ }
+
+ if (wpf->flip.rotate && !(flip & BIT(WPF_CTRL_HFLIP))) {
+ unsigned int hoffset = max(0, (int)format->width - 16);
+
+ /*
+ * Compute the output coordinate. The partition
+ * horizontal (left) offset becomes a vertical offset.
+ */
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
+
+ mem.addr[i] += hoffset / hsub
+ * fmtinfo->bpp[i] / 8;
+ }
+ }
+
+ /*
+ * On Gen3 hardware the SPUVS bit has no effect on 3-planar
+ * formats. Swap the U and V planes manually in that case.
+ */
+ if (vsp1->info->gen == 3 && format->num_planes == 3 &&
+ fmtinfo->swap_uv)
+ swap(mem.addr[1], mem.addr[2]);
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
+}
+
+static unsigned int wpf_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+
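+ /*
+ * When rotation is enabled the rotation line memory limits partitions to
+ * 256 pixels.
+ */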
+ return wpf->flip.rotate ? 256 : wpf->max_width;
+}
+
+static void wpf_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ partition->wpf = *window;
+}
+
+static const struct vsp1_entity_operations wpf_entity_ops = {
+ .destroy = vsp1_wpf_destroy,
+ .configure_stream = wpf_configure_stream,
+ .configure_frame = wpf_configure_frame,
+ .configure_partition = wpf_configure_partition,
+ .max_width = wpf_max_width,
+ .partition = wpf_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_rwpf *wpf;
+ char name[6];
+ int ret;
+
+ wpf = devm_kzalloc(vsp1->dev, sizeof(*wpf), GFP_KERNEL);
+ if (wpf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (vsp1->info->gen == 2) {
+ wpf->max_width = WPF_GEN2_MAX_WIDTH;
+ wpf->max_height = WPF_GEN2_MAX_HEIGHT;
+ } else {
+ wpf->max_width = WPF_GEN3_MAX_WIDTH;
+ wpf->max_height = WPF_GEN3_MAX_HEIGHT;
+ }
+
+ wpf->entity.ops = &wpf_entity_ops;
+ wpf->entity.type = VSP1_ENTITY_WPF;
+ wpf->entity.index = index;
+
+ sprintf(name, "wpf.%u", index);
+ ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the display list manager. */
+ wpf->dlm = vsp1_dlm_create(vsp1, index, 64);
+ if (!wpf->dlm) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the control handler. */
+ ret = wpf_init_controls(wpf);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
+ index);
+ goto error;
+ }
+
+ v4l2_ctrl_handler_setup(&wpf->ctrls);
+
+ return wpf;
+
+error:
+ vsp1_entity_destroy(&wpf->entity);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
new file mode 100644
index 000000000..a5d21b7c6
--- /dev/null
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -0,0 +1,24 @@
+config VIDEO_XILINX
+ tristate "Xilinx Video IP (EXPERIMENTAL)"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ ---help---
+ Driver for Xilinx Video IP Pipelines
+
+if VIDEO_XILINX
+
+config VIDEO_XILINX_TPG
+ tristate "Xilinx Video Test Pattern Generator"
+ depends on VIDEO_XILINX
+ select VIDEO_XILINX_VTC
+ ---help---
+ Driver for the Xilinx Video Test Pattern Generator
+
+config VIDEO_XILINX_VTC
+ tristate "Xilinx Video Timing Controller"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Timing Controller
+
+endif #VIDEO_XILINX
diff --git a/drivers/media/platform/xilinx/Makefile b/drivers/media/platform/xilinx/Makefile
new file mode 100644
index 000000000..e8a0f2a9f
--- /dev/null
+++ b/drivers/media/platform/xilinx/Makefile
@@ -0,0 +1,5 @@
+xilinx-video-objs += xilinx-dma.o xilinx-vip.o xilinx-vipp.o
+
+obj-$(CONFIG_VIDEO_XILINX) += xilinx-video.o
+obj-$(CONFIG_VIDEO_XILINX_TPG) += xilinx-tpg.o
+obj-$(CONFIG_VIDEO_XILINX_VTC) += xilinx-vtc.o
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
new file mode 100644
index 000000000..d041f94be
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -0,0 +1,769 @@
+/*
+ * Xilinx Video DMA
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma/xilinx_dma.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-dma.h"
+#include "xilinx-vip.h"
+#include "xilinx-vipp.h"
+
+#define XVIP_DMA_DEF_FORMAT V4L2_PIX_FMT_YUYV
+#define XVIP_DMA_DEF_WIDTH 1920
+#define XVIP_DMA_DEF_HEIGHT 1080
+
+/* Minimum and maximum widths are expressed in bytes */
+#define XVIP_DMA_MIN_WIDTH 1U
+#define XVIP_DMA_MAX_WIDTH 65535U
+#define XVIP_DMA_MIN_HEIGHT 1U
+#define XVIP_DMA_MAX_HEIGHT 8191U
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static struct v4l2_subdev *
+xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(local);
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int xvip_dma_verify_format(struct xvip_dma *dma)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
+ if (subdev == NULL)
+ return -EPIPE;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (dma->fmtinfo->code != fmt.format.code ||
+ dma->format.height != fmt.format.height ||
+ dma->format.width != fmt.format.width ||
+ dma->format.colorspace != fmt.format.colorspace)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Stream Management
+ */
+
+/**
+ * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
+ * @pipe: The pipeline
+ * @start: Start (when true) or stop (when false) the pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * or stop all of them.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
+{
+ struct xvip_dma *dma = pipe->output;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ entity = &dma->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, start);
+ if (start && ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: The pipeline
+ * @on: Turn the stream on when true or off when false
+ *
+ * The pipeline is shared between all DMA engines connected at its input and
+ * output. While the stream state of DMA engines can be controlled
+ * independently, pipelines have a shared stream state that enables or disables
+ * all entities in the pipeline. For this reason the pipeline uses a streaming
+ * counter that tracks the number of DMA engines that have requested the stream
+ * to be enabled.
+ *
+ * When called with the @on argument set to true, this function will increment
+ * the pipeline streaming count. If the streaming count reaches the number of
+ * DMA engines in the pipeline it will enable all entities that belong to the
+ * pipeline.
+ *
+ * Similarly, when called with the @on argument set to false, this function will
+ * decrement the pipeline streaming count and disable all entities in the
+ * pipeline when the streaming count reaches zero.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. Stopping the pipeline never fails. The pipeline state is
+ * not updated when the operation fails.
+ */
+static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
+{
+ int ret = 0;
+
+ mutex_lock(&pipe->lock);
+
+ if (on) {
+ if (pipe->stream_count == pipe->num_dmas - 1) {
+ ret = xvip_pipeline_start_stop(pipe, true);
+ if (ret < 0)
+ goto done;
+ }
+ pipe->stream_count++;
+ } else {
+ if (--pipe->stream_count == 0)
+ xvip_pipeline_start_stop(pipe, false);
+ }
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
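The counting scheme documented above can be illustrated with a small standalone sketch that keeps only the counter logic; the lock, the error path and the real s_stream calls are left out, and the printf calls merely stand in for xvip_pipeline_start_stop():

#include <stdbool.h>
#include <stdio.h>

struct toy_pipe {
	unsigned int stream_count;	/* DMA engines currently streaming */
	unsigned int num_dmas;		/* DMA engines in the pipeline */
};

/* Mirrors only the counter logic of xvip_pipeline_set_stream(). */
static void toy_set_stream(struct toy_pipe *pipe, bool on)
{
	if (on) {
		if (pipe->stream_count == pipe->num_dmas - 1)
			printf("start all entities\n");
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			printf("stop all entities\n");
	}
}

int main(void)
{
	struct toy_pipe pipe = { .num_dmas = 2 };

	toy_set_stream(&pipe, true);	/* first DMA: nothing happens yet */
	toy_set_stream(&pipe, true);	/* second DMA: "start all entities" */
	toy_set_stream(&pipe, false);	/* still one user: nothing happens */
	toy_set_stream(&pipe, false);	/* last user: "stop all entities" */
	return 0;
}

With num_dmas == 2, only the second start request and the last stop request reach the entities, which is exactly the behaviour relied on by xvip_dma_start_streaming() and xvip_dma_stop_streaming() below.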
+
+static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
+ struct xvip_dma *start)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &start->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ unsigned int num_inputs = 0;
+ unsigned int num_outputs = 0;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the video nodes. */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct xvip_dma *dma;
+
+ if (entity->function != MEDIA_ENT_F_IO_V4L)
+ continue;
+
+ dma = to_xvip_dma(media_entity_to_video_device(entity));
+
+ if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
+ pipe->output = dma;
+ num_outputs++;
+ } else {
+ num_inputs++;
+ }
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ media_graph_walk_cleanup(&graph);
+
+ /* We need exactly one output and zero or one input. */
+ if (num_outputs != 1 || num_inputs > 1)
+ return -EPIPE;
+
+ pipe->num_dmas = num_inputs + num_outputs;
+
+ return 0;
+}
+
+static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ pipe->num_dmas = 0;
+ pipe->output = NULL;
+}
+
+/**
+ * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
+ * @pipe: the pipeline
+ *
+ * Decrease the pipeline use count and clean it up if we were the last user.
+ */
+static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ mutex_lock(&pipe->lock);
+
+ /* If we're the last user clean up the pipeline. */
+ if (--pipe->use_count == 0)
+ __xvip_pipeline_cleanup(pipe);
+
+ mutex_unlock(&pipe->lock);
+}
+
+/**
+ * xvip_pipeline_prepare - Prepare the pipeline for streaming
+ * @pipe: the pipeline
+ * @dma: DMA engine at one end of the pipeline
+ *
+ * Validate the pipeline if no user exists yet, otherwise just increase the use
+ * count.
+ *
+ * Return: 0 if successful or -EPIPE if the pipeline is not valid.
+ */
+static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
+ struct xvip_dma *dma)
+{
+ int ret;
+
+ mutex_lock(&pipe->lock);
+
+ /* If we're the first user validate and initialize the pipeline. */
+ if (pipe->use_count == 0) {
+ ret = xvip_pipeline_validate(pipe, dma);
+ if (ret < 0) {
+ __xvip_pipeline_cleanup(pipe);
+ goto done;
+ }
+ }
+
+ pipe->use_count++;
+ ret = 0;
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 queue operations
+ */
+
+/**
+ * struct xvip_dma_buffer - Video DMA buffer
+ * @buf: vb2 buffer base object
+ * @queue: buffer list entry in the DMA engine queued buffers list
+ * @dma: DMA channel that uses the buffer
+ */
+struct xvip_dma_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ struct xvip_dma *dma;
+};
+
+#define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
+
+static void xvip_dma_complete(void *param)
+{
+ struct xvip_dma_buffer *buf = param;
+ struct xvip_dma *dma = buf->dma;
+
+ spin_lock(&dma->queued_lock);
+ list_del(&buf->queue);
+ spin_unlock(&dma->queued_lock);
+
+ buf->buf.field = V4L2_FIELD_NONE;
+ buf->buf.sequence = dma->sequence++;
+ buf->buf.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static int
+xvip_dma_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vq);
+
+ /* Make sure the image size is large enough. */
+ if (*nplanes)
+ return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = dma->format.sizeimage;
+
+ return 0;
+}
+
+static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
+
+ buf->dma = dma;
+
+ return 0;
+}
+
+static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ u32 flags;
+
+ if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ dma->xt.dir = DMA_DEV_TO_MEM;
+ dma->xt.src_sgl = false;
+ dma->xt.dst_sgl = true;
+ dma->xt.dst_start = addr;
+ } else {
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ dma->xt.dir = DMA_MEM_TO_DEV;
+ dma->xt.src_sgl = true;
+ dma->xt.dst_sgl = false;
+ dma->xt.src_start = addr;
+ }
+
+ dma->xt.frame_size = 1;
+ dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
+ dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
+ dma->xt.numf = dma->format.height;
+
+ desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
+ if (!desc) {
+ dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ return;
+ }
+ desc->callback = xvip_dma_complete;
+ desc->callback_param = buf;
+
+ spin_lock_irq(&dma->queued_lock);
+ list_add_tail(&buf->queue, &dma->queued_bufs);
+ spin_unlock_irq(&dma->queued_lock);
+
+ dmaengine_submit(desc);
+
+ if (vb2_is_streaming(&dma->queue))
+ dma_async_issue_pending(dma->dma);
+}
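As a worked example of the interleaved template filled in above, take the driver's default 1920x1080 YUYV format (2 bytes per pixel) and an assumed bytesperline of 4096: each of the 1080 frames (numf) carries one 3840-byte chunk of pixel data followed by a 256-byte inter-chunk gap (icg). The arithmetic, reproduced in plain C:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;
	unsigned int bpp = 2;			/* YUYV: 2 bytes per pixel */
	unsigned int bytesperline = 4096;	/* assumed user choice */

	unsigned int size = width * bpp;	/* dma->sgl[0].size */
	unsigned int icg = bytesperline - size;	/* dma->sgl[0].icg */
	unsigned int numf = height;		/* dma->xt.numf */

	printf("size=%u icg=%u numf=%u\n", size, icg, numf); /* 3840 256 1080 */
	return 0;
}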
+
+static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vq);
+ struct xvip_dma_buffer *buf, *nbuf;
+ struct xvip_pipeline *pipe;
+ int ret;
+
+ dma->sequence = 0;
+
+ /*
+ * Start streaming on the pipeline. No link touching an entity in the
+ * pipeline can be activated or deactivated once streaming is started.
+ *
+ * Use the pipeline object embedded in the first DMA object that starts
+ * streaming.
+ */
+ pipe = dma->video.entity.pipe
+ ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
+
+ ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto error;
+
+ /* Verify that the configured format matches the output of the
+ * connected subdev.
+ */
+ ret = xvip_dma_verify_format(dma);
+ if (ret < 0)
+ goto error_stop;
+
+ ret = xvip_pipeline_prepare(pipe, dma);
+ if (ret < 0)
+ goto error_stop;
+
+ /* Start the DMA engine. This must be done before starting the blocks
+ * in the pipeline to avoid DMA synchronization issues.
+ */
+ dma_async_issue_pending(dma->dma);
+
+ /* Start the pipeline. */
+ xvip_pipeline_set_stream(pipe, true);
+
+ return 0;
+
+error_stop:
+ media_pipeline_stop(&dma->video.entity);
+
+error:
+ /* Give back all queued buffers to videobuf2. */
+ spin_lock_irq(&dma->queued_lock);
+ list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ list_del(&buf->queue);
+ }
+ spin_unlock_irq(&dma->queued_lock);
+
+ return ret;
+}
+
+static void xvip_dma_stop_streaming(struct vb2_queue *vq)
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vq);
+ struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
+ struct xvip_dma_buffer *buf, *nbuf;
+
+ /* Stop the pipeline. */
+ xvip_pipeline_set_stream(pipe, false);
+
+ /* Stop and reset the DMA engine. */
+ dmaengine_terminate_all(dma->dma);
+
+ /* Cleanup the pipeline and mark it as being stopped. */
+ xvip_pipeline_cleanup(pipe);
+ media_pipeline_stop(&dma->video.entity);
+
+ /* Give back all queued buffers to videobuf2. */
+ spin_lock_irq(&dma->queued_lock);
+ list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ list_del(&buf->queue);
+ }
+ spin_unlock_irq(&dma->queued_lock);
+}
+
+static const struct vb2_ops xvip_dma_queue_qops = {
+ .queue_setup = xvip_dma_queue_setup,
+ .buf_prepare = xvip_dma_buffer_prepare,
+ .buf_queue = xvip_dma_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = xvip_dma_start_streaming,
+ .stop_streaming = xvip_dma_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ cap->device_caps = V4L2_CAP_STREAMING;
+
+ if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else
+ cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
+
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS
+ | dma->xdev->v4l2_caps;
+
+ strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
+ strlcpy(cap->card, dma->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
+ dma->xdev->dev->of_node->name, dma->port);
+
+ return 0;
+}
+
+/* FIXME: without this callback function, some applications are not configured
+ * with correct formats, which results in frames in the wrong format. Whether
+ * this callback should be mandatory is not clearly defined, so it should be
+ * clarified on the mailing list.
+ */
+static int
+xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->pixelformat = dma->format.pixelformat;
+ strlcpy(f->description, dma->fmtinfo->description,
+ sizeof(f->description));
+
+ return 0;
+}
+
+static int
+xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ format->fmt.pix = dma->format;
+
+ return 0;
+}
+
+static void
+__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
+ const struct xvip_video_format **fmtinfo)
+{
+ const struct xvip_video_format *info;
+ unsigned int min_width;
+ unsigned int max_width;
+ unsigned int min_bpl;
+ unsigned int max_bpl;
+ unsigned int width;
+ unsigned int align;
+ unsigned int bpl;
+
+ /* Retrieve format information and select the default format if the
+ * requested format isn't supported.
+ */
+ info = xvip_get_format_by_fourcc(pix->pixelformat);
+ if (IS_ERR(info))
+ info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
+
+ pix->pixelformat = info->fourcc;
+ pix->field = V4L2_FIELD_NONE;
+
+ /* The transfer alignment requirements are expressed in bytes. Compute
+ * the minimum and maximum values, clamp the requested width and convert
+ * it back to pixels.
+ */
+ align = lcm(dma->align, info->bpp);
+ min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
+ max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
+ width = rounddown(pix->width * info->bpp, align);
+
+ pix->width = clamp(width, min_width, max_width) / info->bpp;
+ pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+	/* Clamp the requested bytes per line value. Round it down to the DMA
+	 * engine alignment and keep it between the line size implied by the
+	 * width and the largest line the DMA engine can transfer.
+	 */
+ min_bpl = pix->width * info->bpp;
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+ bpl = rounddown(pix->bytesperline, dma->align);
+
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ if (fmtinfo)
+ *fmtinfo = info;
+}
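A worked example of the width alignment above, assuming a DMA engine with a 32-byte copy alignment (so dma->align == 32) and the YUYV default format (2 bytes per pixel): the transfer alignment is lcm(32, 2) = 32 bytes, and a requested width of 1000 pixels (2000 bytes) is rounded down to 1984 bytes, i.e. 992 pixels. The snippet reproduces the arithmetic outside the kernel (lcm(), roundup() and rounddown() are kernel helpers, so small local equivalents are used); the min/max clamping is omitted because the example stays well inside the limits:

#include <stdio.h>

static unsigned int gcd_u(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned int lcm_u(unsigned int a, unsigned int b)
{
	return a / gcd_u(a, b) * b;
}

int main(void)
{
	unsigned int dma_align = 32;	/* assumed DMA copy alignment, bytes */
	unsigned int bpp = 2;		/* YUYV */
	unsigned int req_width = 1000;	/* requested width in pixels */

	unsigned int align = lcm_u(dma_align, bpp);		 /* 32 */
	unsigned int width = (req_width * bpp) / align * align;  /* rounddown: 1984 */

	printf("aligned width = %u bytes = %u pixels\n", width, width / bpp);
	return 0;
}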
+
+static int
+xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
+ return 0;
+}
+
+static int
+xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ const struct xvip_video_format *info;
+
+ __xvip_dma_try_format(dma, &format->fmt.pix, &info);
+
+ if (vb2_is_busy(&dma->queue))
+ return -EBUSY;
+
+ dma->format = format->fmt.pix;
+ dma->fmtinfo = info;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
+ .vidioc_querycap = xvip_dma_querycap,
+ .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
+ .vidioc_g_fmt_vid_cap = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_out = xvip_dma_get_format,
+ .vidioc_s_fmt_vid_cap = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_out = xvip_dma_set_format,
+ .vidioc_try_fmt_vid_cap = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_out = xvip_dma_try_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static const struct v4l2_file_operations xvip_dma_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Xilinx Video DMA Core
+ */
+
+int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
+ enum v4l2_buf_type type, unsigned int port)
+{
+ char name[16];
+ int ret;
+
+ dma->xdev = xdev;
+ dma->port = port;
+ mutex_init(&dma->lock);
+ mutex_init(&dma->pipe.lock);
+ INIT_LIST_HEAD(&dma->queued_bufs);
+ spin_lock_init(&dma->queued_lock);
+
+ dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
+ dma->format.pixelformat = dma->fmtinfo->fourcc;
+ dma->format.colorspace = V4L2_COLORSPACE_SRGB;
+ dma->format.field = V4L2_FIELD_NONE;
+ dma->format.width = XVIP_DMA_DEF_WIDTH;
+ dma->format.height = XVIP_DMA_DEF_HEIGHT;
+ dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
+ dma->format.sizeimage = dma->format.bytesperline * dma->format.height;
+
+ /* Initialize the media entity... */
+ dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+ ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
+ if (ret < 0)
+ goto error;
+
+ /* ... and the video node... */
+ dma->video.fops = &xvip_dma_fops;
+ dma->video.v4l2_dev = &xdev->v4l2_dev;
+ dma->video.queue = &dma->queue;
+ snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
+ xdev->dev->of_node->name,
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
+ port);
+ dma->video.vfl_type = VFL_TYPE_GRABBER;
+ dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+ ? VFL_DIR_RX : VFL_DIR_TX;
+ dma->video.release = video_device_release_empty;
+ dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
+ dma->video.lock = &dma->lock;
+
+ video_set_drvdata(&dma->video, dma);
+
+ /* ... and the buffers queue... */
+ /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
+ * V4L2 APIs would be inefficient. Testing on the command line with a
+ * 'cat /dev/video?' thus won't be possible, but given that the driver
+ * anyway requires a test tool to setup the pipeline before any video
+ * stream can be started, requiring a specific V4L2 test tool as well
+ * instead of 'cat' isn't really a drawback.
+ */
+ dma->queue.type = type;
+ dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dma->queue.lock = &dma->lock;
+ dma->queue.drv_priv = dma;
+ dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
+ dma->queue.ops = &xvip_dma_queue_qops;
+ dma->queue.mem_ops = &vb2_dma_contig_memops;
+ dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
+ | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
+ dma->queue.dev = dma->xdev->dev;
+ ret = vb2_queue_init(&dma->queue);
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
+ goto error;
+ }
+
+ /* ... and the DMA channel. */
+ snprintf(name, sizeof(name), "port%u", port);
+ dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
+ if (dma->dma == NULL) {
+ dev_err(dma->xdev->dev, "no VDMA channel found\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dma->align = 1 << dma->dma->device->copy_align;
+
+ ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ xvip_dma_cleanup(dma);
+ return ret;
+}
+
+void xvip_dma_cleanup(struct xvip_dma *dma)
+{
+ if (video_is_registered(&dma->video))
+ video_unregister_device(&dma->video);
+
+ if (dma->dma)
+ dma_release_channel(dma->dma);
+
+ media_entity_cleanup(&dma->video.entity);
+
+ mutex_destroy(&dma->lock);
+ mutex_destroy(&dma->pipe.lock);
+}
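The video node registered by xvip_dma_init() behaves like any other V4L2 capture or output device, so a minimal userspace capture setup follows the usual sequence. The sketch below is only illustrative: the device path and resolution are assumptions, the media pipeline links and subdev formats are expected to have been configured beforehand (for example with media-ctl), and error handling is reduced to early returns:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	struct v4l2_requestbuffers req;
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device path */

	if (fd < 0)
		return 1;

	/* Negotiate the format; the driver clamps and aligns it as shown above. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 1920;
	fmt.fmt.pix.height = 1080;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return 1;

	/* Allocate MMAP buffers (read()/write() are intentionally unsupported). */
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return 1;

	/* Buffers would now be queued with VIDIOC_QBUF before streaming. */
	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return 1;

	printf("streaming %ux%u, %u bytes per line\n",
	       fmt.fmt.pix.width, fmt.fmt.pix.height, fmt.fmt.pix.bytesperline);
	return 0;
}

If the negotiated pixel format, size or colorspace does not match the connected subdev, VIDIOC_STREAMON fails through xvip_dma_verify_format().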
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
new file mode 100644
index 000000000..e95d136c1
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -0,0 +1,107 @@
+/*
+ * Xilinx Video DMA
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VIP_DMA_H__
+#define __XILINX_VIP_DMA_H__
+
+#include <linux/dmaengine.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
+
+struct dma_chan;
+struct xvip_composite_device;
+struct xvip_video_format;
+
+/**
+ * struct xvip_pipeline - Xilinx Video IP pipeline structure
+ * @pipe: media pipeline
+ * @lock: protects the pipeline @stream_count
+ * @use_count: number of DMA engines using the pipeline
+ * @stream_count: number of DMA engines currently streaming
+ * @num_dmas: number of DMA engines in the pipeline
+ * @output: DMA engine at the output of the pipeline
+ */
+struct xvip_pipeline {
+ struct media_pipeline pipe;
+
+ struct mutex lock;
+ unsigned int use_count;
+ unsigned int stream_count;
+
+ unsigned int num_dmas;
+ struct xvip_dma *output;
+};
+
+static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
+{
+ return container_of(e->pipe, struct xvip_pipeline, pipe);
+}
+
+/**
+ * struct xvip_dma - Video DMA channel
+ * @list: list entry in a composite device dmas list
+ * @video: V4L2 video device associated with the DMA channel
+ * @pad: media pad for the video device entity
+ * @xdev: composite device the DMA channel belongs to
+ * @pipe: pipeline belonging to the DMA channel
+ * @port: composite device DT node port number for the DMA channel
+ * @lock: protects the @format, @fmtinfo and @queue fields
+ * @format: active V4L2 pixel format
+ * @fmtinfo: format information corresponding to the active @format
+ * @queue: vb2 buffers queue
+ * @sequence: V4L2 buffers sequence number
+ * @queued_bufs: list of queued buffers
+ * @queued_lock: protects the buf_queued list
+ * @dma: DMA engine channel
+ * @align: transfer alignment required by the DMA channel (in bytes)
+ * @xt: dma interleaved template for dma configuration
+ * @sgl: data chunk structure for dma_interleaved_template
+ */
+struct xvip_dma {
+ struct list_head list;
+ struct video_device video;
+ struct media_pad pad;
+
+ struct xvip_composite_device *xdev;
+ struct xvip_pipeline pipe;
+ unsigned int port;
+
+ struct mutex lock;
+ struct v4l2_pix_format format;
+ const struct xvip_video_format *fmtinfo;
+
+ struct vb2_queue queue;
+ unsigned int sequence;
+
+ struct list_head queued_bufs;
+ spinlock_t queued_lock;
+
+ struct dma_chan *dma;
+ unsigned int align;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+#define to_xvip_dma(vdev) container_of(vdev, struct xvip_dma, video)
+
+int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
+ enum v4l2_buf_type type, unsigned int port);
+void xvip_dma_cleanup(struct xvip_dma *dma);
+
+#endif /* __XILINX_VIP_DMA_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
new file mode 100644
index 000000000..9c49d1d10
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -0,0 +1,933 @@
+/*
+ * Xilinx Test Pattern Generator
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+#include "xilinx-vtc.h"
+
+#define XTPG_CTRL_STATUS_SLAVE_ERROR (1 << 16)
+#define XTPG_CTRL_IRQ_SLAVE_ERROR (1 << 16)
+
+#define XTPG_PATTERN_CONTROL 0x0100
+#define XTPG_PATTERN_MASK (0xf << 0)
+#define XTPG_PATTERN_CONTROL_CROSS_HAIRS (1 << 4)
+#define XTPG_PATTERN_CONTROL_MOVING_BOX (1 << 5)
+#define XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT 6
+#define XTPG_PATTERN_CONTROL_COLOR_MASK_MASK (0xf << 6)
+#define XTPG_PATTERN_CONTROL_STUCK_PIXEL (1 << 9)
+#define XTPG_PATTERN_CONTROL_NOISE (1 << 10)
+#define XTPG_PATTERN_CONTROL_MOTION (1 << 12)
+#define XTPG_MOTION_SPEED 0x0104
+#define XTPG_CROSS_HAIRS 0x0108
+#define XTPG_CROSS_HAIRS_ROW_SHIFT 0
+#define XTPG_CROSS_HAIRS_ROW_MASK (0xfff << 0)
+#define XTPG_CROSS_HAIRS_COLUMN_SHIFT 16
+#define XTPG_CROSS_HAIRS_COLUMN_MASK (0xfff << 16)
+#define XTPG_ZPLATE_HOR_CONTROL 0x010c
+#define XTPG_ZPLATE_VER_CONTROL 0x0110
+#define XTPG_ZPLATE_START_SHIFT 0
+#define XTPG_ZPLATE_START_MASK (0xffff << 0)
+#define XTPG_ZPLATE_SPEED_SHIFT 16
+#define XTPG_ZPLATE_SPEED_MASK (0xffff << 16)
+#define XTPG_BOX_SIZE 0x0114
+#define XTPG_BOX_COLOR 0x0118
+#define XTPG_STUCK_PIXEL_THRESH 0x011c
+#define XTPG_NOISE_GAIN 0x0120
+#define XTPG_BAYER_PHASE 0x0124
+#define XTPG_BAYER_PHASE_RGGB 0
+#define XTPG_BAYER_PHASE_GRBG 1
+#define XTPG_BAYER_PHASE_GBRG 2
+#define XTPG_BAYER_PHASE_BGGR 3
+#define XTPG_BAYER_PHASE_OFF 4
+
+/*
+ * The minimum blanking value is one clock cycle for the front porch, one clock
+ * cycle for the sync pulse and one clock cycle for the back porch.
+ */
+#define XTPG_MIN_HBLANK 3
+#define XTPG_MAX_HBLANK (XVTC_MAX_HSIZE - XVIP_MIN_WIDTH)
+#define XTPG_MIN_VBLANK 3
+#define XTPG_MAX_VBLANK (XVTC_MAX_VSIZE - XVIP_MIN_HEIGHT)
+
+/**
+ * struct xtpg_device - Xilinx Test Pattern Generator device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @npads: number of pads (1 or 2)
+ * @has_input: whether an input is connected to the sink pad
+ * @formats: active V4L2 media bus format for each pad
+ * @default_format: default V4L2 media bus format
+ * @vip_format: format information corresponding to the active format
+ * @bayer: boolean flag if TPG is set to any bayer format
+ * @ctrl_handler: control handler
+ * @hblank: horizontal blanking control
+ * @vblank: vertical blanking control
+ * @pattern: test pattern control
+ * @streaming: is the video stream active
+ * @vtc: video timing controller
+ * @vtmux_gpio: video timing mux GPIO
+ */
+struct xtpg_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+ unsigned int npads;
+ bool has_input;
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_format;
+ const struct xvip_video_format *vip_format;
+ bool bayer;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *pattern;
+ bool streaming;
+
+ struct xvtc_device *vtc;
+ struct gpio_desc *vtmux_gpio;
+};
+
+static inline struct xtpg_device *to_tpg(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xtpg_device, xvip.subdev);
+}
+
+static u32 xtpg_get_bayer_phase(unsigned int code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return XTPG_BAYER_PHASE_RGGB;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ return XTPG_BAYER_PHASE_GRBG;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ return XTPG_BAYER_PHASE_GBRG;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return XTPG_BAYER_PHASE_BGGR;
+ default:
+ return XTPG_BAYER_PHASE_OFF;
+ }
+}
+
+static void __xtpg_update_pattern_control(struct xtpg_device *xtpg,
+ bool passthrough, bool pattern)
+{
+ u32 pattern_mask = (1 << (xtpg->pattern->maximum + 1)) - 1;
+
+ /*
+	 * If the TPG has no sink pad or no input connected to its sink pad,
+ * passthrough mode can't be enabled.
+ */
+ if (xtpg->npads == 1 || !xtpg->has_input)
+ passthrough = false;
+
+ /* If passthrough mode is allowed unmask bit 0. */
+ if (passthrough)
+ pattern_mask &= ~1;
+
+ /* If test pattern mode is allowed unmask all other bits. */
+ if (pattern)
+ pattern_mask &= 1;
+
+ __v4l2_ctrl_modify_range(xtpg->pattern, 0, xtpg->pattern->maximum,
+ pattern_mask, pattern ? 9 : 0);
+}
+
+static void xtpg_update_pattern_control(struct xtpg_device *xtpg,
+ bool passthrough, bool pattern)
+{
+ mutex_lock(xtpg->ctrl_handler.lock);
+ __xtpg_update_pattern_control(xtpg, passthrough, pattern);
+ mutex_unlock(xtpg->ctrl_handler.lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+ unsigned int width = xtpg->formats[0].width;
+ unsigned int height = xtpg->formats[0].height;
+ bool passthrough;
+ u32 bayer_phase;
+
+ if (!enable) {
+ xvip_stop(&xtpg->xvip);
+ if (xtpg->vtc)
+ xvtc_generator_stop(xtpg->vtc);
+
+ xtpg_update_pattern_control(xtpg, true, true);
+ xtpg->streaming = false;
+ return 0;
+ }
+
+ xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
+
+ if (xtpg->vtc) {
+ struct xvtc_config config = {
+ .hblank_start = width,
+ .hsync_start = width + 1,
+ .vblank_start = height,
+ .vsync_start = height + 1,
+ };
+ unsigned int htotal;
+ unsigned int vtotal;
+
+ htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
+ v4l2_ctrl_g_ctrl(xtpg->hblank) + width);
+ vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
+ v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
+
+ config.hsync_end = htotal - 1;
+ config.hsize = htotal;
+ config.vsync_end = vtotal - 1;
+ config.vsize = vtotal;
+
+ xvtc_generator_start(xtpg->vtc, &config);
+ }
+
+ /*
+ * Configure the bayer phase and video timing mux based on the
+ * operation mode (passthrough or test pattern generation). The test
+ * pattern can be modified by the control set handler, we thus need to
+ * take the control lock here to avoid races.
+ */
+ mutex_lock(xtpg->ctrl_handler.lock);
+
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_MASK, xtpg->pattern->cur.val);
+
+ /*
+ * Switching between passthrough and test pattern generation modes isn't
+	 * allowed during streaming; update the control range accordingly.
+ */
+ passthrough = xtpg->pattern->cur.val == 0;
+ __xtpg_update_pattern_control(xtpg, passthrough, !passthrough);
+
+ xtpg->streaming = true;
+
+ mutex_unlock(xtpg->ctrl_handler.lock);
+
+ /*
+ * For TPG v5.0, the bayer phase needs to be off for the pass through
+ * mode, otherwise the external input would be subsampled.
+ */
+ bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
+ : xtpg_get_bayer_phase(xtpg->formats[0].code);
+ xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
+
+ if (xtpg->vtmux_gpio)
+ gpiod_set_value_cansleep(xtpg->vtmux_gpio, !passthrough);
+
+ xvip_start(&xtpg->xvip);
+
+ return 0;
+}
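A worked example of the timing programmed above, assuming a 1280x720 format on pad 0 and the 100-cycle defaults of the hblank/vblank controls created in the probe function: htotal = 1380 and vtotal = 820, giving hblank_start = 1280, hsync_start = 1281, hsync_end = 1379 and hsize = 1380 (and the analogous vertical values). The clamping against XVTC_MAX_HSIZE/XVTC_MAX_VSIZE does not trigger for these numbers. In plain C:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1280, height = 720;	/* assumed active format */
	unsigned int hblank = 100, vblank = 100;	/* control defaults */

	unsigned int htotal = width + hblank;
	unsigned int vtotal = height + vblank;

	printf("hblank_start=%u hsync_start=%u hsync_end=%u hsize=%u\n",
	       width, width + 1, htotal - 1, htotal);
	printf("vblank_start=%u vsync_start=%u vsync_end=%u vsize=%u\n",
	       height, height + 1, vtotal - 1, vtotal);
	return 0;
}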
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xtpg_get_pad_format(struct xtpg_device *xtpg,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xtpg->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xtpg->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xtpg_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+
+ fmt->format = *__xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+static int xtpg_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ u32 bayer_phase;
+
+ __format = __xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+
+ /* In two pads mode the source pad format is always identical to the
+ * sink pad format.
+ */
+ if (xtpg->npads == 2 && fmt->pad == 1) {
+ fmt->format = *__format;
+ return 0;
+ }
+
+ /* Bayer phase is configurable at runtime */
+ if (xtpg->bayer) {
+ bayer_phase = xtpg_get_bayer_phase(fmt->format.code);
+ if (bayer_phase != XTPG_BAYER_PHASE_OFF)
+ __format->code = fmt->format.code;
+ }
+
+ xvip_set_format_size(__format, fmt);
+
+ fmt->format = *__format;
+
+ /* Propagate the format to the source pad. */
+ if (xtpg->npads == 2) {
+ __format = __xtpg_get_pad_format(xtpg, cfg, 1, fmt->which);
+ *__format = fmt->format;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+	/* Min / max values for pad 0 are always fixed in both one and two pads
+	 * modes. In two pads mode, the source pad (pad 1) size is identical to
+	 * the sink pad size.
+	 */
+ if (fse->pad == 0) {
+ fse->min_width = XVIP_MIN_WIDTH;
+ fse->max_width = XVIP_MAX_WIDTH;
+ fse->min_height = XVIP_MIN_HEIGHT;
+ fse->max_height = XVIP_MAX_HEIGHT;
+ } else {
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static int xtpg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ *format = xtpg->default_format;
+
+ if (xtpg->npads == 2) {
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, 1);
+ *format = xtpg->default_format;
+ }
+
+ return 0;
+}
+
+static int xtpg_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xtpg_device *xtpg = container_of(ctrl->handler,
+ struct xtpg_device,
+ ctrl_handler);
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_MASK, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_CROSS_HAIRS:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_CROSS_HAIRS, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_MOVING_BOX:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_MOVING_BOX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_COLOR_MASK:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
+ ctrl->val <<
+ XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_STUCK_PIXEL:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_STUCK_PIXEL, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_NOISE:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_NOISE, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_MOTION:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_MOTION, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_MOTION_SPEED:
+ xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_ROW_MASK,
+ ctrl->val << XTPG_CROSS_HAIRS_ROW_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_COLUMN_MASK,
+ ctrl->val << XTPG_CROSS_HAIRS_COLUMN_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_HOR_START:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_VER_START:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_BOX_SIZE:
+ xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_BOX_COLOR:
+ xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH:
+ xvip_write(&xtpg->xvip, XTPG_STUCK_PIXEL_THRESH, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_NOISE_GAIN:
+ xvip_write(&xtpg->xvip, XTPG_NOISE_GAIN, ctrl->val);
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xtpg_ctrl_ops = {
+ .s_ctrl = xtpg_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops xtpg_core_ops = {
+};
+
+static const struct v4l2_subdev_video_ops xtpg_video_ops = {
+ .s_stream = xtpg_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops xtpg_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xtpg_enum_frame_size,
+ .get_fmt = xtpg_get_format,
+ .set_fmt = xtpg_set_format,
+};
+
+static const struct v4l2_subdev_ops xtpg_ops = {
+ .core = &xtpg_core_ops,
+ .video = &xtpg_video_ops,
+ .pad = &xtpg_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xtpg_internal_ops = {
+ .open = xtpg_open,
+ .close = xtpg_close,
+};
+
+/*
+ * Control Config
+ */
+
+static const char *const xtpg_pattern_strings[] = {
+ "Passthrough",
+ "Horizontal Ramp",
+ "Vertical Ramp",
+ "Temporal Ramp",
+ "Solid Red",
+ "Solid Green",
+ "Solid Blue",
+ "Solid Black",
+ "Solid White",
+ "Color Bars",
+ "Zone Plate",
+ "Tartan Color Bars",
+ "Cross Hatch",
+ "None",
+ "Vertical/Horizontal Ramps",
+ "Black/White Checker Board",
+};
+
+static struct v4l2_ctrl_config xtpg_ctrls[] = {
+ {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
+ .name = "Test Pattern: Cross Hairs",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
+ .name = "Test Pattern: Moving Box",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
+ .name = "Test Pattern: Color Mask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = 0xf,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
+ .name = "Test Pattern: Stuck Pixel",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_NOISE,
+ .name = "Test Pattern: Noise",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOTION,
+ .name = "Test Pattern: Motion",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOTION_SPEED,
+ .name = "Test Pattern: Motion Speed",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 8) - 1,
+ .step = 1,
+ .def = 4,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW,
+ .name = "Test Pattern: Cross Hairs Row",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0x64,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN,
+ .name = "Test Pattern: Cross Hairs Column",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0x64,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_HOR_START,
+ .name = "Test Pattern: Zplate Horizontal Start Pos",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0x1e,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED,
+ .name = "Test Pattern: Zplate Horizontal Speed",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_VER_START,
+ .name = "Test Pattern: Zplate Vertical Start Pos",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 1,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED,
+ .name = "Test Pattern: Zplate Vertical Speed",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_BOX_SIZE,
+ .name = "Test Pattern: Box Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0x32,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_BOX_COLOR,
+ .name = "Test Pattern: Box Color(RGB)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 24) - 1,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH,
+ .name = "Test Pattern: Stuck Pixel threshold",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_NOISE_GAIN,
+ .name = "Test Pattern: Noise Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 8) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+};
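Because the probe function below sets V4L2_SUBDEV_FL_HAS_DEVNODE, both the standard test pattern menu and the custom controls above are reachable through the subdevice node with the ordinary control ioctls. A hedged example that selects "Color Bars" (menu index 9, also the default) on an assumed node name; the custom controls would be set the same way using the V4L2_CID_XILINX_TPG_* IDs from linux/xilinx-v4l2-controls.h:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_TEST_PATTERN,
		.value = 9,			/* "Color Bars" in the menu above */
	};
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0 ? 1 : 0;
}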
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xtpg_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static int __maybe_unused xtpg_pm_suspend(struct device *dev)
+{
+ struct xtpg_device *xtpg = dev_get_drvdata(dev);
+
+ xvip_suspend(&xtpg->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xtpg_pm_resume(struct device *dev)
+{
+ struct xtpg_device *xtpg = dev_get_drvdata(dev);
+
+ xvip_resume(&xtpg->xvip);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xtpg_parse_of(struct xtpg_device *xtpg)
+{
+ struct device *dev = xtpg->xvip.dev;
+ struct device_node *node = xtpg->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int nports = 0;
+ bool has_endpoint = false;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ const struct xvip_video_format *format;
+ struct device_node *endpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ format = xvip_of_get_format(port);
+ if (IS_ERR(format)) {
+ dev_err(dev, "invalid format in DT");
+ of_node_put(port);
+ return PTR_ERR(format);
+ }
+
+ /* Get and check the format description */
+ if (!xtpg->vip_format) {
+ xtpg->vip_format = format;
+ } else if (xtpg->vip_format != format) {
+ dev_err(dev, "in/out format mismatch in DT");
+ of_node_put(port);
+ return -EINVAL;
+ }
+
+ if (nports == 0) {
+ endpoint = of_get_next_child(port, NULL);
+ if (endpoint)
+ has_endpoint = true;
+ of_node_put(endpoint);
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ if (nports != 1 && nports != 2) {
+ dev_err(dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+
+ xtpg->npads = nports;
+ if (nports == 2 && has_endpoint)
+ xtpg->has_input = true;
+
+ return 0;
+}
+
+static int xtpg_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xtpg_device *xtpg;
+ u32 i, bayer_phase;
+ int ret;
+
+ xtpg = devm_kzalloc(&pdev->dev, sizeof(*xtpg), GFP_KERNEL);
+ if (!xtpg)
+ return -ENOMEM;
+
+ xtpg->xvip.dev = &pdev->dev;
+
+ ret = xtpg_parse_of(xtpg);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xtpg->xvip);
+ if (ret < 0)
+ return ret;
+
+ xtpg->vtmux_gpio = devm_gpiod_get_optional(&pdev->dev, "timing",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xtpg->vtmux_gpio)) {
+ ret = PTR_ERR(xtpg->vtmux_gpio);
+ goto error_resource;
+ }
+
+ xtpg->vtc = xvtc_of_get(pdev->dev.of_node);
+ if (IS_ERR(xtpg->vtc)) {
+ ret = PTR_ERR(xtpg->vtc);
+ goto error_resource;
+ }
+
+ /* Reset and initialize the core */
+ xvip_reset(&xtpg->xvip);
+
+ /* Initialize V4L2 subdevice and media entity. Pad numbers depend on the
+ * number of pads.
+ */
+ if (xtpg->npads == 2) {
+ xtpg->pads[0].flags = MEDIA_PAD_FL_SINK;
+ xtpg->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ } else {
+ xtpg->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ }
+
+ /* Initialize the default format */
+ xtpg->default_format.code = xtpg->vip_format->code;
+ xtpg->default_format.field = V4L2_FIELD_NONE;
+ xtpg->default_format.colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
+
+ bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
+ if (bayer_phase != XTPG_BAYER_PHASE_OFF)
+ xtpg->bayer = true;
+
+ xtpg->formats[0] = xtpg->default_format;
+ if (xtpg->npads == 2)
+ xtpg->formats[1] = xtpg->default_format;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xtpg->xvip.subdev;
+ v4l2_subdev_init(subdev, &xtpg_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xtpg_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xtpg);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.ops = &xtpg_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, xtpg->npads, xtpg->pads);
+ if (ret < 0)
+ goto error;
+
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 + ARRAY_SIZE(xtpg_ctrls));
+
+ xtpg->vblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
+ V4L2_CID_VBLANK, XTPG_MIN_VBLANK,
+ XTPG_MAX_VBLANK, 1, 100);
+ xtpg->hblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
+ V4L2_CID_HBLANK, XTPG_MIN_HBLANK,
+ XTPG_MAX_HBLANK, 1, 100);
+ xtpg->pattern = v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(xtpg_pattern_strings) - 1,
+ 1, 9, xtpg_pattern_strings);
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler, &xtpg_ctrls[i], NULL);
+
+ if (xtpg->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xtpg->ctrl_handler.error;
+ goto error;
+ }
+ subdev->ctrl_handler = &xtpg->ctrl_handler;
+
+ xtpg_update_pattern_control(xtpg, true, true);
+
+ ret = v4l2_ctrl_handler_setup(&xtpg->ctrl_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, xtpg);
+
+ xvip_print_version(&xtpg->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xtpg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvtc_put(xtpg->vtc);
+error_resource:
+ xvip_cleanup_resources(&xtpg->xvip);
+ return ret;
+}
+
+static int xtpg_remove(struct platform_device *pdev)
+{
+ struct xtpg_device *xtpg = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xtpg->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xtpg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xtpg->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xtpg_pm_ops, xtpg_pm_suspend, xtpg_pm_resume);
+
+static const struct of_device_id xtpg_of_id_table[] = {
+ { .compatible = "xlnx,v-tpg-5.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xtpg_of_id_table);
+
+static struct platform_driver xtpg_driver = {
+ .driver = {
+ .name = "xilinx-tpg",
+ .pm = &xtpg_pm_ops,
+ .of_match_table = xtpg_of_id_table,
+ },
+ .probe = xtpg_probe,
+ .remove = xtpg_remove,
+};
+
+module_platform_driver(xtpg_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Test Pattern Generator Driver");
+MODULE_LICENSE("GPL v2");
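For context, a device tree fragment that xtpg_parse_of() would accept might look roughly as follows. The compatible string, the optional timing GPIO and the per-port xlnx,video-format / xlnx,video-width properties follow from the code above and from xvip_of_get_format() below; the unit address, register size, clock, VTC phandle property and endpoint wiring are illustrative assumptions about the board and the binding, not something this patch defines:

#include <dt-bindings/media/xilinx-vip.h>

tpg: tpg@43c00000 {
	compatible = "xlnx,v-tpg-5.0";
	reg = <0x43c00000 0x10000>;		/* assumed address/size */
	clocks = <&video_clk>;			/* assumed clock provider */

	xlnx,vtc = <&vtc>;			/* assumed VTC phandle property */
	timing-gpios = <&gpio0 0 1>;		/* assumed GPIO specifier */

	ports {
		#address-cells = <1>;
		#size-cells = <0>;

		port@0 {
			reg = <0>;
			xlnx,video-format = <XVIP_VF_YUV_422>;
			xlnx,video-width = <8>;

			tpg_out: endpoint {
				remote-endpoint = <&dma_in>;	/* assumed */
			};
		};
	};
};

With a single port as shown, xtpg_parse_of() sets npads to 1 and the only pad is a source; a second port would make port@0 the sink and port@1 the source.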
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
new file mode 100644
index 000000000..311259129
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -0,0 +1,323 @@
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/media/xilinx-vip.h>
+
+#include "xilinx-vip.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static const struct xvip_video_format xvip_video_formats[] = {
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 2, V4L2_PIX_FMT_YUYV, "4:2:2, packed, YUYV" },
+ { XVIP_VF_YUV_444, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
+ 3, V4L2_PIX_FMT_YUV444, "4:4:4, packed, YUYV" },
+ { XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 3, 0, NULL },
+ { XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
+ 1, V4L2_PIX_FMT_GREY, "Greyscale 8-bit" },
+ { XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
+	  1, V4L2_PIX_FMT_SRGGB8, "Bayer 8-bit RGGB" },
+ { XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
+ 1, V4L2_PIX_FMT_SGRBG8, "Bayer 8-bit GRBG" },
+ { XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
+ 1, V4L2_PIX_FMT_SGBRG8, "Bayer 8-bit GBRG" },
+ { XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
+ 1, V4L2_PIX_FMT_SBGGR8, "Bayer 8-bit BGGR" },
+};
+
+/**
+ * xvip_get_format_by_code - Retrieve format information for a media bus code
+ * @code: the format media bus code
+ *
+ * Return: a pointer to the format information structure corresponding to the
+ * given V4L2 media bus format @code, or ERR_PTR if no corresponding format can
+ * be found.
+ */
+const struct xvip_video_format *xvip_get_format_by_code(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
+ const struct xvip_video_format *format = &xvip_video_formats[i];
+
+ if (format->code == code)
+ return format;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(xvip_get_format_by_code);
+
+/**
+ * xvip_get_format_by_fourcc - Retrieve format information for a 4CC
+ * @fourcc: the format 4CC
+ *
+ * Return: a pointer to the format information structure corresponding to the
+ * given V4L2 format @fourcc, or ERR_PTR if no corresponding format can be
+ * found.
+ */
+const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
+ const struct xvip_video_format *format = &xvip_video_formats[i];
+
+ if (format->fourcc == fourcc)
+ return format;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(xvip_get_format_by_fourcc);
+
+/**
+ * xvip_of_get_format - Parse a device tree node and return format information
+ * @node: the device tree node
+ *
+ * Read the xlnx,video-format, xlnx,video-width and xlnx,cfa-pattern properties
+ * from the device tree @node passed as an argument and return the corresponding
+ * format information.
+ *
+ * Return: a pointer to the format information structure corresponding to the
+ * format name and width, or ERR_PTR if no corresponding format can be found.
+ */
+const struct xvip_video_format *xvip_of_get_format(struct device_node *node)
+{
+ const char *pattern = "mono";
+ unsigned int vf_code;
+ unsigned int i;
+ u32 width;
+ int ret;
+
+ ret = of_property_read_u32(node, "xlnx,video-format", &vf_code);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = of_property_read_u32(node, "xlnx,video-width", &width);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (vf_code == XVIP_VF_MONO_SENSOR)
+ of_property_read_string(node, "xlnx,cfa-pattern", &pattern);
+
+ for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
+ const struct xvip_video_format *format = &xvip_video_formats[i];
+
+ if (format->vf_code != vf_code || format->width != width)
+ continue;
+
+ if (vf_code == XVIP_VF_MONO_SENSOR &&
+ strcmp(pattern, format->pattern))
+ continue;
+
+ return format;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(xvip_of_get_format);
+
+/**
+ * xvip_set_format_size - Set the media bus frame format size
+ * @format: V4L2 frame format on media bus
+ * @fmt: V4L2 subdev format carrying the requested size
+ *
+ * Set the media bus frame format size. The width / height from the subdev
+ * format @fmt are clamped using the default min / max values and stored in
+ * the media bus frame format @format.
+ */
+void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
+ const struct v4l2_subdev_format *fmt)
+{
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+}
+EXPORT_SYMBOL_GPL(xvip_set_format_size);
+
+/**
+ * xvip_clr_or_set - Clear or set the register with a bitmask
+ * @xvip: Xilinx Video IP device
+ * @addr: address of register
+ * @mask: bitmask to be set or cleared
+ * @set: boolean flag indicating whether to set or clear
+ *
+ * Clear or set the register at address @addr with a bitmask @mask depending on
+ * the boolean flag @set. When the flag @set is true, the bitmask is set in
+ * the register, otherwise the bitmask is cleared from the register
+ * when the flag @set is false.
+ *
+ * For example, this function can be used to set a control with a boolean value
+ * requested by users. If the caller knows whether to set or clear in the first
+ * place, the caller should call xvip_clr() or xvip_set() directly instead of
+ * using this function.
+ */
+void xvip_clr_or_set(struct xvip_device *xvip, u32 addr, u32 mask, bool set)
+{
+ u32 reg;
+
+ reg = xvip_read(xvip, addr);
+ reg = set ? reg | mask : reg & ~mask;
+ xvip_write(xvip, addr, reg);
+}
+EXPORT_SYMBOL_GPL(xvip_clr_or_set);
+
+/**
+ * xvip_clr_and_set - Clear and set the register with a bitmask
+ * @xvip: Xilinx Video IP device
+ * @addr: address of register
+ * @clr: bitmask to be cleared
+ * @set: bitmask to be set
+ *
+ * Clear the bits given by mask @clr in the register at address @addr, then
+ * set the bits given by mask @set in the same register.
+ */
+void xvip_clr_and_set(struct xvip_device *xvip, u32 addr, u32 clr, u32 set)
+{
+ u32 reg;
+
+ reg = xvip_read(xvip, addr);
+ reg &= ~clr;
+ reg |= set;
+ xvip_write(xvip, addr, reg);
+}
+EXPORT_SYMBOL_GPL(xvip_clr_and_set);
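A small standalone model of the two helpers above, operating on a plain word instead of a memory-mapped register (xvip_read()/xvip_write() need the real device): xvip_clr_or_set() drives a single flag from a boolean, while xvip_clr_and_set() replaces a multi-bit field, exactly as the TPG control handler earlier in this patch uses them. The register layout constants are copied from the TPG defines:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XTPG_PATTERN_CONTROL_MOVING_BOX		(1 << 5)
#define XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT	6
#define XTPG_PATTERN_CONTROL_COLOR_MASK_MASK	(0xf << 6)

/* Userspace stand-ins for the read-modify-write done by the kernel helpers. */
static uint32_t clr_or_set(uint32_t reg, uint32_t mask, bool set)
{
	return set ? reg | mask : reg & ~mask;
}

static uint32_t clr_and_set(uint32_t reg, uint32_t clr, uint32_t set)
{
	return (reg & ~clr) | set;
}

int main(void)
{
	uint32_t reg = 0;

	reg = clr_or_set(reg, XTPG_PATTERN_CONTROL_MOVING_BOX, true);
	reg = clr_and_set(reg, XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
			  0x5 << XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);

	printf("0x%08x\n", reg);	/* 0x00000160: box on, color mask 0x5 */
	return 0;
}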
+
+int xvip_init_resources(struct xvip_device *xvip)
+{
+ struct platform_device *pdev = to_platform_device(xvip->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvip->iomem = devm_ioremap_resource(xvip->dev, res);
+ if (IS_ERR(xvip->iomem))
+ return PTR_ERR(xvip->iomem);
+
+ xvip->clk = devm_clk_get(xvip->dev, NULL);
+ if (IS_ERR(xvip->clk))
+ return PTR_ERR(xvip->clk);
+
+ clk_prepare_enable(xvip->clk);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvip_init_resources);
+
+void xvip_cleanup_resources(struct xvip_device *xvip)
+{
+ clk_disable_unprepare(xvip->clk);
+}
+EXPORT_SYMBOL_GPL(xvip_cleanup_resources);
+
+/* -----------------------------------------------------------------------------
+ * Subdev operations handlers
+ */
+
+/**
+ * xvip_enum_mbus_code - Enumerate the media format code
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: returning media bus code
+ *
+ * Enumerate the media bus code of the subdevice. Return the corresponding
+ * pad format code. This function only works for subdevices with fixed format
+ * on all pads. Subdevices with multiple format should have their own
+ * function to enumerate mbus codes.
+ *
+ * Return: 0 if the media bus code is found, or -EINVAL if the format index
+ * is not valid.
+ */
+int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ /* Enumerating mbus codes based on the active configuration isn't
+ * supported yet.
+ */
+ if (code->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, code->pad);
+
+ code->code = format->code;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvip_enum_mbus_code);
+
+/**
+ * xvip_enum_frame_size - Enumerate the media bus frame size
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: returning media bus frame size
+ *
+ * This function is a drop-in implementation of the subdev enum_frame_size pad
+ * operation. It assumes that the subdevice has one sink pad and one source
+ * pad, and that the format on the source pad is always identical to the
+ * format on the sink pad. Entities with different requirements need to
+ * implement their own enum_frame_size handlers.
+ *
+ * Return: 0 if the media bus frame size is found, or -EINVAL
+ * if the index or the code is not valid.
+ */
+int xvip_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ /* Enumerating frame sizes based on the active configuration isn't
+ * supported yet.
+ */
+ if (fse->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == XVIP_PAD_SINK) {
+ fse->min_width = XVIP_MIN_WIDTH;
+ fse->max_width = XVIP_MAX_WIDTH;
+ fse->min_height = XVIP_MIN_HEIGHT;
+ fse->max_height = XVIP_MAX_HEIGHT;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvip_enum_frame_size);
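+
+/*
+ * Both helpers above are designed to be plugged directly into a subdevice's
+ * pad operations; a hypothetical ops table (the get/set format handlers are
+ * driver specific and only named here for illustration) would look like:
+ *
+ *    static const struct v4l2_subdev_pad_ops xexample_pad_ops = {
+ *            .enum_mbus_code = xvip_enum_mbus_code,
+ *            .enum_frame_size = xvip_enum_frame_size,
+ *            .get_fmt = xexample_get_format,
+ *            .set_fmt = xexample_set_format,
+ *    };
+ */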
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
new file mode 100644
index 000000000..42fee2026
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -0,0 +1,238 @@
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VIP_H__
+#define __XILINX_VIP_H__
+
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+
+struct clk;
+
+/*
+ * Minimum and maximum width and height common to most video IP cores. IP
+ * cores with different requirements must define their own values.
+ */
+#define XVIP_MIN_WIDTH 32
+#define XVIP_MAX_WIDTH 7680
+#define XVIP_MIN_HEIGHT 32
+#define XVIP_MAX_HEIGHT 7680
+
+/*
+ * Pad IDs. IP cores with multiple inputs or outputs should define
+ * their own values.
+ */
+#define XVIP_PAD_SINK 0
+#define XVIP_PAD_SOURCE 1
+
+/* Xilinx Video IP Control Registers */
+#define XVIP_CTRL_CONTROL 0x0000
+#define XVIP_CTRL_CONTROL_SW_ENABLE (1 << 0)
+#define XVIP_CTRL_CONTROL_REG_UPDATE (1 << 1)
+#define XVIP_CTRL_CONTROL_BYPASS (1 << 4)
+#define XVIP_CTRL_CONTROL_TEST_PATTERN (1 << 5)
+#define XVIP_CTRL_CONTROL_FRAME_SYNC_RESET (1 << 30)
+#define XVIP_CTRL_CONTROL_SW_RESET (1 << 31)
+#define XVIP_CTRL_STATUS 0x0004
+#define XVIP_CTRL_STATUS_PROC_STARTED (1 << 0)
+#define XVIP_CTRL_STATUS_EOF (1 << 1)
+#define XVIP_CTRL_ERROR 0x0008
+#define XVIP_CTRL_ERROR_SLAVE_EOL_EARLY (1 << 0)
+#define XVIP_CTRL_ERROR_SLAVE_EOL_LATE (1 << 1)
+#define XVIP_CTRL_ERROR_SLAVE_SOF_EARLY (1 << 2)
+#define XVIP_CTRL_ERROR_SLAVE_SOF_LATE (1 << 3)
+#define XVIP_CTRL_IRQ_ENABLE 0x000c
+#define XVIP_CTRL_IRQ_ENABLE_PROC_STARTED (1 << 0)
+#define XVIP_CTRL_IRQ_EOF (1 << 1)
+#define XVIP_CTRL_VERSION 0x0010
+#define XVIP_CTRL_VERSION_MAJOR_MASK (0xff << 24)
+#define XVIP_CTRL_VERSION_MAJOR_SHIFT 24
+#define XVIP_CTRL_VERSION_MINOR_MASK (0xff << 16)
+#define XVIP_CTRL_VERSION_MINOR_SHIFT 16
+#define XVIP_CTRL_VERSION_REVISION_MASK (0xf << 12)
+#define XVIP_CTRL_VERSION_REVISION_SHIFT 12
+#define XVIP_CTRL_VERSION_PATCH_MASK (0xf << 8)
+#define XVIP_CTRL_VERSION_PATCH_SHIFT 8
+#define XVIP_CTRL_VERSION_INTERNAL_MASK (0xff << 0)
+#define XVIP_CTRL_VERSION_INTERNAL_SHIFT 0
+
+/* Xilinx Video IP Timing Registers */
+#define XVIP_ACTIVE_SIZE 0x0020
+#define XVIP_ACTIVE_VSIZE_MASK (0x7ff << 16)
+#define XVIP_ACTIVE_VSIZE_SHIFT 16
+#define XVIP_ACTIVE_HSIZE_MASK (0x7ff << 0)
+#define XVIP_ACTIVE_HSIZE_SHIFT 0
+#define XVIP_ENCODING 0x0028
+#define XVIP_ENCODING_NBITS_8 (0 << 4)
+#define XVIP_ENCODING_NBITS_10 (1 << 4)
+#define XVIP_ENCODING_NBITS_12 (2 << 4)
+#define XVIP_ENCODING_NBITS_16 (3 << 4)
+#define XVIP_ENCODING_NBITS_MASK (3 << 4)
+#define XVIP_ENCODING_NBITS_SHIFT 4
+#define XVIP_ENCODING_VIDEO_FORMAT_YUV422 (0 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_YUV444 (1 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_RGB (2 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_YUV420 (3 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_MASK (3 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_SHIFT 0
+
+/**
+ * struct xvip_device - Xilinx Video IP device structure
+ * @subdev: V4L2 subdevice
+ * @dev: (OF) device
+ * @iomem: device I/O register space remapped to kernel virtual memory
+ * @clk: video core clock
+ * @saved_ctrl: saved control register for resume / suspend
+ */
+struct xvip_device {
+ struct v4l2_subdev subdev;
+ struct device *dev;
+ void __iomem *iomem;
+ struct clk *clk;
+ u32 saved_ctrl;
+};
+
+/**
+ * struct xvip_video_format - Xilinx Video IP video format description
+ * @vf_code: AXI4 video format code
+ * @width: AXI4 format width in bits per component
+ * @pattern: CFA pattern for Mono/Sensor formats
+ * @code: media bus format code
+ * @bpp: bytes per pixel (when stored in memory)
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @description: format description, suitable for userspace
+ */
+struct xvip_video_format {
+ unsigned int vf_code;
+ unsigned int width;
+ const char *pattern;
+ unsigned int code;
+ unsigned int bpp;
+ u32 fourcc;
+ const char *description;
+};
+
+const struct xvip_video_format *xvip_get_format_by_code(unsigned int code);
+const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc);
+const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
+void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
+ const struct v4l2_subdev_format *fmt);
+int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code);
+int xvip_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse);
+
+static inline u32 xvip_read(struct xvip_device *xvip, u32 addr)
+{
+ return ioread32(xvip->iomem + addr);
+}
+
+static inline void xvip_write(struct xvip_device *xvip, u32 addr, u32 value)
+{
+ iowrite32(value, xvip->iomem + addr);
+}
+
+static inline void xvip_clr(struct xvip_device *xvip, u32 addr, u32 clr)
+{
+ xvip_write(xvip, addr, xvip_read(xvip, addr) & ~clr);
+}
+
+static inline void xvip_set(struct xvip_device *xvip, u32 addr, u32 set)
+{
+ xvip_write(xvip, addr, xvip_read(xvip, addr) | set);
+}
+
+void xvip_clr_or_set(struct xvip_device *xvip, u32 addr, u32 mask, bool set);
+void xvip_clr_and_set(struct xvip_device *xvip, u32 addr, u32 clr, u32 set);
+
+int xvip_init_resources(struct xvip_device *xvip);
+void xvip_cleanup_resources(struct xvip_device *xvip);
+
+static inline void xvip_reset(struct xvip_device *xvip)
+{
+ xvip_write(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_SW_RESET);
+}
+
+static inline void xvip_start(struct xvip_device *xvip)
+{
+ xvip_set(xvip, XVIP_CTRL_CONTROL,
+ XVIP_CTRL_CONTROL_SW_ENABLE | XVIP_CTRL_CONTROL_REG_UPDATE);
+}
+
+static inline void xvip_stop(struct xvip_device *xvip)
+{
+ xvip_clr(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_SW_ENABLE);
+}
+
+static inline void xvip_resume(struct xvip_device *xvip)
+{
+ xvip_write(xvip, XVIP_CTRL_CONTROL,
+ xvip->saved_ctrl | XVIP_CTRL_CONTROL_SW_ENABLE);
+}
+
+static inline void xvip_suspend(struct xvip_device *xvip)
+{
+ xvip->saved_ctrl = xvip_read(xvip, XVIP_CTRL_CONTROL);
+ xvip_write(xvip, XVIP_CTRL_CONTROL,
+ xvip->saved_ctrl & ~XVIP_CTRL_CONTROL_SW_ENABLE);
+}
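+
+/*
+ * xvip_suspend() and xvip_resume() only save and restore the control
+ * register; a driver would typically call them from its own power management
+ * callbacks, for example (hypothetical names, clock handling omitted):
+ *
+ *    static int __maybe_unused xexample_pm_suspend(struct device *dev)
+ *    {
+ *            struct xexample_device *xexample = dev_get_drvdata(dev);
+ *
+ *            xvip_suspend(&xexample->xvip);
+ *            return 0;
+ *    }
+ */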
+
+static inline void xvip_set_frame_size(struct xvip_device *xvip,
+ const struct v4l2_mbus_framefmt *format)
+{
+ xvip_write(xvip, XVIP_ACTIVE_SIZE,
+ (format->height << XVIP_ACTIVE_VSIZE_SHIFT) |
+ (format->width << XVIP_ACTIVE_HSIZE_SHIFT));
+}
+
+static inline void xvip_get_frame_size(struct xvip_device *xvip,
+ struct v4l2_mbus_framefmt *format)
+{
+ u32 reg;
+
+ reg = xvip_read(xvip, XVIP_ACTIVE_SIZE);
+ format->width = (reg & XVIP_ACTIVE_HSIZE_MASK) >>
+ XVIP_ACTIVE_HSIZE_SHIFT;
+ format->height = (reg & XVIP_ACTIVE_VSIZE_MASK) >>
+ XVIP_ACTIVE_VSIZE_SHIFT;
+}
+
+static inline void xvip_enable_reg_update(struct xvip_device *xvip)
+{
+ xvip_set(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_REG_UPDATE);
+}
+
+static inline void xvip_disable_reg_update(struct xvip_device *xvip)
+{
+ xvip_clr(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_REG_UPDATE);
+}
+
+static inline void xvip_print_version(struct xvip_device *xvip)
+{
+ u32 version;
+
+ version = xvip_read(xvip, XVIP_CTRL_VERSION);
+
+ dev_info(xvip->dev, "device found, version %u.%02x%x\n",
+ ((version & XVIP_CTRL_VERSION_MAJOR_MASK) >>
+ XVIP_CTRL_VERSION_MAJOR_SHIFT),
+ ((version & XVIP_CTRL_VERSION_MINOR_MASK) >>
+ XVIP_CTRL_VERSION_MINOR_SHIFT),
+ ((version & XVIP_CTRL_VERSION_REVISION_MASK) >>
+ XVIP_CTRL_VERSION_REVISION_SHIFT));
+}
+
+#endif /* __XILINX_VIP_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
new file mode 100644
index 000000000..6d95ec1e9
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -0,0 +1,664 @@
+/*
+ * Xilinx Video IP Composite Device
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "xilinx-dma.h"
+#include "xilinx-vipp.h"
+
+#define XVIPP_DMA_S2MM 0
+#define XVIPP_DMA_MM2S 1
+
+/**
+ * struct xvip_graph_entity - Entity in the video graph
+ * @list: list entry in a graph entities list
+ * @node: the entity's DT node
+ * @entity: media entity, from the corresponding V4L2 subdev
+ * @asd: subdev asynchronous registration information
+ * @subdev: V4L2 subdev
+ */
+struct xvip_graph_entity {
+ struct list_head list;
+ struct device_node *node;
+ struct media_entity *entity;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+};
+
+/* -----------------------------------------------------------------------------
+ * Graph Management
+ */
+
+static struct xvip_graph_entity *
+xvip_graph_find_entity(struct xvip_composite_device *xdev,
+ const struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node == node)
+ return entity;
+ }
+
+ return NULL;
+}
+
+static int xvip_graph_build_one(struct xvip_composite_device *xdev,
+ struct xvip_graph_entity *entity)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct media_entity *local = entity->entity;
+ struct media_entity *remote;
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_fwnode_link link;
+ struct device_node *ep = NULL;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for entity %s\n", local->name);
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ ep = of_graph_get_next_endpoint(entity->node, ep);
+ if (ep == NULL)
+ break;
+
+ dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %pOF\n",
+ ep);
+ continue;
+ }
+
+ /* Skip sink ports, they will be processed from the other end of
+ * the link.
+ */
+ if (link.local_port >= local->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u for %pOF\n",
+ link.local_port,
+ to_of_node(link.local_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ local_pad = &local->pads[link.local_port];
+
+ if (local_pad->flags & MEDIA_PAD_FL_SINK) {
+ dev_dbg(xdev->dev, "skipping sink port %pOF:%u\n",
+ to_of_node(link.local_node),
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* Skip DMA engines, they will be processed separately. */
+ if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) {
+ dev_dbg(xdev->dev, "skipping DMA port %pOF:%u\n",
+ to_of_node(link.local_node),
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
+ if (ent == NULL) {
+ dev_err(xdev->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ remote = ent->entity;
+
+ if (link.remote_port >= remote->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+ link.remote_port, to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ remote_pad = &remote->pads[link.remote_port];
+
+ v4l2_fwnode_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+
+ ret = media_create_pad_link(local, local_pad->index,
+ remote, remote_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+ break;
+ }
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static struct xvip_dma *
+xvip_graph_find_dma(struct xvip_composite_device *xdev, unsigned int port)
+{
+ struct xvip_dma *dma;
+
+ list_for_each_entry(dma, &xdev->dmas, list) {
+ if (dma->port == port)
+ return dma;
+ }
+
+ return NULL;
+}
+
+static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct device_node *node = xdev->dev->of_node;
+ struct media_entity *source;
+ struct media_entity *sink;
+ struct media_pad *source_pad;
+ struct media_pad *sink_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_fwnode_link link;
+ struct device_node *ep = NULL;
+ struct xvip_dma *dma;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for DMA engines\n");
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (ep == NULL)
+ break;
+
+ dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %pOF\n",
+ ep);
+ continue;
+ }
+
+ /* Find the DMA engine. */
+ dma = xvip_graph_find_dma(xdev, link.local_port);
+ if (dma == NULL) {
+ dev_err(xdev->dev, "no DMA engine found for port %u\n",
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev_dbg(xdev->dev, "creating link for DMA engine %s\n",
+ dma->video.name);
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
+ if (ent == NULL) {
+ dev_err(xdev->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ if (link.remote_port >= ent->entity->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+ link.remote_port,
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (dma->pad.flags & MEDIA_PAD_FL_SOURCE) {
+ source = &dma->video.entity;
+ source_pad = &dma->pad;
+ sink = ent->entity;
+ sink_pad = &sink->pads[link.remote_port];
+ } else {
+ source = ent->entity;
+ source_pad = &source->pads[link.remote_port];
+ sink = &dma->video.entity;
+ sink_pad = &dma->pad;
+ }
+
+ v4l2_fwnode_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+
+ ret = media_create_pad_link(source, source_pad->index,
+ sink, sink_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+ break;
+ }
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct xvip_composite_device *xdev =
+ container_of(notifier, struct xvip_composite_device, notifier);
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
+
+ /* Create links for every entity. */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_build_one(xdev, entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Create links for DMA channels. */
+ ret = xvip_graph_build_dma(xdev);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev);
+ if (ret < 0)
+ dev_err(xdev->dev, "failed to register subdev nodes\n");
+
+ return media_device_register(&xdev->media_dev);
+}
+
+static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct xvip_composite_device *xdev =
+ container_of(notifier, struct xvip_composite_device, notifier);
+ struct xvip_graph_entity *entity;
+
+ /* Locate the entity corresponding to the bound subdev and store the
+ * subdev pointer.
+ */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node != subdev->dev->of_node)
+ continue;
+
+ if (entity->subdev) {
+ dev_err(xdev->dev, "duplicate subdev for node %pOF\n",
+ entity->node);
+ return -EINVAL;
+ }
+
+ dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
+ entity->entity = &subdev->entity;
+ entity->subdev = subdev;
+ return 0;
+ }
+
+ dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
+ return -EINVAL;
+}
+
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+ .bound = xvip_graph_notify_bound,
+ .complete = xvip_graph_notify_complete,
+};
+
+static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
+ struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+ struct device_node *remote;
+ struct device_node *ep = NULL;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "parsing node %pOF\n", node);
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (ep == NULL)
+ break;
+
+ dev_dbg(xdev->dev, "handling endpoint %pOF\n", ep);
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (remote == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Skip entities that we have already processed. */
+ if (remote == xdev->dev->of_node ||
+ xvip_graph_find_entity(xdev, remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL);
+ if (entity == NULL) {
+ of_node_put(remote);
+ ret = -ENOMEM;
+ break;
+ }
+
+ entity->node = remote;
+ entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ entity->asd.match.fwnode = of_fwnode_handle(remote);
+ list_add_tail(&entity->list, &xdev->entities);
+ xdev->num_subdevs++;
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static int xvip_graph_parse(struct xvip_composite_device *xdev)
+{
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ /*
+ * Walk the links to parse the full graph. Start by parsing the
+ * composite node and then parse entities in turn. The list_for_each
+ * loop will handle entities added at the end of the list while walking
+ * the links.
+ */
+ ret = xvip_graph_parse_one(xdev, xdev->dev->of_node);
+ if (ret < 0)
+ return 0;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_parse_one(xdev, entity->node);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
+ struct device_node *node)
+{
+ struct xvip_dma *dma;
+ enum v4l2_buf_type type;
+ const char *direction;
+ unsigned int index;
+ int ret;
+
+ ret = of_property_read_string(node, "direction", &direction);
+ if (ret < 0)
+ return ret;
+
+ if (strcmp(direction, "input") == 0)
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (strcmp(direction, "output") == 0)
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ else
+ return -EINVAL;
+
+ of_property_read_u32(node, "reg", &index);
+
+ dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (dma == NULL)
+ return -ENOMEM;
+
+ ret = xvip_dma_init(xdev, dma, type, index);
+ if (ret < 0) {
+ dev_err(xdev->dev, "%pOF initialization failed\n", node);
+ return ret;
+ }
+
+ list_add_tail(&dma->list, &xdev->dmas);
+
+ xdev->v4l2_caps |= type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+ ? V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
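+
+/*
+ * Each DMA channel is described by a child of the "ports" node. A
+ * hypothetical fragment matching the parsing above (node names and the
+ * remote endpoint label are examples only) could look like:
+ *
+ *    ports {
+ *            port@0 {
+ *                    reg = <0>;
+ *                    direction = "input";
+ *                    endpoint {
+ *                            remote-endpoint = <&scaler_out>;
+ *                    };
+ *            };
+ *    };
+ */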
+
+static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+
+ ports = of_get_child_by_name(xdev->dev->of_node, "ports");
+ if (ports == NULL) {
+ dev_err(xdev->dev, "ports node not present\n");
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(ports, port) {
+ ret = xvip_graph_dma_init_one(xdev, port);
+ if (ret < 0) {
+ of_node_put(port);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
+{
+ struct xvip_graph_entity *entityp;
+ struct xvip_graph_entity *entity;
+ struct xvip_dma *dmap;
+ struct xvip_dma *dma;
+
+ v4l2_async_notifier_unregister(&xdev->notifier);
+
+ list_for_each_entry_safe(entity, entityp, &xdev->entities, list) {
+ of_node_put(entity->node);
+ list_del(&entity->list);
+ }
+
+ list_for_each_entry_safe(dma, dmap, &xdev->dmas, list) {
+ xvip_dma_cleanup(dma);
+ list_del(&dma->list);
+ }
+}
+
+static int xvip_graph_init(struct xvip_composite_device *xdev)
+{
+ struct xvip_graph_entity *entity;
+ struct v4l2_async_subdev **subdevs = NULL;
+ unsigned int num_subdevs;
+ unsigned int i;
+ int ret;
+
+ /* Init the DMA channels. */
+ ret = xvip_graph_dma_init(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "DMA initialization failed\n");
+ goto done;
+ }
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = xvip_graph_parse(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "graph parsing failed\n");
+ goto done;
+ }
+
+ if (!xdev->num_subdevs) {
+ dev_err(xdev->dev, "no subdev found in graph\n");
+ goto done;
+ }
+
+ /* Register the subdevices notifier. */
+ num_subdevs = xdev->num_subdevs;
+ subdevs = devm_kcalloc(xdev->dev, num_subdevs, sizeof(*subdevs),
+ GFP_KERNEL);
+ if (subdevs == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ i = 0;
+ list_for_each_entry(entity, &xdev->entities, list)
+ subdevs[i++] = &entity->asd;
+
+ xdev->notifier.subdevs = subdevs;
+ xdev->notifier.num_subdevs = num_subdevs;
+ xdev->notifier.ops = &xvip_graph_notify_ops;
+
+ ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+ if (ret < 0) {
+ dev_err(xdev->dev, "notifier registration failed\n");
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ if (ret < 0)
+ xvip_graph_cleanup(xdev);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Controller and V4L2
+ */
+
+static void xvip_composite_v4l2_cleanup(struct xvip_composite_device *xdev)
+{
+ v4l2_device_unregister(&xdev->v4l2_dev);
+ media_device_unregister(&xdev->media_dev);
+ media_device_cleanup(&xdev->media_dev);
+}
+
+static int xvip_composite_v4l2_init(struct xvip_composite_device *xdev)
+{
+ int ret;
+
+ xdev->media_dev.dev = xdev->dev;
+ strlcpy(xdev->media_dev.model, "Xilinx Video Composite Device",
+ sizeof(xdev->media_dev.model));
+ xdev->media_dev.hw_revision = 0;
+
+ media_device_init(&xdev->media_dev);
+
+ xdev->v4l2_dev.mdev = &xdev->media_dev;
+ ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ media_device_cleanup(&xdev->media_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xvip_composite_probe(struct platform_device *pdev)
+{
+ struct xvip_composite_device *xdev;
+ int ret;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&xdev->entities);
+ INIT_LIST_HEAD(&xdev->dmas);
+
+ ret = xvip_composite_v4l2_init(xdev);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_graph_init(xdev);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xdev);
+
+ dev_info(xdev->dev, "device registered\n");
+
+ return 0;
+
+error:
+ xvip_composite_v4l2_cleanup(xdev);
+ return ret;
+}
+
+static int xvip_composite_remove(struct platform_device *pdev)
+{
+ struct xvip_composite_device *xdev = platform_get_drvdata(pdev);
+
+ xvip_graph_cleanup(xdev);
+ xvip_composite_v4l2_cleanup(xdev);
+
+ return 0;
+}
+
+static const struct of_device_id xvip_composite_of_id_table[] = {
+ { .compatible = "xlnx,video" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvip_composite_of_id_table);
+
+static struct platform_driver xvip_composite_driver = {
+ .driver = {
+ .name = "xilinx-video",
+ .of_match_table = xvip_composite_of_id_table,
+ },
+ .probe = xvip_composite_probe,
+ .remove = xvip_composite_remove,
+};
+
+module_platform_driver(xvip_composite_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video IP Composite Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
new file mode 100644
index 000000000..faf6b6e80
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -0,0 +1,49 @@
+/*
+ * Xilinx Video IP Composite Device
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VIPP_H__
+#define __XILINX_VIPP_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+/**
+ * struct xvip_composite_device - Xilinx Video IP device structure
+ * @v4l2_dev: V4L2 device
+ * @media_dev: media device
+ * @dev: (OF) device
+ * @notifier: V4L2 asynchronous subdevs notifier
+ * @entities: entities in the graph as a list of xvip_graph_entity
+ * @num_subdevs: number of subdevs in the pipeline
+ * @dmas: list of DMA channels at the pipeline output and input
+ * @v4l2_caps: V4L2 capabilities of the whole device (see VIDIOC_QUERYCAP)
+ */
+struct xvip_composite_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct device *dev;
+
+ struct v4l2_async_notifier notifier;
+ struct list_head entities;
+ unsigned int num_subdevs;
+
+ struct list_head dmas;
+ u32 v4l2_caps;
+};
+
+#endif /* __XILINX_VIPP_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.c b/drivers/media/platform/xilinx/xilinx-vtc.c
new file mode 100644
index 000000000..01c750edc
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vtc.c
@@ -0,0 +1,380 @@
+/*
+ * Xilinx Video Timing Controller
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "xilinx-vip.h"
+#include "xilinx-vtc.h"
+
+#define XVTC_CONTROL_FIELD_ID_POL_SRC (1 << 26)
+#define XVTC_CONTROL_ACTIVE_CHROMA_POL_SRC (1 << 25)
+#define XVTC_CONTROL_ACTIVE_VIDEO_POL_SRC (1 << 24)
+#define XVTC_CONTROL_HSYNC_POL_SRC (1 << 23)
+#define XVTC_CONTROL_VSYNC_POL_SRC (1 << 22)
+#define XVTC_CONTROL_HBLANK_POL_SRC (1 << 21)
+#define XVTC_CONTROL_VBLANK_POL_SRC (1 << 20)
+#define XVTC_CONTROL_CHROMA_SRC (1 << 18)
+#define XVTC_CONTROL_VBLANK_HOFF_SRC (1 << 17)
+#define XVTC_CONTROL_VSYNC_END_SRC (1 << 16)
+#define XVTC_CONTROL_VSYNC_START_SRC (1 << 15)
+#define XVTC_CONTROL_ACTIVE_VSIZE_SRC (1 << 14)
+#define XVTC_CONTROL_FRAME_VSIZE_SRC (1 << 13)
+#define XVTC_CONTROL_HSYNC_END_SRC (1 << 11)
+#define XVTC_CONTROL_HSYNC_START_SRC (1 << 10)
+#define XVTC_CONTROL_ACTIVE_HSIZE_SRC (1 << 9)
+#define XVTC_CONTROL_FRAME_HSIZE_SRC (1 << 8)
+#define XVTC_CONTROL_SYNC_ENABLE (1 << 5)
+#define XVTC_CONTROL_DET_ENABLE (1 << 3)
+#define XVTC_CONTROL_GEN_ENABLE (1 << 2)
+
+#define XVTC_STATUS_FSYNC(n) ((n) << 16)
+#define XVTC_STATUS_GEN_ACTIVE_VIDEO (1 << 13)
+#define XVTC_STATUS_GEN_VBLANK (1 << 12)
+#define XVTC_STATUS_DET_ACTIVE_VIDEO (1 << 11)
+#define XVTC_STATUS_DET_VBLANK (1 << 10)
+#define XVTC_STATUS_LOCK_LOSS (1 << 9)
+#define XVTC_STATUS_LOCK (1 << 8)
+
+#define XVTC_ERROR_ACTIVE_CHROMA_LOCK (1 << 21)
+#define XVTC_ERROR_ACTIVE_VIDEO_LOCK (1 << 20)
+#define XVTC_ERROR_HSYNC_LOCK (1 << 19)
+#define XVTC_ERROR_VSYNC_LOCK (1 << 18)
+#define XVTC_ERROR_HBLANK_LOCK (1 << 17)
+#define XVTC_ERROR_VBLANK_LOCK (1 << 16)
+
+#define XVTC_IRQ_ENABLE_FSYNC(n) ((n) << 16)
+#define XVTC_IRQ_ENABLE_GEN_ACTIVE_VIDEO (1 << 13)
+#define XVTC_IRQ_ENABLE_GEN_VBLANK (1 << 12)
+#define XVTC_IRQ_ENABLE_DET_ACTIVE_VIDEO (1 << 11)
+#define XVTC_IRQ_ENABLE_DET_VBLANK (1 << 10)
+#define XVTC_IRQ_ENABLE_LOCK_LOSS (1 << 9)
+#define XVTC_IRQ_ENABLE_LOCK (1 << 8)
+
+/*
+ * The following registers exist in two blocks, one at 0x0020 for the detector
+ * and one at 0x0060 for the generator.
+ */
+
+#define XVTC_DETECTOR_OFFSET 0x0020
+#define XVTC_GENERATOR_OFFSET 0x0060
+
+#define XVTC_ACTIVE_SIZE 0x0000
+#define XVTC_ACTIVE_VSIZE_SHIFT 16
+#define XVTC_ACTIVE_VSIZE_MASK (0x1fff << 16)
+#define XVTC_ACTIVE_HSIZE_SHIFT 0
+#define XVTC_ACTIVE_HSIZE_MASK (0x1fff << 0)
+
+#define XVTC_TIMING_STATUS 0x0004
+#define XVTC_TIMING_STATUS_ACTIVE_VIDEO (1 << 2)
+#define XVTC_TIMING_STATUS_VBLANK (1 << 1)
+#define XVTC_TIMING_STATUS_LOCKED (1 << 0)
+
+#define XVTC_ENCODING 0x0008
+#define XVTC_ENCODING_CHROMA_PARITY_SHIFT 8
+#define XVTC_ENCODING_CHROMA_PARITY_MASK (3 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_EVEN_ALL (0 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_ODD_ALL (1 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_EVEN_EVEN (2 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_ODD_EVEN (3 << 8)
+#define XVTC_ENCODING_VIDEO_FORMAT_SHIFT 0
+#define XVTC_ENCODING_VIDEO_FORMAT_MASK (0xf << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_YUV422 (0 << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_YUV444 (1 << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_RGB (2 << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_YUV420 (3 << 0)
+
+#define XVTC_POLARITY 0x000c
+#define XVTC_POLARITY_ACTIVE_CHROMA_POL (1 << 5)
+#define XVTC_POLARITY_ACTIVE_VIDEO_POL (1 << 4)
+#define XVTC_POLARITY_HSYNC_POL (1 << 3)
+#define XVTC_POLARITY_VSYNC_POL (1 << 2)
+#define XVTC_POLARITY_HBLANK_POL (1 << 1)
+#define XVTC_POLARITY_VBLANK_POL (1 << 0)
+
+#define XVTC_HSIZE 0x0010
+#define XVTC_HSIZE_MASK (0x1fff << 0)
+
+#define XVTC_VSIZE 0x0014
+#define XVTC_VSIZE_MASK (0x1fff << 0)
+
+#define XVTC_HSYNC 0x0018
+#define XVTC_HSYNC_END_SHIFT 16
+#define XVTC_HSYNC_END_MASK (0x1fff << 16)
+#define XVTC_HSYNC_START_SHIFT 0
+#define XVTC_HSYNC_START_MASK (0x1fff << 0)
+
+#define XVTC_F0_VBLANK_H 0x001c
+#define XVTC_F0_VBLANK_HEND_SHIFT 16
+#define XVTC_F0_VBLANK_HEND_MASK (0x1fff << 16)
+#define XVTC_F0_VBLANK_HSTART_SHIFT 0
+#define XVTC_F0_VBLANK_HSTART_MASK (0x1fff << 0)
+
+#define XVTC_F0_VSYNC_V 0x0020
+#define XVTC_F0_VSYNC_VEND_SHIFT 16
+#define XVTC_F0_VSYNC_VEND_MASK (0x1fff << 16)
+#define XVTC_F0_VSYNC_VSTART_SHIFT 0
+#define XVTC_F0_VSYNC_VSTART_MASK (0x1fff << 0)
+
+#define XVTC_F0_VSYNC_H 0x0024
+#define XVTC_F0_VSYNC_HEND_SHIFT 16
+#define XVTC_F0_VSYNC_HEND_MASK (0x1fff << 16)
+#define XVTC_F0_VSYNC_HSTART_SHIFT 0
+#define XVTC_F0_VSYNC_HSTART_MASK (0x1fff << 0)
+
+#define XVTC_FRAME_SYNC_CONFIG(n) (0x0100 + 4 * (n))
+#define XVTC_FRAME_SYNC_V_START_SHIFT 16
+#define XVTC_FRAME_SYNC_V_START_MASK (0x1fff << 16)
+#define XVTC_FRAME_SYNC_H_START_SHIFT 0
+#define XVTC_FRAME_SYNC_H_START_MASK (0x1fff << 0)
+
+#define XVTC_GENERATOR_GLOBAL_DELAY 0x0104
+
+/**
+ * struct xvtc_device - Xilinx Video Timing Controller device structure
+ * @xvip: Xilinx Video IP device
+ * @list: entry in the global VTC list
+ * @has_detector: the VTC has a timing detector
+ * @has_generator: the VTC has a timing generator
+ * @config: generator timings configuration
+ */
+struct xvtc_device {
+ struct xvip_device xvip;
+ struct list_head list;
+
+ bool has_detector;
+ bool has_generator;
+
+ struct xvtc_config config;
+};
+
+static LIST_HEAD(xvtc_list);
+static DEFINE_MUTEX(xvtc_lock);
+
+static inline void xvtc_gen_write(struct xvtc_device *xvtc, u32 addr, u32 value)
+{
+ xvip_write(&xvtc->xvip, XVTC_GENERATOR_OFFSET + addr, value);
+}
+
+/* -----------------------------------------------------------------------------
+ * Generator Operations
+ */
+
+int xvtc_generator_start(struct xvtc_device *xvtc,
+ const struct xvtc_config *config)
+{
+ int ret;
+
+ if (!xvtc->has_generator)
+ return -ENXIO;
+
+ ret = clk_prepare_enable(xvtc->xvip.clk);
+ if (ret < 0)
+ return ret;
+
+ /* Hardcode all signal polarities to active high, as required by the video
+ * in to AXI4-stream core. We don't care about the active chroma signal.
+ */
+ xvtc_gen_write(xvtc, XVTC_POLARITY,
+ XVTC_POLARITY_ACTIVE_CHROMA_POL |
+ XVTC_POLARITY_ACTIVE_VIDEO_POL |
+ XVTC_POLARITY_HSYNC_POL | XVTC_POLARITY_VSYNC_POL |
+ XVTC_POLARITY_HBLANK_POL | XVTC_POLARITY_VBLANK_POL);
+
+ /* The encoding parameters are not important for now, leave the register at
+ * its default value.
+ */
+ xvtc_gen_write(xvtc, XVTC_ENCODING, 0);
+
+ /* Configure the timings. The VBLANK and VSYNC signals assertion and
+ * deassertion are hardcoded to the first pixel of the line.
+ */
+ xvtc_gen_write(xvtc, XVTC_ACTIVE_SIZE,
+ (config->vblank_start << XVTC_ACTIVE_VSIZE_SHIFT) |
+ (config->hblank_start << XVTC_ACTIVE_HSIZE_SHIFT));
+ xvtc_gen_write(xvtc, XVTC_HSIZE, config->hsize);
+ xvtc_gen_write(xvtc, XVTC_VSIZE, config->vsize);
+ xvtc_gen_write(xvtc, XVTC_HSYNC,
+ (config->hsync_end << XVTC_HSYNC_END_SHIFT) |
+ (config->hsync_start << XVTC_HSYNC_START_SHIFT));
+ xvtc_gen_write(xvtc, XVTC_F0_VBLANK_H, 0);
+ xvtc_gen_write(xvtc, XVTC_F0_VSYNC_V,
+ (config->vsync_end << XVTC_F0_VSYNC_VEND_SHIFT) |
+ (config->vsync_start << XVTC_F0_VSYNC_VSTART_SHIFT));
+ xvtc_gen_write(xvtc, XVTC_F0_VSYNC_H, 0);
+
+ /* Enable the generator. Set the source of all generator parameters to
+ * generator registers.
+ */
+ xvip_write(&xvtc->xvip, XVIP_CTRL_CONTROL,
+ XVTC_CONTROL_ACTIVE_CHROMA_POL_SRC |
+ XVTC_CONTROL_ACTIVE_VIDEO_POL_SRC |
+ XVTC_CONTROL_HSYNC_POL_SRC | XVTC_CONTROL_VSYNC_POL_SRC |
+ XVTC_CONTROL_HBLANK_POL_SRC | XVTC_CONTROL_VBLANK_POL_SRC |
+ XVTC_CONTROL_CHROMA_SRC | XVTC_CONTROL_VBLANK_HOFF_SRC |
+ XVTC_CONTROL_VSYNC_END_SRC | XVTC_CONTROL_VSYNC_START_SRC |
+ XVTC_CONTROL_ACTIVE_VSIZE_SRC |
+ XVTC_CONTROL_FRAME_VSIZE_SRC | XVTC_CONTROL_HSYNC_END_SRC |
+ XVTC_CONTROL_HSYNC_START_SRC |
+ XVTC_CONTROL_ACTIVE_HSIZE_SRC |
+ XVTC_CONTROL_FRAME_HSIZE_SRC | XVTC_CONTROL_GEN_ENABLE |
+ XVIP_CTRL_CONTROL_REG_UPDATE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvtc_generator_start);
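+
+/*
+ * Minimal usage sketch: a pipeline driver fills a struct xvtc_config from its
+ * selected video timings and starts the generator with it. The values below
+ * are placeholders, not a real mode:
+ *
+ *    struct xvtc_config config = {
+ *            .hblank_start = active_width,
+ *            .hsync_start  = hsync_start,
+ *            .hsync_end    = hsync_end,
+ *            .hsize        = total_width,
+ *            .vblank_start = active_height,
+ *            .vsync_start  = vsync_start,
+ *            .vsync_end    = vsync_end,
+ *            .vsize        = total_height,
+ *    };
+ *
+ *    ret = xvtc_generator_start(xvtc, &config);
+ */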
+
+int xvtc_generator_stop(struct xvtc_device *xvtc)
+{
+ if (!xvtc->has_generator)
+ return -ENXIO;
+
+ xvip_write(&xvtc->xvip, XVIP_CTRL_CONTROL, 0);
+
+ clk_disable_unprepare(xvtc->xvip.clk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvtc_generator_stop);
+
+struct xvtc_device *xvtc_of_get(struct device_node *np)
+{
+ struct device_node *xvtc_node;
+ struct xvtc_device *found = NULL;
+ struct xvtc_device *xvtc;
+
+ if (!of_find_property(np, "xlnx,vtc", NULL))
+ return NULL;
+
+ xvtc_node = of_parse_phandle(np, "xlnx,vtc", 0);
+ if (xvtc_node == NULL)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&xvtc_lock);
+ list_for_each_entry(xvtc, &xvtc_list, list) {
+ if (xvtc->xvip.dev->of_node == xvtc_node) {
+ found = xvtc;
+ break;
+ }
+ }
+ mutex_unlock(&xvtc_lock);
+
+ of_node_put(xvtc_node);
+
+ if (!found)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(xvtc_of_get);
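+
+/*
+ * Client IP core nodes reference their timing controller through an
+ * "xlnx,vtc" phandle, e.g. (the vtc_0 label is only an example):
+ *
+ *    xlnx,vtc = <&vtc_0>;
+ *
+ * and resolve it at probe time with xvtc_of_get(), deferring probe when
+ * -EPROBE_DEFER is returned.
+ */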
+
+void xvtc_put(struct xvtc_device *xvtc)
+{
+}
+EXPORT_SYMBOL_GPL(xvtc_put);
+
+/* -----------------------------------------------------------------------------
+ * Registration and Unregistration
+ */
+
+static void xvtc_register_device(struct xvtc_device *xvtc)
+{
+ mutex_lock(&xvtc_lock);
+ list_add_tail(&xvtc->list, &xvtc_list);
+ mutex_unlock(&xvtc_lock);
+}
+
+static void xvtc_unregister_device(struct xvtc_device *xvtc)
+{
+ mutex_lock(&xvtc_lock);
+ list_del(&xvtc->list);
+ mutex_unlock(&xvtc_lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xvtc_parse_of(struct xvtc_device *xvtc)
+{
+ struct device_node *node = xvtc->xvip.dev->of_node;
+
+ xvtc->has_detector = of_property_read_bool(node, "xlnx,detector");
+ xvtc->has_generator = of_property_read_bool(node, "xlnx,generator");
+
+ return 0;
+}
+
+static int xvtc_probe(struct platform_device *pdev)
+{
+ struct xvtc_device *xvtc;
+ int ret;
+
+ xvtc = devm_kzalloc(&pdev->dev, sizeof(*xvtc), GFP_KERNEL);
+ if (!xvtc)
+ return -ENOMEM;
+
+ xvtc->xvip.dev = &pdev->dev;
+
+ ret = xvtc_parse_of(xvtc);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xvtc->xvip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, xvtc);
+
+ xvip_print_version(&xvtc->xvip);
+
+ xvtc_register_device(xvtc);
+
+ return 0;
+}
+
+static int xvtc_remove(struct platform_device *pdev)
+{
+ struct xvtc_device *xvtc = platform_get_drvdata(pdev);
+
+ xvtc_unregister_device(xvtc);
+
+ xvip_cleanup_resources(&xvtc->xvip);
+
+ return 0;
+}
+
+static const struct of_device_id xvtc_of_id_table[] = {
+ { .compatible = "xlnx,v-tc-6.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvtc_of_id_table);
+
+static struct platform_driver xvtc_driver = {
+ .driver = {
+ .name = "xilinx-vtc",
+ .of_match_table = xvtc_of_id_table,
+ },
+ .probe = xvtc_probe,
+ .remove = xvtc_remove,
+};
+
+module_platform_driver(xvtc_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Timing Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
new file mode 100644
index 000000000..e1bb2cfcf
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -0,0 +1,42 @@
+/*
+ * Xilinx Video Timing Controller
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VTC_H__
+#define __XILINX_VTC_H__
+
+struct device_node;
+struct xvtc_device;
+
+#define XVTC_MAX_HSIZE 8191
+#define XVTC_MAX_VSIZE 8191
+
+struct xvtc_config {
+ unsigned int hblank_start;
+ unsigned int hsync_start;
+ unsigned int hsync_end;
+ unsigned int hsize;
+ unsigned int vblank_start;
+ unsigned int vsync_start;
+ unsigned int vsync_end;
+ unsigned int vsize;
+};
+
+struct xvtc_device *xvtc_of_get(struct device_node *np);
+void xvtc_put(struct xvtc_device *xvtc);
+
+int xvtc_generator_start(struct xvtc_device *xvtc,
+ const struct xvtc_config *config);
+int xvtc_generator_stop(struct xvtc_device *xvtc);
+
+#endif /* __XILINX_VTC_H__ */