author     Muralidharan Karicheri <m-karicheri2@ti.com>    2009-07-06 15:04:12 -0300
committer  Mauro Carvalho Chehab <mchehab@redhat.com>      2009-09-19 00:18:12 -0300
commit     7da8a6cb3e5b60e73b196f1c71031423e0791032 (patch)
tree       07994133d0edc06b0e3c752f4e47c42eb7595785 /drivers
parent     c1c9d09cd368f75bbd10253b8266898fb9fecc7f (diff)
V4L/DVB (12248): v4l: vpfe capture bridge driver for DM355 and DM6446
This is the vpfe capture bridge driver for doing video capture on DM355 and DM6446 EVMs. The ccdc hw modules register with the driver and are used for configuring the CCD Controller for a specific decoder interface. The driver also registers the sub devices required for a specific EVM. More than one sub device can be registered, which allows the driver to switch dynamically and capture video from any registered sub device. Currently only one sub device (tvp5146) is supported, but in the future this driver is expected to capture from sensor devices such as Micron's MT9T001, MT9T031 and MT9P031. The driver currently supports MMAP based IO.

Reviewed-by: Laurent Pinchart <laurent.pinchart@skynet.be>
Reviewed-by: Alexey Klimov <klimov.linux@gmail.com>
Signed-off-by: Muralidharan Karicheri <m-karicheri2@ti.com>
Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
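For context, the sub devices available on a given EVM are not described in this file; they are passed in as board specific platform data. A minimal sketch of such a configuration for an EVM with a single tvp5146 is shown below. The field names (num_subdevs, sub_devs, card_name, name, grp_id, num_inputs, inputs) follow their use in vpfe_capture.c; the variable names, the input description and any omitted fields are illustrative assumptions, not the actual board code.

	static struct vpfe_subdev_info evm_sub_devs[] = {
		{
			.name       = "tvp5146",
			.grp_id     = 0,
			.num_inputs = 1,
			.inputs     = (struct v4l2_input[]) {
				{
					.index = 0,
					.name  = "Composite",
					.type  = V4L2_INPUT_TYPE_CAMERA,
					.std   = V4L2_STD_525_60 | V4L2_STD_625_50,
				},
			},
			/* i2c/bus details omitted */
		},
	};

	static struct vpfe_config evm_vpfe_cfg = {
		.num_subdevs = ARRAY_SIZE(evm_sub_devs),
		.sub_devs    = evm_sub_devs,
		.card_name   = "DaVinci EVM",
	};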
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/media/video/davinci/vpfe_capture.c   2124
1 file changed, 2124 insertions, 0 deletions
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
new file mode 100644
index 00000000000..402ce43ef38
--- /dev/null
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -0,0 +1,2124 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Driver name : VPFE Capture driver
+ * The VPFE Capture driver allows applications to capture and stream video
+ * frames on DaVinci SoCs (DM6446, DM355, etc.) from a YUV source such as
+ * the TVP5146, or Raw Bayer RGB image data from an image sensor
+ * such as Micron's MT9T001, MT9T031, etc.
+ *
+ * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
+ * consists of a Video Processing Front End (VPFE) for capturing
+ * video/raw image data and a Video Processing Back End (VPBE) for
+ * displaying YUV data through an in-built analog encoder or digital LCD
+ * port. This driver handles capture through the VPFE. A typical EVM
+ * using these SoCs has the following high-level configuration:
+ *
+ *
+ *   decoder(TVP5146/      YUV/
+ *   MT9T001)         -->  Raw Bayer RGB ---> MUX --> VPFE (CCDC/ISIF)
+ *                         data input           |        |
+ *                                               V        |
+ *                                             SDRAM      |
+ *                                                        V
+ *                                                  Image Processor
+ *                                                        |
+ *                                                        V
+ *                                                      SDRAM
+ * Data flows from a decoder connected to the VPFE over a YUV embedded
+ * (BT.656/BT.1120), separate sync, or Raw Bayer RGB interface, through an
+ * optional MUX (if more inputs are to be interfaced on the EVM), to the
+ * input of the VPFE. The input data is first passed through the CCDC
+ * (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC does
+ * little or no processing on YUV data, but pre-processes Raw Bayer RGB
+ * data through modules such as Defect Pixel Correction (DFC), Color
+ * Space Conversion (CSC), data gain/offset, etc. After this, the data
+ * can either be written to SDRAM or fed to an image processing block
+ * such as the IPIPE (on DM355 only).
+ *
+ * Features supported
+ * - MMAP IO
+ * - Capture using TVP5146 over BT.656
+ * - Support for interfacing decoders using the sub device model
+ * - Work with DM355 or DM6446 CCDC to do Raw Bayer RGB/YUV
+ * data capture to SDRAM.
+ * TODO list
+ * - Support multiple REQBUF after open
+ * - Support for de-allocating buffers through REQBUF
+ * - Support for Raw Bayer RGB capture
+ * - Support for chaining Image Processor
+ * - Support for static allocation of buffers
+ * - Support for USERPTR IO
+ * - Support for STREAMON before QBUF
+ * - Support for control ioctls
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <media/v4l2-common.h>
+#include <linux/io.h>
+#include <media/davinci/vpfe_capture.h>
+#include "ccdc_hw_device.h"
+
+static int debug;
+static u32 numbuffers = 3;
+static u32 bufsize = (720 * 576 * 2);
+
+module_param(numbuffers, uint, S_IRUGO);
+module_param(bufsize, uint, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(numbuffers, "buffer count (default:3)");
+MODULE_PARM_DESC(bufsize, "buffer size in bytes (default:720 x 576 x 2)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/* standard information */
+struct vpfe_standard {
+ v4l2_std_id std_id;
+ unsigned int width;
+ unsigned int height;
+ struct v4l2_fract pixelaspect;
+ /* 0 - progressive, 1 - interlaced */
+ int frame_format;
+};
+
+/* ccdc configuration */
+struct ccdc_config {
+ /* This makes sure vpfe is probed and ready to go */
+ int vpfe_probed;
+ /* name of ccdc device */
+ char name[32];
+ /* for storing mem maps for CCDC */
+ int ccdc_addr_size;
+ void __iomem *ccdc_addr;
+};
+
+/* data structures */
+static struct vpfe_config_params config_params = {
+ .min_numbuffers = 3,
+ .numbuffers = 3,
+ .min_bufsize = 720 * 480 * 2,
+ .device_bufsize = 720 * 576 * 2,
+};
+
+/* ccdc device registered */
+static struct ccdc_hw_device *ccdc_dev;
+/* lock for accessing ccdc information */
+static DEFINE_MUTEX(ccdc_lock);
+/* ccdc configuration */
+static struct ccdc_config *ccdc_cfg;
+
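+/*
+ * pixelaspect below is the V4L2 pixel aspect ratio (y/x):
+ * 11/10 for 525/60 (NTSC) and 54/59 for 625/50 (PAL)
+ */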
+const struct vpfe_standard vpfe_standards[] = {
+ {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
+ {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
+};
+
+/* Used when raw Bayer image from ccdc is directly captured to SDRAM */
+static const struct vpfe_pixel_format vpfe_pix_fmts[] = {
+ {
+ .fmtdesc = {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb 8bit A-Law compr.",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ },
+ .bpp = 1,
+ },
+ {
+ .fmtdesc = {
+ .index = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb - 16bit",
+ .pixelformat = V4L2_PIX_FMT_SBGGR16,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Bayer GrRBGb 8bit DPCM compr.",
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+ },
+ .bpp = 1,
+ },
+ {
+ .fmtdesc = {
+ .index = 3,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "YCbCr 4:2:2 Interleaved UYVY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 4,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "YCbCr 4:2:2 Interleaved YUYV",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ .bpp = 2,
+ },
+ {
+ .fmtdesc = {
+ .index = 5,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Y/CbCr 4:2:0 - Semi planar",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ },
+ .bpp = 1,
+ },
+};
+
+/*
+ * vpfe_lookup_pix_format()
+ * lookup an entry in the vpfe pix format table based on pix_format
+ */
+static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) {
+ if (pix_format == vpfe_pix_fmts[i].fmtdesc.pixelformat)
+ return &vpfe_pix_fmts[i];
+ }
+ return NULL;
+}
+
+/*
+ * vpfe_register_ccdc_device. CCDC module calls this to
+ * register with vpfe capture
+ */
+int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
+{
+ int ret = 0;
+ printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
+
+ BUG_ON(!dev->hw_ops.open);
+ BUG_ON(!dev->hw_ops.enable);
+ BUG_ON(!dev->hw_ops.set_hw_if_params);
+ BUG_ON(!dev->hw_ops.configure);
+ BUG_ON(!dev->hw_ops.set_buftype);
+ BUG_ON(!dev->hw_ops.get_buftype);
+ BUG_ON(!dev->hw_ops.enum_pix);
+ BUG_ON(!dev->hw_ops.set_frame_format);
+ BUG_ON(!dev->hw_ops.get_frame_format);
+ BUG_ON(!dev->hw_ops.get_pixel_format);
+ BUG_ON(!dev->hw_ops.set_pixel_format);
+ BUG_ON(!dev->hw_ops.set_params);
+ BUG_ON(!dev->hw_ops.set_image_window);
+ BUG_ON(!dev->hw_ops.get_image_window);
+ BUG_ON(!dev->hw_ops.get_line_length);
+ BUG_ON(!dev->hw_ops.setfbaddr);
+ BUG_ON(!dev->hw_ops.getfid);
+
+ mutex_lock(&ccdc_lock);
+ if (NULL == ccdc_cfg) {
+ /*
+ * TODO. Will this ever happen? If so, we need to fix it.
+ * Probably we need to add the request to a linked list and
+ * walk through it during vpfe probe
+ */
+ printk(KERN_ERR "vpfe capture not initialized\n");
+ ret = -1;
+ goto unlock;
+ }
+
+ if (strcmp(dev->name, ccdc_cfg->name)) {
+ /* ignore this ccdc */
+ ret = -1;
+ goto unlock;
+ }
+
+ if (ccdc_dev) {
+ printk(KERN_ERR "ccdc already registered\n");
+ ret = -1;
+ goto unlock;
+ }
+
+ ccdc_dev = dev;
+ dev->hw_ops.set_ccdc_base(ccdc_cfg->ccdc_addr,
+ ccdc_cfg->ccdc_addr_size);
+unlock:
+ mutex_unlock(&ccdc_lock);
+ return ret;
+}
+EXPORT_SYMBOL(vpfe_register_ccdc_device);
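+
+/*
+ * For illustration only: a CCDC hw module (e.g. the DM6446 or DM355 CCDC
+ * driver) is expected to register with this bridge roughly as sketched
+ * below. The ops listed are the ones this file checks for or calls; the
+ * handler and variable names are illustrative assumptions, not the actual
+ * CCDC module code.
+ *
+ *	static struct ccdc_hw_device my_ccdc_hw_dev = {
+ *		.name	= "DM6446 CCDC",
+ *		.owner	= THIS_MODULE,
+ *		.hw_ops	= {
+ *			.open			= ccdc_open,
+ *			.close			= ccdc_close,
+ *			.set_ccdc_base		= ccdc_set_ccdc_base,
+ *			.reset			= ccdc_reset,
+ *			.enable			= ccdc_enable,
+ *			.set_hw_if_params	= ccdc_set_hw_if_params,
+ *			.configure		= ccdc_configure,
+ *			.set_buftype		= ccdc_set_buftype,
+ *			.get_buftype		= ccdc_get_buftype,
+ *			.enum_pix		= ccdc_enum_pix,
+ *			.set_pixel_format	= ccdc_set_pixel_format,
+ *			.get_pixel_format	= ccdc_get_pixel_format,
+ *			.set_frame_format	= ccdc_set_frame_format,
+ *			.get_frame_format	= ccdc_get_frame_format,
+ *			.set_image_window	= ccdc_set_image_window,
+ *			.get_image_window	= ccdc_get_image_window,
+ *			.get_line_length	= ccdc_get_line_length,
+ *			.set_params		= ccdc_set_params,
+ *			.setfbaddr		= ccdc_setfbaddr,
+ *			.getfid			= ccdc_getfid,
+ *		},
+ *	};
+ *
+ *	vpfe_register_ccdc_device(&my_ccdc_hw_dev);
+ */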
+
+/*
+ * vpfe_unregister_ccdc_device. CCDC module calls this to
+ * unregister with vpfe capture
+ */
+void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
+{
+ if (NULL == dev) {
+ printk(KERN_ERR "invalid ccdc device ptr\n");
+ return;
+ }
+
+ printk(KERN_NOTICE "vpfe_unregister_ccdc_device, dev->name = %s\n",
+ dev->name);
+
+ if (strcmp(dev->name, ccdc_cfg->name)) {
+ /* ignore this ccdc */
+ return;
+ }
+
+ mutex_lock(&ccdc_lock);
+ ccdc_dev = NULL;
+ mutex_unlock(&ccdc_lock);
+ return;
+}
+EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
+
+/*
+ * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
+ */
+static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev,
+ struct v4l2_format *f)
+{
+ struct v4l2_rect image_win;
+ enum ccdc_buftype buf_type;
+ enum ccdc_frmfmt frm_fmt;
+
+ memset(f, 0, sizeof(*f));
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ccdc_dev->hw_ops.get_image_window(&image_win);
+ f->fmt.pix.width = image_win.width;
+ f->fmt.pix.height = image_win.height;
+ f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length();
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ buf_type = ccdc_dev->hw_ops.get_buftype();
+ f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format();
+ frm_fmt = ccdc_dev->hw_ops.get_frame_format();
+ if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED)
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+ else {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n");
+ return -EINVAL;
+ }
+ } else {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * vpfe_config_ccdc_image_format()
+ * For a pix format, configure ccdc to setup the capture
+ */
+static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ int ret = 0;
+
+ if (ccdc_dev->hw_ops.set_pixel_format(
+ vpfe_dev->fmt.fmt.pix.pixelformat) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "couldn't set pix format in ccdc\n");
+ return -EINVAL;
+ }
+ /* configure the image window */
+ ccdc_dev->hw_ops.set_image_window(&vpfe_dev->crop);
+
+ switch (vpfe_dev->fmt.fmt.pix.field) {
+ case V4L2_FIELD_INTERLACED:
+ /* do nothing, since it is default */
+ ret = ccdc_dev->hw_ops.set_buftype(
+ CCDC_BUFTYPE_FLD_INTERLEAVED);
+ break;
+ case V4L2_FIELD_NONE:
+ frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ ret = ccdc_dev->hw_ops.set_buftype(
+ CCDC_BUFTYPE_FLD_SEPARATED);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set the frame format */
+ if (!ret)
+ ret = ccdc_dev->hw_ops.set_frame_format(frm_fmt);
+ return ret;
+}
+
+/*
+ * vpfe_config_image_format()
+ * For a given standard, this function sets up the default
+ * pix format & crop values in the vpfe device and ccdc. It first
+ * starts with default values based on the standard table. It then
+ * checks if the sub device supports g_fmt and, if so, overrides the
+ * values based on that. Crop values are set to match the scan
+ * resolution, starting at 0,0. Finally it calls
+ * vpfe_config_ccdc_image_format() to set the values in the ccdc.
+ */
+static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
+ const v4l2_std_id *std_id)
+{
+ struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
+ if (vpfe_standards[i].std_id & *std_id) {
+ vpfe_dev->std_info.active_pixels =
+ vpfe_standards[i].width;
+ vpfe_dev->std_info.active_lines =
+ vpfe_standards[i].height;
+ vpfe_dev->std_info.frame_format =
+ vpfe_standards[i].frame_format;
+ vpfe_dev->std_index = i;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vpfe_standards)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "standard not supported\n");
+ return -EINVAL;
+ }
+
+ vpfe_dev->crop.top = 0;
+ vpfe_dev->crop.left = 0;
+ vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
+ vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
+ vpfe_dev->fmt.fmt.pix.width = vpfe_dev->crop.width;
+ vpfe_dev->fmt.fmt.pix.height = vpfe_dev->crop.height;
+
+ /* first field and frame format based on standard frame format */
+ if (vpfe_dev->std_info.frame_format) {
+ vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+ /* assume V4L2_PIX_FMT_UYVY as default */
+ vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+ } else {
+ vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ /* assume V4L2_PIX_FMT_SBGGR8 */
+ vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
+ }
+
+ /* if sub device supports g_fmt, override the defaults */
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id, video, g_fmt, &vpfe_dev->fmt);
+
+ if (ret && ret != -ENOIOCTLCMD) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "error in getting g_fmt from sub device\n");
+ return ret;
+ }
+
+ /* Sets the values in CCDC */
+ ret = vpfe_config_ccdc_image_format(vpfe_dev);
+ if (ret)
+ return ret;
+
+ /* Update the values of sizeimage and bytesperline */
+ vpfe_dev->fmt.fmt.pix.bytesperline =
+ ccdc_dev->hw_ops.get_line_length();
+ vpfe_dev->fmt.fmt.pix.sizeimage =
+ vpfe_dev->fmt.fmt.pix.bytesperline *
+ vpfe_dev->fmt.fmt.pix.height;
+ return 0;
+}
+
+static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
+{
+ int ret = 0;
+
+ /* set first input of current subdevice as the current input */
+ vpfe_dev->current_input = 0;
+
+ /* set default standard */
+ vpfe_dev->std_index = 0;
+
+ /* Configure the default format information */
+ ret = vpfe_config_image_format(vpfe_dev,
+ &vpfe_standards[vpfe_dev->std_index].std_id);
+ if (ret)
+ return ret;
+
+ /* now open the ccdc device to initialize it */
+ mutex_lock(&ccdc_lock);
+ if (NULL == ccdc_dev) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (!try_module_get(ccdc_dev->owner)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Couldn't lock ccdc module\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+ ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
+ if (!ret)
+ vpfe_dev->initialized = 1;
+unlock:
+ mutex_unlock(&ccdc_lock);
+ return ret;
+}
+
+/*
+ * vpfe_open : creates a file handle object and stores it in the
+ * private_data member of the file pointer
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_open\n");
+
+ if (!vpfe_dev->cfg->num_subdevs) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "No decoder registered\n");
+ return -ENODEV;
+ }
+
+ /* Allocate memory for the file handle object */
+ fh = kmalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
+ if (NULL == fh) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "unable to allocate memory for file handle object\n");
+ return -ENOMEM;
+ }
+ /* store pointer to fh in private_data member of file */
+ file->private_data = fh;
+ fh->vpfe_dev = vpfe_dev;
+ mutex_lock(&vpfe_dev->lock);
+ /* If decoder is not initialized, initialize it */
+ if (!vpfe_dev->initialized) {
+ if (vpfe_initialize_device(vpfe_dev)) {
+ mutex_unlock(&vpfe_dev->lock);
+ return -ENODEV;
+ }
+ }
+ /* Increment device usrs counter */
+ vpfe_dev->usrs++;
+ /* Set io_allowed member to false */
+ fh->io_allowed = 0;
+ /* Initialize priority of this instance to default priority */
+ fh->prio = V4L2_PRIORITY_UNSET;
+ v4l2_prio_open(&vpfe_dev->prio, &fh->prio);
+ mutex_unlock(&vpfe_dev->lock);
+ return 0;
+}
+
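+/*
+ * vpfe_schedule_next_buffer: remove the next buffer from the DMA queue,
+ * mark it active and program its address into the CCDC. Called by the
+ * ISRs with dma_queue_lock held.
+ */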
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vpfe_dev->next_frm->queue);
+ vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
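+/*
+ * vpfe_process_buffer_complete: timestamp the current buffer, mark it
+ * DONE, wake up any waiters and promote the queued next buffer to be
+ * the current one
+ */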
+static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
+{
+ struct timeval timevalue;
+
+ do_gettimeofday(&timevalue);
+ vpfe_dev->cur_frm->ts = timevalue;
+ vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
+ vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ wake_up_interruptible(&vpfe_dev->cur_frm->done);
+ vpfe_dev->cur_frm = vpfe_dev->next_frm;
+}
+
+/* ISR for VINT0*/
+static irqreturn_t vpfe_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+ enum v4l2_field field;
+ unsigned long addr;
+ int fid;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
+ field = vpfe_dev->fmt.fmt.pix.field;
+
+ /* if streaming not started, don't do anything */
+ if (!vpfe_dev->started)
+ return IRQ_HANDLED;
+
+ /* this is applicable only for the DM6446 */
+ if (NULL != ccdc_dev->hw_ops.reset)
+ ccdc_dev->hw_ops.reset();
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "frame format is progressive...\n");
+ if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+ vpfe_process_buffer_complete(vpfe_dev);
+ return IRQ_HANDLED;
+ }
+
+ /* interlaced or TB capture check which field we are in hardware */
+ fid = ccdc_dev->hw_ops.getfid();
+
+ /* switch the software maintained field id */
+ vpfe_dev->field_id ^= 1;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "field id = %x:%x.\n",
+ fid, vpfe_dev->field_id);
+ if (fid == vpfe_dev->field_id) {
+ /* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the next frame
+ * is available, release the current frame and move on
+ */
+ if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+ vpfe_process_buffer_complete(vpfe_dev);
+ /*
+ * based on whether the two fields are stored
+ * interleavely or separately in memory, reconfigure
+ * the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB) {
+ addr =
+ videobuf_to_dma_contig(vpfe_dev->cur_frm);
+ addr += vpfe_dev->field_off;
+ ccdc_dev->hw_ops.setfbaddr(addr);
+ }
+ return IRQ_HANDLED;
+ }
+ /*
+ * If one field has just been captured, configure the next
+ * frame: get the next frame from the empty queue; if no
+ * frame is available, hold on to the current buffer
+ */
+ spin_lock(&vpfe_dev->dma_queue_lock);
+ if (!list_empty(&vpfe_dev->dma_queue) &&
+ vpfe_dev->cur_frm == vpfe_dev->next_frm)
+ vpfe_schedule_next_buffer(vpfe_dev);
+ spin_unlock(&vpfe_dev->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame
+ */
+ vpfe_dev->field_id = fid;
+ }
+ return IRQ_HANDLED;
+}
+
+/* vdint1_isr - isr handler for VINT1 interrupt */
+static irqreturn_t vdint1_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
+
+ /* if streaming not started, don't do anything */
+ if (!vpfe_dev->started)
+ return IRQ_HANDLED;
+
+ spin_lock(&vpfe_dev->dma_queue_lock);
+ if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
+ !list_empty(&vpfe_dev->dma_queue) &&
+ vpfe_dev->cur_frm == vpfe_dev->next_frm)
+ vpfe_schedule_next_buffer(vpfe_dev);
+ spin_unlock(&vpfe_dev->dma_queue_lock);
+ return IRQ_HANDLED;
+}
+
+static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = ccdc_dev->hw_ops.get_frame_format();
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ free_irq(IRQ_VDINT1, vpfe_dev);
+}
+
+static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
+{
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = ccdc_dev->hw_ops.get_frame_format();
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
+ return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
+ IRQF_DISABLED, "vpfe_capture1",
+ vpfe_dev);
+ }
+ return 0;
+}
+
+/* vpfe_stop_ccdc_capture: stop streaming in ccdc/isif */
+static void vpfe_stop_ccdc_capture(struct vpfe_device *vpfe_dev)
+{
+ vpfe_dev->started = 0;
+ ccdc_dev->hw_ops.enable(0);
+ if (ccdc_dev->hw_ops.enable_out_to_sdram)
+ ccdc_dev->hw_ops.enable_out_to_sdram(0);
+}
+
+/*
+ * vpfe_release : deletes the buffer queue, frees the buffers and the
+ * vpfe file handle
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
+
+ /* Get the device lock */
+ mutex_lock(&vpfe_dev->lock);
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ if (vpfe_dev->started) {
+ sdinfo = vpfe_dev->current_subdev;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id,
+ video, s_stream, 0);
+ if (ret && (ret != -ENOIOCTLCMD))
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "stream off failed in subdev\n");
+ vpfe_stop_ccdc_capture(vpfe_dev);
+ vpfe_detach_irq(vpfe_dev);
+ videobuf_streamoff(&vpfe_dev->buffer_queue);
+ }
+ vpfe_dev->io_usrs = 0;
+ vpfe_dev->numbuffers = config_params.numbuffers;
+ }
+
+ /* Decrement device usrs counter */
+ vpfe_dev->usrs--;
+ /* Close the priority */
+ v4l2_prio_close(&vpfe_dev->prio, &fh->prio);
+ /* If this is the last file handle */
+ if (!vpfe_dev->usrs) {
+ vpfe_dev->initialized = 0;
+ if (ccdc_dev->hw_ops.close)
+ ccdc_dev->hw_ops.close(vpfe_dev->pdev);
+ module_put(ccdc_dev->owner);
+ }
+ mutex_unlock(&vpfe_dev->lock);
+ file->private_data = NULL;
+ /* Free memory allocated to file handle object */
+ kfree(fh);
+ return 0;
+}
+
+/*
+ * vpfe_mmap : maps kernel space buffers into user space
+ */
+static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* Get the device object and file handle object */
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
+
+ return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma);
+}
+
+/*
+ * vpfe_poll: It is used for select/poll system call
+ */
+static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
+
+ if (vpfe_dev->started)
+ return videobuf_poll_stream(file,
+ &vpfe_dev->buffer_queue, wait);
+ return 0;
+}
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpfe_mmap,
+ .poll = vpfe_poll
+};
+
+/*
+ * vpfe_check_format()
+ * This function adjusts the requested pixel format to the hardware
+ * capabilities and updates the result in pixfmt.
+ * The following algorithm is used:
+ *
+ * If the given pixformat is not in the vpfe list of pix formats, or is not
+ * supported by the hardware, the current value of pixformat in the device
+ * is used.
+ * If the given field is not supported, the current field is used. If the
+ * field differs from the current one, it is matched with that of the sub
+ * device.
+ * Minimum height is 2 lines for an interlaced or TB field and 1 line for
+ * progressive. Maximum height is clamped to the active lines of the scan.
+ * Minimum width is 32 bytes in memory and width is clamped to the active
+ * pixels of the scan.
+ * bytesperline is a multiple of 32.
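+ *
+ * For example, with the 625/50 standard active (720x576 UYVY, 2 bytes per
+ * pixel), a request for 720x577 would be clamped to 720x576 with
+ * bytesperline = 1440 and sizeimage = 1440 * 576.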
+ */
+static const struct vpfe_pixel_format *
+ vpfe_check_format(struct vpfe_device *vpfe_dev,
+ struct v4l2_pix_format *pixfmt)
+{
+ u32 min_height = 1, min_width = 32, max_width, max_height;
+ const struct vpfe_pixel_format *vpfe_pix_fmt;
+ u32 pix;
+ int temp, found;
+
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ if (NULL == vpfe_pix_fmt) {
+ /*
+ * use current pixel format in the vpfe device. We
+ * will find this pix format in the table
+ */
+ pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ }
+
+ /* check if hw supports it */
+ temp = 0;
+ found = 0;
+ while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) {
+ if (vpfe_pix_fmt->fmtdesc.pixelformat == pix) {
+ found = 1;
+ break;
+ }
+ temp++;
+ }
+
+ if (!found) {
+ /* use current pixel format */
+ pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+ /*
+ * Since this is currently used in the vpfe device, we
+ * will find this pix format in the table
+ */
+ vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+ }
+
+ /* check what field format is supported */
+ if (pixfmt->field == V4L2_FIELD_ANY) {
+ /* if field is any, use current value as default */
+ pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+ }
+
+ /*
+ * if the field is not the same as the current field in the vpfe device,
+ * try matching it with the sub device field
+ */
+ if (vpfe_dev->fmt.fmt.pix.field != pixfmt->field) {
+ /*
+ * If field value is not in the supported fields, use current
+ * field used in the device as default
+ */
+ switch (pixfmt->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ /* if sub device is supporting progressive, use that */
+ if (!vpfe_dev->std_info.frame_format)
+ pixfmt->field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_NONE:
+ if (vpfe_dev->std_info.frame_format)
+ pixfmt->field = V4L2_FIELD_INTERLACED;
+ break;
+
+ default:
+ /* use current field as default */
+ pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+ break;
+ }
+ }
+
+ /* Now adjust image resolutions supported */
+ if (pixfmt->field == V4L2_FIELD_INTERLACED ||
+ pixfmt->field == V4L2_FIELD_SEQ_TB)
+ min_height = 2;
+
+ max_width = vpfe_dev->std_info.active_pixels;
+ max_height = vpfe_dev->std_info.active_lines;
+ min_width /= vpfe_pix_fmt->bpp;
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "width = %d, height = %d, bpp = %d\n",
+ pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp);
+
+ pixfmt->width = clamp((pixfmt->width), min_width, max_width);
+ pixfmt->height = clamp((pixfmt->height), min_height, max_height);
+
+ /* If interlaced, adjust height to be a multiple of 2 */
+ if (pixfmt->field == V4L2_FIELD_INTERLACED)
+ pixfmt->height &= (~1);
+ /*
+ * recalculate bytesperline and sizeimage since width
+ * and height might have changed
+ */
+ pixfmt->bytesperline = (((pixfmt->width * vpfe_pix_fmt->bpp) + 31)
+ & ~31);
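+ /*
+ * NV12 carries a full-resolution Y plane followed by a half-size
+ * interleaved CbCr plane, hence the extra height / 2 worth of lines
+ */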
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ pixfmt->sizeimage =
+ pixfmt->bytesperline * pixfmt->height +
+ ((pixfmt->bytesperline * pixfmt->height) >> 1);
+ else
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height ="
+ " %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
+ pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
+ pixfmt->bytesperline, pixfmt->sizeimage);
+ return vpfe_pix_fmt;
+}
+
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
+
+ cap->version = VPFE_CAPTURE_VERSION_CODE;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
+ strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
+ return 0;
+}
+
+static int vpfe_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n");
+ /* Fill in the information about format */
+ *fmt = vpfe_dev->fmt;
+ return ret;
+}
+
+static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmt;
+ int temp_index;
+ u32 pix;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n");
+
+ if (ccdc_dev->hw_ops.enum_pix(&pix, fmt->index) < 0)
+ return -EINVAL;
+
+ /* Fill in the information about format */
+ pix_fmt = vpfe_lookup_pix_format(pix);
+ if (NULL != pix_fmt) {
+ temp_index = fmt->index;
+ *fmt = pix_fmt->fmtdesc;
+ fmt->index = temp_index;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmts;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
+
+ /* If streaming is started, return error */
+ if (vpfe_dev->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+
+ /* Check for valid frame format */
+ pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
+
+ if (NULL == pix_fmts)
+ return -EINVAL;
+
+ /* store the pixel format in the device object */
+ ret = mutex_lock_interruptible(&vpfe_dev->lock);
+ if (ret)
+ return ret;
+
+ /* First detach any IRQ if currently attached */
+ vpfe_detach_irq(vpfe_dev);
+ vpfe_dev->fmt = *fmt;
+ /* set image capture parameters in the ccdc */
+ ret = vpfe_config_ccdc_image_format(vpfe_dev);
+ mutex_unlock(&vpfe_dev->lock);
+ return ret;
+}
+
+static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ const struct vpfe_pixel_format *pix_fmts;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
+
+ pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
+ if (NULL == pix_fmts)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
+ * given app input index
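+ * (e.g. with two sub devices exposing 2 and 3 inputs, application input
+ * indices 0-1 map to the first sub device and 2-4 to the second)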
+ */
+static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe_dev,
+ int *subdev_index,
+ int *subdev_input_index,
+ int app_input_index)
+{
+ struct vpfe_config *cfg = vpfe_dev->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < cfg->num_subdevs; i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (app_input_index < (j + sdinfo->num_inputs)) {
+ *subdev_index = i;
+ *subdev_input_index = app_input_index - j;
+ return 0;
+ }
+ j += sdinfo->num_inputs;
+ }
+ return -EINVAL;
+}
+
+/*
+ * vpfe_get_app_input_index - Get the app input index for a given subdev
+ * input index. The driver stores the input index of the current sub device
+ * and translates it when the application requests the current input.
+ */
+static int vpfe_get_app_input_index(struct vpfe_device *vpfe_dev,
+ int *app_input_index)
+{
+ struct vpfe_config *cfg = vpfe_dev->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < cfg->num_subdevs; i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (!strcmp(sdinfo->name, vpfe_dev->current_subdev->name)) {
+ if (vpfe_dev->current_input >= sdinfo->num_inputs)
+ return -1;
+ *app_input_index = j + vpfe_dev->current_input;
+ return 0;
+ }
+ j += sdinfo->num_inputs;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int subdev, index;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
+
+ if (vpfe_get_subdev_input_index(vpfe_dev,
+ &subdev,
+ &index,
+ inp->index) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "input information not found"
+ " for the subdev\n");
+ return -EINVAL;
+ }
+ sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
+ memcpy(inp, &sdinfo->inputs[index], sizeof(struct v4l2_input));
+ return 0;
+}
+
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
+
+ return vpfe_get_app_input_index(vpfe_dev, index);
+}
+
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_device *vpfe_dev = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int subdev_index, inp_index;
+ struct vpfe_route *route;
+ u32